diff --git "a/4809.jsonl" "b/4809.jsonl" new file mode 100644--- /dev/null +++ "b/4809.jsonl" @@ -0,0 +1,655 @@ +{"seq_id":"29253992747","text":"import pytest\nfrom unittest import mock\n\nfrom yt.wrapper.schema import TableSchema\nfrom maps.garden.sdk.test_utils.canonization import canonize_yt_tables\n\nfrom maps.garden.libs.ymapsdf.lib.collect_statistics.collect_statistics import StatisticsRow\n\nfrom maps.garden.modules.ymapsdf_osm.defs import YT_CLUSTER\nfrom maps.garden.modules.ymapsdf_osm.lib.constants import YmapsdfTable\nfrom maps.garden.modules.ymapsdf_osm.lib.collect_statistics import CollectStatistics, UpdateAllStatisticsTable\nfrom maps.garden.modules.ymapsdf_osm.lib.schemas import LOG_TABLE_SCHEMA_WITH_ISOCODES\n\nfrom .utils import get_task_executor\n\n\nTEST_PROPERTIES = {\n \"shipping_date\": \"20220624\",\n \"region\": \"saa\",\n \"vendor\": \"osm\",\n}\n\n\n@pytest.fixture\ndef task_executor(environment_settings):\n return get_task_executor(\n environment_settings, source_folder=\"test_collect_statistics\", test_properties=TEST_PROPERTIES\n )\n\n\n@pytest.mark.use_local_yt(YT_CLUSTER)\ndef test_collect_statistics(task_executor):\n input_resources = {\n YmapsdfTable.AD: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.AD),\n YmapsdfTable.ADDR: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.ADDR),\n YmapsdfTable.BLD: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.BLD),\n YmapsdfTable.RD: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.RD),\n YmapsdfTable.FT: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.FT),\n YmapsdfTable.RD_EL: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.RD_EL),\n YmapsdfTable.COND: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.COND),\n YmapsdfTable.COND_RD_SEQ: task_executor.create_ymapsdf_input_yt_table_resource(YmapsdfTable.COND_RD_SEQ),\n YmapsdfTable.LOG_BAD_OSM: task_executor.create_custom_input_yt_table_resource(\n table_name=YmapsdfTable.LOG_BAD_OSM,\n schema=LOG_TABLE_SCHEMA_WITH_ISOCODES\n ),\n }\n\n # This resource is not creating by the task.\n # But table `all_statistics` is appended in the end by task or created if not.\n # So this fake resource is helpful for check the result\n helper_resource = {\n \"all_statistics\": task_executor.create_custom_input_yt_table_resource(\n table_name=\"all_statistics\",\n schema=list(TableSchema.from_row_type(StatisticsRow).to_yson_type())\n )\n }\n\n build_statistic_resources = {\n YmapsdfTable.STATISTICS: task_executor.create_yt_table_resource(YmapsdfTable.STATISTICS)\n }\n\n with (\n mock.patch(\n \"maps.garden.libs.ymapsdf.lib.collect_statistics.collect_statistics.CollectStatistics._get_timestamp\",\n return_value=1655769600\n ),\n mock.patch(\n \"maps.garden.libs.ymapsdf.lib.collect_statistics.collect_statistics.UpdateAllStatisticsTable._get_all_statistics_table_path\",\n return_value=helper_resource[\"all_statistics\"].path\n )\n ):\n task_executor.execute_task(\n task=CollectStatistics(),\n input_resources=input_resources,\n output_resources=build_statistic_resources,\n )\n task_executor.execute_task(\n task=UpdateAllStatisticsTable(),\n input_resources=build_statistic_resources,\n output_resources=helper_resource,\n )\n\n return canonize_yt_tables(\n yt_table_resources=build_statistic_resources | helper_resource\n 
)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/tasks/test_collect_statistics.py","file_name":"test_collect_statistics.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31795343345","text":"class Solution:\n def minimumOperations(self, nums: List[int]) -> int:\n\n n=heapq.heapify(nums)\n print(nums)\n count=0\n\n for i in range(len(nums)):\n temp=heappop(nums)\n if temp==0:\n heapify(nums)\n continue\n else:\n nums=[x-temp for x in nums]\n count+=1\n heapify(nums)\n print(count)\n\n\n return count","repo_name":"spurthym/Leetcode","sub_path":"my-folder/problems/make_array_zero_by_subtracting_equal_amounts/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71015743122","text":"import asyncio\n\nfrom mikro_next.scalars import ArrayLike, ParquetLike, FileLike\nfrom rath.links.parsing import ParsingLink\nfrom rath.operation import Operation, opify\nfrom mikro_next.io.types import Uploader\nfrom typing import Any, TYPE_CHECKING\nfrom mikro_next.io.upload import aupload_bigfile, aupload_xarray, aupload_parquet\nfrom pydantic import Field\nfrom concurrent.futures import ThreadPoolExecutor\nimport uuid\nfrom functools import partial\nfrom mikro_next.datalayer import DataLayer\n\n\nasync def apply_recursive(func, obj, typeguard):\n if isinstance(obj, dict): # if dict, apply to each key\n return {k: await apply_recursive(func, v, typeguard) for k, v in obj.items()}\n elif isinstance(obj, list): # if list, apply to each element\n return await asyncio.gather(\n *[apply_recursive(func, elem, typeguard) for elem in obj]\n )\n elif isinstance(obj, tuple): # if tuple, apply to each element\n return tuple(await apply_recursive(func, elem, typeguard) for elem in obj)\n if isinstance(obj, typeguard):\n return await func(obj)\n else:\n return obj\n\n\nasync def afake_upload(xarray: ArrayLike, *args, **kwargs) -> str:\n return str(uuid.uuid4())\n\n\nclass UploadLink(ParsingLink):\n \"\"\"Data Layer Upload Link\n\n This link is used to upload supported types to a DataLayer.\n It parses queries, mutatoin and subscription arguments and\n uploads the items to the DataLayer, and substitures the\n DataFrame with the S3 path.\n\n Args:\n ParsingLink (_type_): _description_\n\n\n \"\"\"\n\n parquet_uploader: Uploader = aupload_parquet\n xarray_uploader: Uploader = aupload_xarray\n bigfile_uploader: Uploader = aupload_bigfile\n datalayer: DataLayer\n\n executor: ThreadPoolExecutor = Field(\n default_factory=lambda: ThreadPoolExecutor(max_workers=4), exclude=True\n )\n _executor_session: Any = None\n\n async def __aenter__(self):\n self._executor_session = self.executor.__enter__()\n\n async def aget_image_credentials(self, key, datalayer) -> Any:\n from mikro_next.api.schema import RequestUploadMutation\n\n operation = opify(\n RequestUploadMutation.Meta.document,\n variables={\"key\": key, \"datalayer\": datalayer},\n )\n\n async for result in self.next.aexecute(operation):\n return RequestUploadMutation(**result.data).request_upload\n\n async def aget_table_credentials(self, key, datalayer) -> Any:\n from mikro_next.api.schema import RequestTableUploadMutation\n\n operation = opify(\n RequestTableUploadMutation.Meta.document,\n variables={\"key\": key, \"datalayer\": datalayer},\n )\n\n async for result in self.next.aexecute(operation):\n return 
RequestTableUploadMutation(**result.data).request_table_upload\n\n async def aget_bigfile_credentials(self, key, datalayer) -> Any:\n from mikro_next.api.schema import RequestFileUploadMutation\n\n operation = opify(\n RequestFileUploadMutation.Meta.document,\n variables={\"key\": key, \"datalayer\": datalayer},\n )\n\n async for result in self.next.aexecute(operation):\n return RequestFileUploadMutation(**result.data).request_file_upload\n\n async def aupload_parquet(\n self, datalayer: \"DataLayer\", parquet_input: ParquetLike\n ) -> str:\n assert datalayer is not None, \"Datalayer must be set\"\n endpoint_url = await datalayer.get_endpoint_url()\n\n credentials = await self.aget_table_credentials(parquet_input.key, endpoint_url)\n return await self.parquet_uploader(\n parquet_input,\n credentials,\n datalayer,\n self._executor_session,\n )\n\n async def aupload_xarray(self, datalayer: \"DataLayer\", xarray: ArrayLike) -> str:\n assert datalayer is not None, \"Datalayer must be set\"\n endpoint_url = await datalayer.get_endpoint_url()\n\n credentials = await self.aget_image_credentials(xarray.key, endpoint_url)\n return await self.xarray_uploader(\n xarray,\n credentials,\n datalayer,\n self._executor_session,\n )\n\n async def aupload_bigfile(self, datalayer: \"DataLayer\", file: FileLike) -> str:\n assert datalayer is not None, \"Datalayer must be set\"\n endpoint_url = await datalayer.get_endpoint_url()\n\n credentials = await self.aget_bigfile_credentials(file.key, endpoint_url)\n return await self.bigfile_uploader(\n file,\n credentials,\n datalayer,\n self._executor_session,\n )\n\n async def aparse(self, operation: Operation) -> Operation:\n \"\"\"Parse the operation (Async)\n\n Extracts the DataFrame from the operation and uploads it to the DataLayer.\n\n Args:\n operation (Operation): The operation to parse\n\n Returns:\n Operation: _description_\n \"\"\"\n\n datalayer = operation.context.kwargs.get(\"datalayer\", self.datalayer)\n\n operation.variables = await apply_recursive(\n partial(self.aupload_xarray, datalayer),\n operation.variables,\n ArrayLike,\n )\n operation.variables = await apply_recursive(\n partial(self.aupload_parquet, datalayer), operation.variables, ParquetLike\n )\n operation.variables = await apply_recursive(\n partial(self.aupload_bigfile, datalayer), operation.variables, FileLike\n )\n\n return operation\n\n async def adisconnect(self):\n self.executor.__exit__(None, None, None)\n\n class Config:\n arbitrary_types_allowed = True\n underscore_attrs_are_private = True\n extra = \"forbid\"\n","repo_name":"jhnnsrs/mikro_next","sub_path":"mikro_next/links/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24163060145","text":"import os\nimport time\nimport warnings\nimport h5py\nwarnings.filterwarnings(\"ignore\")\nimport cv2\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nimport numpy as np\nimport spacy\nimport keras.backend as backend\nfrom keras.optimizers import SGD\nfrom sklearn.externals import joblib\nfrom keras.layers.convolutional import MaxPooling2D, ZeroPadding2D\nfrom keras.layers.convolutional import Conv2D as Convolution2D\nfrom keras.models import Sequential\nfrom keras.layers.core import Flatten, Dense, Dropout, Reshape, Activation, Dropout\nfrom keras.layers import LSTM, Merge, Dense\n\n\n\ndef remove_layer(model):\n model.layers.pop()\n if not model.layers:\n model.inbound_nodes = []\n 
model.outputs = []\n model.outbound_nodes = []\n else:\n model.layers[-1].outbound_nodes = []\n model.outputs = [model.layers[-1].output]\n model.built = False\n\n return model\n\n\ndef VGG_16(weights):\n weight_dict = h5py.File(weights, 'r')\n model = Sequential()\n model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))\n model.add(Convolution2D(64,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(64,( 3, 3), activation='relu'))\n model.add(MaxPooling2D((2,2), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(128,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(128,( 3, 3), activation='relu'))\n model.add(MaxPooling2D((2,2), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(256,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(256,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(256,( 3, 3), activation='relu'))\n model.add(MaxPooling2D((2,2), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512,( 3, 3), activation='relu'))\n model.add(MaxPooling2D((2,2), strides=(2,2)))\n\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512,( 3, 3), activation='relu'))\n model.add(ZeroPadding2D((1,1)))\n model.add(Convolution2D(512,( 3, 3), activation='relu'))\n model.add(MaxPooling2D((2,2), strides=(2,2)))\n\n model.add(Flatten())\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1000, activation='softmax'))\n \n flattened = model.layers\n\n naive_bayes_layer = weight_dict.attrs['nb_layers']\n\n for layer in range(naive_bayes_layer):\n val = weight_dict['layer_{}'.format(layer)]\n w = [val['param_{}'.format(param)] for param in range(val.attrs['nb_params'])]\n if not w : continue\n if len(w[0].shape) >2: \n w[0] = np.swapaxes(w[0],0,3)\n w[0] = np.swapaxes(w[0],0,2)\n w[0] = np.swapaxes(w[0],1,2)\n flattened[layer].set_weights(w)\n # remove last tow layers to match dimensions\n model = remove_layer(model)\n model = remove_layer(model)\n weight_dict.close() \n print(\"VGG MODEL\")\n print (model.summary())\n return model\n\n\ndef set_params():\n backend.set_image_data_format('channels_first')\n# sets image to be read as (depth, input_depth, rows, cols)\n backend.set_image_dim_ordering('th')\n\n\ndef VQA():\n image_size = 4096\n word_size = 300\n n_lstm= 3\n n_hidden_lstm= 512\n max_word = 30 # max words in question\n n_dense = 3\n n_hidden = 1024\n activation = 'tanh'\n dropout = 0.5\n #Image layer\n\n image_model= Sequential()\n image_model.add(Reshape((image_size,), input_shape=(image_size,)))\n #LSTM layer\n language_model = Sequential()\n language_model.add(LSTM(n_hidden_lstm, return_sequences=True, input_shape=(max_word, word_size)))\n language_model.add(LSTM(n_hidden_lstm, return_sequences=True))\n language_model.add(LSTM(n_hidden_lstm, return_sequences=False))\n\n #combine model\n model = Sequential()\n model.add(Merge([language_model, image_model], mode='concat', concat_axis=1))\n\n for _ in range(n_dense):\n model.add(Dense(n_hidden, kernel_initializer='uniform'))\n 
model.add(Activation(activation))\n model.add(Dropout(dropout))\n model.add(Dense(1000))\n #Final layer with Top 1000 answers\n model.add(Activation('softmax'))\n print (model.summary())\n return model\n\n\ndef main(image_path=None, ques=None):\n start_time = time.time()\n set_params()\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n print(\"Obtaining image in form of features\")\n image_resized = cv2.resize(cv2.imread(image_path), (224, 224))\n image_float = image_resized.astype(np.float32)\n normalized_vector = [103.939, 116.779, 123.68]\n for third_dimension in range(3):\n image_float[:, :, third_dimension] = image_float[:, :, third_dimension] - normalized_vector[third_dimension]\n # convert from width,height,channel to channel,width,height \n image_float = image_float.transpose((2,0,1)) \n print(\"Image has dimensions\"+ str(image_float.shape))\n image = np.expand_dims(image_float, axis=0) \n # Adding extra dimension for maintaining model size\n print(\"Image now has dimensions\"+ str(image_float.shape))\n im_features = np.zeros((1, 4096))\n model_image = VGG_16('VQA_Attack/Weights/vgg16_dict.h5')\n stochastic_gd= SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n model_image.compile(optimizer=stochastic_gd, loss='categorical_crossentropy')\n im_features[0,:] = model_image.predict(image)[0]\n print(\"Converting question into embeddings\")\n embedding = spacy.load('en_vectors_web_lg')\n word_as_tokens = embedding(str(ques))\n tensor_q = np.zeros((1, 30, 300))\n # 30 because max is 30 words in a question\n for x in range(len(word_as_tokens)):\n tensor_q[0,x,:] = word_as_tokens[x].vector\n vqa = VQA()\n vqa.load_weights('VQA_Attack/Weights/VQA_weights.hdf5')\n vqa.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n print(\"VQA MODEL\")\n print (vqa.summary())\n print(\"Inferencing....................\")\n output = vqa.predict([tensor_q, im_features])\n # Answer will be a top 1000 answer vector\n # Answers are encoded as labels to minimize compute as words increase compute\n encoder = joblib.load('VQA_Attack/Weights/labelencoder.pkl')\n answer_vector = {}\n answer_vector['answer']= []\n answer_vector['answer_prob']= []\n temp_list = []\n for label in reversed(np.argsort(output)[0,-5:]):\n temp_list.append(label)\n answer_vector['answer'].append(str(encoder.inverse_transform(temp_list)[0]))\n answer_vector['answer_prob'].append(str(round(output[0,label]*100, 2)))\n temp_list.pop()\n print (answer_vector)\n print(\"Time taken for prediction: \" +str(time.time()-start_time))\n return answer_vector\n\n\nif __name__ == \"__main__\":\n main('test.jpg', \"What vechile is in the picture?\")\n","repo_name":"callmetesla/VQA-Attack","sub_path":"VQA/vqa.py","file_name":"vqa.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30365716036","text":"\r\n#!/usr/bin/python\r\n\r\n\r\nimport threading\r\nimport time\r\nfrom threading import Thread\r\n\r\nexitFlag = 0\r\n\r\nclass myThread (threading.Thread):\r\n def __init__(self, threadID, name, counter, delay):\r\n threading.Thread.__init__(self)\r\n self.threadID = threadID\r\n self.name = name\r\n self.counter = counter\r\n self.delay = delay\r\n def run(self):\r\n print(\"\\nStarting \" + self.name)\r\n print(self)\r\n print_time(self.name, self.counter, self.delay)\r\n print(\"\\nExiting \" + self.name)\r\n\r\ndef print_time(threadName, counter, delay):\r\n while counter:\r\n if exitFlag:\r\n threadName.exit()\r\n 
time.sleep(delay)\r\n print(\"\\n%s: %s %d\" % (threadName, time.ctime(time.time()),counter))\r\n print(threading.active_count())\r\n counter -= 1\r\n\r\n# Create new threads\r\n#thread1 = myThread(1, \"Thread-1\", 2, 1)\r\n#thread2 = myThread(2, \"Thread-2\", 5, 2)\r\nthread3 = Thread(target=print_time, args=(\"Thread-3\", 3, 1))\r\nthread4 = Thread(target=print_time, args=(\"Thread-4\", 4, 3))\r\n\r\n# Start new Threads\r\n#thread1.start()\r\n#thread2.start()\r\nthread3.run()\r\nthread4.run()\r\n\r\n# Join threads\r\n#thread1.join()\r\n#thread2.join()\r\nthread3.join()\r\nthread4.join()\r\n\r\nprint(\"Exiting Main Thread\")\r\n","repo_name":"MadimetjaMadix/Networks","sub_path":"Lab 3 TCP & UDP Threading/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8215989561","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: Kimberly McCormack\n\n:Last edited on: 10/28/2021\n\nScript to build catalog of full paths for data directory and search for paths given list of tiles\n\n\"\"\"\n\n\nimport glob\nimport itertools\nimport os\nimport shutil\nfrom shapely import geometry\nimport numpy as np\nimport geopandas as gpd\n\n\n\ndef buildpathlist(src_base_path, searchstr, listpath):\n \"\"\"\n Build and save a text file of full path names to\n specfied files in a directory\n\n Parameters\n ----------\n src_base_path : path [str]\n directory to search\n searchstr : string\n string to filter by\n listpath : path [str]\n full path to save text file\n \"\"\"\n\n cell_paths = []\n for root, dirs, files in os.walk(src_base_path):\n for d in dirs:\n cell_paths.append(glob.glob(os.path.join(root, d, searchstr)))\n\n cell_paths = list(itertools.chain(*cell_paths))\n with open(listpath, 'w') as filehandle:\n for path in cell_paths:\n if 'lidar' not in path:\n filehandle.write(\"{0}\\n\".format(path))\n\n\n\ndef tile_searchTDT(tile, searchlines):\n \"\"\"\n search for version 2 first, then version 1\n \"\"\"\n cell_search = tile +\"_02\"\n cell_path = \"\"\n \"\"\"Search TDT txt file for path of TDT cell\"\"\"\n for line in searchlines:\n if cell_search in line:\n cell_path = line\n break\n if len(cell_path) == 0:\n print(\"TDT version 2 not found, searching for version 1 instead\")\n cell_search = tile +\"_01\"\n for line in searchlines:\n if cell_search in line:\n cell_path = line\n break\n\n return cell_path\n\n\n\ndef buildTDTbbox(TDTcells):\n \"\"\"\n Create geodataframe of bounding box geometries for list of TanDEM-X cells\n\n Parameters\n ----------\n TDTcells : list of strings\n list of TanDEM-X cell names - e.g. 
['N39E047', 'S08W102']\n\n Returns\n -------\n TDT_poly_gdf : geopandas geodataframe\n gdf with cell name as index and cell bounding box as geometry\n\n \"\"\"\n\n bbox_bottom = np.array([np.int(cell[1:3]) for cell in TDTcells])\n bbox_left = np.array([np.int(cell[-3::]) for cell in TDTcells])\n\n \"\"\"Deal with quadrants\"\"\"\n for i, cell in enumerate(TDTcells):\n if cell[0] == 'S':\n bbox_bottom[i] = -1*bbox_bottom[i]\n if cell[3] == 'W':\n bbox_left[i] = -1*bbox_left[i]\n\n \"\"\"Create geodataframe of TDT bounding boxes\"\"\"\n TDT_bbox = [geometry.box(l, b, r, t)\n for l, b, r, t in zip(bbox_left, bbox_bottom,\n bbox_left+1, bbox_bottom+1)]\n TDT_poly_gdf = gpd.GeoDataFrame(geometry=TDT_bbox)\n \n TDT_poly_gdf.crs = \"EPSG:4326\"\n TDT_poly_gdf.index = TDTcells\n TDT_poly_gdf['TILE_ID'] = TDTcells\n\n return TDT_poly_gdf\n\n\n\ndef find_duplicates(list1):\n \n # initialize a null list\n unique_list = []\n duplicate_list = []\n \n # traverse for all elements\n for x in list1:\n # check if exists in unique_list or not\n if x not in unique_list:\n unique_list.append(x)\n else:\n duplicate_list.append(x)\n \n return duplicate_list\n\n","repo_name":"ngageoint/pydrodem","sub_path":"tools/directory_catalog.py","file_name":"directory_catalog.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11927490235","text":"\r\nfar = input(\"화씨를 입력하세요 :\")\r\ncel = (float(far) - 32)/1.8\r\n# cel = 5.5555555(far = 42일 경우)\r\n# cel * 100 = 555.555555...\r\n# int(cel * 100) = 555\r\n# int(cel * 100) / 100 = 5.55\r\nprint(far + \"의 섭씨 온도는 \" + str(int(cel*100)/100) + \" 입니다.\")\r\nprint(\"%s의 섭씨 온도는 %0.2f 입니다.\" % (far, cel))\r\nprint(\"%d의 섭씨 온도는 %0.2f 입니다.\" % (int(far), cel))\r\n\r\n\r\n#화씨 온도를 섭씨 온도로 변환하기\r\n'''\r\nftemp = float(input(\"화씨온도를 입력하세요: \"));\r\nctemp = (ftemp-32)*5/9\r\nprint(\"섭씨온도 :\", ctemp)\r\n'''\r\n\r\n# AUSG 동물원에 왔습니다! 사용자로부터 공작과 사자의 마리 수를 입력받아 총 다리 개수를 출력하는 프로그램을 작성하세요.\r\n\r\nlion = input(\"사자는 :\")\r\nbird = input(\"공작은 :\")\r\nleg1 = int(lion)*4\r\nleg2 = int(bird)*2\r\nsum = leg1+leg2\r\nprint(\"공작과 사자의 다리 갯수는 총 \"+str(sum)+\" 입니다. 
\")\r\n\r\n\r\n\r\n# 사용자로부터 숫자를 입력받아 그 숫자의 구구단을 출력하는 프로그램을 작성하세요.\r\n\r\nnum1 = int(input(\"숫자를 입력하시오 :\"))\r\nfor i in range(1,10):\r\n print(num1, \"*\", i, \"=\", num1*i)\r\n\r\n \r\n","repo_name":"AUSG/study-2019-first","sub_path":"python/week2/week2_jiyoon_temperature.py","file_name":"week2_jiyoon_temperature.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"17119063806","text":"# -*- coding: utf-8 -*-\nimport asyncio\nfrom functools import partial\n\nimport nbformat\nfrom prompt_toolkit.application.current import get_app\nfrom prompt_toolkit.filters import Condition\nfrom prompt_toolkit.formatted_text.base import StyleAndTextTuples\nfrom prompt_toolkit.layout.containers import (\n ConditionalContainer,\n Container,\n Float,\n FloatContainer,\n HSplit,\n VSplit,\n Window,\n)\nfrom prompt_toolkit.layout.controls import FormattedTextControl\nfrom prompt_toolkit.lexers import DynamicLexer, PygmentsLexer\nfrom prompt_toolkit.mouse_events import MouseEvent, MouseEventType\nfrom prompt_toolkit.search import start_search\nfrom prompt_toolkit.widgets import Label, SearchToolbar, TextArea\nfrom pygments.lexers import get_lexer_by_name\n\nfrom euporie.box import Border\nfrom euporie.keys import KeyBindingsInfo\nfrom euporie.output import Output\n\n\nclass ClickArea:\n \"\"\"Any empty widget which focuses `target` when clicked.\n\n Designed to be used as an overlay for clickable widgets in a FloatContainer.\n \"\"\"\n\n def __init__(self, target):\n self.target = target\n self.window = Window(\n FormattedTextControl(\n self._get_text_fragments,\n focusable=False,\n ),\n dont_extend_width=False,\n dont_extend_height=False,\n )\n\n def _get_text_fragments(self) -> StyleAndTextTuples:\n def handler(mouse_event: MouseEvent) -> None:\n if mouse_event.event_type == MouseEventType.MOUSE_UP:\n get_app().layout.focus(self.target)\n\n return [(\"class:cell-clickarea\", \"\", handler)]\n\n def __pt_container__(self) -> Container:\n return self.window\n\n\nclass Cell:\n def __init__(self, index, json, notebook):\n self.index = index\n self.json = json\n self.nb = notebook\n self.rendered = True\n self.editing = False\n self._drawing_position = None\n\n self.state = \"idle\"\n\n ft = FormattedTextControl(\n Border.TOP_LEFT,\n focusable=True,\n show_cursor=False,\n )\n self.control = Window(ft, width=1, height=0, style=self.border_style)\n\n self.show_input = Condition(\n lambda: (\n (self.json.get(\"cell_type\") != \"markdown\")\n | ((self.json.get(\"cell_type\") == \"markdown\") & ~self.rendered)\n )\n )\n self.show_output = Condition(\n lambda: (\n (self.json.get(\"cell_type\") != \"markdown\") & bool(self.outputs)\n | ((self.json.get(\"cell_type\") == \"markdown\") & self.rendered)\n )\n )\n self.scroll_input = Condition(\n lambda: (self.json.get(\"cell_type\") == \"markdown\") & ~self.rendered\n )\n self.wrap_input = Condition(lambda: self.json.get(\"cell_type\") == \"markdown\")\n self.is_editing = Condition(lambda: self.editing)\n self.show_prompt = Condition(lambda: self.cell_type == \"code\")\n self.is_focused = Condition(lambda: self.focused)\n self.obscured = Condition(\n lambda: (\n self._drawing_position is None\n or (\n self._drawing_position.top < 0\n or self._drawing_position.parent_height\n < self._drawing_position.top + self._drawing_position.height\n )\n )\n )\n self.show_input_line_numbers = Condition(\n lambda: self.nb.line_numbers and self.json.get(\"cell_type\") == \"code\"\n )\n\n 
self.load()\n\n def load_key_bindings(self):\n kb = KeyBindingsInfo()\n\n @kb.add(\n \"e\", filter=~self.is_editing, group=\"Notebook\", desc=\"Edit cell in $EDITOR\"\n )\n async def edit_in_editor(event):\n self.editing = True\n await self.input_box.buffer.open_in_editor()\n exit_edit_mode(event)\n\n @kb.add(\n \"enter\",\n filter=~self.is_editing,\n group=\"Notebook\",\n desc=\"Enter cell edit mode\",\n )\n def enter_edit_mode(event):\n self.editing = True\n self.container.modal = True\n get_app().layout.focus(self.input_box)\n self.rendered = False\n\n @kb.add(\"escape\", group=\"Notebook\", desc=\"Exit cell edit mode\")\n @kb.add(\n \"escape\", \"escape\", group=\"Notebook\", desc=\"Exit cell edit mode quickly\"\n )\n def exit_edit_mode(event):\n self.editing = False\n self.input = self.input_box.text\n self.nb.dirty = True\n self.container.modal = False\n # give focus back to selected cell (this might have changed!)\n get_app().layout.focus(self.nb.cell.control)\n\n @kb.add(\n \"escape\",\n \"[\",\n \"1\",\n \"3\",\n \";\",\n \"5\",\n \"u\",\n key_str=(\"c-enter\",),\n group=\"Notebook\",\n desc=\"Run cell\",\n )\n @kb.add(\"c-r\", group=\"Notebook\", desc=\"Run cell\")\n @kb.add(\"c-f20\")\n def run_or_render(event):\n exit_edit_mode(event)\n if self.cell_type == \"markdown\":\n self.output_box.children = self.rendered_outputs\n self.rendered = True\n elif self.cell_type == \"code\":\n self.state = \"queued\"\n self.run()\n\n @kb.add(\n \"escape\",\n \"[\",\n \"1\",\n \"3\",\n \";\",\n \"2\",\n \"u\",\n key_str=(\"s-enter\",),\n group=\"Notebook\",\n desc=\"Run then select next cell\",\n )\n @kb.add(\"f21\")\n def run_then_next(event):\n # Insert a cell if we are at the last cell\n n_cells = len(self.nb.page.children)\n if self.nb.page.selected_index == (n_cells) - 1:\n offset = n_cells - self.nb.page.selected_index\n self.nb.add(offset)\n else:\n self.nb.page.selected_index += 1\n run_or_render(event)\n\n @kb.add(\"c-f\", filter=self.is_editing, group=\"Edit Mode\", desc=\"Find\")\n def find(event):\n start_search(self.input_box.control)\n\n @kb.add(\"c-g\", filter=self.is_editing, group=\"Edit Mode\", desc=\"Find Next\")\n def find_next(event):\n search_state = get_app().current_search_state\n cursor_position = self.input_box.buffer.get_search_position(\n search_state, include_current_position=False\n )\n self.input_box.buffer.cursor_position = cursor_position\n\n @kb.add(\"c-z\", filter=self.is_editing, group=\"Edit Mode\", desc=\"Undo\")\n def undo(event):\n self.input_box.buffer.undo()\n\n return kb\n\n def run(self):\n self.clear_output()\n if self.nb.kc:\n # Execute input and wait for responses in kernel thread\n asyncio.run_coroutine_threadsafe(\n # self.nb.kc._async_execute_interactive(\n self._async_execute_interactive(\n code=self.input,\n allow_stdin=False,\n output_hook=self.ran,\n ),\n self.nb.kernel_loop,\n )\n\n async def _async_execute_interactive(\n self, code, allow_stdin=False, output_hook=None\n ):\n from queue import Empty\n\n import zmq.asyncio\n\n if not self.nb.kc.iopub_channel.is_alive():\n raise RuntimeError(\"IOPub channel must be running to receive output\")\n\n msg_id = self.nb.kc.execute(\n code,\n allow_stdin=False,\n )\n stdin_hook = self.nb.kc._stdin_hook_default\n\n timeout_ms = None\n\n poller = zmq.Poller()\n iopub_socket = self.nb.kc.iopub_channel.socket\n poller.register(iopub_socket, zmq.POLLIN)\n stdin_socket = None\n\n # wait for output and redisplay it\n while True:\n events = dict(poller.poll(timeout_ms))\n if not events:\n raise 
TimeoutError(\"Timeout waiting for output\")\n if stdin_socket in events:\n req = self.nb.kc.stdin_channel.get_msg(timeout=0)\n stdin_hook(req)\n continue\n if iopub_socket not in events:\n continue\n\n msg = self.nb.kc.iopub_channel.get_msg(timeout=0)\n\n if msg[\"parent_header\"].get(\"msg_id\") != msg_id:\n # not from my request\n continue\n output_hook(msg)\n\n # stop on idle\n if (\n msg[\"header\"][\"msg_type\"] == \"status\"\n and msg[\"content\"][\"execution_state\"] == \"idle\"\n ):\n break\n\n # output is done, get the reply\n while True:\n try:\n reply = self.nb.kc.get_shell_msg(timeout=None)\n except Empty as e:\n raise TimeoutError(\"Timeout waiting for reply\") from e\n if reply[\"parent_header\"].get(\"msg_id\") != msg_id:\n # not my reply, someone may have forgotten to retrieve theirs\n continue\n return reply\n\n def ran(self, msg):\n msg_type = msg.get(\"header\", {}).get(\"msg_type\")\n\n if msg_type == \"status\":\n self.state = msg.get(\"content\", {}).get(\"execution_state\")\n self.nb.kernel_status = self.state\n\n elif msg_type == \"execute_input\":\n self.json[\"execution_count\"] = msg.get(\"content\", {}).get(\"execution_count\")\n\n elif msg_type in (\"stream\", \"error\", \"display_data\", \"execute_result\"):\n self.json[\"outputs\"].append(nbformat.v4.output_from_msg(msg))\n\n # Update the outputs in the visible instance of this cell\n visible_cell = self.nb.get_cell_by_id(self.id)\n if visible_cell:\n visible_cell.output_box.children = visible_cell.rendered_outputs\n\n # Tell the app that the display needs updating\n get_app().invalidate()\n\n def set_cell_type(self, cell_type):\n if cell_type == \"code\":\n self.json.setdefault(\"execution_count\", None)\n self.json[\"cell_type\"] = cell_type\n self.load()\n\n def mouse_click(self):\n get_app().layout.focus(self.control)\n\n def load(self):\n\n fill = partial(Window, style=self.border_style)\n\n self.search_control = SearchToolbar()\n\n self.input_box = TextArea(\n text=self.input,\n # Does not accept conditions\n scrollbar=self.scroll_input(),\n wrap_lines=self.wrap_input,\n # Does not accept conditions\n line_numbers=self.show_input_line_numbers(),\n read_only=~self.is_editing,\n focusable=self.is_editing,\n lexer=DynamicLexer(\n lambda: PygmentsLexer(\n get_lexer_by_name(self.language).__class__,\n sync_from_start=False,\n )\n if self.cell_type != \"raw\"\n else None\n ),\n search_field=self.search_control,\n completer=self.nb.completer,\n complete_while_typing=False,\n style=\"class:cell-input\",\n )\n self.input_box.window.cursorline = self.is_editing\n\n self.output_box = HSplit(\n self.rendered_outputs,\n style=\"class:cell-output\",\n )\n\n top_border = VSplit(\n [\n self.control,\n ConditionalContainer(\n content=fill(\n char=Border.HORIZONTAL, width=len(self.prompt), height=1\n ),\n filter=self.show_prompt,\n ),\n ConditionalContainer(\n content=fill(width=1, height=1, char=Border.SPLIT_TOP),\n filter=self.show_prompt,\n ),\n fill(char=Border.HORIZONTAL, height=1),\n fill(width=1, height=1, char=Border.TOP_RIGHT),\n ],\n height=1,\n )\n input_row = ConditionalContainer(\n VSplit(\n [\n fill(width=1, char=Border.VERTICAL),\n ConditionalContainer(\n content=Label(\n self.prompt,\n width=len(self.prompt),\n style=\"class:cell-input-prompt\",\n ),\n filter=self.show_prompt,\n ),\n ConditionalContainer(\n content=fill(width=1, char=Border.VERTICAL),\n filter=self.show_prompt,\n ),\n HSplit([self.input_box, self.search_control]),\n fill(width=1, char=Border.VERTICAL),\n ],\n ),\n 
filter=self.show_input,\n )\n middle_line = ConditionalContainer(\n content=VSplit(\n [\n fill(width=1, height=1, char=Border.SPLIT_LEFT),\n ConditionalContainer(\n content=fill(char=Border.HORIZONTAL, width=len(self.prompt)),\n filter=self.show_prompt,\n ),\n ConditionalContainer(\n content=fill(width=1, height=1, char=Border.CROSS),\n filter=self.show_prompt,\n ),\n fill(char=Border.HORIZONTAL),\n fill(width=1, height=1, char=Border.SPLIT_RIGHT),\n ],\n height=1,\n ),\n filter=self.show_input & self.show_output,\n )\n output_row = ConditionalContainer(\n VSplit(\n [\n fill(width=1, char=Border.VERTICAL),\n ConditionalContainer(\n content=Label(\n self.prompt,\n width=len(self.prompt),\n style=\"class:cell-output-prompt\",\n ),\n filter=self.show_prompt,\n ),\n ConditionalContainer(\n fill(width=1, char=\" \"), filter=~self.show_prompt\n ),\n ConditionalContainer(\n content=fill(width=1, char=Border.VERTICAL),\n filter=self.show_prompt,\n ),\n self.output_box,\n ConditionalContainer(\n fill(width=1, char=\" \"), filter=~self.show_prompt\n ),\n fill(width=1, char=Border.VERTICAL),\n ],\n ),\n filter=self.show_output,\n )\n bottom_border = VSplit(\n [\n fill(width=1, height=1, char=Border.BOTTOM_LEFT),\n ConditionalContainer(\n content=fill(char=Border.HORIZONTAL, width=len(self.prompt)),\n filter=self.show_prompt,\n ),\n ConditionalContainer(\n content=fill(width=1, height=1, char=Border.SPLIT_BOTTOM),\n filter=self.show_prompt,\n ),\n fill(char=Border.HORIZONTAL),\n fill(width=1, height=1, char=Border.BOTTOM_RIGHT),\n ],\n height=1,\n )\n\n self.container = FloatContainer(\n content=HSplit(\n [top_border, input_row, middle_line, output_row, bottom_border],\n key_bindings=self.load_key_bindings(),\n ),\n floats=[\n Float(\n transparent=True,\n left=0,\n right=0,\n top=0,\n bottom=0,\n content=ConditionalContainer(\n ClickArea(self), filter=~self.is_focused\n ),\n ),\n ],\n )\n\n def border_style(self):\n if self.focused:\n if self.editing:\n return \"class:frame.border,cell-border-edit\"\n else:\n return \"class:frame.border,cell-border-selected\"\n else:\n return \"class:frame.border,cell-border\"\n\n @property\n def id(self):\n return self.json.get(\"id\")\n\n @property\n def language(self):\n if self.cell_type == \"markdown\":\n return \"markdown\"\n else:\n return self.nb.json.metadata.get(\"language_info\", {}).get(\"name\", \"python\")\n\n @property\n def focused(self):\n return get_app().layout.has_focus(self.container)\n\n @property\n def cell_type(self):\n return self.json.get(\"cell_type\", \"code\")\n\n @property\n def prompt(self):\n if self.state in (\"busy\", \"queued\"):\n prompt = \"*\"\n else:\n prompt = self.json.get(\"execution_count\", \"\")\n if prompt is None:\n prompt = \" \"\n if prompt:\n prompt = f\"[{prompt}]\"\n return prompt\n\n @property\n def input(self):\n return self.json.get(\"source\", \"\")\n\n @input.setter\n def input(self, value):\n self.json[\"source\"] = value\n\n def clear_output(self):\n self.json[\"outputs\"] = []\n self.load()\n\n @property\n def outputs(self):\n if self.cell_type == \"markdown\":\n return [\n {\"data\": {\"text/x-markdown\": self.input}, \"output_type\": \"markdown\"}\n ]\n else:\n return self.json.get(\"outputs\", [])\n\n @property\n def rendered_outputs(self):\n rendered_outputs = []\n for i, output_json in enumerate(self.outputs):\n rendered_outputs.append(Output(i, output_json, parent=self))\n return rendered_outputs\n\n def __pt_container__(self) -> \"Container\":\n return 
self.container\n","repo_name":"tspannhw/euporie","sub_path":"euporie/cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":17980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"34440151267","text":"import pandas as pd\nimport tldextract\n\nimport constants\n\ndef detect_outliers(flows, out_fp, threshold=.5):\n vp_groups = flows.groupby(\"vendor_product\")\n\n outlier_dfs = []\n try:\n for vp, vp_flows in vp_groups:\n print(f\"\\n########### {vp} ###########\")\n dev_count = vp_flows.device_id.nunique()\n if dev_count < 10:\n print(f\"{vp} only has {dev_count} devices, skip...\")\n else:\n common_domains = vp_flows.groupby(\"short_domain\").device_id.nunique()\n common_domains = common_domains[common_domains > threshold*dev_count].index.values\n common_ips = vp_flows.groupby(\"remote_ip\").device_id.nunique()\n common_ips = common_ips[common_ips > threshold*dev_count].index.values\n\n print(f\"Common domains include: {common_domains}\")\n print(f\"Common IP include: {common_ips}\")\n\n normal_devs = vp_flows[(vp_flows.short_domain.isin(common_domains)) | (vp_flows.remote_ip.isin(common_ips))].device_id.unique()\n outlier_devs = vp_flows[(~vp_flows.device_id.isin(normal_devs)) & (vp_flows.remote_port != 53) & (vp_flows.remote_port != 5353)].device_id.unique()\n outlier_flows = vp_flows[vp_flows.device_id.isin(outlier_devs)]\n\n if len(outlier_flows.index) != 0:\n print(f\"Found {outlier_flows.device_id.nunique()} outlying devices.\")\n outlier_dfs.append(outlier_flows)\n else:\n print(f\"Did not find any outlying devices.\")\n\n print(\"\\nFinished analyzing.\")\n outliers_flows = pd.concat(outlier_dfs)\n print(\"saving files...\")\n outliers_flows.to_parquet(out_fp)\n except Exception as e:\n print(\"Something goes wrong.\")\n if len(outlier_dfs) > 0:\n outliers_flows = pd.concat(outlier_dfs)\n print(\"saving files...\")\n outliers_flows.to_parquet(out_fp)\n else:\n print(\"There are no outliers!\")\n\ndef analyze_outliers(flows):\n print(f\"Found {flows.device_id.nunique()} devices from {flows.vendor_product.nunique()} products in total. 
\")\n\n vp_gorups = flows.groupby(\"vendor_product\")\n\n for vp, vp_flows in vp_gorups:\n print(f\"\\n########### {vp} ###########\")\n print(f\"There are {vp_flows.device_id.nunique()} outlying devices in total.\")\n\n dev_groups = vp_flows.groupby(\"device_id\")\n\n for dev_id, dev_flows in dev_groups:\n print(f\"\\n====== {dev_id} ======\")\n # print(dev_flows[[\"short_domain\", \"short_hostname\"]].value_counts())\n print(dev_flows)\n\ndef remove_outliers(flows, outlier_flows, devs):\n outlier_dev_ids = outlier_flows.device_id.unique()\n flows = flows[~flows.device_id.isin(outlier_dev_ids)]\n\n devs = devs[~devs.device_id.isin(outlier_dev_ids)]\n outlier_devs = devs[devs.device_id.isin(outlier_dev_ids)]\n\n print(\"writing to file...\")\n flows.to_parquet(constants.ANALYSIS_FLOWS_FP)\n devs.to_parquet(constants.ANALYSIS_DEVS_FP)\n outlier_devs.to_parquet(constants.OUTLIERS_DEVS_FP)\n\ndef dns_traffic(flows):\n dns_flows = flows[flows.remote_port == 53]\n\nif __name__ == \"__main__\":\n print(\"reading files...\")\n flows = pd.read_parquet(constants.FLOWS_FP)\n\n # detect_outliers(flows, constants.OUTLIERS_FLOWS_FP, threshold=.2)\n\n print(\"\\n============================================================\\n\")\n\n outlier_flows = pd.read_parquet(constants.OUTLIERS_FLOWS_FP)\n analyze_outliers(outlier_flows)\n\n devs = pd.concat([pd.read_parquet(constants.DEVS_FP), pd.read_parquet(constants.OUTLIERS_DEVS_FP)])\n remove_outliers(flows, outlier_flows, devs)\n \n print(\"\\ndone!\")","repo_name":"research0269/iot-firewalls-2023","sub_path":"eval_src/data_cleaning/outlier_analysis.py","file_name":"outlier_analysis.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1512918602","text":"\n'''Manacher算法'''\nclass Solution(object):\n def longestPalindrome(self, s):\n\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n if not s:\n return s\n if len(s)<2:\n return s\n T='#'.join('@{}$'.format(s))#step 1\n #step2\n n=len(T)\n P=[0]*n\n c=0\n r=0\n maxlen=0\n centeridx=0\n for i in range(1,n-1):\n if r>i:\n P[i]=min(r-i,P[2*c-i])\n \n while T[i+1+P[i]]==T[i-1-P[i]]:\n P[i]=P[i]+1\n \n if i+P[i]>r:\n c=i\n r=i+P[i]\n if P[i]>maxlen:\n maxlen=P[i]\n centeridx=i\n \n begin=(centeridx-maxlen)//2\n end=(centeridx+maxlen)//2\n\n\n return s[begin:end]\n ","repo_name":"nobody0402/leetcode","sub_path":"python/5. 
Longest Palindromic Substring/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41621934956","text":"# -*- coding: utf-8 -*-\nimport re\n\ndef phrase_index(sentence, phrase):\n '''\n Returns the start and end index of phrase (first instance) if it exists in\n sentence.\n\n ex: >>> phrase_index('the quick brown fox jumps over the lazy dog',\n 'brown fox jumps')\n (10, 24)\n '''\n phrase = str(phrase) # in case phrase is a number\n m = re.match(r'(.*?)\\b'+re.escape(phrase)+r'\\b', sentence)\n if m:\n # group 0 and 1 returns the match with and without the phrase respectively\n l = len(m.group(1))\n return (l, l+len(phrase)-1)\n return None\n\n\ndef phrase_pos(sentence, phrase):\n '''\n Returns the start and end position of phrase (first instance) if it exists in\n sentence.\n\n ex: >>> phrase_index('the quick brown fox jumps over the lazy dog',\n 'brown fox jumps')\n (2, 5)\n '''\n phrase = str(phrase) # in case phrase is a number\n s_tok = sentence.split()\n p_tok = phrase.split()\n p_len = len(p_tok)\n\n # get all indices where s_tok[i] matches p_tok[0]\n indices = [ i for i, x in enumerate(s_tok) if x == p_tok[0] ]\n for i in indices:\n if s_tok[i : i+p_len] == p_tok:\n return i, i+p_len\n return None\n","repo_name":"canaryhealth/nlu_trainer","sub_path":"nlu_trainer/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25185580339","text":"from __future__ import absolute_import, division, print_function\nfrom transformers.optimization import AdamW, get_linear_schedule_with_warmup\nfrom lib.classifiers.RobertaWrapper import RobertaForSequenceClassification, Inferencer, save_model, load_features\nfrom datetime import datetime\nimport torch\nimport random, argparse\nimport numpy as np\nfrom lib.handle_data.PreprocessForBert import *\nfrom lib.utils import get_torch_device, InputFeatures\nimport logging\nfrom lib.evaluate.Eval import my_eval\nfrom collections import Counter\nimport os\n\n'''\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, my_id, input_ids, input_mask, segment_ids, label_id):\n self.my_id = my_id\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n'''\n\n########################\n# WHAT IS THE EXPERIMENT\n########################\n\n# find GPU if present\nmodel_mapping = {'rob_base': 'roberta-base',\n 'rob_dapt': 'experiments/adapt_dapt_tapt/pretrained_models/news_roberta_base',\n 'rob_tapt': 'experiments/adapt_dapt_tapt/pretrained_models/dsp_roberta_base_tapt_hyperpartisan_news_5015',\n 'rob_dapttapt': 'experiments/adapt_dapt_tapt/pretrained_models/dsp_roberta_base_dapt_news_tapt_hyperpartisan_news_5015',\n 'rob_basil_tapt': 'experiments/adapt_dapt_tapt/dont-stop-pretraining/roberta-tapt',\n }\ndevice, USE_CUDA = get_torch_device()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-load', '--load', action='store_true', default=True)\nparser.add_argument('-ep', '--n_epochs', type=int, default=10) #2,3,4\nparser.add_argument('-debug', '--debug', action='store_true', default=False)\nparser.add_argument('-n_voters', '--n_voters', type=int, default=3)\n\nparser.add_argument('-sampler', '--sampler', type=str, default='sequential')\nparser.add_argument('-model', '--model', type=str, 
default=None) #2,3,4\nparser.add_argument('-lr', '--lr', type=float, default=None) #5e-5, 3e-5, 2e-5\nparser.add_argument('-bs', '--bs', type=int, default=None) #16, 21\nparser.add_argument('-sv', '--sv', type=int, default=None) #16, 21\nparser.add_argument('-fold', '--fold', type=str, default=None) #16, 21\nparser.add_argument('-force_emb', '--force_embed', action='store_true', default=False) #16, 21\nargs = parser.parse_args()\n\nFORCE_EMBED = args.force_embed\nN_EPS = args.n_epochs\nN_VOTERS = args.n_voters\nmodels = [args.model] if args.model else ['rob_basil_tapt']\nseeds = [args.sv] if args.sv else [34]\nbss = [args.bs] if args.bs else [16]\nlrs = [args.lr] if args.lr else [1e-5]\nfolds = [args.fold] if args.fold else [str(el+1) for el in range(10)]\nsamplers = [args.sampler] if args.sampler else ['sequential']\n\nDEBUG = args.debug\nif DEBUG:\n N_VOTERS = 3\n N_EPS = 10\n seeds = [99]\n bss = [16]\n lrs = [1e-5]\n folds = ['1']\n samplers = ['sequential']\n\n########################\n# WHERE ARE THE FILES\n########################\n\nTASK_NAME = f'Rob_majvote'\nFEAT_DIR = f'data/sent_clf/features_for_roberta_majvote'\nCHECKPOINT_DIR = f'models/checkpoints/{TASK_NAME}/'\nREPORTS_DIR = f'reports/{TASK_NAME}'\nTABLE_DIR = os.path.join(REPORTS_DIR, 'tables')\nCACHE_DIR = 'models/cache/' # This is where BERT will look for pre-trained models to load parameters from.\nMAIN_TABLE_FP = os.path.join(TABLE_DIR, f'roberta_ft_results.csv')\n\n#if not os.path.exists(CHECKPOINT_DIR):\n# os.makedirs(CHECKPOINT_DIR)\n#if not os.path.exists(CURRENT_BEST_DIR):\n# os.makedirs(CURRENT_BEST_DIR)\n\nif not os.path.exists(REPORTS_DIR):\n os.makedirs(REPORTS_DIR)\nif not os.path.exists(TABLE_DIR):\n os.makedirs(TABLE_DIR)\n\ntable_columns = 'model,sampler,seed,bs,lr,model_loc,fold,epoch,set_type,loss,fn,fp,tn,tp,acc,prec,rec,f1'\nmain_results_table = pd.DataFrame(columns=table_columns.split(','))\n\n########################\n# MAIN\n########################\n\nGRADIENT_ACCUMULATION_STEPS = 1\nWARMUP_PROPORTION = 0.1\nNUM_LABELS = 2\nPRINT_EVERY = 100\n\ninferencer = Inferencer(REPORTS_DIR, logger, device, use_cuda=USE_CUDA)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nif __name__ == '__main__':\n # set logger\n now = datetime.now()\n now_string = now.strftime(format=f'%b-%d-%Hh-%-M_{TASK_NAME}')\n LOG_NAME = f\"{REPORTS_DIR}/{now_string}.log\"\n console_hdlr = logging.StreamHandler(sys.stdout)\n file_hdlr = logging.FileHandler(filename=LOG_NAME)\n logging.basicConfig(level=logging.INFO, handlers=[console_hdlr, file_hdlr])\n logger = logging.getLogger()\n logger.info(args)\n\n for MODEL in models:\n ROBERTA_MODEL = model_mapping[MODEL]\n for SAMPLER in samplers:\n for SEED in seeds:\n if SEED == 0:\n SEED_VAL = random.randint(0, 300)\n else:\n SEED_VAL = SEED\n\n seed_name = f\"{MODEL}_{SAMPLER}_{SEED_VAL}\"\n random.seed(SEED_VAL)\n np.random.seed(SEED_VAL)\n torch.manual_seed(SEED_VAL)\n torch.cuda.manual_seed_all(SEED_VAL)\n\n for BATCH_SIZE in bss:\n bs_name = seed_name + f\"_bs{BATCH_SIZE}\"\n for LEARNING_RATE in lrs:\n setting_name = bs_name + f\"_lr{LEARNING_RATE}\"\n setting_results_table = pd.DataFrame(columns=table_columns.split(','))\n for fold_name in folds:\n fold_results_table = pd.DataFrame(columns=table_columns.split(','))\n\n # load test feats\n\n test_fp = os.path.join(FEAT_DIR, f\"{fold_name}_test_features.pkl\")\n test_ids, test_batches, test_labels = load_features(test_fp, 1, SAMPLER)\n\n all_votes = []\n tr_perfs = []\n for v in 
range(N_VOTERS):\n v_f1 = 0\n name = setting_name + f\"_f{fold_name}_v{v}\"\n\n # init results containers\n best_model_loc = os.path.join(CHECKPOINT_DIR, name)\n test_res = {'model': MODEL, 'seed': SEED_VAL, 'fold': fold_name, 'bs': BATCH_SIZE,\n 'lr': LEARNING_RATE, 'set_type': 'test',\n 'sampler': SAMPLER, 'voter': v}\n best_val_res = {'model': MODEL, 'seed': SEED_VAL, 'fold': fold_name, 'bs': BATCH_SIZE, 'lr': LEARNING_RATE, 'set_type': 'dev',\n 'f1': 0, 'model_loc': best_model_loc, 'sampler': SAMPLER, 'voter': v}\n\n # load feats\n train_fp = os.path.join(FEAT_DIR, f\"{fold_name}_{v}_train_features.pkl\")\n dev_fp = os.path.join(FEAT_DIR, f\"{fold_name}_{v}_dev_features.pkl\")\n _, train_batches, train_labels = load_features(train_fp, BATCH_SIZE, SAMPLER)\n _, dev_batches, dev_labels = load_features(dev_fp, 1, SAMPLER)\n\n # start training\n logger.info(f\"***** Training on Fold {fold_name} *****\")\n logger.info(f\" Details: {best_val_res}\")\n logger.info(f\" Logging to {LOG_NAME}\")\n\n FORCE = False\n if not os.path.exists(best_model_loc) or FORCE:\n model = RobertaForSequenceClassification.from_pretrained(ROBERTA_MODEL,\n cache_dir=CACHE_DIR,\n num_labels=NUM_LABELS,\n output_hidden_states=True,\n output_attentions=False)\n model.to(device)\n optimizer = AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=0.01,\n eps=1e-6) # To reproduce BertAdam specific behavior set correct_bias=False\n\n n_train_batches = len(train_batches)\n half_train_batches = int(n_train_batches / 2)\n GRADIENT_ACCUMULATION_STEPS = 2\n WARMUP_PROPORTION = 0.06\n num_tr_opt_steps = n_train_batches * N_EPS / GRADIENT_ACCUMULATION_STEPS\n num_tr_warmup_steps = int(WARMUP_PROPORTION * num_tr_opt_steps)\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=num_tr_warmup_steps,\n num_training_steps=num_tr_opt_steps)\n\n model.train()\n\n for ep in range(1, N_EPS + 1):\n epoch_name = name + f\"_ep{ep}\"\n tr_loss = 0\n for step, batch in enumerate(train_batches):\n batch = tuple(t.to(device) for t in batch)\n\n model.zero_grad()\n outputs = model(batch[0], batch[1], labels=batch[2])\n (loss), logits, pooled_output, sequence_output, hidden_states = outputs\n\n loss.backward()\n tr_loss += loss.item()\n optimizer.step()\n scheduler.step()\n\n if step % PRINT_EVERY == 0 and step != 0:\n logging.info(f' Ep {ep} / {N_EPS} - {step} / {len(train_batches)} - Loss: {loss.item()}')\n\n av_loss = tr_loss / len(train_batches)\n\n train_mets, train_perf = inferencer.evaluate(model, train_batches, train_labels,\n av_loss=av_loss,\n set_type='train', name=epoch_name)\n\n dev_mets, dev_perf = inferencer.evaluate(model, dev_batches, dev_labels, av_loss=av_loss,\n set_type='dev', name=epoch_name)\n\n test_mets, test_perf = inferencer.evaluate(model, test_batches, test_labels,\n av_loss=av_loss,\n set_type='test', name=epoch_name)\n\n\n # check if best\n high_score = ''\n if dev_mets['f1'] > best_val_res['f1']:\n best_val_res.update(dev_mets)\n high_score = '(HIGH SCORE)'\n save_model(model, CHECKPOINT_DIR, name)\n\n logger.info(f'{epoch_name}: {dev_perf} {high_score}')\n\n logger.info(f'{epoch_name}: {train_perf}')\n logger.info(f'{epoch_name}: {test_perf}')\n\n best_model = RobertaForSequenceClassification.from_pretrained(best_model_loc,\n num_labels=NUM_LABELS,\n output_hidden_states=True,\n output_attentions=False)\n best_model.to(device)\n\n logger.info(f\"***** Best v{v} model on Fold {fold_name} *****\")\n logger.info(f\" Details: {[(k, type(v)) for k, v in best_val_res.items()]}\")\n dev_mets, 
dev_perf = inferencer.evaluate(best_model, dev_batches, dev_labels, set_type='dev')\n best_val_res.update(dev_mets)\n logging.info(f\"{dev_perf}\")\n\n preds, _ = inferencer.predict(best_model, test_batches)\n assert len(preds) == len(test_ids)\n all_votes.append(preds)\n\n # train_mets, train_perf = inferencer.evaluate(best_model, train_batches, train_labels,\n # set_type='train')\n\n # tr_perfs.append(train_perf)\n\n test_mets, test_perf = inferencer.evaluate(best_model, test_batches, test_labels,\n set_type='test')\n test_res.update(test_mets)\n logging.info(f\"{test_perf}\")\n\n fold_results_table = fold_results_table.append(best_val_res, ignore_index=True)\n fold_results_table = fold_results_table.append(test_res, ignore_index=True)\n\n FORCE_EMBED = True\n for EMB_TYPE in ['cross4bert']: #poolbert', 'avbert', 'unpoolbert', 'crossbert',\n emb_fp = f'data/embeddings/{MODEL}/{name}_basil_w_{EMB_TYPE}'\n\n if not os.path.exists(emb_fp) or FORCE_EMBED:\n logging.info(f'Generating {EMB_TYPE} ({emb_fp})')\n feat_fp = os.path.join(FEAT_DIR, f\"all_features.pkl\")\n all_ids, all_batches, all_labels = load_features(feat_fp, batch_size=1, sampler=SAMPLER)\n embs = inferencer.predict(best_model, all_batches, return_embeddings=True, emb_type=EMB_TYPE)\n\n assert len(embs) == len(all_ids)\n\n basil_w_BERT = pd.DataFrame(index=all_ids)\n basil_w_BERT[EMB_TYPE] = embs\n basil_w_BERT.to_csv(emb_fp)\n logger.info(f'{EMB_TYPE} embeddings in {emb_fp}.csv')\n\n\n majvote = [Counter(el).most_common()[0][0] for el in zip(*all_votes)]\n\n # test_mets, test_perf = inferencer.evaluate(best_model, test_batches, test_labels, set_type='test')\n test_mets, test_perf = my_eval(test_labels, majvote, set_type='test', name=name,\n opmode='classification')\n\n\n test_res.update(test_mets)\n test_res['voter'] = 'maj_vote'\n logging.info(f\"{test_perf}\")\n # logging.info(f\"{tr_perfs}\")\n\n\n\n # store performance in table\n fold_results_table = fold_results_table.append(test_res, ignore_index=True)\n setting_results_table = setting_results_table.append(fold_results_table)\n\n # print result on fold\n logging.info(f'Fold {fold_name} results: \\n{fold_results_table[[\"model\", \"seed\", \"bs\", \"lr\", \"fold\", \"voter\", \"set_type\", \"f1\"]]}')\n\n # print result of setting\n logging.info(\n f'Setting {setting_name} results: \\n{setting_results_table[[\"model\", \"seed\", \"bs\", \"lr\", \"fold\", \"voter\", \"set_type\", \"f1\"]]}')\n\n # store performance of setting\n main_results_table = main_results_table.append(setting_results_table, ignore_index=True)\n\n # write performance to file\n setting_results_table.to_csv(os.path.join(TABLE_DIR, f'{setting_name}_results_table.csv'), index=False)\n\nmain_results_table_orig = pd.read_csv(MAIN_TABLE_FP)\nmain_results_table = main_results_table_orig.append(main_results_table, ignore_index=True)\nmain_results_table.to_csv(MAIN_TABLE_FP, index=False)\n\n'''\nn_train_batches = len(train_batches)\nhalf_train_batches = int(n_train_batches / 2)\nnum_tr_opt_steps = n_train_batches * NUM_TRAIN_EPOCHS # / GRADIENT_ACCUMULATION_STEPS\nnum_tr_warmup_steps = int(WARMUP_PROPORTION * num_tr_opt_steps)\n#scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_tr_warmup_steps, 
num_training_steps=num_tr_opt_steps)\n'''","repo_name":"vdenberg/informational-entity-framing-detection","sub_path":"experiments/sent_clf/baselines/roberta_majority_vote.py","file_name":"roberta_majority_vote.py","file_ext":"py","file_size_in_byte":17675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72110570000","text":"import functools\nimport maya.cmds\nimport maya.mel\n\nimport IECore\n\nfrom .UIElement import UIElement\n\n## A class for making maya menus from an IECore.MenuDefinition. The menu is built dynamically when it's\n# displayed, so the definition can be edited at any time to change the menu.\nclass Menu( UIElement ) :\n\n\t# Creates a menu defined by the specified definition. parent may be a\n\t# window (in which case the menu is added to the menu bar), a menu (in which case a submenu is created)\n\t# or a control (in which case a popup menu is created). The optional keyword arguments operate as follows :\n\t#\n\t# label :\n\t# specifies a label for the submenu (if the parent is a menu) or menubar item (if the parent is a window).\n\t#\n\t# insertAfter :\n\t# specifies the menu item the submenu should be inserted after (if the parent is a menu).\n\t#\n\t# radialPosition :\n\t# specifies the radial position of the submenu (if the parent is a marking menu).\n\t#\n\t# button :\n\t# specifies the mouse button which may be used to raise a popup menu, in the same format as\n\t# expected by the maya.cmds.popupMenu() command.\n\t#\n\t# replaceExistingMenu :\n\t# determines whether we add the menu as a submenu, or overwrite the contents of the existing menu\n\t# (if the parent is a menu)\n\t#\n\t# keepCallback :\n\t# For use in cases where we want to extend an existing menu without overriding the postMenu callback.\n\t# Overriding the callback can break Maya in specific cases, such as the VP right click context menu.\n\tdef __init__( self, definition, parent, label=\"\", insertAfter=None, radialPosition=None, button = 3, replaceExistingMenu = False, keepCallback=False ) :\n\n\t\tif maya.cmds.window( parent, query=True, exists=True ) or maya.cmds.menuBarLayout( parent, query=True, exists=True ) :\n\t\t\t# parent is a window - we're sticking it in the menubar\n\t\t\tmenu = maya.cmds.menu( label=label, parent=parent, allowOptionBoxes=True, tearOff=True )\n\t\telif maya.cmds.menu( parent, query=True, exists=True ) :\n\t\t\tif keepCallback:\n\t\t\t\tmenu = parent\n\t\t\telif replaceExistingMenu :\n\t\t\t\t# parent is a menu - we're appending to it:\n\t\t\t\tmenu = parent\n\t\t\t\tself.__postMenu( menu, definition )\n\t\t\telse :\n\t\t\t\t# parent is a menu - we're adding a submenu\n\t\t\t\tkw = {}\n\t\t\t\tif not (insertAfter is None) :\n\t\t\t\t\tkw[\"insertAfter\"] = insertAfter\n\t\t\t\tif radialPosition :\n\t\t\t\t\tkw[\"radialPosition\"] = radialPosition\n\t\t\t\tmenu = maya.cmds.menuItem( label=label, parent=parent, tearOff=True, subMenu=True, allowOptionBoxes=True, **kw )\n\t\telse :\n\t\t\t# assume parent is a control which can accept a popup menu\n\t\t\tmenu = maya.cmds.popupMenu( parent=parent, button=button, allowOptionBoxes=True )\n\n\t\tUIElement.__init__( self, menu )\n\n\t\tif not keepCallback:\n\t\t\tmaya.cmds.menu( menu, edit=True, postMenuCommand=functools.partial( self.__postMenu, menu, definition ) )\n\t\telse:\n\t\t\tself._parseDefinition(parent, definition)\n\n\tdef _parseDefinition( self, parent, definition ):\n\n\t\tif callable( definition ) :\n\t\t\tdefinition = definition()\n\n\t\tif not isinstance( 
definition, IECore.MenuDefinition ):\n\t\t\traise IECore.Exception( \"Definition is not a valid IECore.MenuDefinition object.\" )\n\n\t\tallPaths = list(dict(definition.items()).keys())\n\t\trootItemDefinitions = IECore.MenuDefinition( [] )\n\n\t\t# scan definition once and get root item definitions\n\t\tfor path, item in definition.items():\n\t\t\tstrippedPath = path.strip('/')\n\t\t\tif '/' not in strippedPath:\n\t\t\t\trootItemDefinitions.append(path, item)\n\t\t\t\tcontinue\n\n\t\t\t# Implicit root needed, create root item definition\n\t\t\tsuperPath = \"/{}\".format(strippedPath.split(\"/\")[0])\n\t\t\tif not superPath in allPaths:\n\t\t\t\trootItemDefinitions.append(superPath, {})\n\n\t\t# iterate root definitions and create menu items\n\t\tfor path, item in rootItemDefinitions.items():\n\t\t\tname = path.strip('/')\n\n\t\t\t# merge items referencing this root item in their path string\n\t\t\tsubMenuDefinition = definition.reRooted(\"/\" + name + \"/\")\n\t\t\tif subMenuDefinition.size():\n\t\t\t\tif item.subMenu:\n\t\t\t\t\tif callable(item.subMenu):\n\t\t\t\t\t\t# force retrival of definition object now so we can merge menu items\n\t\t\t\t\t\titem.subMenu = item.subMenu()\n\t\t\t\t\titem.subMenu.update(subMenuDefinition)\n\t\t\t\telse:\n\t\t\t\t\titem.subMenu = subMenuDefinition\n\n\t\t\t# get Maya keyword args\n\t\t\tkw = {}\n\n\t\t\tif getattr(item, \"bold\", False):\n\t\t\t\tkw[\"boldFont\"] = True\n\n\t\t\tif getattr(item, \"italic\", False):\n\t\t\t\tkw[\"italicized\"] = True\n\n\t\t\tif getattr(item, \"blindData\", {}):\n\t\t\t\tmayaArgs = item.blindData.get(\"maya\", {})\n\t\t\t\tkw.update(mayaArgs)\n\n\t\t\t# create UI\n\t\t\tif item.divider:\n\t\t\t\tmenuItem = maya.cmds.menuItem(parent=parent, divider=True)\n\t\t\telif item.subMenu:\n\t\t\t\tsubMenu = maya.cmds.menuItem(label=name, subMenu=True, allowOptionBoxes=True, parent=parent, **kw)\n\t\t\t\tmaya.cmds.menu(subMenu, edit=True, postMenuCommand=functools.partial(self.__postMenu, subMenu, item.subMenu))\n\t\t\telse:\n\t\t\t\tactive = item.active\n\t\t\t\tif callable(active):\n\t\t\t\t\tactive = active()\n\n\t\t\t\tchecked = item.checkBox\n\t\t\t\tif callable(checked):\n\t\t\t\t\tchecked = checked()\n\t\t\t\t\tkw[\"checkBox\"] = checked\n\n\t\t\t\tmenuItem = maya.cmds.menuItem(label=name, parent=parent, enable=active, annotation=item.description, **kw)\n\t\t\t\tif item.command:\n\t\t\t\t\tmaya.cmds.menuItem(menuItem, edit=True, command=self.__wrapCallback(item.command))\n\t\t\t\tif item.secondaryCommand:\n\t\t\t\t\toptionBox = maya.cmds.menuItem(optionBox=True, enable=active, command=self.__wrapCallback(item.secondaryCommand), parent=parent)\n\n\tdef __wrapCallback( self, cb ) :\n\n\t\treturn self._createCallback( cb ) if callable( cb ) else cb\n\n\tdef __postMenu( self, parent, definition, *args ) :\n\n\t\tmaya.cmds.menu( parent, edit = True, deleteAllItems = True )\n\t\tself._parseDefinition( parent, definition )\n","repo_name":"ImageEngine/cortex","sub_path":"python/IECoreMaya/Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","stars":510,"dataset":"github-code","pt":"3"} +{"seq_id":"23177570571","text":"import os\nimport time\nfrom pprint import pprint\n\nimport numpy as np\nimport plotly.io as pio\nfrom pymoo.algorithms.moo.nsga2 import NSGA2\nfrom pymoo.algorithms.moo.nsga3 import ReferenceDirectionSurvival\nfrom pymoo.core.population import pop_from_array_or_individual\nfrom pymoo.factory import get_sampling, get_crossover, get_mutation, 
get_problem, get_reference_directions\nfrom pymoo.factory import get_termination\nfrom pymoo.optimize import minimize\n\nfrom src.sds.examples.problems.utestfun import TestFuncs\nfrom src.sds.core.continuation import ContinuationBoxes\nfrom src.sds.core.problem import AutomaticDifferentiationProblem\nfrom src.sds.factory import get_tfun, get_corrector, get_predictor\nfrom src.sds.utils.indicators import hypervolume\nfrom src.utils.plot import plot_boxes_3d, plot_points_4d, plot_traces\n\npio.renderers.default = \"browser\"\nif __name__ == '__main__':\n # %%\n cfg = {'save_plots': False,\n 'problem_name': 'zdt2'\n }\n\n solve_moea = False\n plot_boxes = False\n\n tfun = TestFuncs()\n testfun = tfun.get(cfg['problem_name'])\n problem = get_problem(\"dtlz2\", n_var=30, n_obj=3)\n problem = AutomaticDifferentiationProblem(problem)\n\n # %%\n if solve_moea:\n algorithm = NSGA2(\n pop_size=750,\n n_offsprings=750,\n sampling=get_sampling(\"real_random\"),\n crossover=get_crossover(\"real_sbx\", prob=0.9, eta=15),\n mutation=get_mutation(\"real_pm\", eta=20),\n eliminate_duplicates=True\n )\n\n termination = get_termination(\"n_gen\", 100)\n\n res = minimize(problem,\n algorithm,\n termination,\n seed=1,\n save_history=True,\n verbose=True)\n\n X = res.X\n F = res.F\n\n ixs = np.argsort(F[:, 0])\n pf_moea = F[ixs]\n ps_moea = X[ixs]\n\n # %%\n x0 = np.array([0.38726374, 0.50142701, 0.48666312, 0.5119315, 0.50954044, 0.4951654,\n 0.50899326, 0.51179125, 0.48861775, 0.52403041, 0.47975356, 0.52147994,\n 0.46698674, 0.50230151, 0.50465705, 0.51797048, 0.50098518, 0.48723901,\n 0.5261419, 0.51706457, 0.47990969, 0.49896529, 0.47078924, 0.48367379,\n 0.50657368, 0.48315766, 0.47689119, 0.52006725, 0.50089009, 0.49232051])\n f0 = problem.evaluate(x0)\n\n f_limits = np.array([[0., 1.]] * 3)\n\n predictor = get_predictor('no_adjustment',\n problem=problem,\n eps=1e-2)\n\n corrector = get_corrector('delta_criteria',\n problem=problem,\n t_fun=get_tfun('weighted_dominance',\n problem=problem,\n eps=1e-5,\n maxiter=50),\n a_fun=lambda a, dx: a,\n step_eps=9e-2,\n in_pf_eps=1e-2,\n maxiter=100)\n\n ds_cont = ContinuationBoxes(problem=problem,\n predictor=predictor,\n corrector=corrector,\n limits=f_limits,\n tree_h_coef=0.8,\n # h_max=14,\n step_eps=9e-2,\n history=True,\n )\n\n t0 = time.time()\n problem.n_f_evals, problem.n_grad_evals = 0, 0\n results = ds_cont.run(x0)\n print('time: {} s'.format(round(time.time() - t0, 4)))\n print('f(x) evals: {}, dx(x) evals: {}'.format(problem.n_f_evals, problem.n_grad_evals))\n pprint(results['evaluations'])\n\n # %% Hypervolume\n points = ds_cont.boxes.get_points()\n print('{} solutions found, {} best solutions found'.format(len(points['fx']),\n len(points['fx'][points['best_ix'], :])))\n hv_moea = hypervolume(pf_moea, ref=[2., 2., 2.]) if solve_moea else np.nan\n hv_pop = hypervolume(results['population']['F'], ref=[2., 2., 2.])\n hv_ref = hypervolume(testfun['pf'], ref=[2., 2., 2.])\n print('ref hv: {}, pop hv: {}, moea hv:{}'.format(hv_ref, hv_pop, hv_moea))\n\n # %%\n file_path = os.path.join('../img')\n\n if plot_boxes:\n boxes_edges = ds_cont.boxes.get_boxes()\n box_fig = plot_boxes_3d(boxes_edges, return_fig=True)\n\n pts_fig = plot_points_4d(points['fx'],\n secondary=points['c'],\n mask=points['best_ix'],\n return_fig=True,\n markersize=5)\n\n plot_traces([box_fig.data, pts_fig.data])\n\n plot_points_4d(points['fx'],\n secondary=points['c'],\n mask=points['best_ix'],\n markersize=5,\n only_best=False,\n title='Continuation method Pareto Front')\n\n if 
solve_moea:\n plot_points_4d(pf_moea, markersize=7, title='MOEA method Pareto Front')\n\n # %%\n F = points['fx']\n ref_dirs = get_reference_directions(\"energy\", problem.n_obj, 500)\n pop = pop_from_array_or_individual(F)\n pop.set('F', F)\n rds = ReferenceDirectionSurvival(ref_dirs)\n niching = rds.do(problem, pop, n_survive=500)\n opt = rds.opt.get('F')\n plot_points_4d(opt,\n markersize=8,\n title='Continuation method Pareto Front with nitching')\n","repo_name":"samlopezruiz/stochastic-directed-search-moo","sub_path":"src/sds/examples/zdtl2_3obj.py","file_name":"zdtl2_3obj.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"35866082683","text":"from typing import Dict\r\n\r\nimport httpx\r\nfrom botbuilder.core import MessageFactory, UserState\r\nfrom botbuilder.dialogs import (\r\n ComponentDialog,\r\n WaterfallDialog,\r\n WaterfallStepContext,\r\n DialogTurnResult,\r\n ListStyle)\r\nfrom botbuilder.dialogs.choices import Choice\r\nfrom botbuilder.dialogs.prompts import (\r\n ChoicePrompt,\r\n PromptOptions,\r\n)\r\n\r\nfrom bot_data import banks\r\nfrom bot_data.language import Language\r\nfrom config import BotConfig\r\nfrom data_models import UserPreferences\r\nfrom resources import ResponseMsgs\r\n\r\n\r\nclass UserPreferencesDialog(ComponentDialog):\r\n \"\"\"Represents a dialog which is used for gathering user profile information and preferences.\"\"\"\r\n # Nested dialog ids ids\r\n WATERFALL_DIALOG_ID = 'waterfall'\r\n LANG_CHOICE_PROMPT_ID = 'lang_prompt'\r\n BANK_CHOICE_PROMPT_ID = 'bank_prompt'\r\n\r\n FB_USER_SETTINGS_URL = 'https://graph.facebook.com/v5.0/me/custom_user_settings'\r\n\r\n def __init__(self, dialog_id: str, user_state: UserState):\r\n super(UserPreferencesDialog, self).__init__(dialog_id)\r\n\r\n self.user_preferences_accessor = user_state.create_property(\"user_preferences\")\r\n\r\n # Setup dialogs\r\n self.add_dialog(\r\n WaterfallDialog(\r\n UserPreferencesDialog.WATERFALL_DIALOG_ID,\r\n [\r\n self.language_step,\r\n self.bank_step,\r\n self.summary_step\r\n ],\r\n )\r\n )\r\n self.add_dialog(ChoicePrompt(UserPreferencesDialog.LANG_CHOICE_PROMPT_ID))\r\n self.add_dialog(ChoicePrompt(UserPreferencesDialog.BANK_CHOICE_PROMPT_ID))\r\n\r\n self.initial_dialog_id = UserPreferencesDialog.WATERFALL_DIALOG_ID\r\n\r\n async def language_step(\r\n self, step_context: WaterfallStepContext\r\n ) -> DialogTurnResult:\r\n \"\"\"Runs a prompt to get user preferred language.\r\n\r\n :param step_context:\r\n :return DialogTurnResult:\r\n \"\"\"\r\n return await step_context.prompt(\r\n UserPreferencesDialog.LANG_CHOICE_PROMPT_ID,\r\n PromptOptions(\r\n prompt=MessageFactory.text(ResponseMsgs.get('choose_language')),\r\n style=ListStyle.hero_card,\r\n choices=[Choice(ResponseMsgs.get('lang_hy')),\r\n Choice(ResponseMsgs.get('lang_en')),\r\n Choice(ResponseMsgs.get('lang_ru'))],\r\n ),\r\n )\r\n\r\n async def bank_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\r\n \"\"\"Runs a prompt to get user preferred bank.\r\n\r\n :param step_context:\r\n :return DialogTurnResult:\r\n \"\"\"\r\n # Get the previous dialog result, i.e. 
user chosen language, and parse it to get a Language() object\r\n prev_dialog_result = step_context.result.value\r\n if prev_dialog_result == ResponseMsgs.get('lang_en'):\r\n chosen_lang = Language.en\r\n elif prev_dialog_result == ResponseMsgs.get('lang_ru'):\r\n chosen_lang = Language.ru\r\n else:\r\n chosen_lang = Language.hy\r\n\r\n step_context.values[\"lang\"] = chosen_lang\r\n\r\n # Run a prompt to get user preferred bank.\r\n return await step_context.prompt(\r\n UserPreferencesDialog.BANK_CHOICE_PROMPT_ID,\r\n PromptOptions(\r\n prompt=MessageFactory.text(ResponseMsgs.get('choose_bank', lang=chosen_lang)),\r\n style=ListStyle.hero_card,\r\n choices=[Choice(getattr(bank, f'{chosen_lang.value}_name')) for bank in banks.BANKS]\r\n ),\r\n )\r\n\r\n async def summary_step(\r\n self, step_context: WaterfallStepContext\r\n ) -> DialogTurnResult:\r\n \"\"\"Saves the user preferences got from previous dialogs and sends to user a confirmation message.\r\n\r\n :param step_context:\r\n :return DialogTurnResult:\r\n \"\"\"\r\n # Get results from the previous dialogs\r\n prev_dialog_result = step_context.result.value\r\n chosen_lang = step_context.values[\"lang\"]\r\n\r\n # Get the chosen bank id\r\n chosen_bank_id = banks.get_by_name(prev_dialog_result).id_\r\n\r\n # Get the current profile object from user state.\r\n user_preferences = await self.user_preferences_accessor.get(\r\n step_context.context, UserPreferences\r\n )\r\n\r\n # Save the user profile data in memory so that it will be saved in db later.\r\n # This will be saved to the storage later.\r\n user_preferences.lang = chosen_lang\r\n user_preferences.bank = chosen_bank_id\r\n\r\n # Construct OK message\r\n msg = MessageFactory.text(ResponseMsgs.get('prefs_saved', chosen_lang, bank=prev_dialog_result))\r\n # Attach a keyboard-menu to the message for telegram\r\n if step_context.context.activity.channel_id == 'telegram':\r\n msg.channel_data = UserPreferencesDialog.__construct_telegram_keyboard(chosen_lang)\r\n\r\n # Send OK message\r\n await step_context.context.send_activity(msg)\r\n\r\n # Construct a keyboard-menu for facebook\r\n if step_context.context.activity.channel_id == 'facebook':\r\n # noinspection PyBroadException\r\n try:\r\n await UserPreferencesDialog.__make_fb_keyboard(step_context.context.activity.from_property.id,\r\n chosen_lang)\r\n except Exception:\r\n pass\r\n\r\n return await step_context.end_dialog()\r\n\r\n @staticmethod\r\n def __construct_telegram_keyboard(lang: Language) -> Dict:\r\n \"\"\"Constructs and returns a JSON (dict) obj for telegram keyboard.\"\"\"\r\n all_usd = ResponseMsgs.get('all_usd', lang)\r\n all_rur = ResponseMsgs.get('all_rur', lang)\r\n banks_msg = ResponseMsgs.get('banks', lang)\r\n my_bank = ResponseMsgs.get('my_bank', lang)\r\n preferences = ResponseMsgs.get('preferences', lang)\r\n\r\n return {\r\n \"method\": \"sendMessage\",\r\n \"parameters\": {\r\n \"reply_markup\": {\r\n \"keyboard\": [\r\n [{\"text\": all_usd}, {\"text\": all_rur}],\r\n [{\"text\": banks_msg}, {\"text\": my_bank}],\r\n [{\"text\": preferences}]\r\n ],\r\n \"resize_keyboard\": True\r\n }\r\n }\r\n }\r\n\r\n @staticmethod\r\n async def __make_fb_keyboard(psid: int, lang: Language):\r\n \"\"\"Makes an http request to FB API to make a persistent menu appear for the given user.\"\"\"\r\n all_usd = ResponseMsgs.get('all_usd', lang)\r\n all_rur = ResponseMsgs.get('all_rur', lang)\r\n my_bank = ResponseMsgs.get('my_bank', lang)\r\n\r\n async with httpx.AsyncClient() as client:\r\n await 
client.post(UserPreferencesDialog.FB_USER_SETTINGS_URL, params={\r\n \"access_token\": BotConfig.FB_TOKEN\r\n }, json={\r\n \"psid\": psid,\r\n \"persistent_menu\": [\r\n {\r\n \"locale\": \"default\",\r\n \"call_to_actions\": [\r\n {\"type\": \"postback\", \"title\": all_usd, \"payload\": all_usd},\r\n {\"type\": \"postback\", \"title\": all_rur, \"payload\": all_rur},\r\n {\"type\": \"postback\", \"title\": my_bank, \"payload\": my_bank},\r\n ]\r\n }\r\n ]\r\n })\r\n","repo_name":"aramayyes/Dram-Chatbot","sub_path":"src/dialogs/user_preferences_dialog.py","file_name":"user_preferences_dialog.py","file_ext":"py","file_size_in_byte":7504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41296071041","text":"import logging\nimport shutil\nfrom typing import Union\n\nimport datasets\nfrom datasets import Dataset, DatasetDict, DownloadMode, Split\nfrom evaluator.app.core.dataset_formatter import DatasetFormatter\nfrom evaluator.app.settings import DatasetHandlerSettings\n\nlogger = logging.getLogger(__name__)\n\n\nclass DatasetDoesNotExistError(Exception):\n \"\"\"Raised when a dataset is requested which does not exist either locally or on huggingface.\"\"\"\n\n def __init__(self, dataset: str) -> None:\n msg = f'The requested dataset \"{dataset}\" does not exist locally and was not found on huggingface.'\n super().__init__(msg)\n\n\nclass DatasetHandler:\n def __init__(self) -> None:\n self.settings = DatasetHandlerSettings()\n self.dataset_formatter = DatasetFormatter()\n\n def get_dataset(self, dataset_name: str) -> Union[Dataset, DatasetDict]:\n \"\"\"\n Retrieves the validation-set of the specified dataset.\n Datasets will always be loaded locally from disk if possible.\n If the dataset does not exist locally, it will be downloaded and saved.\n\n Args:\n dataset_name (str): Name of the dataset on huggingface.\n\n Returns:\n :class:`Dataset` or :class:`DatasetDict`:\n - If `dataset_name` is a path of a dataset directory: the dataset requested.\n - If `dataset_name` is a path of a dataset dict directory: a ``datasets.DatasetDict`` with each split.\n \"\"\"\n try:\n dataset = datasets.load_from_disk(self.settings.dataset_dir + dataset_name)\n except FileNotFoundError:\n logger.debug(\n f'Dataset \"{dataset_name}\" not found locally. Going to download it.'\n )\n try:\n dataset = self.download_dataset(dataset_name)\n except FileNotFoundError:\n raise DatasetDoesNotExistError(dataset_name)\n\n return dataset\n\n def remove_dataset(self, dataset_name: str) -> bool:\n \"\"\"\n Deletes the specified dataset from local storage.\n\n Args:\n dataset_name (str): Name of the dataset on huggingface.\n\n Returns:\n :bool: True if the dataset-file was removed. 
False if it did not exist.\n \"\"\"\n logger.info(f'Removing dataset \"{dataset_name}\" from local storage.')\n try:\n shutil.rmtree(self.settings.dataset_dir + dataset_name)\n return True\n except FileNotFoundError:\n return False\n\n def download_dataset(self, dataset_name: str) -> Union[Dataset, DatasetDict]:\n \"\"\"\n (Re-)Downloads the validation-set of the specified dataset and saves it locally (even if it already exists locally).\n\n Args:\n dataset_name (str): Name of the dataset on huggingface.\n\n Returns:\n :class:`Dataset` or :class:`DatasetDict`:\n - If `dataset_name` is a path of a dataset directory: the dataset requested.\n - If `dataset_name` is a path of a dataset dict directory: a ``datasets.DatasetDict`` with each split.\n \"\"\"\n dataset = datasets.load_dataset(\n dataset_name,\n split=Split.VALIDATION,\n download_mode=DownloadMode.FORCE_REDOWNLOAD,\n )\n dataset.save_to_disk(self.settings.dataset_dir + dataset_name)\n return dataset\n\n def to_generic_format(self, dataset, dataset_metadata, sample_ids=None):\n \"\"\"\n Formats the given dataset into an universal (per skill-type) format.\n\n Args:\n dataset (:class:`Dataset` or :class:`DatasetDict`:): Dataset from huggingface.\n dataset_metadata (dict): Metadata about the dataset.\n sample_ids (List[str]): Optional list of sample-ids. When specified, only samples with the respective ids will be returned.\n Otherwise all samples in the dataset will be returned.\n The returned samples will be in the same order as the passed sample-ids.\n\n Returns: List of samples in the dataset in an universal format depending on the datasets skill-type.\n \"\"\"\n return self.dataset_formatter.format(dataset, dataset_metadata, sample_ids)\n","repo_name":"UKP-SQuARE/square-core","sub_path":"evaluator/evaluator/app/core/dataset_handler.py","file_name":"dataset_handler.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"3"} +{"seq_id":"3331659795","text":"from typing import List\nclass Solution:\n def partitionLabels(self, s: str) -> List[int]:\n L = len(s)\n mp = {s[i]:i for i in range(L)}\n ans, i = [],0\n while i < L:\n j,end = i+1,mp[s[i]]\n while j < end:\n end = max(end,mp[s[j]])\n j += 1\n ans.append(end-i+1)\n i = end+1\n return ans","repo_name":"agnik2019/Pythonista","sub_path":"data_structure_algorithm/greedy/763_partition_labels.py","file_name":"763_partition_labels.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71992553683","text":"import os\nimport unittest\nimport numpy as np\nfrom aavf import color\n\n\nclass TestColor(unittest.TestCase):\n\n def test_basic_color_amount(self):\n image = np.zeros(512 * 512 * 3).reshape(512, 512, 3)\n label_path = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"file\", \"maxcolor.csv\")\n labels = np.genfromtxt(label_path, dtype=int)\n expected = np.zeros(11, dtype=float)\n expected[0] = 100\n eq = color.basic_color_amount(image, labels) == expected\n self.assertTrue(np.all(eq))\n\n def test_hsv_statistics(self):\n image = np.zeros(512 * 512 * 3).reshape(512, 512, 3)\n self.assertEquals(color.hsv_statistics(image), (0., 0., 0., 0., 0.))\n\n def test_pleasure_arousal_dominance(self):\n self.assertEquals(color.pleasure_arousal_dominance(0, 0), (0, 0, 
0))\n","repo_name":"DanielMorales9/AAVF","sub_path":"test/color/color_test.py","file_name":"color_test.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29393205711","text":"import json\n\n\ndef test_scannet(model):\n from dkm.benchmarks import ScanNetBenchmark\n model.h_resized = 480\n model.w_resized = 640\n model.upsample_preds = False\n scannet_benchmark = ScanNetBenchmark(\"data/scannet\")\n scannet_results = []\n scannet_results.append(scannet_benchmark.benchmark(model))\n json.dump(scannet_results, open(f\"results/scannet_{model.name}.json\", \"w\"))\n\nif __name__ == \"__main__\":\n from dkm.models.model_zoo import DKMv3_indoor\n model = DKMv3_indoor()\n test_scannet(model)","repo_name":"Parskatt/DKM","sub_path":"experiments/dkm/test_DKMv3_indoor.py","file_name":"test_DKMv3_indoor.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":268,"dataset":"github-code","pt":"3"} +{"seq_id":"30189160483","text":"import uuid\nfrom http import HTTPStatus\n\nimport allure\nimport pytest\n\nfrom ..mylib import Assert\n\nPERSONS_URL = \"/persons/\"\n\n\n@allure.title(\"Get person by unknown id\")\n@pytest.mark.asyncio\nasync def test_get_unknown_person(make_get_request):\n unknown_uuid = str(uuid.uuid4())\n response = await make_get_request(f\"{PERSONS_URL}{unknown_uuid}/\")\n Assert(response).status_code(HTTPStatus.OK).body_len_is(0)\n\n\n@allure.title(\"Get list of films by unknown person id\")\n@pytest.mark.asyncio\nasync def test_get_person_film_details(make_get_request):\n unknown_uuid = str(uuid.uuid4())\n response = await make_get_request(f\"{PERSONS_URL}{unknown_uuid}/film\")\n Assert(response).status_code(HTTPStatus.OK).body_len_is(0)\n\n\n@allure.title(\"Search unkown person\")\n@pytest.mark.asyncio\nasync def test_search_unknown_person(make_get_request):\n response = await make_get_request(\n f\"{PERSONS_URL}search/\", {\"query\": \"UnknownPerson\"}\n )\n Assert(response).status_code(HTTPStatus.OK).body_len_is(0)\n\n\n@allure.title(\"Paging persons by invalid page\")\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n (\"page_number\", \"page_size\"),\n (\n (0, 1),\n (1, 0),\n (-1, 1),\n (1, -1),\n ),\n)\nasync def test_paging_persons_by_invalid_page(make_get_request, page_number, page_size):\n response = await make_get_request(\n f\"{PERSONS_URL}search/\", {\"page[number]\": page_number, \"page[size]\": page_size}\n )\n Assert(response).status_code(HTTPStatus.UNPROCESSABLE_ENTITY)\n","repo_name":"gooncharova-practicum/async_API_online_cinema","sub_path":"tests/functional/src/test_person.py","file_name":"test_person.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2088234119","text":"\"\"\"\nCount Good Nodes in Binary Tree\n\nGiven a binary tree root, a node X in the tree is named good if in the path from root to X there are no nodes with a value greater than X.\n\nReturn the number of good nodes in the binary tree.\n\nExample 1:\n[Image: https://assets.leetcode.com/uploads/2020/04/02/test_sample_1.png]\nInput: root = [3,1,4,3,null,1,5]\nOutput: 4\nExplanation: Nodes in blue are good.\nRoot Node (3) is always a good node.\nNode 4 -> (3,4) is the maximum value in the path starting from the root.\nNode 5 -> (3,4,5) is the maximum value in the path\nNode 3 -> (3,1,3) is the maximum value in the path.\n\nExample 2:\n[Image: 
https://assets.leetcode.com/uploads/2020/04/02/test_sample_2.png]\nInput: root = [3,3,null,4,2]\nOutput: 3\nExplanation: Node 2 -> (3, 3, 2) is not good, because \"3\" is higher than it.\n\nExample 3:\nInput: root = [1]\nOutput: 1\nExplanation: Root is considered as good.\n\"\"\"\n\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n \ndef count_larger(root, maxSeen=-99999): \n if root is None:\n return 0\n if root.val >= maxSeen:\n maxSeen = root.val\n return 1 + count_larger(root.left, maxSeen) + count_larger(root.right, maxSeen)\n else: \n return count_larger(root.left, maxSeen) + count_larger(root.right, maxSeen)\n\n\n\ndef testing():\n # Example 1:\n # [Image: https://assets.leetcode.com/uploads/2020/04/02/test_sample_1.png]\n # Input: root = [3,1,4,3,null,1,5]\n # Output: 4\n\n # [3,1,4,3,null,1,5]\n tree = TreeNode(3)\n tree.left = TreeNode(1)\n tree.right = TreeNode(4)\n tree.left.left = TreeNode(3)\n tree.right.left = TreeNode(1)\n tree.right.right = TreeNode(5)\n expected = 4\n print(count_larger(tree) == expected)\n\n #Example 2:\n #[Image: https://assets.leetcode.com/uploads/2020/04/02/test_sample_2.png]\n # Input: root = [3,3,null,4,2]\n # Output: 3\n # Explanation: Node 2 -> (3, 3, 2) is not good, because \"3\" is higher than it.\n tree = TreeNode(3)\n tree.left = TreeNode(3)\n tree.left.left = TreeNode(4)\n tree.left.right = TreeNode(2)\n expected = 3\n print(count_larger(tree) == expected)\n\n # Example 3:\n # Input: root = [1]\n # Output: 1\n # Explanation: Root is considered as good.\n tree = TreeNode(1)\n expected = 1\n print(count_larger(tree) == expected)\n\n\ntesting()\n\n\"\"\"\nhttps://leetcode.com/problems/count-good-nodes-in-binary-tree/\n\"\"\"\n \n","repo_name":"markbroich/data_science","sub_path":"coding_challenges_example_solutions/count_good_nodes_bt/count_good_nodes_bt.py","file_name":"count_good_nodes_bt.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33115468839","text":"from abc import ABC, abstractmethod\nimport math\nimport geopandas as gpd\nfrom shapely.geometry import Polygon, Point\nfrom shapely.ops import cascaded_union\nfrom ..utils import constants, utils\nimport numpy as np\n\n\nclass TessellationTilers:\n\n def __init__(self):\n\n self._tilers = {}\n\n def register_tiler(self, key, tiler):\n\n self._tilers[key] = tiler\n\n def create(self, key, **kwargs):\n\n tiler = self._tilers.get(key)\n\n if not tiler:\n raise ValueError(key)\n\n return tiler(**kwargs)\n\n def get(self, service_id, **kwargs):\n\n return self.create(service_id, **kwargs)\n\n\ntiler = TessellationTilers()\n\n\nclass TessellationTiler(ABC):\n\n @abstractmethod\n def __call__(self, **kwargs):\n pass\n\n @abstractmethod\n def _build(self, **kwargs):\n pass\n\n\nclass VoronoiTessellationTiler(TessellationTiler):\n\n def __init__(self):\n\n super().__init__()\n self._instance = None\n\n def __call__(self, points, crs=constants.DEFAULT_CRS):\n\n if not self._instance:\n\n if isinstance(points, gpd.GeoDataFrame):\n\n if not all(isinstance(x, Point) for x in points.geometry):\n raise ValueError(\"Not valid points object. 
Accepted type is GeoDataFrame.\")\n\n return self._build(points, crs)\n\n def _build(self, points, crs=constants.DEFAULT_CRS):\n\n gdf = gpd.GeoDataFrame(points.copy(), crs=crs)\n gdf.loc[:, constants.TILE_ID] = list(np.arange(0, len(gdf)))\n\n # Convert TILE_ID to have str type\n gdf[constants.TILE_ID] = gdf[constants.TILE_ID].astype('str')\n\n return gdf[[constants.TILE_ID, 'geometry']]\n\n\n# Register the builder\ntiler.register_tiler('voronoi', VoronoiTessellationTiler())\n\n\nclass SquaredTessellationTiler(TessellationTiler):\n\n def __init__(self):\n\n super().__init__()\n self._instance = None\n\n def __call__(self, base_shape, meters=50, crs=constants.DEFAULT_CRS, window_size=None):\n if not self._instance:\n\n if isinstance(base_shape, str):\n # Try to obatain the base shape from OSM\n base_shape = utils.bbox_from_name(base_shape)\n\n elif isinstance(base_shape, gpd.GeoDataFrame) or isinstance(base_shape, gpd.GeoSeries):\n\n if all(isinstance(x, Point) for x in base_shape.geometry):\n # Build a base shape that contains all the points in the given geodataframe\n base_shape = utils.bbox_from_points(base_shape)\n\n elif all(isinstance(x, Polygon) for x in base_shape.geometry) and len(base_shape) > 1:\n\n # Merge all the polygons\n polygons = base_shape.geometry.values\n base_shape = gpd.GeoSeries(cascaded_union(polygons), crs=base_shape.crs)\n\n #elif not all(isinstance(x, Polygon) for x in base_shape.geometry):\n # raise ValueError(\"Not valid geometry object. Accepted types are Point and Polygon.\")\n else:\n raise ValueError(\"Not valid base_shape object. Accepted types are str, GeoDataFrame or GeoSeries.\")\n\n return self._build(base_shape, meters, crs)\n\n def _build(self, base_shape, meters, crs=constants.DEFAULT_CRS):\n\n # We work with the universal crs epsg:3857\n tmp_crs = constants.UNIVERSAL_CRS\n tmp_crs['units'] = 'm'\n\n area = base_shape.to_crs(tmp_crs)\n\n # Obtain the boundaries of the geometry\n boundaries = dict({'min_x': area.total_bounds[0],\n 'min_y': area.total_bounds[1],\n 'max_x': area.total_bounds[2],\n 'max_y': area.total_bounds[3]})\n\n # Find number of square for each side\n x_squares = int(math.ceil(math.fabs(boundaries['max_x'] - boundaries['min_x']) / meters))\n y_squares = int(math.ceil(math.fabs(boundaries['min_y'] - boundaries['max_y']) / meters))\n\n # Placeholder for the polygon\n polygons = []\n\n shape = area.unary_union\n\n # Iterate on the x\n for i in range(0, x_squares):\n\n # Increment x\n x1 = boundaries['min_x'] + (meters * i)\n x2 = boundaries['min_x'] + (meters * (i + 1))\n\n # Iterate on y\n for j in range(0, y_squares):\n\n # Increment y\n y1 = boundaries['min_y'] + (meters * j)\n y2 = boundaries['min_y'] + (meters * (j + 1))\n polygon_desc = {}\n\n # Create shape (polygon)\n p = Polygon([(x1, y1), (x1, y2), (x2, y2), (x2, y1)])\n\n # s = boros_shape.intersection(p)\n s = shape.intersects(p)\n\n # if(s.area>0):\n if s:\n\n # shape.intersection(p) ATTENTION! 
If you use the intersection than the crawler fails!\n polygon_desc['geometry'] = p\n polygons.append(polygon_desc)\n\n gdf = gpd.GeoDataFrame(polygons, crs=tmp_crs)\n gdf = gdf.reset_index().rename(columns={\"index\": constants.TILE_ID})\n\n # Convert TILE_ID to have str type\n gdf[constants.TILE_ID] = gdf[constants.TILE_ID].astype('str')\n\n return gdf.to_crs(crs)\n\n\n# Register the builder\ntiler.register_tiler('squared', SquaredTessellationTiler())","repo_name":"jtapanes21/RADGIS","sub_path":"RADGIS/tessellation/tilers.py","file_name":"tilers.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28644123942","text":"\nimport datetime\nimport numpy\nimport tqdm\nimport random\nfrom matplotlib import pyplot as plt \nfrom project import Project\nimport recommend\nimport model_icf_tittle_simil\nimport model_ucf\nimport model_ucf_star_simil\nimport model_random\n\ndef extract_recommend_dataset(project, end_time=None):\n ret = []\n issues = project.get_issues()\n users = project.get_users()\n\n issues_meta = ({}, {})\n users_meta = ({}, {})\n\n for i, each_issue in enumerate(issues):\n issues_meta[0][i] = each_issue['id']\n issues_meta[1][each_issue['id']] = i\n\n for i, each_user in enumerate(users):\n users_meta[0][i] = each_user['user_name']\n users_meta[1][each_user['user_name']] = i\n\n # reorder activities by time\n for i, each_issue in enumerate(issues):\n if len(each_issue) <= 0:\n continue\n \n counter = {}\n for each_timeline in each_issue['timeline']:\n if each_timeline == {}:\n continue\n if 'time' not in each_timeline.keys():\n continue\n \n if end_time is not None and datetime.datetime.strptime(each_timeline['time'], '%Y-%m-%dT%H:%M:%S') > end_time:\n continue\n\n if 'author' not in each_timeline.keys():\n continue\n \n if each_timeline['author'] in counter.keys():\n counter[each_timeline['author']] += 1\n else:\n counter[each_timeline['author']] = 1\n\n for each_user, each_count in counter.items():\n if each_user is not None:\n ret.append((users_meta[1][each_user], i, each_count/len(each_issue['timeline'])))\n\n return ret\n\ndef dataset_split(dataset):\n length = len(dataset)\n split_pos = int(length*0.6)\n random.shuffle(dataset)\n return dataset[:split_pos], dataset[split_pos:]\n\ndef valid(model, test_data, k):\n # test data acceleration structure\n test_users = {}\n test_issues = {}\n for each_data in test_data:\n # add to test users\n if each_data[0] not in test_users.keys():\n test_users[each_data[0]] = []\n test_users[each_data[0]].append(each_data)\n\n # add to test issues\n if each_data[1] not in test_issues.keys():\n test_issues[each_data[1]] = []\n test_issues[each_data[1]].append(each_data)\n\n # valid\n counter = {}\n precision_sum = 0.0\n recall_sum = 0.0\n for each_user, data_list in test_users.items():\n recommend_result = model.recommend(each_user, k)\n counter[each_user] = 0\n for user_id, item_id, rate in data_list:\n if item_id in recommend_result:\n counter[user_id] += 1\n precision_sum += counter[each_user] / k\n recall_sum += counter[each_user] / len(data_list)\n precision = precision_sum / len(test_users)\n recall = recall_sum / len(test_users)\n\n correct_user_count = 0\n for each_user, correct_count in counter.items():\n if correct_count != 0:\n correct_user_count += 1\n\n print(f'accuracy: {correct_user_count}/{len(test_users)}, {correct_user_count/len(test_users)}')\n print(f'precision: {precision}')\n print(f'recall: {recall}')\n return 
correct_user_count/len(test_users), recall, precision\n\n\ndef run_evaluation():\n project = Project()\n project.load('./data/gumtree', '/GumTreeDiff/gumtree')\n # project.load('./data/deno', '/denoland/deno')\n # project.load('./data/FreeRDP', '/FreeRDP/FreeRDP')\n # project.load('./data/jd-gui', '/java-decompiler/jd-gui')\n print(f'user number: {len(project.get_users())}')\n print(f'issue number: {len(project.get_issues())}')\n dataset = extract_recommend_dataset(project)\n\n # filter for new comer\n '''\n filter_counter = {}\n for each_data in dataset:\n if each_data[0] not in filter_counter.keys():\n filter_counter[each_data[0]] = []\n filter_counter[each_data[0]].append(each_data)\n \n filtered_dataset = []\n for each_user, data_list in filter_counter.items():\n if len(data_list) > 2:\n for each_data in data_list:\n filtered_dataset.append(each_data)\n\n dataset = filtered_dataset\n '''\n\n # model = RandomRecommendModel(project)\n # model = model_star_based.StarBasedRecommendModel(project)\n # model = model_issue_similarity_based.IssueSimilarityBasedRecommendModel(project)\n model = model_ucf.UCFRecommendModel(project)\n \n x_list = [3, 5, 10, 20, 40]\n acc_plot = []\n prec_plot = []\n recall_plot = []\n\n for each_k in x_list:\n accuracy_mean = []\n precision_mean = []\n recall_mean = []\n for ex_count in range(10):\n # split data\n train_data, test_data = dataset_split(dataset)\n # train\n model.train(train_data)\n # valid\n accuracy, recall, precision = valid(model, test_data, each_k)\n accuracy_mean.append(accuracy)\n precision_mean.append(precision)\n recall_mean.append(recall)\n\n acc_plot.append(numpy.mean(accuracy_mean))\n recall_plot.append(numpy.mean(recall_mean))\n prec_plot.append(numpy.mean(precision_mean))\n print(f'mean: acc:{numpy.mean(accuracy_mean)} recall:{numpy.mean(recall_mean)} prec:{numpy.mean(precision_mean)}')\n \n plt.title(\"Issue recommend\")\n plt.xlabel(\"recommend top k issues\")\n plt.ylabel(\"accuracy or recall or precision\")\n plt.plot(x_list,acc_plot,\"b\", label='accuracy')\n plt.plot(x_list,recall_plot,\"r\", label='recall')\n plt.plot(x_list,prec_plot,\"y\", label='precision')\n plt.legend(loc='upper left')\n plt.show()\n\n\ndef run_all_method_evaluation():\n project = Project()\n # project.load('./data/gumtree', '/GumTreeDiff/gumtree')\n project.load('./data/deno', '/denoland/deno')\n print(f'user number: {len(project.get_users())}')\n print(f'issue number: {len(project.get_issues())}')\n dataset = extract_recommend_dataset(project)\n\n models = []\n models.append(model_random.RandomRecommendModel(project))\n models.append(model_ucf_star_simil.UCFStarSimilRecommendModel(project))\n models.append(model_icf_tittle_simil.ICFTittleSimilRecommendModel(project))\n models.append(model_ucf.UCFRecommendModel(project))\n \n x_list = [3, 5, 10, 20, 40]\n \n plt.title(\"Issue recommend\")\n plt.xlabel(\"recommend top k issues\")\n plt.ylabel(\"accuracy\")\n\n for each_model in models:\n acc_plot = []\n for each_k in x_list:\n accuracy_mean = []\n for ex_count in range(10):\n # split data\n train_data, test_data = dataset_split(dataset)\n # train\n each_model.train(train_data)\n # valid\n accuracy, recall, precision = valid(each_model, test_data, each_k)\n accuracy_mean.append(accuracy)\n\n acc_plot.append(numpy.mean(accuracy_mean))\n print(f'mean: acc:{numpy.mean(accuracy_mean)}')\n plt.plot(x_list,acc_plot, label=each_model.get_name())\n plt.legend(loc='upper left')\n plt.show()\n\n\nif __name__ == \"__main__\":\n # run_evaluation()\n 
run_all_method_evaluation()\n \n\n","repo_name":"jstzwj/issue-crawler","sub_path":"run_recommend.py","file_name":"run_recommend.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30188350884","text":"import pytest\r\nimport os\r\nfrom task1 import get_file_list, merge_sorted_files\r\n\r\n@pytest.fixture\r\ndef test_files(tmp_path=''):\r\n file1 = os.path.join(tmp_path, \"file1.txt\")\r\n file2 = os.path.join(tmp_path, \"file2.txt\")\r\n file3 = os.path.join(tmp_path, \"file3.txt\")\r\n\r\n with open(file1, 'w') as f:\r\n f.write(\"1\\n3\\n5\")\r\n\r\n with open(file2, \"w\") as f:\r\n f.write(\"2\\n4\\n6\")\r\n\r\n with open(file3, \"w\") as f:\r\n f.write(\"7\\n8\\n9\")\r\n\r\n return [str(file1), str(file2), str(file3)]\r\n\r\ndef test1(tmp_path=\"D:/Курс/Homework7/pytests/Pytest1_data\"):\r\n test_file1 = os.path.join(tmp_path, 'testFile1.txt')\r\n with open(test_file1, 'w') as f:\r\n f.write(\"1\\n3\\n5\")\r\n test_file2 = os.path.join(tmp_path, 'testFile2.txt')\r\n with open(test_file2, 'w') as f:\r\n f.write(\"2\\n4\\n6\\n\")\r\n assert list(merge_sorted_files([\r\n test_file1,\r\n test_file2\r\n ])) == [1, 2, 3, 4, 5, 6]\r\n\r\ndef test_get_file_list(test_files):\r\n assert get_file_list(test_files[0]) == test_files\r\n\r\n\r\ndef test_merge_sorted_files(test_files):\r\n expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n assert list(merge_sorted_files(test_files)) == expected_output","repo_name":"NikitaRim/Samael","sub_path":"Homework7/pytests/pytest1.py","file_name":"pytest1.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29254589927","text":"from dataclasses import dataclass\nfrom copy import deepcopy\nfrom collections.abc import Callable\nfrom maps.garden.libs_server.build import build_utils\nfrom maps.garden.sdk.module_traits.module_traits import SortOption\n\n\ndef is_sorted(arr: list, key: Callable):\n return all(\n key(arr[i]) <= key(arr[i+1])\n for i in range(len(arr) - 1)\n )\n\n\ndef test_objects_sorting_by_property():\n @dataclass\n class ValueHolder:\n properties: dict\n\n ref_objs = [ValueHolder(properties={\"value\": i}) for i in range(3, 0, -1)]\n for sort_option in [\n SortOption(sort_by_property=\"value\"),\n SortOption(key_pattern=\"{0.properties[value]}\")\n ]:\n sort_option.reverse = False\n objs = deepcopy(ref_objs)\n build_utils.sort_using_options(objs, [sort_option])\n assert is_sorted(objs, lambda x: x.properties[\"value\"])\n\n assert not is_sorted(ref_objs, lambda x: x.properties[\"value\"])\n\n sort_option.reverse = True\n build_utils.sort_using_options(objs, [sort_option])\n assert is_sorted(objs, lambda x: -x.properties[\"value\"])\n\n\ndef test_objects_sorting_by_build_id_simple():\n @dataclass\n class SimpleId:\n build_id: int\n\n ref_objs = [SimpleId(build_id=i) for i in range(3, 0, -1)]\n sort_option = SortOption(sort_by_build_id=True)\n sort_option.reverse = False\n objs = deepcopy(ref_objs)\n build_utils.sort_using_options(objs, [sort_option])\n assert is_sorted(objs, lambda x: x.build_id)\n\n assert not is_sorted(ref_objs, lambda x: x.build_id)\n\n sort_option.reverse = True\n build_utils.sort_using_options(objs, [sort_option])\n assert is_sorted(objs, lambda x: -x.build_id)\n\n\ndef test_objects_sorting_by_build_id_version():\n @dataclass\n class Version:\n build_id: int\n\n @dataclass\n class SimpleId:\n version: Version\n\n ref_objs = 
[SimpleId(version=Version(build_id=i)) for i in range(3, 0, -1)]\n sort_option = SortOption(sort_by_build_id=True)\n sort_option.reverse = False\n objs = deepcopy(ref_objs)\n build_utils.sort_using_options(objs, [sort_option])\n assert is_sorted(objs, lambda x: x.version.build_id)\n\n assert not is_sorted(ref_objs, lambda x: x.version.build_id)\n\n sort_option.reverse = True\n build_utils.sort_using_options(objs, [sort_option])\n assert is_sorted(objs, lambda x: -x.version.build_id)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_build_utils.py","file_name":"test_build_utils.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35048615898","text":"from pocsuite3.api import (\n Output,\n POCBase,\n POC_CATEGORY,\n register_poc,\n requests,\n VUL_TYPE,\n)\n\n# 关于类的继承\nclass NetMizerpoc(POCBase):\n # fofa语句: title=\"NetMizer 日志管理系统\"\n vulID = \"2022071701\" # ssvid ID 如果是提交漏洞的同时提交 PoC,则写成 0\n version = \"1\" # 默认为1\n author = \"derian\" # PoC作者的大名\n vulDate = \"2021-9-24\" # 漏洞公开的时间,不知道就写今天\n createDate = \"2022-07-17\" # 编写 PoC 的日期\n updateDate = \"2022-07-17\" # PoC 更新的时间,默认和编写时间一样\n references = [\"http://wiki.xypbk.com/Web%E5%AE%89%E5%85%A8/NetMizer%E6%97%A5%E5%BF%97%E7%AE%A1%E7%90%86%E7%B3%BB%E7%BB%9F/NetMizer%20%E6%97%A5%E5%BF%97%E7%AE%A1%E7%90%86%E7%B3%BB%E7%BB%9F%20%E7%9B%AE%E5%BD%95%E9%81%8D%E5%8E%86.md\"] # 漏洞地址来源,0day不用写\n name = \"NetMizer 日志管理系统 目录遍历漏洞 PoC\" # PoC 名称\n appPowerLink = \"\" # 漏洞厂商主页地址\n appName = \"NetMizer\" # 漏洞应用名称\n appVersion = \"all\" # 漏洞影响版本\n vulType = VUL_TYPE.WEAK_PASSWORD # 弱口令 漏洞类型,类型参考见 漏洞类型规范表\n category = POC_CATEGORY.EXPLOITS.WEBAPP # poc对应的产品类型 web的\n # samples = [] # 测试样列,就是用 PoC 测试成功的网站\n # install_requires = [] # PoC 第三方模块依赖,请尽量不要使用第三方模块,必要时请参考《PoC第三方模块依赖说明》填写\n desc = \"\"\"NetMizer 日志管理系统存在目录遍历漏洞,可获取敏感信息\"\"\" # 漏洞简要描述\n pocDesc = \"\"\"直接在加上\\data目录\"\"\" # POC用法描述\n\n def _check(self):\n # 漏洞验证代码\n import requests\n url = self.url.strip()\n full_url = f\"{url}/data/\"\n headers = {\"Cache-Control\": \"max-age=0\", \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\n \"Accept-Encoding\": \"gzip, deflate\", \"Accept-Language\": \"zh-CN,zh;q=0.9\", \"Connection\": \"close\"}\n result = []\n try:\n res = requests.get(url=full_url, headers=headers, verify=False, timeout=9)\n # 判断是否存在漏洞\n if res.status_code == 200 and \"Index of /data\" in res.text:\n result.append(url)\n except Exception as e:\n print(e)\n # 跟 try ... 
except是一对的 , 最终一定会执行里面的代码 , 不管你是否报错\n finally:\n return result\n\n def _verify(self):\n # 验证模式 , 调用检查代码 ,\n result = {}\n res = self._check() # res就是返回的结果列表\n if res:\n result['VerifyInfo'] = {}\n result['VerifyInfo']['Info'] = self.name\n result['VerifyInfo']['vul_url'] = self.url\n result['VerifyInfo']['vul_detail'] = self.desc\n return self.parse_verify(result)\n\n def _attack(self):\n # 攻击模式 , 就是在调用验证模式\n return self._verify()\n\n def parse_verify(self, result):\n # 解析认证 , 输出\n output = Output(self)\n # 根据result的bool值判断是否有漏洞\n if result:\n output.success(result)\n else:\n output.fail('Target is not vulnerable')\n return output\n\n# 你会发现没有shell模式 , 对吧 ,根本就用不到\n\n# 其他自定义的可添加的功能函数\ndef other_fuc():\n pass\n\n# 其他工具函数\ndef other_utils_func():\n pass\n\n\n# 注册 DemoPOC 类 , 必须要注册\nregister_poc(NetMizerpoc)","repo_name":"Yuema013/pocsuite_poc","sub_path":"NetMizer_poc.py","file_name":"NetMizer_poc.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29554446838","text":"from collections import defaultdict\n\nwith open('inputs.txt') as f:\n rules, mine, nearby = f.read().strip().split('\\n\\n')\n\n# 내 티켓 정리\nmine = mine.split('\\n')[1].split(',')\n\nrules = rules.split('\\n')\nfields =[]\n\n# field range 따로 구해서 모아두기\nranges = []\nran_d = defaultdict(set)\nfor rule in rules:\n field, ran = rule.split(': ')\n fields.append(field)\n rans = ran.split(' or ')\n for r in rans:\n start, end = [int(i) for i in r.split('-')]\n this_range = range(start, end+1)\n ranges.append(this_range)\n ran_d[field] |= set(this_range) # 합집합으로 업데이트\n\n# nearby tickets 분리\ntickets = [i for i in nearby.split('\\n')][1:]\nfor i in range(len(tickets)):\n tickets[i] = [int(num) for num in tickets[i].split(',')]\n\n# 올바른 범위 숫자 set S\nS = set()\nfor r in ranges:\n S = S.union(set(r))\n\n# 티켓 솎아내기\ntickets = [ticket for ticket in tickets if set(ticket).issubset(S)]\n\n# 티켓 필드별로 정리\nt_d = defaultdict(list)\nfor c in range(len(tickets[0])):\n t_d[c] = [ticket[c] for ticket in tickets]\n\n# 유효성 검사\ncandidates = defaultdict(set)\nfor column, data in t_d.items():\n for field, field_ran in ran_d.items():\n # print(data, '###', field, field_ran, end='')\n if set(data).issubset(field_ran):\n # print('가능', end='')\n candidates[column].add(field)\n # print('')\n\nans = defaultdict(str)\nwhile fields:\n for c,f in candidates.items():\n if len(set(fields) & f) == 1:\n ans[c] = (set(fields)&f).pop()\n fields.remove(ans[c])\n\nvalue = 1\nfor k,v in ans.items():\n if v.startswith('departure'):\n value *= int(mine[k])\n\nprint(value)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"SeongIkKim/ALtudy","sub_path":"AdventOfCode/2020/day16/16_2.py","file_name":"16_2.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30514384102","text":"from typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport scipy\nfrom mmcv.transforms import BaseTransform, KeyMapper\nfrom mmengine.dataset import Compose\nfrom packaging import version as pv\nfrom scipy.stats import mode\nfrom torch.nn.modules.utils import _pair\n\nfrom mmaction.registry import TRANSFORMS\nfrom .loading import DecordDecode, DecordInit\nfrom .processing import _combine_quadruple\n\nif pv.parse(scipy.__version__) < pv.parse('1.11.0'):\n get_mode = mode\nelse:\n from functools import partial\n get_mode = partial(mode, keepdims=True)\n\n\n@TRANSFORMS.register_module()\nclass 
DecompressPose(BaseTransform):\n \"\"\"Load Compressed Pose.\n\n Required Keys:\n\n - frame_inds\n - total_frames\n - keypoint\n - anno_inds (optional)\n\n Modified Keys:\n\n - keypoint\n - frame_inds\n\n Added Keys:\n\n - keypoint_score\n - num_person\n\n Args:\n squeeze (bool): Whether to remove frames with no human pose.\n Defaults to True.\n max_person (int): The max number of persons in a frame. Defaults to 10.\n \"\"\"\n\n def __init__(self, squeeze: bool = True, max_person: int = 10) -> None:\n self.squeeze = squeeze\n self.max_person = max_person\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"Perform the pose decoding.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n required_keys = ['total_frames', 'frame_inds', 'keypoint']\n for k in required_keys:\n assert k in results\n\n total_frames = results['total_frames']\n frame_inds = results.pop('frame_inds')\n keypoint = results['keypoint']\n\n if 'anno_inds' in results:\n frame_inds = frame_inds[results['anno_inds']]\n keypoint = keypoint[results['anno_inds']]\n\n assert np.all(np.diff(frame_inds) >= 0), \\\n 'frame_inds should be monotonical increasing'\n\n def mapinds(inds):\n uni = np.unique(inds)\n map_ = {x: i for i, x in enumerate(uni)}\n inds = [map_[x] for x in inds]\n return np.array(inds, dtype=np.int16)\n\n if self.squeeze:\n frame_inds = mapinds(frame_inds)\n total_frames = np.max(frame_inds) + 1\n\n results['total_frames'] = total_frames\n\n num_joints = keypoint.shape[1]\n num_person = get_mode(frame_inds)[-1][0]\n\n new_kp = np.zeros([num_person, total_frames, num_joints, 2],\n dtype=np.float16)\n new_kpscore = np.zeros([num_person, total_frames, num_joints],\n dtype=np.float16)\n nperson_per_frame = np.zeros([total_frames], dtype=np.int16)\n\n for frame_ind, kp in zip(frame_inds, keypoint):\n person_ind = nperson_per_frame[frame_ind]\n new_kp[person_ind, frame_ind] = kp[:, :2]\n new_kpscore[person_ind, frame_ind] = kp[:, 2]\n nperson_per_frame[frame_ind] += 1\n\n if num_person > self.max_person:\n for i in range(total_frames):\n nperson = nperson_per_frame[i]\n val = new_kpscore[:nperson, i]\n score_sum = val.sum(-1)\n\n inds = sorted(range(nperson), key=lambda x: -score_sum[x])\n new_kpscore[:nperson, i] = new_kpscore[inds, i]\n new_kp[:nperson, i] = new_kp[inds, i]\n num_person = self.max_person\n results['num_person'] = num_person\n\n results['keypoint'] = new_kp[:num_person]\n results['keypoint_score'] = new_kpscore[:num_person]\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'squeeze={self.squeeze}, '\n f'max_person={self.max_person})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass GeneratePoseTarget(BaseTransform):\n \"\"\"Generate pseudo heatmaps based on joint coordinates and confidence.\n\n Required Keys:\n\n - keypoint\n - keypoint_score (optional)\n - img_shape\n\n Added Keys:\n\n - imgs (optional)\n - heatmap_imgs (optional)\n\n Args:\n sigma (float): The sigma of the generated gaussian map.\n Defaults to 0.6.\n use_score (bool): Use the confidence score of keypoints as the maximum\n of the gaussian maps. Defaults to True.\n with_kp (bool): Generate pseudo heatmaps for keypoints.\n Defaults to True.\n with_limb (bool): Generate pseudo heatmaps for limbs. At least one of\n 'with_kp' and 'with_limb' should be True. 
Defaults to False.\n skeletons (tuple[tuple]): The definition of human skeletons.\n Defaults to ``((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7),\n (7, 9), (0, 6), (6, 8), (8, 10), (5, 11), (11, 13),\n (13, 15), (6, 12), (12, 14), (14, 16), (11, 12))``,\n which is the definition of COCO-17p skeletons.\n double (bool): Output both original heatmaps and flipped heatmaps.\n Defaults to False.\n left_kp (tuple[int]): Indexes of left keypoints, which is used when\n flipping heatmaps. Defaults to (1, 3, 5, 7, 9, 11, 13, 15),\n which is left keypoints in COCO-17p.\n right_kp (tuple[int]): Indexes of right keypoints, which is used when\n flipping heatmaps. Defaults to (2, 4, 6, 8, 10, 12, 14, 16),\n which is right keypoints in COCO-17p.\n left_limb (tuple[int]): Indexes of left limbs, which is used when\n flipping heatmaps. Defaults to (0, 2, 4, 5, 6, 10, 11, 12),\n which is left limbs of skeletons we defined for COCO-17p.\n right_limb (tuple[int]): Indexes of right limbs, which is used when\n flipping heatmaps. Defaults to (1, 3, 7, 8, 9, 13, 14, 15),\n which is right limbs of skeletons we defined for COCO-17p.\n scaling (float): The ratio to scale the heatmaps. Defaults to 1.\n \"\"\"\n\n def __init__(self,\n sigma: float = 0.6,\n use_score: bool = True,\n with_kp: bool = True,\n with_limb: bool = False,\n skeletons: Tuple[Tuple[int]] = ((0, 1), (0, 2), (1, 3),\n (2, 4), (0, 5), (5, 7),\n (7, 9), (0, 6), (6, 8),\n (8, 10), (5, 11), (11, 13),\n (13, 15), (6, 12), (12, 14),\n (14, 16), (11, 12)),\n double: bool = False,\n left_kp: Tuple[int] = (1, 3, 5, 7, 9, 11, 13, 15),\n right_kp: Tuple[int] = (2, 4, 6, 8, 10, 12, 14, 16),\n left_limb: Tuple[int] = (0, 2, 4, 5, 6, 10, 11, 12),\n right_limb: Tuple[int] = (1, 3, 7, 8, 9, 13, 14, 15),\n scaling: float = 1.) -> None:\n\n self.sigma = sigma\n self.use_score = use_score\n self.with_kp = with_kp\n self.with_limb = with_limb\n self.double = double\n\n # an auxiliary const\n self.eps = 1e-4\n\n assert self.with_kp or self.with_limb, (\n 'At least one of \"with_limb\" '\n 'and \"with_kp\" should be set as True.')\n self.left_kp = left_kp\n self.right_kp = right_kp\n self.skeletons = skeletons\n self.left_limb = left_limb\n self.right_limb = right_limb\n self.scaling = scaling\n\n def generate_a_heatmap(self, arr: np.ndarray, centers: np.ndarray,\n max_values: np.ndarray) -> None:\n \"\"\"Generate pseudo heatmap for one keypoint in one frame.\n\n Args:\n arr (np.ndarray): The array to store the generated heatmaps.\n Shape: img_h * img_w.\n centers (np.ndarray): The coordinates of corresponding keypoints\n (of multiple persons). Shape: M * 2.\n max_values (np.ndarray): The max values of each keypoint. 
Shape: M.\n \"\"\"\n\n sigma = self.sigma\n img_h, img_w = arr.shape\n\n for center, max_value in zip(centers, max_values):\n if max_value < self.eps:\n continue\n\n mu_x, mu_y = center[0], center[1]\n st_x = max(int(mu_x - 3 * sigma), 0)\n ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)\n st_y = max(int(mu_y - 3 * sigma), 0)\n ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)\n x = np.arange(st_x, ed_x, 1, np.float32)\n y = np.arange(st_y, ed_y, 1, np.float32)\n\n # if the keypoint not in the heatmap coordinate system\n if not (len(x) and len(y)):\n continue\n y = y[:, None]\n\n patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2)\n patch = patch * max_value\n arr[st_y:ed_y, st_x:ed_x] = \\\n np.maximum(arr[st_y:ed_y, st_x:ed_x], patch)\n\n def generate_a_limb_heatmap(self, arr: np.ndarray, starts: np.ndarray,\n ends: np.ndarray, start_values: np.ndarray,\n end_values: np.ndarray) -> None:\n \"\"\"Generate pseudo heatmap for one limb in one frame.\n\n Args:\n arr (np.ndarray): The array to store the generated heatmaps.\n Shape: img_h * img_w.\n starts (np.ndarray): The coordinates of one keypoint in the\n corresponding limbs. Shape: M * 2.\n ends (np.ndarray): The coordinates of the other keypoint in the\n corresponding limbs. Shape: M * 2.\n start_values (np.ndarray): The max values of one keypoint in the\n corresponding limbs. Shape: M.\n end_values (np.ndarray): The max values of the other keypoint\n in the corresponding limbs. Shape: M.\n \"\"\"\n\n sigma = self.sigma\n img_h, img_w = arr.shape\n\n for start, end, start_value, end_value in zip(starts, ends,\n start_values,\n end_values):\n value_coeff = min(start_value, end_value)\n if value_coeff < self.eps:\n continue\n\n min_x, max_x = min(start[0], end[0]), max(start[0], end[0])\n min_y, max_y = min(start[1], end[1]), max(start[1], end[1])\n\n min_x = max(int(min_x - 3 * sigma), 0)\n max_x = min(int(max_x + 3 * sigma) + 1, img_w)\n min_y = max(int(min_y - 3 * sigma), 0)\n max_y = min(int(max_y + 3 * sigma) + 1, img_h)\n\n x = np.arange(min_x, max_x, 1, np.float32)\n y = np.arange(min_y, max_y, 1, np.float32)\n\n if not (len(x) and len(y)):\n continue\n\n y = y[:, None]\n x_0 = np.zeros_like(x)\n y_0 = np.zeros_like(y)\n\n # distance to start keypoints\n d2_start = ((x - start[0])**2 + (y - start[1])**2)\n\n # distance to end keypoints\n d2_end = ((x - end[0])**2 + (y - end[1])**2)\n\n # the distance between start and end keypoints.\n d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2)\n\n if d2_ab < 1:\n self.generate_a_heatmap(arr, start[None], start_value[None])\n continue\n\n coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab\n\n a_dominate = coeff <= 0\n b_dominate = coeff >= 1\n seg_dominate = 1 - a_dominate - b_dominate\n\n position = np.stack([x + y_0, y + x_0], axis=-1)\n projection = start + np.stack([coeff, coeff], axis=-1) * (\n end - start)\n d2_line = position - projection\n d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2\n d2_seg = (\n a_dominate * d2_start + b_dominate * d2_end +\n seg_dominate * d2_line)\n\n patch = np.exp(-d2_seg / 2. 
/ sigma**2)\n patch = patch * value_coeff\n\n arr[min_y:max_y, min_x:max_x] = \\\n np.maximum(arr[min_y:max_y, min_x:max_x], patch)\n\n def generate_heatmap(self, arr: np.ndarray, kps: np.ndarray,\n max_values: np.ndarray) -> None:\n \"\"\"Generate pseudo heatmap for all keypoints and limbs in one frame (if\n needed).\n\n Args:\n arr (np.ndarray): The array to store the generated heatmaps.\n Shape: V * img_h * img_w.\n kps (np.ndarray): The coordinates of keypoints in this frame.\n Shape: M * V * 2.\n max_values (np.ndarray): The confidence score of each keypoint.\n Shape: M * V.\n \"\"\"\n\n if self.with_kp:\n num_kp = kps.shape[1]\n for i in range(num_kp):\n self.generate_a_heatmap(arr[i], kps[:, i], max_values[:, i])\n\n if self.with_limb:\n for i, limb in enumerate(self.skeletons):\n start_idx, end_idx = limb\n starts = kps[:, start_idx]\n ends = kps[:, end_idx]\n\n start_values = max_values[:, start_idx]\n end_values = max_values[:, end_idx]\n self.generate_a_limb_heatmap(arr[i], starts, ends,\n start_values, end_values)\n\n def gen_an_aug(self, results: Dict) -> np.ndarray:\n \"\"\"Generate pseudo heatmaps for all frames.\n\n Args:\n results (dict): The dictionary that contains all info of a sample.\n\n Returns:\n np.ndarray: The generated pseudo heatmaps.\n \"\"\"\n\n all_kps = results['keypoint'].astype(np.float32)\n kp_shape = all_kps.shape\n\n if 'keypoint_score' in results:\n all_kpscores = results['keypoint_score']\n else:\n all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)\n\n img_h, img_w = results['img_shape']\n\n # scale img_h, img_w and kps\n img_h = int(img_h * self.scaling + 0.5)\n img_w = int(img_w * self.scaling + 0.5)\n all_kps[..., :2] *= self.scaling\n\n num_frame = kp_shape[1]\n num_c = 0\n if self.with_kp:\n num_c += all_kps.shape[2]\n if self.with_limb:\n num_c += len(self.skeletons)\n\n ret = np.zeros([num_frame, num_c, img_h, img_w], dtype=np.float32)\n\n for i in range(num_frame):\n # M, V, C\n kps = all_kps[:, i]\n # M, C\n kpscores = all_kpscores[:, i] if self.use_score else \\\n np.ones_like(all_kpscores[:, i])\n\n self.generate_heatmap(ret[i], kps, kpscores)\n return ret\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"Generate pseudo heatmaps based on joint coordinates and confidence.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n heatmap = self.gen_an_aug(results)\n key = 'heatmap_imgs' if 'imgs' in results else 'imgs'\n\n if self.double:\n indices = np.arange(heatmap.shape[1], dtype=np.int64)\n left, right = (self.left_kp, self.right_kp) if self.with_kp else (\n self.left_limb, self.right_limb)\n for l, r in zip(left, right): # noqa: E741\n indices[l] = r\n indices[r] = l\n heatmap_flip = heatmap[..., ::-1][:, indices]\n heatmap = np.concatenate([heatmap, heatmap_flip])\n results[key] = heatmap\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'sigma={self.sigma}, '\n f'use_score={self.use_score}, '\n f'with_kp={self.with_kp}, '\n f'with_limb={self.with_limb}, '\n f'skeletons={self.skeletons}, '\n f'double={self.double}, '\n f'left_kp={self.left_kp}, '\n f'right_kp={self.right_kp}, '\n f'left_limb={self.left_limb}, '\n f'right_limb={self.right_limb}, '\n f'scaling={self.scaling})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass PoseCompact(BaseTransform):\n \"\"\"Convert the coordinates of keypoints to make it more compact.\n Specifically, it first find a tight bounding box that surrounds all joints\n in each 
frame, then we expand the tight box by a given padding ratio. For\n example, if 'padding == 0.25', then the expanded box has unchanged center,\n and 1.25x width and height.\n\n Required Keys:\n\n - keypoint\n - img_shape\n\n Modified Keys:\n\n - img_shape\n - keypoint\n\n Added Keys:\n\n - crop_quadruple\n\n Args:\n padding (float): The padding size. Defaults to 0.25.\n threshold (int): The threshold for the tight bounding box. If the width\n or height of the tight bounding box is smaller than the threshold,\n we do not perform the compact operation. Defaults to 10.\n hw_ratio (float | tuple[float] | None): The hw_ratio of the expanded\n box. Float indicates the specific ratio and tuple indicates a\n ratio range. If set as None, it means there is no requirement on\n hw_ratio. Defaults to None.\n allow_imgpad (bool): Whether to allow expanding the box outside the\n image to meet the hw_ratio requirement. Defaults to True.\n \"\"\"\n\n def __init__(self,\n padding: float = 0.25,\n threshold: int = 10,\n hw_ratio: Optional[Union[float, Tuple[float]]] = None,\n allow_imgpad: bool = True) -> None:\n\n self.padding = padding\n self.threshold = threshold\n if hw_ratio is not None:\n hw_ratio = _pair(hw_ratio)\n\n self.hw_ratio = hw_ratio\n\n self.allow_imgpad = allow_imgpad\n assert self.padding >= 0\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"Convert the coordinates of keypoints to make it more compact.\n\n Args:\n results (dict): The resulting dict to be modified and passed\n to the next transform in pipeline.\n \"\"\"\n img_shape = results['img_shape']\n h, w = img_shape\n kp = results['keypoint']\n\n # Make NaN zero\n kp[np.isnan(kp)] = 0.\n kp_x = kp[..., 0]\n kp_y = kp[..., 1]\n\n min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)\n min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)\n max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)\n max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)\n\n # The compact area is too small\n if max_x - min_x < self.threshold or max_y - min_y < self.threshold:\n return results\n\n center = ((max_x + min_x) / 2, (max_y + min_y) / 2)\n half_width = (max_x - min_x) / 2 * (1 + self.padding)\n half_height = (max_y - min_y) / 2 * (1 + self.padding)\n\n if self.hw_ratio is not None:\n half_height = max(self.hw_ratio[0] * half_width, half_height)\n half_width = max(1 / self.hw_ratio[1] * half_height, half_width)\n\n min_x, max_x = center[0] - half_width, center[0] + half_width\n min_y, max_y = center[1] - half_height, center[1] + half_height\n\n # hot update\n if not self.allow_imgpad:\n min_x, min_y = int(max(0, min_x)), int(max(0, min_y))\n max_x, max_y = int(min(w, max_x)), int(min(h, max_y))\n else:\n min_x, min_y = int(min_x), int(min_y)\n max_x, max_y = int(max_x), int(max_y)\n\n kp_x[kp_x != 0] -= min_x\n kp_y[kp_y != 0] -= min_y\n\n new_shape = (max_y - min_y, max_x - min_x)\n results['img_shape'] = new_shape\n\n # the order is x, y, w, h (in [0, 1]), a tuple\n crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.))\n new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w,\n (max_y - min_y) / h)\n crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple)\n results['crop_quadruple'] = crop_quadruple\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}(padding={self.padding}, '\n f'threshold={self.threshold}, '\n f'hw_ratio={self.hw_ratio}, '\n f'allow_imgpad={self.allow_imgpad})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass PreNormalize3D(BaseTransform):\n \"\"\"PreNormalize 
for NTURGB+D 3D keypoints (x, y, z).\n\n PreNormalize3D first subtracts the coordinates of each joint\n from the coordinates of the 'spine' (joint #1 in ntu) of the first person\n in the first frame. Subsequently, it performs a 3D rotation to fix the Z\n axis parallel to the 3D vector from the 'hip' (joint #0) and the 'spine'\n (joint #1) and the X axis toward the 3D vector from the 'right shoulder'\n (joint #8) and the 'left shoulder' (joint #4). Codes adapted from\n https://github.com/lshiwjx/2s-AGCN.\n\n Required Keys:\n\n - keypoint\n - total_frames (optional)\n\n Modified Keys:\n\n - keypoint\n\n Added Keys:\n\n - body_center\n\n Args:\n zaxis (list[int]): The target Z axis for the 3D rotation.\n Defaults to ``[0, 1]``.\n xaxis (list[int]): The target X axis for the 3D rotation.\n Defaults to ``[8, 4]``.\n align_spine (bool): Whether to perform a 3D rotation to\n align the spine. Defaults to True.\n align_shoulder (bool): Whether to perform a 3D rotation\n to align the shoulder. Defaults to True.\n align_center (bool): Whether to align the body center.\n Defaults to True.\n \"\"\"\n\n def __init__(self,\n zaxis: List[int] = [0, 1],\n xaxis: List[int] = [8, 4],\n align_spine: bool = True,\n align_shoulder: bool = True,\n align_center: bool = True) -> None:\n self.zaxis = zaxis\n self.xaxis = xaxis\n self.align_center = align_center\n self.align_spine = align_spine\n self.align_shoulder = align_shoulder\n\n def unit_vector(self, vector: np.ndarray) -> np.ndarray:\n \"\"\"Returns the unit vector of the vector.\"\"\"\n return vector / np.linalg.norm(vector)\n\n def angle_between(self, v1: np.ndarray, v2: np.ndarray) -> float:\n \"\"\"Returns the angle in radians between vectors 'v1' and 'v2'.\"\"\"\n if np.abs(v1).sum() < 1e-6 or np.abs(v2).sum() < 1e-6:\n return 0\n v1_u = self.unit_vector(v1)\n v2_u = self.unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n\n def rotation_matrix(self, axis: np.ndarray, theta: float) -> np.ndarray:\n \"\"\"Returns the rotation matrix associated with counterclockwise\n rotation about the given axis by theta radians.\"\"\"\n if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6:\n return np.eye(3)\n axis = np.asarray(axis)\n axis = axis / np.sqrt(np.dot(axis, axis))\n a = np.cos(theta / 2.0)\n b, c, d = -axis * np.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`PreNormalize3D`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n skeleton = results['keypoint']\n total_frames = results.get('total_frames', skeleton.shape[1])\n\n M, T, V, C = skeleton.shape\n assert T == total_frames\n if skeleton.sum() == 0:\n return results\n\n index0 = [\n i for i in range(T) if not np.all(np.isclose(skeleton[0, i], 0))\n ]\n\n assert M in [1, 2]\n if M == 2:\n index1 = [\n i for i in range(T)\n if not np.all(np.isclose(skeleton[1, i], 0))\n ]\n if len(index0) < len(index1):\n skeleton = skeleton[:, np.array(index1)]\n skeleton = skeleton[[1, 0]]\n else:\n skeleton = skeleton[:, np.array(index0)]\n else:\n skeleton = skeleton[:, np.array(index0)]\n\n T_new = skeleton.shape[1]\n\n if self.align_center:\n if skeleton.shape[2] == 25:\n main_body_center = skeleton[0, 0, 
1].copy()\n else:\n main_body_center = skeleton[0, 0, -1].copy()\n mask = ((skeleton != 0).sum(-1) > 0)[..., None]\n skeleton = (skeleton - main_body_center) * mask\n\n if self.align_spine:\n joint_bottom = skeleton[0, 0, self.zaxis[0]]\n joint_top = skeleton[0, 0, self.zaxis[1]]\n axis = np.cross(joint_top - joint_bottom, [0, 0, 1])\n angle = self.angle_between(joint_top - joint_bottom, [0, 0, 1])\n matrix_z = self.rotation_matrix(axis, angle)\n skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_z)\n\n if self.align_shoulder:\n joint_rshoulder = skeleton[0, 0, self.xaxis[0]]\n joint_lshoulder = skeleton[0, 0, self.xaxis[1]]\n axis = np.cross(joint_rshoulder - joint_lshoulder, [1, 0, 0])\n angle = self.angle_between(joint_rshoulder - joint_lshoulder,\n [1, 0, 0])\n matrix_x = self.rotation_matrix(axis, angle)\n skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_x)\n\n results['keypoint'] = skeleton\n results['total_frames'] = T_new\n results['body_center'] = main_body_center\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'zaxis={self.zaxis}, '\n f'xaxis={self.xaxis}, '\n f'align_center={self.align_center}, '\n f'align_spine={self.align_spine}, '\n f'align_shoulder={self.align_shoulder})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass PreNormalize2D(BaseTransform):\n \"\"\"Normalize the range of keypoint values.\n\n Required Keys:\n\n - keypoint\n - img_shape (optional)\n\n Modified Keys:\n\n - keypoint\n\n Args:\n img_shape (tuple[int, int]): The resolution of the original video.\n Defaults to ``(1080, 1920)``.\n \"\"\"\n\n def __init__(self, img_shape: Tuple[int, int] = (1080, 1920)) -> None:\n self.img_shape = img_shape\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`PreNormalize2D`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n h, w = results.get('img_shape', self.img_shape)\n results['keypoint'][..., 0] = \\\n (results['keypoint'][..., 0] - (w / 2)) / (w / 2)\n results['keypoint'][..., 1] = \\\n (results['keypoint'][..., 1] - (h / 2)) / (h / 2)\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'img_shape={self.img_shape})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass JointToBone(BaseTransform):\n \"\"\"Convert the joint information to bone information.\n\n Required Keys:\n\n - keypoint\n\n Modified Keys:\n\n - keypoint\n\n Args:\n dataset (str): Define the type of dataset: 'nturgb+d', 'openpose',\n 'coco'. 
Defaults to ``'nturgb+d'``.\n target (str): The target key for the bone information.\n Defaults to ``'keypoint'``.\n \"\"\"\n\n def __init__(self,\n dataset: str = 'nturgb+d',\n target: str = 'keypoint') -> None:\n self.dataset = dataset\n self.target = target\n if self.dataset not in ['nturgb+d', 'openpose', 'coco']:\n raise ValueError(\n f'The dataset type {self.dataset} is not supported')\n if self.dataset == 'nturgb+d':\n self.pairs = [(0, 1), (1, 20), (2, 20), (3, 2), (4, 20), (5, 4),\n (6, 5), (7, 6), (8, 20), (9, 8), (10, 9), (11, 10),\n (12, 0), (13, 12), (14, 13), (15, 14), (16, 0),\n (17, 16), (18, 17), (19, 18), (21, 22), (20, 20),\n (22, 7), (23, 24), (24, 11)]\n elif self.dataset == 'openpose':\n self.pairs = ((0, 0), (1, 0), (2, 1), (3, 2), (4, 3), (5, 1),\n (6, 5), (7, 6), (8, 2), (9, 8), (10, 9), (11, 5),\n (12, 11), (13, 12), (14, 0), (15, 0), (16, 14), (17,\n 15))\n elif self.dataset == 'coco':\n self.pairs = ((0, 0), (1, 0), (2, 0), (3, 1), (4, 2), (5, 0),\n (6, 0), (7, 5), (8, 6), (9, 7), (10, 8), (11, 0),\n (12, 0), (13, 11), (14, 12), (15, 13), (16, 14))\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`JointToBone`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n keypoint = results['keypoint']\n M, T, V, C = keypoint.shape\n bone = np.zeros((M, T, V, C), dtype=np.float32)\n\n assert C in [2, 3]\n for v1, v2 in self.pairs:\n bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :]\n if C == 3 and self.dataset in ['openpose', 'coco']:\n score = (keypoint[..., v1, 2] + keypoint[..., v2, 2]) / 2\n bone[..., v1, 2] = score\n\n results[self.target] = bone\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'dataset={self.dataset}, '\n f'target={self.target})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass ToMotion(BaseTransform):\n \"\"\"Convert the joint information or bone information to corresponding\n motion information.\n\n Required Keys:\n\n - keypoint\n\n Added Keys:\n\n - motion\n\n Args:\n dataset (str): Define the type of dataset: 'nturgb+d', 'openpose',\n 'coco'. 
Defaults to ``'nturgb+d'``.\n source (str): The source key for the joint or bone information.\n Defaults to ``'keypoint'``.\n target (str): The target key for the motion information.\n Defaults to ``'motion'``.\n \"\"\"\n\n def __init__(self,\n dataset: str = 'nturgb+d',\n source: str = 'keypoint',\n target: str = 'motion') -> None:\n self.dataset = dataset\n self.source = source\n self.target = target\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`ToMotion`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n data = results[self.source]\n M, T, V, C = data.shape\n motion = np.zeros_like(data)\n\n assert C in [2, 3]\n motion[:, :T - 1] = np.diff(data, axis=1)\n if C == 3 and self.dataset in ['openpose', 'coco']:\n score = (data[:, :T - 1, :, 2] + data[:, 1:, :, 2]) / 2\n motion[:, :T - 1, :, 2] = score\n\n results[self.target] = motion\n\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'dataset={self.dataset}, '\n f'source={self.source}, '\n f'target={self.target})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass MergeSkeFeat(BaseTransform):\n \"\"\"Merge multi-stream features.\n\n Args:\n feat_list (list[str]): The list of the keys of features.\n Defaults to ``['keypoint']``.\n target (str): The target key for the merged multi-stream information.\n Defaults to ``'keypoint'``.\n axis (int): The axis along which the features will be joined.\n Defaults to -1.\n \"\"\"\n\n def __init__(self,\n feat_list: List[str] = ['keypoint'],\n target: str = 'keypoint',\n axis: int = -1) -> None:\n self.feat_list = feat_list\n self.target = target\n self.axis = axis\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`MergeSkeFeat`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n feats = []\n for name in self.feat_list:\n feats.append(results.pop(name))\n feats = np.concatenate(feats, axis=self.axis)\n results[self.target] = feats\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'feat_list={self.feat_list}, '\n f'target={self.target}, '\n f'axis={self.axis})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass GenSkeFeat(BaseTransform):\n \"\"\"Unified interface for generating multi-stream skeleton features.\n\n Required Keys:\n\n - keypoint\n - keypoint_score (optional)\n\n Args:\n dataset (str): Define the type of dataset: 'nturgb+d', 'openpose',\n 'coco'. 
Defaults to ``'nturgb+d'``.\n feats (list[str]): The list of the keys of features.\n Defaults to ``['j']``.\n axis (int): The axis along which the features will be joined.\n Defaults to -1.\n \"\"\"\n\n def __init__(self,\n dataset: str = 'nturgb+d',\n feats: List[str] = ['j'],\n axis: int = -1) -> None:\n self.dataset = dataset\n self.feats = feats\n self.axis = axis\n ops = []\n if 'b' in feats or 'bm' in feats:\n ops.append(JointToBone(dataset=dataset, target='b'))\n ops.append(KeyMapper(remapping={'keypoint': 'j'}))\n if 'jm' in feats:\n ops.append(ToMotion(dataset=dataset, source='j', target='jm'))\n if 'bm' in feats:\n ops.append(ToMotion(dataset=dataset, source='b', target='bm'))\n ops.append(MergeSkeFeat(feat_list=feats, axis=axis))\n self.ops = Compose(ops)\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`GenSkeFeat`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n if 'keypoint_score' in results and 'keypoint' in results:\n assert self.dataset != 'nturgb+d'\n assert results['keypoint'].shape[\n -1] == 2, 'Only 2D keypoints have keypoint_score. '\n keypoint = results.pop('keypoint')\n keypoint_score = results.pop('keypoint_score')\n results['keypoint'] = np.concatenate(\n [keypoint, keypoint_score[..., None]], -1)\n return self.ops(results)\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'dataset={self.dataset}, '\n f'feats={self.feats}, '\n f'axis={self.axis})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass UniformSampleFrames(BaseTransform):\n \"\"\"Uniformly sample frames from the video.\n\n To sample an n-frame clip from the video. UniformSampleFrames basically\n divide the video into n segments of equal length and randomly sample one\n frame from each segment. To make the testing results reproducible, a\n random seed is set during testing, to make the sampling results\n deterministic.\n\n Required Keys:\n\n - total_frames\n - start_index (optional)\n\n Added Keys:\n\n - frame_inds\n - frame_interval\n - num_clips\n - clip_len\n\n Args:\n clip_len (int): Frames of each sampled output clip.\n num_clips (int): Number of clips to be sampled. Defaults to 1.\n test_mode (bool): Store True when building test or validation dataset.\n Defaults to False.\n seed (int): The random seed used during test time. 
Defaults to 255.\n \"\"\"\n\n def __init__(self,\n clip_len: int,\n num_clips: int = 1,\n test_mode: bool = False,\n seed: int = 255) -> None:\n self.clip_len = clip_len\n self.num_clips = num_clips\n self.test_mode = test_mode\n self.seed = seed\n\n def _get_train_clips(self, num_frames: int, clip_len: int) -> np.ndarray:\n \"\"\"Uniformly sample indices for training clips.\n\n Args:\n num_frames (int): The number of frames.\n clip_len (int): The length of the clip.\n\n Returns:\n np.ndarray: The sampled indices for training clips.\n \"\"\"\n all_inds = []\n for clip_idx in range(self.num_clips):\n if num_frames < clip_len:\n start = np.random.randint(0, num_frames)\n inds = np.arange(start, start + clip_len)\n elif clip_len <= num_frames < 2 * clip_len:\n basic = np.arange(clip_len)\n inds = np.random.choice(\n clip_len + 1, num_frames - clip_len, replace=False)\n offset = np.zeros(clip_len + 1, dtype=np.int32)\n offset[inds] = 1\n offset = np.cumsum(offset)\n inds = basic + offset[:-1]\n else:\n bids = np.array(\n [i * num_frames // clip_len for i in range(clip_len + 1)])\n bsize = np.diff(bids)\n bst = bids[:clip_len]\n offset = np.random.randint(bsize)\n inds = bst + offset\n\n all_inds.append(inds)\n\n return np.concatenate(all_inds)\n\n def _get_test_clips(self, num_frames: int, clip_len: int) -> np.ndarray:\n \"\"\"Uniformly sample indices for testing clips.\n\n Args:\n num_frames (int): The number of frames.\n clip_len (int): The length of the clip.\n\n Returns:\n np.ndarray: The sampled indices for testing clips.\n \"\"\"\n\n np.random.seed(self.seed)\n all_inds = []\n for i in range(self.num_clips):\n if num_frames < clip_len:\n start_ind = i if num_frames < self.num_clips \\\n else i * num_frames // self.num_clips\n inds = np.arange(start_ind, start_ind + clip_len)\n elif clip_len <= num_frames < clip_len * 2:\n basic = np.arange(clip_len)\n inds = np.random.choice(\n clip_len + 1, num_frames - clip_len, replace=False)\n offset = np.zeros(clip_len + 1, dtype=np.int64)\n offset[inds] = 1\n offset = np.cumsum(offset)\n inds = basic + offset[:-1]\n else:\n bids = np.array(\n [i * num_frames // clip_len for i in range(clip_len + 1)])\n bsize = np.diff(bids)\n bst = bids[:clip_len]\n offset = np.random.randint(bsize)\n inds = bst + offset\n\n all_inds.append(inds)\n\n return np.concatenate(all_inds)\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`UniformSampleFrames`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n num_frames = results['total_frames']\n\n if self.test_mode:\n inds = self._get_test_clips(num_frames, self.clip_len)\n else:\n inds = self._get_train_clips(num_frames, self.clip_len)\n\n inds = np.mod(inds, num_frames)\n start_index = results.get('start_index', 0)\n inds = inds + start_index\n\n if 'keypoint' in results:\n kp = results['keypoint']\n assert num_frames == kp.shape[1]\n num_person = kp.shape[0]\n num_persons = [num_person] * num_frames\n for i in range(num_frames):\n j = num_person - 1\n while j >= 0 and np.all(np.abs(kp[j, i]) < 1e-5):\n j -= 1\n num_persons[i] = j + 1\n transitional = [False] * num_frames\n for i in range(1, num_frames - 1):\n if num_persons[i] != num_persons[i - 1]:\n transitional[i] = transitional[i - 1] = True\n if num_persons[i] != num_persons[i + 1]:\n transitional[i] = transitional[i + 1] = True\n inds_int = inds.astype(np.int64)\n coeff = np.array([transitional[i] for i in inds_int])\n inds = (coeff * inds_int + (1 - coeff) * 
inds).astype(np.float32)\n\n results['frame_inds'] = inds.astype(np.int32)\n results['clip_len'] = self.clip_len\n results['frame_interval'] = None\n results['num_clips'] = self.num_clips\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'clip_len={self.clip_len}, '\n f'num_clips={self.num_clips}, '\n f'test_mode={self.test_mode}, '\n f'seed={self.seed})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass PadTo(BaseTransform):\n \"\"\"Sample frames from the video.\n\n To sample an n-frame clip from the video, PadTo samples\n the frames from zero index, and loop or zero pad the frames\n if the length of video frames is less than the value of `length`.\n\n Required Keys:\n\n - keypoint\n - total_frames\n - start_index (optional)\n\n Modified Keys:\n\n - keypoint\n - total_frames\n\n Args:\n length (int): The maximum length of the sampled output clip.\n mode (str): The padding mode. Defaults to ``'loop'``.\n \"\"\"\n\n def __init__(self, length: int, mode: str = 'loop') -> None:\n self.length = length\n assert mode in ['loop', 'zero']\n self.mode = mode\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`PadTo`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n total_frames = results['total_frames']\n assert total_frames <= self.length\n start_index = results.get('start_index', 0)\n inds = np.arange(start_index, start_index + self.length)\n inds = np.mod(inds, total_frames)\n\n keypoint = results['keypoint'][:, inds].copy()\n if self.mode == 'zero':\n keypoint[:, total_frames:] = 0\n\n results['keypoint'] = keypoint\n results['total_frames'] = self.length\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'length={self.length}, '\n f'mode={self.mode})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass PoseDecode(BaseTransform):\n \"\"\"Load and decode pose with given indices.\n\n Required Keys:\n\n - keypoint\n - total_frames (optional)\n - frame_inds (optional)\n - offset (optional)\n - keypoint_score (optional)\n\n Modified Keys:\n\n - keypoint\n - keypoint_score (optional)\n \"\"\"\n\n @staticmethod\n def _load_kp(kp: np.ndarray, frame_inds: np.ndarray) -> np.ndarray:\n \"\"\"Load keypoints according to sampled indexes.\"\"\"\n return kp[:, frame_inds].astype(np.float32)\n\n @staticmethod\n def _load_kpscore(kpscore: np.ndarray,\n frame_inds: np.ndarray) -> np.ndarray:\n \"\"\"Load keypoint scores according to sampled indexes.\"\"\"\n return kpscore[:, frame_inds].astype(np.float32)\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`PoseDecode`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n if 'total_frames' not in results:\n results['total_frames'] = results['keypoint'].shape[1]\n\n if 'frame_inds' not in results:\n results['frame_inds'] = np.arange(results['total_frames'])\n\n if results['frame_inds'].ndim != 1:\n results['frame_inds'] = np.squeeze(results['frame_inds'])\n\n offset = results.get('offset', 0)\n frame_inds = results['frame_inds'] + offset\n\n if 'keypoint_score' in results:\n results['keypoint_score'] = self._load_kpscore(\n results['keypoint_score'], frame_inds)\n\n results['keypoint'] = self._load_kp(results['keypoint'], frame_inds)\n\n return results\n\n def __repr__(self) -> str:\n repr_str = f'{self.__class__.__name__}()'\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass 
MMUniformSampleFrames(UniformSampleFrames):\n \"\"\"Uniformly sample frames from the multi-modal data.\"\"\"\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`MMUniformSampleFrames`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n num_frames = results['total_frames']\n modalities = []\n for modality, clip_len in self.clip_len.items():\n if self.test_mode:\n inds = self._get_test_clips(num_frames, clip_len)\n else:\n inds = self._get_train_clips(num_frames, clip_len)\n inds = np.mod(inds, num_frames)\n results[f'{modality}_inds'] = inds.astype(np.int32)\n modalities.append(modality)\n results['clip_len'] = self.clip_len\n results['frame_interval'] = None\n results['num_clips'] = self.num_clips\n if not isinstance(results['modality'], list):\n # should override\n results['modality'] = modalities\n return results\n\n\n@TRANSFORMS.register_module()\nclass MMDecode(DecordInit, DecordDecode, PoseDecode):\n \"\"\"Decode RGB videos and skeletons.\"\"\"\n\n def __init__(self, io_backend: str = 'disk', **kwargs) -> None:\n DecordInit.__init__(self, io_backend=io_backend, **kwargs)\n DecordDecode.__init__(self)\n self.io_backend = io_backend\n self.kwargs = kwargs\n self.file_client = None\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`MMDecode`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n for mod in results['modality']:\n if results[f'{mod}_inds'].ndim != 1:\n results[f'{mod}_inds'] = np.squeeze(results[f'{mod}_inds'])\n frame_inds = results[f'{mod}_inds']\n if mod == 'RGB':\n if 'filename' not in results:\n results['filename'] = results['frame_dir'] + '.mp4'\n video_reader = self._get_video_reader(results['filename'])\n imgs = self._decord_load_frames(video_reader, frame_inds)\n del video_reader\n results['imgs'] = imgs\n elif mod == 'Pose':\n assert 'keypoint' in results\n if 'keypoint_score' not in results:\n keypoint_score = [\n np.ones(keypoint.shape[:-1], dtype=np.float32)\n for keypoint in results['keypoint']\n ]\n results['keypoint_score'] = np.stack(keypoint_score)\n results['keypoint'] = self._load_kp(results['keypoint'],\n frame_inds)\n results['keypoint_score'] = self._load_kpscore(\n results['keypoint_score'], frame_inds)\n else:\n raise NotImplementedError(\n f'MMDecode: Modality {mod} not supported')\n\n # We need to scale human keypoints to the new image size\n if 'imgs' in results and 'keypoint' in results:\n real_img_shape = results['imgs'][0].shape[:2]\n if real_img_shape != results['img_shape']:\n oh, ow = results['img_shape']\n nh, nw = real_img_shape\n\n assert results['keypoint'].shape[-1] in [2, 3]\n results['keypoint'][..., 0] *= (nw / ow)\n results['keypoint'][..., 1] *= (nh / oh)\n results['img_shape'] = real_img_shape\n results['original_shape'] = real_img_shape\n\n return results\n\n def __repr__(self) -> str:\n repr_str = (f'{self.__class__.__name__}('\n f'io_backend={self.io_backend})')\n return repr_str\n\n\n@TRANSFORMS.register_module()\nclass MMCompact(BaseTransform):\n \"\"\"Convert the coordinates of keypoints and crop the images to make them\n more compact.\n\n Required Keys:\n\n - imgs\n - keypoint\n - img_shape\n\n Modified Keys:\n\n - imgs\n - keypoint\n - img_shape\n\n Args:\n padding (float): The padding size. Defaults to 0.25.\n threshold (int): The threshold for the tight bounding box. 
If the width\n or height of the tight bounding box is smaller than the threshold,\n we do not perform the compact operation. Defaults to 10.\n hw_ratio (float | tuple[float]): The hw_ratio of the expanded\n box. Float indicates the specific ratio and tuple indicates a\n ratio range. If set as None, it means there is no requirement on\n hw_ratio. Defaults to 1.\n allow_imgpad (bool): Whether to allow expanding the box outside the\n image to meet the hw_ratio requirement. Defaults to True.\n \"\"\"\n\n def __init__(self,\n padding: float = 0.25,\n threshold: int = 10,\n hw_ratio: Union[float, Tuple[float]] = 1,\n allow_imgpad: bool = True) -> None:\n\n self.padding = padding\n self.threshold = threshold\n if hw_ratio is not None:\n hw_ratio = _pair(hw_ratio)\n self.hw_ratio = hw_ratio\n self.allow_imgpad = allow_imgpad\n assert self.padding >= 0\n\n def _get_box(self, keypoint: np.ndarray, img_shape: Tuple[int]) -> Tuple:\n \"\"\"Calculate the bounding box surrounding all joints in the frames.\"\"\"\n h, w = img_shape\n\n kp_x = keypoint[..., 0]\n kp_y = keypoint[..., 1]\n\n min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)\n min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)\n max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)\n max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)\n\n # The compact area is too small\n if max_x - min_x < self.threshold or max_y - min_y < self.threshold:\n return 0, 0, w, h\n\n center = ((max_x + min_x) / 2, (max_y + min_y) / 2)\n half_width = (max_x - min_x) / 2 * (1 + self.padding)\n half_height = (max_y - min_y) / 2 * (1 + self.padding)\n\n if self.hw_ratio is not None:\n half_height = max(self.hw_ratio[0] * half_width, half_height)\n half_width = max(1 / self.hw_ratio[1] * half_height, half_width)\n\n min_x, max_x = center[0] - half_width, center[0] + half_width\n min_y, max_y = center[1] - half_height, center[1] + half_height\n\n # hot update\n if not self.allow_imgpad:\n min_x, min_y = int(max(0, min_x)), int(max(0, min_y))\n max_x, max_y = int(min(w, max_x)), int(min(h, max_y))\n else:\n min_x, min_y = int(min_x), int(min_y)\n max_x, max_y = int(max_x), int(max_y)\n return min_x, min_y, max_x, max_y\n\n def _compact_images(self, imgs: List[np.ndarray], img_shape: Tuple[int],\n box: Tuple[int]) -> List:\n \"\"\"Crop the images acoordding the bounding box.\"\"\"\n h, w = img_shape\n min_x, min_y, max_x, max_y = box\n pad_l, pad_u, pad_r, pad_d = 0, 0, 0, 0\n if min_x < 0:\n pad_l = -min_x\n min_x, max_x = 0, max_x + pad_l\n w += pad_l\n if min_y < 0:\n pad_u = -min_y\n min_y, max_y = 0, max_y + pad_u\n h += pad_u\n if max_x > w:\n pad_r = max_x - w\n w = max_x\n if max_y > h:\n pad_d = max_y - h\n h = max_y\n\n if pad_l > 0 or pad_r > 0 or pad_u > 0 or pad_d > 0:\n imgs = [\n np.pad(img, ((pad_u, pad_d), (pad_l, pad_r), (0, 0)))\n for img in imgs\n ]\n imgs = [img[min_y:max_y, min_x:max_x] for img in imgs]\n return imgs\n\n def transform(self, results: Dict) -> Dict:\n \"\"\"The transform function of :class:`MMCompact`.\n\n Args:\n results (dict): The result dict.\n\n Returns:\n dict: The result dict.\n \"\"\"\n img_shape = results['img_shape']\n kp = results['keypoint']\n # Make NaN zero\n kp[np.isnan(kp)] = 0.\n min_x, min_y, max_x, max_y = self._get_box(kp, img_shape)\n\n kp_x, kp_y = kp[..., 0], kp[..., 1]\n kp_x[kp_x != 0] -= min_x\n kp_y[kp_y != 0] -= min_y\n\n new_shape = (max_y - min_y, max_x - min_x)\n results['img_shape'] = new_shape\n results['imgs'] = self._compact_images(results['imgs'], img_shape,\n (min_x, min_y, max_x, max_y))\n return results\n\n 
def __repr__(self) -> str:\n        repr_str = (f'{self.__class__.__name__}(padding={self.padding}, '\n                    f'threshold={self.threshold}, '\n                    f'hw_ratio={self.hw_ratio}, '\n                    f'allow_imgpad={self.allow_imgpad})')\n        return repr_str\n","repo_name":"open-mmlab/mmaction2","sub_path":"mmaction/datasets/transforms/pose_transforms.py","file_name":"pose_transforms.py","file_ext":"py","file_size_in_byte":54317,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"}
{"seq_id":"42610708920","text":"import sys\n\n\n# Shared between part 1 and 2\nADJ = ((-1, -1), (-1, 00), (-1, +1),\n       (00, -1),           (00, +1),\n       (+1, -1), (+1, 00), (+1, +1))\n\n\ndef get_next_state(seats, rules):\n    \"\"\"\n    Takes in a 2D array of characters representing seats and gets the next\n    state of the system given a dictionary of rules.\n    \"\"\"\n\n    next_state, changes = [], 0\n    for i in range(len(seats)):\n        next_state.append([])\n        for j in range(len(seats[0])):\n            seat = rules[seats[i][j]](seats, i, j)\n            changes += seat != seats[i][j]\n            next_state[i].append(seat)\n\n    return next_state, changes\n\n\ndef occupied_seats(seats, rules):\n    \"\"\"\n    Counts the number of occupied seats that will eventually be reached when\n    the state of the system reaches equilibrium.\n    \"\"\"\n\n    state, changes = get_next_state(seats, rules)\n    while changes != 0:\n        state, changes = get_next_state(state, rules)\n\n    return sum(seat == '#' for row in state for seat in row)\n\n\n# Part 1\ndef check_adj(seats, r, c):\n    \"\"\"\n    Takes a 2D array of characters and returns the number of occupied seats\n    (represented by '#') that are adjacent to a given seat.\n    \"\"\"\n\n    occupied = 0\n    for dr, dc in ADJ:\n\n        i, j = r + dr, c + dc\n        in_bounds = 0 <= i < len(seats) and 0 <= j < len(seats[0])\n\n        if in_bounds and seats[i][j] == '#':\n            occupied += 1\n\n    return occupied\n\n\n# Part 2\ndef check_vis(seats, r, c):\n    \"\"\"\n    Takes a 2D array of characters and returns the number of occupied seats\n    (represented by '#') that are visible to a given seat (ie. 
count the first\n    seat that is encountered after moving in a straight line in any of the\n    adjacent directions).\n    \"\"\"\n\n    occupied = 0\n    for dr, dc in ADJ:\n        i, j = r + dr, c + dc\n        while 0 <= i < len(seats) and 0 <= j < len(seats[0]):\n\n            if seats[i][j] == '#':\n                occupied += 1\n                break\n            elif seats[i][j] == 'L':\n                break\n\n            i, j = i + dr, j + dc\n\n    return occupied\n\n\ndef main():\n    assert len(sys.argv) > 1, 'Missing argument: path to input file'\n    assert len(sys.argv) < 3, 'Too many arguments'\n    input_file = sys.argv[1]\n\n    with open(input_file, 'r') as f:\n        seats = [list(row) for row in f.read().splitlines()]\n\n    # Rules for state changes\n    rules1 = {\n        'L': lambda seats, i, j: '#' if check_adj(seats, i, j) == 0 else 'L',\n        '#': lambda seats, i, j: 'L' if check_adj(seats, i, j) >= 4 else '#',\n        '.': lambda seats, i, j: '.'\n    }\n\n    rules2 = {\n        'L': lambda seats, i, j: '#' if check_vis(seats, i, j) == 0 else 'L',\n        '#': lambda seats, i, j: 'L' if check_vis(seats, i, j) >= 5 else '#',\n        '.': lambda seats, i, j: '.'\n    }\n\n    # Solve part 1\n    part1 = occupied_seats(seats, rules1)\n    print('\\nPart 1:', part1)\n\n    # Solve part 2\n    part2 = occupied_seats(seats, rules2)\n    print('Part 2:', part2)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"yuzhoumo/advent-of-code","sub_path":"2020/day11/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"74115851282","text":"# 4.10 Iterating over the index-value pairs of a sequence\n# The enumerate() function\nmy_list = ['a', 'b', 'c']\nfor idx, val in enumerate(my_list, 2):\n    print(idx, val)\n# 2 a\n# 3 b\n# 4 c\n\ndef parse_data(filename):\n    with open(filename, 'rt') as f:\n        for lineno, line in enumerate(f, 5):\n            print(lineno,line)\n            fields = line.split()\n            try:\n                count = int(fields[1])\n                print(fields)\n            except ValueError as e:\n                print('Line {}: Parse error: {}'.format(lineno, e))\n\nparse_data(\"test.txt\")\n\nfrom collections import defaultdict\nword_summary = defaultdict(list)\n\nwith open('test.txt', 'r') as f:\n    lines = f.readlines()\n    print(lines)\n\nfor idx, line in enumerate(lines):\n    # Create a list of words in current line\n    words = [w.strip().lower() for w in line.split()]\n    for word in words:\n        # Dictionary assignment\n        word_summary[word].append(idx)\nprint(word_summary)\n","repo_name":"CuteSmartTiger/mastering_python","sub_path":"iterationANDgenerate/4.10enumerate.py","file_name":"4.10enumerate.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"27363336926","text":"# There are 16 MBTI types in total\n# By the pigeonhole principle, if N>=33 there must be an MBTI shared by at least 3 people\n\nfrom sys import maxsize\nfrom itertools import combinations\n\ndef cal(str1, str2, str3):\n    diff = 0\n    for i in range(4):\n        if str1[i] != str2[i]:\n            diff += 1\n        if str2[i] != str3[i]:\n            diff += 1\n        if str3[i] != str1[i]:\n            diff += 1\n    return diff\n\nT = int(input())\nfor _ in range(T):\n    N = int(input())\n    data = list(input().split())\n    if N >= 33:\n        print(0)\n        continue\n\n    minValue = maxsize\n    for x, y, z in combinations(data, 3):\n        minValue = min(minValue, cal(x, y, z))\n    print(minValue)\n\n'''\n3\n3\nENTJ INTP ESFJ\n4\nESFP ESFP ESFP ESFP\n5\nINFP INFP ESTP ESTJ ISTJ\n'''","repo_name":"lake041/algorithm","sub_path":"baekjoon/.etc/Brute Force/Silver 1, 가장 가까운 세 사람의 심리적 
거리.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36608043787","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 11 20:15:21 2021\n\n@author: Nicole Lee\n\"\"\"\nimport lab05_util \nrestaurants = lab05_util.read_yelp('yelp.txt')\ndef print_info(resturant_list):\n name = resturant_list[0]\n street = resturant_list[3]\n spl_street = street.split(\"+\")\n street1 = spl_street[0]\n street2 = spl_street[1]\n pl_type = resturant_list[5]\n lavg = resturant_list[-1]\n lmax = max(lavg)\n lmin = min(lavg)\n if len(lavg) < 3:\n avg = sum(lavg) / len(lavg) \n else:\n avg = (sum(lavg)-lmax-lmin) / len(lavg)\n if avg <= 2:\n rate = \"bad\"\n elif avg > 2 and avg <=3:\n rate = \"average\"\n elif avg > 3 and avg <=4:\n rate = \"above average\"\n elif avg > 4 and avg <=5:\n rate = \"very good\"\n print(name + \" ({})\".format(pl_type)+ \"\\n\\t\" + street1 + \"\\n\\t\" + street2 + \"\\n\" + \"Average score: {:.2f}\".format(avg) + \"\\n\")\n print(\"This restaurant is rated {}, based on {} reviews.\".format(rate, len(lavg)))\nwhile True:\n rest_num = input(\"Please enter a resturant id: \")\n rest_num = int(rest_num)\n rest_ind = rest_num -1\n if rest_num <= 0:\n print(\"Error, invalid ID\")\n break\n elif rest_num > 0:\n print_info(restaurants[rest_ind])\n\n","repo_name":"niccolesgit/Python-Projects","sub_path":"labs/Lab 05/check2.py","file_name":"check2.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43827613740","text":"import random\nfrom typing import Any, Dict, Optional, Tuple, cast\n\nfrom kornia import augmentation as kna\nfrom kornia.augmentation import random_generator as rg\nfrom kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D\nfrom kornia.augmentation.utils import _range_bound\nfrom kornia.enhance.adjust import adjust_contrast\nfrom torch import Tensor\n\nfrom .augment_base import Augment\n\n\n# copied from https://github.com/kornia/kornia/blob/53808e5fd039a4de97fcb207b7643fd72c30f032/kornia/augmentation/_2d/intensity/contrast.py\n# because still not on pypi\nclass RandomContrast(IntensityAugmentationBase2D):\n r\"\"\"Apply a random transformation to the contrast of a tensor image.\n\n This implementation aligns PIL. Hence, the output is close to TorchVision.\n\n .. image:: _static/img/RandomContrast.png\n\n Args:\n p: probability of applying the transformation.\n contrast: the contrast factor to apply\n clip_output: if true clip output\n silence_instantiation_warning: if True, silence the warning at instantiation.\n same_on_batch: apply the same transformation across the batch.\n keepdim: whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n .. 
note::\n This function internally uses :func:`kornia.enhance.adjust_contrast\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> inputs = torch.rand(1, 3, 3, 3)\n >>> aug = RandomContrast(contrast = (0.5,2.),p=1.)\n >>> aug(inputs)\n tensor([[[[0.2750, 0.4258, 0.0490],\n [0.0732, 0.1704, 0.3514],\n [0.2716, 0.4969, 0.2525]],\n \n [[0.3505, 0.1934, 0.2227],\n [0.0124, 0.0936, 0.1629],\n [0.2874, 0.3867, 0.4434]],\n \n [[0.0893, 0.1564, 0.3778],\n [0.5072, 0.2201, 0.4845],\n [0.2325, 0.3064, 0.5281]]]])\n\n To apply the exact augmenation again, you may take the advantage of the previous parameter state:\n >>> input = torch.rand(1, 3, 32, 32)\n >>> aug = RandomContrast((0.8,1.2), p=1.)\n >>> (aug(input) == aug(input, params=aug._params)).all()\n tensor(True)\n \"\"\"\n\n def __init__(\n self,\n contrast: Tuple[float, float] = (1.0, 1.0),\n clip_output: bool = True,\n same_on_batch: bool = False,\n p: float = 1.0,\n keepdim: bool = False,\n return_transform: Optional[bool] = None,\n ) -> None:\n super().__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim)\n self.contrast: Tensor = _range_bound(contrast, 'contrast', center=1.0)\n self._param_generator = cast(\n rg.PlainUniformGenerator, rg.PlainUniformGenerator((self.contrast, \"contrast_factor\", None, None))\n )\n self.clip_output = clip_output\n\n def apply_transform(\n self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor] = None\n ) -> Tensor:\n contrast_factor = params[\"contrast_factor\"].to(input)\n return adjust_contrast(input, contrast_factor, self.clip_output)\n\n\nclass AugmentPreTraining(Augment):\n def __init__(self, imgH, imgW) -> None:\n super().__init__()\n self.transforms = kna.AugmentationSequential(\n RandomContrast(contrast=(.7, 1.),p=1.),\n kna.RandomGaussianBlur((11, 11), (0.5, 1.5), p=1.),\n kna.AugmentationSequential(\n kna.RandomCrop(size=(int(imgH*random.uniform(.6, 1.)),\n imgW),\n p=1.,),\n kna.Resize((imgH, imgW))\n ),\n kna.AugmentationSequential(\n kna.RandomCrop(size=(imgH,\n int(imgW*random.uniform(.96, 1.))),\n p=1.,),\n kna.Resize((imgH, imgW))\n ),\n kna.RandomSharpness((4, 4.5), p=1.),\n kna.RandomThinPlateSpline(scale=random.uniform(0.02, 0.03),\n p=1.),\n kna.RandomPerspective(random.uniform(0.01, 0.02),\n p=1., keepdim=True),\n random_apply=(1, 5),\n same_on_batch=False,\n )\n\n\nclass AugmentTraining(Augment):\n def __init__(self, imgH, imgW) -> None:\n super().__init__()\n self.transforms = kna.AugmentationSequential(\n utils.augment.RandomContrast(contrast=(.7, 1.),p=1.),\n kna.RandomGaussianBlur((11, 11), (0.5, 1.5), p=1.),\n kna.AugmentationSequential(\n kna.RandomCrop(size=(int(imgH*random.uniform(.8, 1.)),\n imgW),\n p=1.,),\n kna.Resize((imgH, imgW))\n ),\n kna.AugmentationSequential(\n kna.RandomCrop(size=(imgH,\n int(imgW*random.uniform(.98, 1.))),\n p=1.,),\n kna.Resize((imgH, imgW))\n ),\n random_apply=True,\n same_on_batch=False,\n )\n","repo_name":"marcodiri/s2s-contrastive-text-recognition","sub_path":"model/augmentations/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"24602019816","text":"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport pdb\n\nfrom seq2seq.attn import Attention\n\n\nclass Encoder(nn.Module):\n def __init__(self, args):\n 
super(Encoder, self).__init__()\n self.args = args\n\n # Encoder\n self.encoder_rnn = nn.GRU(args.embed_dim, args.hidden_size, args.num_layers,\n batch_first=True, bidirectional=True, dropout=args.encoder_dropout)\n\n self.dropout_layer = nn.Dropout(p=args.decoder_dropout)\n\n self.init_hidden = nn.Linear(2 * args.hidden_size, args.hidden_size)\n self.input_proj = nn.Linear(2 * args.hidden_size, args.hidden_size)\n self.gate_proj = nn.Linear(2 * args.hidden_size + args.embed_dim, 2 * args.hidden_size)\n\n def forward(self, src_embed, src_lengths, u_review, u_review_lens, user_embed):\n\n # feed input to encoder RNN\n packed = pack_padded_sequence(src_embed, src_lengths, batch_first=True, enforce_sorted=False)\n encoder_hidden, encoder_final = self.encoder_rnn(packed)\n encoder_hidden, _ = pad_packed_sequence(encoder_hidden, batch_first=True) # encoder_hidden: [B, S, 2H]\n\n packed = pack_padded_sequence(u_review, u_review_lens, batch_first=True, enforce_sorted=False)\n _, review_final = self.encoder_rnn(packed)\n u_review_final = torch.cat([review_final[0:review_final.size(0):2], review_final[1:review_final.size(0):2]], dim=2)[-1]\n u_review_final = u_review_final.view(self.args.batch_size, self.args.mem_size, -1)\n\n # user_embed = u_review_final.mean(1)\n\n gate = torch.sigmoid(self.gate_proj(torch.cat(\n [encoder_hidden, user_embed.unsqueeze(1).repeat(1, encoder_hidden.size(1), 1)], dim=-1)))\n encoder_hidden = encoder_hidden.mul(gate)\n\n # get encoder final state, will be used as decoder initial state\n # fwd_final = encoder_final[0:encoder_final.size(0):2]\n # bwd_final = encoder_final[1:encoder_final.size(0):2]\n # encoder_final = torch.cat([fwd_final, bwd_final], dim=2) # encoder_final: [num_layers, B, 2H]\n\n # gate_h = torch.sigmoid(self.gate_proj(torch.cat([encoder_final, user_embed.unsqueeze(0).repeat(2, 1, 1)], dim=-1)))\n # encoder_final = encoder_final.mul(gate_h)\n encoder_final = self.input_proj(encoder_hidden[:,-1,:])\n # hidden = torch.tanh(self.init_hidden(encoder_final))\n hidden = encoder_final.unsqueeze(0)\n context_hidden = encoder_final.unsqueeze(1)\n\n return hidden, encoder_hidden, context_hidden\n","repo_name":"kldcr/USN","sub_path":"seq2seq/enc.py","file_name":"enc.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35836237061","text":"def find_projection_area(matr):\n rlen, clen = len(matr), len(matr[0])\n total_area = 0\n rmax = [0] * rlen\n cmax = [0] * clen\n\n for rid, row in enumerate(matr):\n for cid, val in enumerate(row):\n if not val:\n continue\n total_area += 1\n if rmax[rid] < val:\n rmax[rid] = val\n if cmax[cid] < val:\n cmax[cid] = val\n total_area += sum(rmax) + sum(cmax)\n\n return total_area\n\n\n# matr = [[1, 2], [3, 4]]\nmatr = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]\nprint(find_projection_area(matr))\n","repo_name":"s-surineni/atice","sub_path":"leet_code/projection_area_of_3d_shapes.py","file_name":"projection_area_of_3d_shapes.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10304045736","text":"''' 2865번 나는 위대한 슈퍼스타K '''\nn, m, k = map(int, input().split())\nparticipants = [0.0] * n\n\nfor i in range(m):\n line = input().split()\n \n for j in range(n):\n participant, skill = int(line[2*j])-1, float(line[2*j+1])\n participants[participant] = max(participants[participant], skill)\n\nprint(round(sum(sorted(participants, 
reverse=True)[:k]),1))\n\n\n \n\n \n \n \n\n","repo_name":"kingbj940429/Coding_Test_Solution","sub_path":"beak_joon/b_2865.py","file_name":"b_2865.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73164338962","text":"'''FROM TERMINAL\n. CatApiApp/bin/activate\ncd CatApiApp\nexport FLASK_ENV=development\nexport FLASK_APP=server.py\nflask run\n'''\nfrom flask import Flask, render_template, url_for, jsonify\nfrom bs4 import BeautifulSoup\nimport requests\nfrom PIL import Image\nimport io \n\n#_name__ is our main \napp = Flask(__name__)\n\nwith open('templates/index.html') as fp:\n\tsoup = BeautifulSoup(fp, 'html.parser')\n\tcat_button = soup.find(id = 'cat_button')\n\tcat_result = soup.find(id = 'cat_results')\n\n\t\n#rendering the HTML page\n@app.route('/')\ndef index_html():\n\tcat_result = generate_random_cats()\n\n\t'''TEST AREA\n\tres_img = requests.get(cat_result)\n\timg_bytes = io.BytesIO(res_img.content)\n\timg = Image.open(img_bytes)\n\timg.show()\n\n\tprint('cats: ' + cat_result)\n\tprint('----------------')\n\t'''\n\treturn render_template('index.html', data = cat_result)\n\n#generating random cat images on buttom click\n@app.route('/randomcats')\ndef generate_random_cats():\n\n\turl = 'https://api.thecatapi.com/v1/images/search'\n\tpayload = {}\n\theaders = {\n\t '50427698-0fc1-4f8e-b67c-4b6fd9925152': ''\n\t}\n\n\tresponse = requests.request('GET', url, headers = headers, data = payload)\n\n\tif response.status_code != 200:\n\t\traise RuntimeError(f'Error fetching: {response.status_code}, check the api and try again')\n\t\treturn response\n\telse:\n\t\tif response == []:\n\t\t\tgenerate_random_cats()\n\t\telse:\n\t\t\tdata = response.json()\n\t\t\timage_url = data[0]['url']\n\n\t#print('generate_random_cats(): ' + data[0]['url'])\n\treturn data[0]['url']\n\t\nif __name__ == '__main__':\n\tapp.run()\n\n","repo_name":"CigdemOzturk/Cat-API","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7268743798","text":"from DiverseSelector.distance import (\n pairwise_similarity_bit,\n tanimoto,\n modified_tanimoto,\n)\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_equal, assert_raises\n\n\ndef test_pairwise_similarity_bit_raises():\n # check raised error for input feature matrix that is not 2D\n assert_raises(ValueError, pairwise_similarity_bit, np.random.random(5), \"tanimoto\")\n assert_raises(ValueError, pairwise_similarity_bit, np.random.random((2, 3, 4)), \"tanimoto\")\n # check raised error for not-available method\n assert_raises(ValueError, pairwise_similarity_bit, np.random.random((5, 1)), \"tan\")\n assert_raises(ValueError, pairwise_similarity_bit, np.random.random((5, 1)), tanimoto)\n\n\ndef test_tanimoto_raises():\n # check raised error when a or b is not 1D\n assert_raises(ValueError, tanimoto, np.random.random((1, 5)), np.random.random(5))\n assert_raises(ValueError, tanimoto, np.random.random(3), np.random.random((1, 4)))\n assert_raises(ValueError, tanimoto, np.random.random(4), np.random.random((3, 4)))\n assert_raises(ValueError, tanimoto, np.random.random((3, 3)), np.random.random((2, 3)))\n # check raised error when a and b don't have the same length\n assert_raises(ValueError, tanimoto, np.random.random(3), np.random.random(5))\n assert_raises(ValueError, tanimoto, np.random.random(20), 
np.random.random(10))\n\n\ndef test_tanimoto():\n \"\"\"Test the tanimoto function on one pair of points.\"\"\"\n a = np.array([2, 0, 1])\n b = np.array([2, 0, 0])\n # expected = (2*2 + 0*0 + 1*0) / (2**2 + 1 + 2**2 - 2*2)\n assert_equal(tanimoto(a, b), 4 / (5 + 4 - 4))\n\n\ndef test_tanimoto_bitstring():\n \"\"\"Test the tanimoto function on one pair of points.\"\"\"\n a = np.array([0, 0, 0, 1, 0, 1, 1])\n b = np.array([1, 1, 0, 0, 0, 1, 1])\n assert_equal(tanimoto(a, b), 2 / 5)\n\n\ndef test_tanimoto_matrix():\n \"\"\"Testing the tanimoto function with predefined feature matrix.\"\"\"\n x = np.array([[1, 4], [3, 2]])\n s = pairwise_similarity_bit(x, \"tanimoto\")\n expected = np.array([[1, (11 / 19)], [(11 / 19), 1]])\n assert_equal(s, expected)\n\n\ndef test_modified_tanimoto():\n a = np.array([1, 1, 0, 0, 1])\n b = np.array([0, 0, 0, 0, 1])\n expected = (1.6 / 9) + (1.4 / 6)\n assert_equal(modified_tanimoto(a, b), expected)\n\n\ndef test_modified_tanimoto_all_ones():\n \"\"\"Test the modified tanimoto function when input is all '1' bits\"\"\"\n a = np.array([1, 1, 1, 1, 1])\n assert_equal(modified_tanimoto(a, a), 1)\n\n\ndef test_modified_tanimoto_all_zeroes():\n \"\"\"Test the modified tanimoto function when input is all '0' bits\"\"\"\n a = np.zeros(5)\n assert_equal(modified_tanimoto(a, a), 1)\n\n\ndef test_modified_tanimoto_dimension_error():\n \"\"\"Test modified tanimoto raises error when input has incorrect dimension.\"\"\"\n a = np.zeros([7, 5])\n b = np.zeros(5)\n assert_raises(ValueError, modified_tanimoto, a, b)\n assert_raises(ValueError, modified_tanimoto, b, a)\n assert_raises(ValueError, modified_tanimoto, np.ones(3), np.ones(5))\n\n\ndef test_modified_tanimoto_matrix():\n \"\"\"Testing the modified tanimoto function with predefined feature matrix.\"\"\"\n x = np.array([[1, 0, 1], [0, 1, 1]])\n s = pairwise_similarity_bit(x, \"modified_tanimoto\")\n expceted = np.array([[1, (4 / 27)], [(4 / 27), 1]])\n assert_equal(s, expceted)\n","repo_name":"FanwangM/selector_debug","sub_path":"DiverseSelector/tests/test_distance.py","file_name":"test_distance.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6167148443","text":"#!/usr/bin/env python\n\nimport logging, os, sys, argparse, subprocess, time, re\nimport libmount as mnt\nfrom datetime import datetime, timedelta\nimport subprocess\n\ndef setup_logging():\n\tlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\n\nUNSET = object()\ndef get_env(name,default=UNSET):\n\tif name in os.environ:\n\t\treturn os.environ[name]\n\tif default is not UNSET:\n\t\treturn default\n\tlogging.error(\"Missing environment variable %s\",name)\n\tsys.exit(os.EX_USAGE)\n\ndef get_rancher_host_label(label_name):\n\ttry:\n\t\timport requests\n\t\tresponse=requests.get('http://rancher-metadata/latest/self/host/labels/%s' % label_name)\n\t\tif response.status_code==200:\n\t\t\treturn response.text\n\texcept:\n\t\tpass\n\treturn None\n\ndef get_rancher_host_name(default=None):\n\ttry:\n\t\timport requests\n\t\tresponse=requests.get('http://rancher-metadata/latest/self/host/name')\n\t\tif response.status_code==200:\n\t\t\treturn response.text\n\texcept:\n\t\tpass\n\treturn default\n\n\ndef setup():\n\tsetup_logging()\n\tbackup_server=get_env('BACKUP_SERVER',None)\n\tif backup_server is None:\n\t\tbackup_server=get_rancher_host_label('backup_server')\n\t\tlogging.info('Got BACKUP_SERVER=%s from rancher host label 
\"backup_server\"',backup_server)\n\tif backup_server is None:\n\t\tlogging.error(\"Missing environment variable BACKUP_SERVER\")\n\t\tsys.exit(os.EX_USAGE)\n\tconfig={\n\t\t'server':\t\tbackup_server,\n\t\t'port':\t\t\tint(get_env('BACKUP_SERVER_PORT',22)),\n\t\t'server_key':\tget_env('BACKUP_SERVER_PUBLIC_KEY',None),\n\t\t'user':\t\t\tget_env('BACKUP_SERVER_USER','backup'),\n\t\t'volume_dir':\tget_env('BACKUP_VOLUME_DIR','/data/volumes'),\n\t\t'mount_dir':\tget_env('BACKUP_MOUNT_DIR','/data/mounts'),\n\t\t'conf_dir':\t\tget_env('BACKUP_CONF_DIR','/data/conf'),\n\t\t'volumes':\t\t[]\n\t}\n\tlogging.info(\"Using server %s@%s:%s\",config['user'],config['server'],config['port'])\n\ttimeout_regex = re.compile(r'(\\d+)([hms])')\n\tfor vol in filter(None,get_env('BACKUP_VOLUMES').split(',')):\n\t\ttimeout_str=get_env('VOL_%s_TIMEOUT'%vol,'24h')\n\t\ttimeout_parts = timeout_regex.match(timeout_str)\n\t\tif not timeout_parts:\n\t\t\tlogging.error(\"Invalid timeout: %s\",timeout_str)\n\t\ttimeout=int(timeout_parts.group(1))\n\t\tif timeout_parts.group(2)=='m':\n\t\t\ttimeout=timeout * 60\n\t\telif timeout_parts.group(2)=='h':\n\t\t\ttimeout=timeout * 3600\n\t\t\n\t\tvc={\n\t\t 'vol':\t vol,\n\t\t 'dir':\t get_env('VOL_%s_PATH'%vol,os.path.join(config['volume_dir'], vol)),\n\t\t 'encrypt': bool(get_env('VOL_%s_ENCRYPT'%vol,False)),\n\t\t 'excludes': filter(None,get_env('VOL_%s_EXCLUDE'%vol,'').split(\",\")),\n\t\t 'timeout': timeout\n\t\t}\n\t\tconfig['volumes'].append(vc)\n\t\t\n\t\tlogging.info(\"Volume %s at %s\",vc['vol'],vc['dir'])\n\t\tlogging.info(\" Timeout set to %s\",timeout_str)\n\t\tfor exclude in vc['excludes']:\n\t\t\tlogging.info(\" Excluding %s\",exclude)\n\t\t\n\treturn config\n\ndef run_backups(config,volumes=[]):\n\tfor vol in config['volumes']:\n\t\tif (len(volumes)==0) or (vol['vol'] in volumes):\n\t\t\trun_backup(config,vol)\n\ndef check_backup_ready(volpath,volume):\n\treadyfile = os.path.join(volpath,'.ready_for_backup')\n\tif not os.path.exists(readyfile):\n\t\tlogging.warn('[%s] ERROR! 
Indicator file %s does not exist - skipping this backup.',volume['vol'],readyfile)\n\t\treturn False\n\treturn True\n\ndef run_backup(config,volume):\n\tvolname = volume['vol']\n\n\tvolpath=volume['mount_dir']\n\t\n\tlogging.info(\"[%s] Starting backup of %s, mounted to %s\",volname,volume['dir'],volpath)\n\n\tif not volpath.endswith(\"/\"):\n\t\tvolpath+=\"/\"\n\n\tif not check_backup_ready(volpath,volume):\n\t\treturn\n\n\t# build rsync command\n\tcmd=[]\n\tif os.path.exists('/usr/bin/nice'):\n\t\tcmd+=['/usr/bin/nice','-n','19']\n\telif os.path.exists('/bin/nice'):\n\t\tcmd+=['/bin/nice','-n','19']\n\n\tif os.path.exists('/usr/bin/ionice'):\n\t\tcmd+=['/usr/bin/ionice','-c','3']\n\telif os.path.exists('/bin/ionice'):\n\t\tcmd+=['/bin/ionice','-c','3']\n\n\t# backup timeout\n\tcmd+=[\"/usr/bin/timeout\",'-t',str(volume['timeout'])]\n\t\n\t# rsync with options\n\tcmd.append(\"/usr/bin/rsync\")\n\tcmd.append(\"-e\")\n\tcmd.append(\"ssh -p %s -o HostKeyAlgorithms=ssh-rsa -o UserKnownHostsFile=%s/known_hosts -o IdentityFile=/%s/id_rsa\" %\n\t           (config['port'],config['conf_dir'],config['conf_dir']))\n\tcmd.append(\"-avr\")\n\tcmd.append(\"--numeric-ids\")\n\tcmd.append(\"--delete-during\")\n\tcmd.append(\"--acls\")\n\tcmd.append(\"--xattrs\") # TODO: Skip if not supported or make configurable\n\tcmd.append(\"--sparse\")\n\t\n\t# append excludes\n\tfor exclude in volume['excludes']:\n\t\tcmd.append(\"--exclude\")\n\t\tcmd.append(exclude)\n\t\n\t# source\n\tcmd.append(volpath)\n\t\n\t# destination\n\tcmd.append(\"%s@%s:%s\" % (config['user'],config['server'],volume['vol']))\n\t\n\t# ensure that we use our own key for backup, not the one passed via ssh agent by the current user\n\tmyenv=os.environ.copy();\n\tmyenv['SSH_AUTH_SOCK']=\"\"\n\t\n\tlogging.info(\"[%s] Running '%s'\",volname,\"' '\".join(cmd))\n\t\n\t# execute the rsync command\n\trsyncExitValue=-1\n\ttry:\n\t\tp = None # Starting the process might fail -> ensure that p is defined\n\t\tp=subprocess.Popen(cmd,env=myenv,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n\t\tfor line in iter(p.stdout.readline,''):\n\t\t\tlogging.warn(\"[%s] %s\",volname,line.rstrip())\n\t\tp.wait()\n\t\trsyncExitValue=p.returncode\n\texcept KeyboardInterrupt:\n\t\trsyncExitValue=20\n\t\tpass\n\tfinally:\n\t\tif p:\n\t\t\ttry:\n\t\t\t\tp.terminate()\n\t\t\t\ttime.sleep(1)\n\t\t\t\tp.kill()\n\t\t\texcept OSError:\n\t\t\t\tpass # Process might already be terminated\n\n\tif rsyncExitValue==124:\n\t\tlogging.warn(\"[%s] ERROR! The backup timed out after %s\",volname,volume['timeout'])\n\t\treturn\n\t\n\t# rsync exit code 0 - everything was ok\n\t# rsync exit code 24 - everything was ok but some files changed during sync\n\tif rsyncExitValue!=0 and rsyncExitValue!=24:\n\t\tlogging.info('[%s] ERROR! 
rsync exited with code %d - this backup is failed.',volname,rsyncExitValue)\n\t\treturn\n\n\tlogging.info('[%s] backup ok - tell the server that we are done.',volname)\n\n\tcmd=[\"/usr/bin/ssh\"]\n\tcmd.append(\"-o\")\n\tcmd.append(\"HostKeyAlgorithms=ssh-rsa\")\n\tcmd.append(\"-o\")\n\tcmd.append(\"UserKnownHostsFile=%s/known_hosts\" % config['conf_dir'])\n\tcmd.append(\"-o\")\n\tcmd.append(\"IdentityFile=%s/id_rsa\" % config['conf_dir'])\n\tcmd.append(\"-p\")\n\tcmd.append(str(config['port']))\n\tcmd.append(\"%s@%s\" % (config['user'],config['server']))\n\tcmd.append(\"FINISH_BACKUP\")\n\tcmd.append(volume['vol'])\n\tsubprocess.call(cmd,env=myenv)\n\ndef bind_mount(src,dst):\n\tif os.path.ismount(dst):\n\t\tlogging.debug(\"Volume %s is already mounted to %s\",src,dst)\n\t\treturn\n\n\tif not os.path.exists(dst):\n\t\tos.makedirs(dst)\n\n\tlogging.info(\"Bind-mounting volume %s to %s\",src,dst)\n\tctx = mnt.Context()\n\tctx.options=\"bind\"\n\tctx.source=src\n\tctx.target=dst\n\ttry:\n\t\tctx.mount()\n\texcept Exception as e:\n\t\tlogging.error('Failed to mount: %s. Please ensure that you are running as root with docker capability SYS_ADMIN.',e)\n\t\tsys.exit(os.EX_IOERR)\n\ndef encfs_mount(src,dst,passfile):\n\tif os.path.ismount(dst):\n\t\tlogging.debug(\"Volume %s is already mounted to %s\",src,dst)\n\t\treturn\n\n\tif not os.path.exists(dst):\n\t\tos.makedirs(dst)\n\n\tlogging.info(\"Encfs reverse-mounting volume %s to %s\",src,dst)\n\n\tsubprocess.check_call(['encfs','--reverse','--extpass=cat %s|tr -d \\'\\n\\''%passfile,src,dst])\n\ndef mount_dirs(config):\n\tfor volume in config['volumes']:\n\t\tvoldir=volume['dir']\n\t\tvolume['mount_dir']=os.path.join(config['mount_dir'],volume['vol'])\n\t\tif volume['encrypt']:\n\t\t\tif not os.path.exists('/dev/fuse'):\n\t\t\t\tlogging.error('/dev/fuse not found. Please run the container with \"--device /dev/fuse\".')\n\t\t\t\tsys.exit(os.EX_IOERR)\n\t\t\tif not os.path.exists(os.path.join(voldir,'.encfs6.xml')):\n\t\t\t\tlogging.error('Encfs configuration .encfs6.xml not found in %s. 
Please Setup encfs first.',voldir)\n\t\t\t\tsys.exit(os.EX_IOERR)\n\t\t\tpassfile=os.path.join(config['conf_dir'],'encfs_%s.pass'%volume['vol'])\n\t\t\tif not os.path.exists(passfile):\n\t\t\t\tlogging.error('Please put the enfs password in %s\"',passfile)\n\t\t\t\tsys.exit(os.EX_IOERR)\n\n\t\t\tplaindir=os.path.join(config['mount_dir'],'_plain_%s'%volume['vol'])\n\t\t\tbind_mount(voldir,plaindir)\n\t\t\tencfs_mount(plaindir,volume['mount_dir'],passfile)\n\t\telse:\n\t\t\tbind_mount(voldir,volume['mount_dir'])\n\ndef setup_ssh(config):\n\tconfdir=config['conf_dir']\n\tif not os.path.exists(confdir):\n\t\tos.makedirs(confdir)\n\tpubkeys=config['server_key']\n\tif pubkeys:\n\t\tlogging.info('Creating %s/known_hosts' % confdir)\n\t\tknown_hosts=[]\n\t\tfor pubkey in filter(None,list(pubkeys.split())):\n\t\t\tpubkey=pubkey.strip()\n\t\t\tif len(pubkey)==0:\n\t\t\t\tcontinue\n\t\t\tkey_parts=pubkey.split(':')\n\t\t\tlogging.info(' Adding %s %s',key_parts[0],key_parts[1])\n\t\t\tif config['port']==22:\n\t\t\t\tknown_hosts.append('%s %s %s'%(config['server'],key_parts[0],key_parts[1]))\n\t\t\telse:\n\t\t\t\tknown_hosts.append('[%s]:%s %s %s'%(config['server'],config['port'],key_parts[0],key_parts[1]))\n\t\twith open(os.path.join(confdir,'known_hosts'),'w+') as file:\n\t\t\tfile.write('\\n'.join(known_hosts))\n\t\t\tfile.write('\\n')\n\n\tif not os.path.exists(os.path.join(confdir,'id_rsa')):\n\t\tlogging.info('Creating %s/id_rsa'%confdir)\n\t\tsubprocess.call(['ssh-keygen','-N','','-t','rsa','-f',os.path.join(confdir,'id_rsa')])\n\n\twith open(os.path.join(confdir,'id_rsa.pub'),'r') as file:\n\t\tssh_key=file.read().split()\n\t\tlogging.info('Use the following key on the backup server:')\n\t\tclient_name=get_rancher_host_name('[BACKUP_CLIENT_NAME]')\n\t\tlogging.info(' %s:%s:%s',client_name,ssh_key[0],ssh_key[1])\n\ndef get_next_schedule(hour,minute):\n\tnow = datetime.now()\n\tschedule = now.replace(hour=hour,minute=minute,second=0,microsecond=0)\n\twhile schedule < now:\n\t schedule = schedule + timedelta(days=1)\n\treturn schedule\n\n\ndef schedule_backups(config,hour,minute):\n\twhile True:\n\t\tschedule = get_next_schedule(hour,minute)\n\t\tlogging.info(\"Scheduled next backup at %s\",schedule)\n\t\twhile schedule > datetime.now():\n\t\t\ttime.sleep(10)\n\t\trun_backups(config)\n\ndef main():\n\tconfig=setup()\n\tsetup_ssh(config)\n\tmount_dirs(config)\n\n\tdef time_type(s, pat=re.compile(r\"(\\d{1,2}):(\\d{2})\")):\n\t\tif s=='auto':\n\t\t\ts=get_rancher_host_label('backup_schedule')\n\t\t\tif s is None:\n\t\t\t\traise argparse.ArgumentTypeError(\"Got 'auto' as schedule time but found no rancher host label 'backup_schedule'.\")\n\t\ttime = pat.match(s)\n\t\tif not time:\n\t\t raise argparse.ArgumentTypeError(\"Invalid time format\")\n\t\treturn {'hour':int(time.group(1)),'minute':int(time.group(2))}\n\t\n\tparser = argparse.ArgumentParser(description='Rsyncbackup client')\n\tsp = parser.add_subparsers()\n\tsp_run = sp.add_parser('run', help='Run backup for one or more volumes')\n\tsp_run.set_defaults(action='run')\n\tsp_run.add_argument('volumes',metavar='VOLUME', nargs='*', help='An optional list of volumes to backup',\n\t choices=[[]]+[v['vol'] for v in config['volumes']])\n\tsp_cron = sp.add_parser('schedule', help='Schedules backups at a given time of the day')\n\tsp_cron.add_argument('time',metavar='HH:MM', help='The time of day to schedule the backup', type=time_type)\n\tsp_cron.set_defaults(action='cron')\n\targs = parser.parse_args()\n\t\n\tif 
args.action=='run':\n\t\trun_backups(config,args.volumes)\n\telse:\n\t\tschedule_backups(config,args.time['hour'],args.time['minute'])\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"evermind/docker-rsyncbackup-client","sub_path":"rsyncbackup-client.py","file_name":"rsyncbackup-client.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71453552402","text":"\n# arr = [\"1\",\"2\",\"4\",\"3\",\"3\",\"4\",\"1\",\"5\"]\n# process = [\"read 1 3 1 2\",\"read 2 6 4 7\",\"write 4 3 3 5 2\",\"read 5 2 2 5\",\"write 6 1 3 3 9\", \"read 9 1 0 7\"]\n# result = [\"24\",\"3415\",\"4922\",\"12492215\",\"13\"]\n\n\n##여러 프로세스가 다음 규칙에 따라 배열(arr) 하나에 접근하여 읽기(Read) 또는 쓰기(Write) 작업을 수행하려 합니다.\n#\n# 한 번에 여러 프로세스가 배열에서 동시에 읽기 작업을 수행할 수 있습니다.\n# 배열에 읽기 작업을 수행 중인 경우, 새로운 읽기 요청 프로세스는 즉시 작업을 수행할 수 있습니다.\n# 한 번에 하나의 프로세스만 배열에서 쓰기 작업을 수행할 수 있습니다.\n# 배열에 쓰기 작업을 수행 중인 경우, 새로운 읽기, 쓰기 요청 프로��스는 모두 대기해야 합니다.\n# 배열에 읽기 작업을 수행 중인 경우, 새로운 쓰기 요청 프로세스는 모두 대기해야 합니다.\n# 하나 이상의 쓰기 작업이 대기 중인 경우, 새로운 읽기 요청 또한 대기해야 합니다.\n# 대기 중인 읽기, 쓰기 작업 중에서 다음으로 작업할 프로세스를 선택할 때\n# 읽기 작업보다 쓰기 작업을 먼저 수행합니다.\n# 쓰기 작업이 여러 개라면, 먼저 요청된 쓰기 작업을 먼저 수행합니다.\n# 대기 중인 작업을 배열에서 수행하려 함과 동시에 새로운 작업 요청이 들어온다면, 새 작업 요청을 포함하여 다음으로 작업할 프로세스를 선택합니다.\n# 예를 들어, 10초에 쓰기 작업이 끝났고, 읽기 작업만 대기 중인 경우, 10초에 새로운 쓰기 작업 요청이 들어왔다면, 쓰기 작업을 먼저 처리합니다.\n# 위 규칙에 따라 읽기, 쓰기 작업을 처리한 후, 읽기 작업에서 읽은 내용과 전체 프로세스가 배열을 사용한 시간은 얼마나 되는지 알아보려 합니다.\n#\n# 초기 배열의 상태가 담긴 문자열 배열 arr과 읽기, 쓰기 작업 요청이 담긴 문자열 배열 processes가 매개변수로 주어집니다. 읽기 작업에서 읽은 내용을 processes에서 주어진 순서대로 정답 배열에 담은 뒤, 배열이 전체 프로세스에 의해 사용된 시간을 정답 배열의 마지막에 담아 return 하도록 solution 함수를 완성해주세요.\n#\n# 제한사항\n# 5 ≤ arr의 길이 ≤ 1,000\n# 1 ≤ arr의 원소 ≤ 9\n# arr의 원소는 1부터 9까지의 숫자가 문자열 형태로 담겨있습니다.\n# 1 ≤ processes의 길이 ≤ 1,000\n# processes의 원소는 \"read t1 t2 A B\", 또는 \"write t1 t2 A B C\" 형태입니다.\n# t1은 요청 시각, t2는 해당 요청을 처리하는데 걸리는 시간입니다.\n# 1 ≤ t1 ≤ 1,000\n# 1 ≤ t2 ≤ 100\n# A, B는 데이터를 읽거나 쓸 구간으로, 배열의 인덱스를 나타냅니다.\n# 0 ≤ A ≤ B < arr의 길이\n# C는 배열 구간에 쓸 한 자리 숫자입니다. 
arr[A] ~ arr[B]에 해당하는 구간의 값을 전부 C로 바꾸면 됩니다.\n# 1 ≤ C ≤ 9\n# t1, t2, A, B, C는 모두 정수입니다.\n# 같은 시각에 요청된 작업은 없습니다(즉, 모든 문자열에 대해서 t1의 값은 서로 다릅니다).\n# processes에는 \"read t1 t2 A B\"가 적어도 하나 이상 들어있습니다.\n# processes는 t1기준으로 정렬되어 있습니다.\n# 배열이 전체 프로세스에 의해 사용된 시간은 정답 배열의 마지막에 문자열 형태로 담아 return 하면 됩니다.\n\n\n\narrinput = [\"1\",\"2\",\"4\",\"3\",\"3\",\"4\",\"1\",\"5\"]\nproinput = [\"read 1 3 1 2\",\"read 2 6 4 7\",\"write 4 3 3 5 2\",\"read 5 2 2 5\",\"write 6 1 3 3 9\", \"read 9 1 0 7\"]\n\n\n#arrinput = [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"]\n#proinput = [\"write 1 12 1 5 8\", \"read 2 3 0 2\", \"read 5 5 1 2\", \"read 7 5 2 5\", \"write 13 4 0 1 3\", \"write 19 3 3 5 5\", \"read 30 4 0 6\", \"read 32 3 1 5\"]\n\ndef solution(arr, processes):\n\n write_process = []\n read_process = []\n\n\n for process in processes:\n ipt = list(map(str, process.split(\" \")))\n t1, t2, a, b = int(ipt[1]), int(ipt[2]), int(ipt[3]), int(ipt[4])\n if ipt[0]==\"read\":\n read_process.append([t1, t2, a, b])\n else:\n write_process.append([t1, t2, a, b, ipt[5]])\n\n ans = [\"\"]*len(read_process)\n\n cur_time = 1\n cur_process = []\n spent_time = 0\n\n def minst(arr):\n minv = 1001\n idx = -1\n for i, a in enumerate(arr):\n if minv>a[0]:\n idx = i\n minv = a[0]\n return [idx, minv]\n\n while (write_process or read_process or cur_process) and cur_time<30:\n\n\n cur_time += 1\n\n\n\n answer = ans\n answer.append(str(spent_time))\n #answer = []\n return answer\n\n\n\nx = solution(arrinput, proinput)\nprint(x)","repo_name":"dowoonlee/TIL","sub_path":"dev_match/sk2-2.py","file_name":"sk2-2.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37907830348","text":"#!/usr/bin/env python\n\"\"\"\nGimp plugin for loading/saving the smartimage file format\n\"\"\"\nimport os\nfrom gimpfu import *\n\n\n# Some common info about this file type\nfileTypeName='smartimage'\nfileExtensions=['simg','simt']\nfileMimeType='image/smartimage+xml' # TODO: is this correct?\nfileLocationProtocols='http:,ftp:,https:,sftp:,file:'\ndescriptionLoad='load a '+fileTypeName+' ('+(', '.join(fileExtensions))+') file'\ndescriptionSave='save a '+fileTypeName+' ('+(', '.join(fileExtensions))+') file'\ncanLoad=True\ncanSave=True\nauthor='KC Eilander'\ncopyrightHolder=author\ncopyrightYear=2018\n\n\t\n# ------------ Saving\nif canSave:\n\t\n\tdef saveFile(img,drawable,filename,raw_filename):\n\t\traise NotImplementedError()\n\t\n\t# when we are queried, specify that we are a save handler\n\tdef register_save_handlers():\n\t\tgimp.register_save_handler('file-'+fileTypeName+'-save',','.join(fileExtensions),fileLocationProtocols)\n\t\n\t# register all that\n\tregister(\n\t\t'file-'+fileTypeName+'-save',\n\t\tdescriptionSave,\n\t\tdescriptionSave,\n\t\tauthor,\n\t\tcopyrightHolder,\n\t\tstr(copyrightYear),\n\t\tfileTypeName,\n\t\t'*',\n\t\t[\n\t\t\t(PF_IMAGE,\"image\",\"Input image\",None),\n\t\t\t(PF_DRAWABLE,\"drawable\",\"Input drawable\",None),\n\t\t\t(PF_STRING,\"filename\",\"The name of the file\",None),\n\t\t\t(PF_STRING,\"raw-filename\",\"The name of the file\",None),\n\t\t],\n\t\t[],\n\t\tsaveFile,\n\t\ton_query=register_save_handlers,\n\t\tmenu=''\n\t)\n\n\t\n# ------------ Loading\nif canLoad:\n\t\n\tdef getThumbnail(filename,thumb_size):\n\t\traise NotImplementedError()\n\n\tdef loadFile(filename,raw_filename):\n\t\traise NotImplementedError()\n\n\t# when we are queried, specify that we are a load handler\n\tdef 
register_load_handlers():\n\t\tgimp.register_load_handler('file-'+fileTypeName+'-load',','.join(fileExtensions),fileLocationProtocols)\n\t\tpdb['gimp-register-file-handler-mime']('file-'+fileTypeName+'-load',fileMimeType)\n\t\tpdb['gimp-register-thumbnail-loader']('file-'+fileTypeName+'-load','file-'+fileTypeName+'-load-thumb')\n\t\n\t# register all that\n\tregister(\n\t\t'file-'+fileTypeName+'-load',\n\t\tdescriptionLoad,\n\t\tdescriptionLoad,\n\t\tauthor,\n\t\tcopyrightHolder,\n\t\tstr(copyrightYear),\n\t\tfileTypeName,\n\t\tNone,\n\t\t[\n\t\t\t(PF_STRING,'filename','The name of the file to load',None),\n\t\t\t(PF_STRING,'raw-filename','The name entered',None),\n\t\t],\n\t\t[(PF_IMAGE,'image','Output image')],#results. Format (type,name,description)\n\t\tloadFile,\n\t\ton_query=register_load_handlers,\n\t\tmenu=\"\",\n\t)\n\tregister(\n\t\t'file-'+fileTypeName+'-load-thumb',\n\t\tdescriptionLoad,\n\t\tdescriptionLoad,\n\t\tauthor,\n\t\tcopyrightHolder,\n\t\tstr(copyrightYear),\n\t\tNone,\n\t\tNone,\n\t\t[\n\t\t\t(PF_STRING,'filename','The name of the file to load',None),\n\t\t\t(PF_INT,'thumb-size','Preferred thumbnail size',None),\n\t\t],\n\t\t[\n\t\t\t(PF_IMAGE,'image','Thumbnail image'),\n\t\t\t(PF_INT,'image-width','Width of full-sized image'),\n\t\t\t(PF_INT,'image-height','Height of full-sized image')\n\t\t],\n\t\tgetThumbnail,\n\t)\n\n\nmain()\n","repo_name":"TheHeadlessSourceMan/smartimage","sub_path":"gimp plugin/file-smartimage.py","file_name":"file-smartimage.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"22465584068","text":"\"\"\"\nlog in\n\"\"\"\nfrom aiogram.dispatcher import FSMContext as s\nfrom aiogram.types import Message as m\nfrom keyboardbutton import keyboardbutton\nfrom functions import users_main_menu, users_second_menu, read_all_media\nfrom loader import bot\nfrom states import *\n\n\nasync def main_menu(m: m, state: s):\n \"\"\"\n :param state:\n :param m:\n :return:\n \"\"\"\n if m.text in users_main_menu():\n await state.update_data(changed_day=m.text)\n await m.answer(\"Malumot turini tanlang:\",\n reply_markup=keyboardbutton(users_second_menu(first_menu=m.text) + [\"Chiqish\"]))\n await User_state.second_menu.set()\n await state.update_data(main_menu=m.text)\n else:\n await m.answer(\"Bunday menyu mavjud emas!\", reply_markup=keyboardbutton(users_main_menu()+[\"Foydalanish \"\n \"yo'riqnomasi\"]))\n\n\nasync def second_menu(m: m, state: s):\n \"\"\"\n :param state:\n :param m:\n :return:\n \"\"\"\n database = await state.get_data()\n if m.text in users_second_menu(first_menu=database.get('main_menu')):\n for media_id in read_all_media(media_type=m.text, day=database.get(\n \"changed_day\")):\n await bot.send_document(chat_id=m.chat.id, document=media_id)\n elif m.text == \"Chiqish\":\n await m.answer(f\"Chiqildi!\\nKerakli menyuni tanlashingiz mumkin:\",\n reply_markup=keyboardbutton(users_main_menu()+[\"Foydalanish yo'riqnomasi\"]))\n await User_state.main_menu.set()\n else:\n await m.answer(\"Bunday menyu mavjud emas!\",\n reply_markup=keyboardbutton(users_second_menu(first_menu=database.get('main_menu'))+[\"Chiqish\"]))\n","repo_name":"My-name-is-Jamshidbek/media_bot","sub_path":"apps/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14378771654","text":"from sklearn.datasets import make_classification\nfrom 
sklearn.ensemble import RandomForestClassifier\nfrom sklearn.experimental import enable_halving_search_cv\nfrom sklearn.model_selection import HalvingGridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom sklearn.metrics import classification_report\nimport json\nimport pickle\nimport pandas as pd\nimport numpy as np\n\n\ndef train_lr(config,X_train, X_test, y_train, y_test,storagePath):\n #print(config)\n config = config['logisticRegression']\n if config[\"gridSearch\"]:\n param_grid = {\n 'penalty' : ['l1', 'l2'],\n 'C' : np.logspace(-4, 4, 20),\n 'class_weight': ['balanced', None],\n 'solver': ['liblinear']\n }\n base_estimator = LogisticRegression()\n sh = HalvingGridSearchCV(base_estimator, param_grid, cv=5,factor=2,max_resources=40).fit(X_train, y_train)\n lr = sh.best_estimator_\n else:\n lr = LogisticRegression(penalty = config['penalty'],\n solver = config['solver'],\n class_weight = config['class_weight'],\n max_iter = config['max_iter'],\n ).fit(X_train,y_train)\n \n y_pred = lr.predict(X_test)\n #print(len(y_pred))\n #print(len(y_test))\n target_names = ['negative', 'neutral', 'positive']\n report = classification_report(y_test, y_pred, target_names=target_names,output_dict=True)\n df = pd.DataFrame(report)\n df.to_csv(\"./models/report.csv\")\n #classification_report_csv(report)\n filename = storagePath+\"/lr_model.pk\"\n with open(filename,'wb') as f: pickle.dump(lr, f)\n \n\ndef train_model(config, dataFilePath, destFilePath):\n \n dataFile = dataFilePath+\"/dataset_embed_reduced.pk\"\n labelFile = dataFilePath+\"/dataset_labels.pk\"\n with open(labelFile,'rb') as f: Y = pickle.load(f)\n f.close()\n with open(dataFile,'rb') as f: X = pickle.load(f)\n f.close()\n X_train, X_test, y_train, y_test = train_test_split(X,Y,test_size=0.33, random_state=42)\n for i in config.keys():\n model = i\n model_config = config[i]\n if model == \"logisticRegression\":\n train_lr(config,X_train, X_test, y_train, y_test,destFilePath)\n\ndef pass_data(args):\n with open(args.config, 'r') as json_file:\n config = json.load(json_file)\n train_model(config, args.datafile, args.destination)\n\ndef get_parser():\n \"\"\"Get parser object.\"\"\"\n \n\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n ) \n parser.add_argument(\"-d\", \n \"--destination_folder\", \n dest = \"destination\", \n default = \"../model/\", \n help=\"folder for storing model files\")\n parser.add_argument(\"-df\", \n \"--dataFiles\", \n dest = \"datafile\", \n default = \"../pre_process/data/\", \n help=\"folder name for extracted data\")\n parser.add_argument( \"-c\",\n \"--config\", \n dest = \"config\", \n default = \"../training/config.json\", \n help=\"file containing the model configurations\")\n \n return parser\n\nif __name__ == \"__main__\":\n args = get_parser().parse_args()\n pass_data(args)\n\n","repo_name":"Muffakham/snetiment-analysis","sub_path":"training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31160831525","text":"from __future__ import annotations\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\nclass Budget(ABC):\n\n _budgetList = []\n\n @staticmethod\n @abstractmethod\n\n def budget_size():\n pass\n\nclass TripBudget(Budget):\n \n def budget_size(self,budget_input):\n 
self._budgetList.append(budget_input)\n return \"Current Budget Size: \" + str(budget_input)\n\n\nclass EmergencyBudget(Budget):\n \n def budget_size(self,budget_input):\n self._budgetList.append(budget_input)\n return \"Emergency Budget Size: \" + str(budget_input)\n\n\nclass ConversionBudget(Budget):\n \n def budget_size(self,budget_input,conversion_rate):\n self._budgetList.clear()\n budget_input = budget_input/conversion_rate\n self._budgetList.append(budget_input)\n return \"Conversion Budget Size: \" + str(budget_input)\n\nclass MainBudget(TripBudget, EmergencyBudget, ConversionBudget):\n \n TotalBudget = 0\n\n def budgetMain(self):\n \n tripBudgetSystem = TripBudget()\n emergencyBudgetSystem = EmergencyBudget()\n ConversionBudgetSystem = ConversionBudget()\n\n print(\"***Budget Menu***\")\n tripBudget = int(input(\"Enter the Current Budget: \"))\n tripBudgetSystem.budget_size(tripBudget)\n\n emergencyBudget = int(input(\"Enter the Emergency Budget: \"))\n emergencyBudgetSystem.budget_size(emergencyBudget)\n\n print(\"Are you going to a Foreign Country?\")\n ans = input(\"Yes or No: \")\n\n if (ans == \"Yes\" or ans == \"y\"):\n preBudget = sum(self._budgetList)\n print(\"This is your current Budget: \" + str(preBudget))\n converRate = float(input(\"Enter the Conversion Rate of the country you choose: \"))\n ConversionBudgetSystem.budget_size(preBudget,converRate)\n totalBudget = sum(self._budgetList)\n self.TotalBudget = round(totalBudget,2)\n print(\"Your Total Budget for that Country is \" + str(round(totalBudget, 2)))\n else:\n totalBudget = sum(self._budgetList)\n self.TotalBudget = totalBudget\n print(\"Your Total Budget is \" + str(totalBudget))\n\n\n\n\nif __name__ == \"__main__\":\n \"MAIN METHOD TO TEST THE BUDGET CLASS\"\n\n budgetSystem = MainBudget()\n\n budgetSystem.budgetMain()\n\n ","repo_name":"NietzscheArboladura/TEST","sub_path":"Budget.py","file_name":"Budget.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7261448517","text":"import requests\nimport goole_image_hybrid_roadmap\nimport pandas as pd\nimport re\nimport json\nimport find_area\n\nurl = \"https://app.regrid.com/api/v1/search.json?query=\"\n\n\n# add = pd.read_excel(\"(01-09(friday))_Lawn_AI_Test_Addresses.xlsx\")\n# addresses = [\n# re.sub(\" +\", \" \", address.strip()) for address in add[\"Test Case Address\"].tolist()\n# ]\n\n\ndef get_image(address):\n addresses = []\n addresses.append(address)\n headers = {\n \"accept\": \"application/json\",\n \"x-regrid-token\": \"eyJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJyZWdyaWQuY29tIiwiaWF0IjoxNjk1Mzc0NTM5LCJleHAiOjE2OTc5NjY1MzksInUiOjMxNjU1MCwiZyI6MjMxNTMsImNhcCI6InBhOnRzOnBzOmJmOm1hOnR5OmVvOnNiIn0.7Fj4hTJE1OLP1dhYrkhpHlCTpxcGxbuHqLjyzyhv_dU\",\n }\n house_areas = []\n for count, address in enumerate(addresses, start=23):\n params = {\"query\": address}\n\n response = requests.get(url, params=params, headers=headers)\n # with open(\"json_data.txt\", \"a\") as file:\n # file.write(f\"Response:{response}\\n\")\n\n # print(response)\n if response.status_code == 200:\n data = response.json()\n with open(\"data.json\", \"w\") as json_file:\n json.dump(data, json_file)\n # with open(\"json_data.txt\", \"a\") as file:\n # file.write(f\"data:{data}\\n\")\n # print(\"data\", data)\n # print(data)\n try:\n # if count==1 or count==2:\n # print(data,\"\\n\")\n coordinates = data[\"results\"][0][\"geometry\"][\"coordinates\"][0]\n # print(\"Area coordinates: \", coordinates)\n 
image = goole_image_hybrid_roadmap.getimage_google(\n coordinates, count, address\n )\n # print(type(image))\n house_area = data[\"buildings\"][0][\"properties\"][\n \"ed_bldg_footprint_sqft\"\n ]\n house_areas.append(house_area)\n # print(\"Total House area:\", house_area)\n find_area.cal_area(coordinates, address)\n with open(\"data.json\", \"r\") as f:\n data = json.load(f)\n total_sqft = data[\"results\"][0][\"properties\"][\"fields\"][\"sqft\"]\n house_area = data[\"buildings\"][-1][\"properties\"][\n \"ed_bldg_footprint_sqft\"\n ]\n print(\"House Area:\", house_area)\n\n return image, total_sqft, house_area, coordinates\n\n except IndexError:\n print(address)\n\n else:\n print(f\"Error getting data for {address}: {response.status_code}\")\n # pass\n # with open(\"house_areas.txt\", \"w\") as file:\n # for area in house_areas:\n # file.write(str(area) + \"\\n\")\n","repo_name":"anuragraiofficial321/Lawn_AI","sub_path":"Flask app/regrid.py","file_name":"regrid.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72192379603","text":"import gym\nimport random\nimport numpy as np\n\n# Overview\nenv = gym.make('Taxi-v3').env\nenv.render()\nenv.reset()\nenv.render()\nenv.reset()\n\nprint(env.action_space)\nprint(env.observation_space)\n\nlen(env.P)\n\nenv.P[484]\n\n# Training\nrandom.uniform(0, 1)\nenv.action_space\n\nq_table = np.zeros([env.observation_space.n, env.action_space.n])\nq_table.shape\n\nq_table\n\nnp.argmax(np.array([3, 5]))\n\n# 1-10% 3-90%\n# exploration / exploitation\n# 0 = south 1 = north 2 = east 3 = west 4 = pickup 5 = dropoff\n#%%time\nfrom IPython.display import clear_output\n\nalpha = 0.1\ngamma = 0.6\nepsilon = 0.1\n\nfor i in range(100000):\n estado = env.reset()\n\n penalidades, recompensa = 0, 0\n done = False\n while not done:\n # Exploração\n if random.uniform(0, 1) < epsilon:\n acao = env.action_space.sample()\n # Exploitation\n else:\n acao = np.argmax(q_table[estado])\n\n proximo_estado, recompensa, done, info = env.step(acao)\n\n q_antigo = q_table[estado, acao]\n proximo_maximo = np.max(q_table[proximo_estado])\n\n q_novo = (1 - alpha) * q_antigo + alpha * (recompensa + gamma * proximo_maximo)\n q_table[estado, acao] = q_novo\n\n if recompensa == -10:\n penalidades += 1\n\n estado = proximo_estado\n\n if i % 100 == 0:\n clear_output(wait=True)\n print('Episódio: ', i)\n\nprint('Treinamento concluído')\n\n# 0 = south 1 = north 2 = east 3 = west 4 = pickup 5 = dropoff\nq_table[346]\n\nenv.reset()\nenv.render()\n\nenv.step(1)\nenv.render()\n\nenv.step(1)\nenv.render()\n\nenv.encode(3, 2, 1, 2)\n\n# Avaliation\ntotal_penalidades = 0\nepisodios = 50\nframes = []\n\nfor _ in range(episodios):\n estado = env.reset()\n penalidades, recompensa = 0, 0\n done = False\n while not done:\n acao = np.argmax(q_table[estado])\n estado, recompensa, done, info = env.step(acao)\n\n if recompensa == -10:\n penalidades += 1\n\n frames.append({\n 'frame': env.render(mode='ansi'),\n 'state': estado,\n 'action': acao,\n 'reward': recompensa\n })\n\n total_penalidades += penalidades\n\nprint('Episódios', episodios)\nprint('Penalidades', total_penalidades)\n\nframes[0]\n\nfrom time import sleep\nfor frame in frames:\n clear_output(wait=True)\n print(frame['frame'])\n print('Estado', frame['state'])\n print('Ação', frame['action'])\n print('Recompensa', frame['reward'])\n 
sleep(.5)","repo_name":"glauberss2007/AI-overview","sub_path":"machine_learning/reinforcment-learn/taxe.py","file_name":"taxe.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34492450328","text":"import os\nimport shutil\nimport click\n\n@click.command()\n@click.argument('directory', default=\".\")\ndef remove_pycache(directory):\n for root, dirs, _ in os.walk(directory):\n if \"__pycache__\" in dirs:\n pycache_path = os.path.join(root, \"__pycache__\")\n try:\n shutil.rmtree(pycache_path)\n click.echo(f\"Removed: {pycache_path}\")\n except Exception as e:\n click.echo(f\"Error removing {pycache_path}: {e}\")\n\nif __name__ == \"__main__\":\n remove_pycache()\n","repo_name":"hcoco1/phase3_cli_click","sub_path":"lib/others/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11047835954","text":"'''\n#==============================================================================\ncky.py\n/Users/aelshen/Documents/Dropbox/School/CLMS 2013-2014/Winter 2014/Ling 571-Deep Processing Techniques for NLP/hw2_571_aelshen/src/cky.py\nCreated on Jan 29, 2014\n@author: Ahmad Elshenawy\n#==============================================================================\n'''\n\nimport nltk\nimport os\nimport sys\nfrom cnf_convert import CNF\nfrom collections import defaultdict\n#==============================================================================\n#--------------------------------Constants-------------------------------------\n#==============================================================================\nDEBUG = True\n\n#==============================================================================\n#-----------------------------------Main---------------------------------------\n#==============================================================================\ndef main():\n if len(sys.argv) < 3:\n print(\"cky.py requires two arguments:\"\\\n + os.linesep + \"\\t(1)context free grammar file\"\\\n + os.linesep + \"\\t(2)input data file\")\n sys.exit()\n \n grammar_file = sys.argv[1]\n sentences = open(sys.argv[2],'r')\n #cfg is converted\n grammar = CNF(grammar_file)\n \n CKY(sentences, grammar)\n \n#============================================================================== \n#---------------------------------Functions------------------------------------\n#==============================================================================\n\n#Parse given data using the CKY algorithm\ndef CKY(data, cnf_grammar):\n lines_parsed = 0\n total_parses = 0\n #read each line of the input data\n for line in data:\n lines_parsed += 1\n \n print(os.linesep + \"Sentence #\" + str(lines_parsed) + \": \" + line)\n sentence = line.strip()\n \n #tokenize the current sentence\n sentence = nltk.word_tokenize(sentence)\n \n #the (n+1)x(n+1) table needed for the cky algorithm\n table = [[None for x in xrange(len(sentence) + 1)] for x in xrange(len(sentence) + 1)]\n back_trace = defaultdict(set)\n \n for j in xrange( 1, len(sentence) + 1 ):\n word = \"'\" + sentence[j - 1] + \"'\"\n #list of tuples\n labels = []\n #get every preterminal that produces the current word\n for LHS in cnf_grammar.terminal_rules_by_daughter[word]:\n parent = Node(LHS, [word])\n labels.append(parent)\n back_trace[(j-1,j)].add( parent )\n #end LHS in cnf_grammar.terminal_rules_by_daughter[word]:\n table[j-1][j] = 
labels\n\n \n for i in range(j-2,-1,-1):\n k = i + 1\n LHS = []\n while k <= j - 1:\n B = table[i][k][:]\n C = table[k][j][:]\n \n \n for left_child in B:\n for right_child in C:\n if left_child.label in cnf_grammar.nonterminal_rules_by_daughter:\n RHS = (left_child.label, right_child.label)\n #if a rule exists that produces this (left,right) pair\n if RHS in cnf_grammar.nonterminal_rules_by_daughter[left_child.label]: \n for label in cnf_grammar.nonterminal_rules_by_daughter[left_child.label][RHS]:\n #create an object to keep track of the parent, \n #and the left and right children that led to it\n parent = Node(label, [left_child, right_child])\n #save to a list of all possible parents for this j\n LHS.append(parent)\n #add this parent object to the backtrace, \n #using the start and stop (i.e. (i,j) ) tuple as a key\n #to keep track of the length of the span\n back_trace[(i,j)].add( parent )\n k += 1\n #while k <= j - 1:\n table[i][j] = LHS\n \n #for i in range(j-2,-1,-1):\n \n \n #end for j in range( 1,len(sentence) ):\n \n #lines_parsed += 1\n count = 0\n #for any parses in the backtrace that cover the entire span of the sentence\n for trace in back_trace[(0, len(sentence) )]:\n #for any such span that begins with the start symbol of the grammar\n if trace.label == cnf_grammar.start_symbol:\n count += 1\n print(\"Parse #\" + str(count) + \":\")\n ParsePrint(trace)\n \n print(str(count) + \" Total Parse(s) for Sentence#\" + str(lines_parsed) + os.linesep)\n #end for line in data:\n\n\n\n#Recursively follows the lineage of a parent object, printing in simple \n#bracketed form until a terminal is produced\ndef ParsePrint(trace, indent = \"\"):\n if len(trace.children) == 1:\n print(indent + \"(\" + trace.label + \"\\t\" + trace.children[0] + \")\")\n #ParsePrint(trace.children[0], indent + \" \")\n return\n else:\n print(indent + \"(\" + trace.label )\n ParsePrint(trace.children[0], indent + \" \")\n #print(indent + \")\")\n ParsePrint(trace.children[1], indent + \" \")\n print(indent + \")\")\n \n \n#============================================================================== \n#----------------------------------Classes-------------------------------------\n#==============================================================================\nclass Node:\n def __init__(self, label, children = []):\n self.label = label\n self.children = children\n#============================================================================== \n#------------------------------------------------------------------------------\n#==============================================================================\nif __name__ == \"__main__\":\n sys.exit( main() )","repo_name":"aelshen/hw2_571_aelshen","sub_path":"src/cky.py","file_name":"cky.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13206846094","text":"# -*- author: iwinstar -*-\n# -*- encoding: utf-8 -*-\n# -*- datetime: 2018/09/10 08:10:08 -*-\n\nimport sqlite3\nimport threadpool\nimport os\nimport time\nimport datetime\nimport sys\n\nfrom urllib import request\nfrom urllib import error\nfrom PIL import Image\n\nImage.MAX_IMAGE_PIXELS = 1000000000\n\nclass UnsplashDownloader:\n def __init__(self, pictures, folder, threads=10):\n self.pictures = pictures\n self.folder = folder\n self.threads = threads\n\n def run(self):\n # use thread pool to download pictures\n pool = threadpool.ThreadPool(self.threads)\n requests = threadpool.makeRequests(self.downloader, 
self.pictures)\n [pool.putRequest(req) for req in requests]\n pool.wait()\n\n def downloader(self, created_at, file_name, url):\n try:\n full_name = self.folder + '/' + file_name\n\n # check local picture\n if os.path.exists(full_name):\n local_file_size = int(os.path.getsize(full_name))\n remote_file_size = int(request.urlopen(url).headers['Content-Length'])\n\n if remote_file_size == local_file_size:\n print('Downloaded %s' % file_name)\n return\n\n # download picture\n print('Downloading: %s' % file_name)\n request.urlretrieve(url, full_name)\n\n # initialize change_time to picture's upload_time\n change_time = time.mktime(time.strptime(created_at, '%Y-%m-%d %H:%M:%S'))\n\n try:\n # get picture's last modified time stored in exif\n exif = Image.open(full_name)._getexif()\n\n if exif and exif.get(306):\n # python don't provide interface to change file's create_time under mac\n # so, here we just modify change_time, known as ctime\n # more introduction about exif format: http://www.exiv2.org/tags.html\n change_time = time.mktime(time.strptime(exif.get(306), '%Y:%m:%d %H:%M:%S'))\n except Exception as e:\n print(\"%s exception %s\" % (file_name, e))\n \n os.utime(full_name, (change_time, change_time))\n except error.ContentTooShortError:\n print('Network Error, re-download: ' + url)\n self.downloader(created_at, file_name, url)\n\n\nif __name__ == \"__main__\":\n\n COLOR_BEGIN = '\\033[93m'\n COLOR_END = '\\033[0m'\n\n db_file = \"database/picture.db\"\n cp_file = \"checkpoint/download\"\n\n time_begin = datetime.datetime.now()\n\n # get params\n folder_path = sys.argv[1]\n\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n\n # read checkpoint\n condition = None\n with open(cp_file, 'r') as fr:\n checkpoint = fr.read()\n fr.close()\n\n if checkpoint:\n condition = \"where created_at >= '%s'\" % checkpoint\n\n # get all picture urls\n conn = sqlite3.connect(db_file)\n cursor = conn.execute(\"select distinct created_at, file_name, url from picture %s order by created_at asc\" % condition)\n\n pictures = []\n for picture in cursor:\n pictures.append((list(picture), None))\n checkpoint = picture[0]\n\n # threads shouldn't be very large\n pd = UnsplashDownloader(pictures, folder_path, threads=5)\n pd.run()\n\n # record checkpoint\n with open(cp_file, 'w') as fw:\n fw.write(checkpoint)\n fw.close()\n\n time_end = datetime.datetime.now()\n seconds = (time_end - time_begin).total_seconds()\n hms = str(datetime.timedelta(seconds=seconds))\n\n print(\"%sCheckpoint: %s, Total download pictures: %s, Total time: %s%s\" % \\\n (COLOR_BEGIN, checkpoint, len(pictures), hms, COLOR_END))\n","repo_name":"iwinstar/unsplash-downloader","sub_path":"UnsplashDownloader.py","file_name":"UnsplashDownloader.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"8936030001","text":"__author__ = 'ict'\n\nfrom calculate.helper.statistics import variance\nfrom calculate.helper.statistics import entropy\nfrom calculate.helper.statistics import gini\nimport math\n\n\ndef importance(data_dict, method=\"standard variance\"):\n weight_list = []\n tmp = data_dict.popitem()\n data_dict[tmp[0]] = tmp[1]\n for i in range(len(tmp[1])):\n if method == \"standard variance\":\n weight_list.append(math.sqrt(variance([value[i] for _, value in data_dict.items()])))\n elif method == \"variance\":\n weight_list.append(variance([value[i] for _, value in data_dict.items()]))\n elif method == \"entropy\":\n 
weight_list.append(entropy([value[i] for _, value in data_dict.items()]))\n elif method == \"gini\":\n weight_list.append(gini([value[i] for _, value in data_dict.items()]))\n else:\n raise Exception(\"Invalid method: \" + str(method))\n for key, value in data_dict.items():\n data_dict[key] = [weight_list[i] * value[i] for i in range(len(tmp[1]))]","repo_name":"fudong1127/scroll","sub_path":"calculate/helper/importance.py","file_name":"importance.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22653972835","text":"from abc import ABCMeta\nfrom abc import abstractmethod\nfrom bootstrapvz.common.tools import log_check_call\nfrom bootstrapvz.common.fsm_proxy import FSMProxy\nfrom ..exceptions import PartitionError\n\n\nclass AbstractPartitionMap(FSMProxy):\n \"\"\"Abstract representation of a partiton map\n This class is a finite state machine and represents the state of the real partition map\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n # States the partition map can be in\n events = [{'name': 'create', 'src': 'nonexistent', 'dst': 'unmapped'},\n {'name': 'map', 'src': 'unmapped', 'dst': 'mapped'},\n {'name': 'unmap', 'src': 'mapped', 'dst': 'unmapped'},\n ]\n\n def __init__(self, bootloader):\n \"\"\"\n :param str bootloader: Name of the bootloader we will use for bootstrapping\n \"\"\"\n # Create the configuration for the state machine\n cfg = {'initial': 'nonexistent', 'events': self.events, 'callbacks': {}}\n super(AbstractPartitionMap, self).__init__(cfg)\n\n def is_blocking(self):\n \"\"\"Returns whether the partition map is blocking volume detach operations\n\n :rtype: bool\n \"\"\"\n return self.fsm.current == 'mapped'\n\n def get_total_size(self):\n \"\"\"Returns the total size the partitions occupy\n\n :return: The size of all partitions\n :rtype: Sectors\n \"\"\"\n # We just need the endpoint of the last partition\n return self.partitions[-1].get_end()\n\n def create(self, volume):\n \"\"\"Creates the partition map\n\n :param Volume volume: The volume to create the partition map on\n \"\"\"\n self.fsm.create(volume=volume)\n\n @abstractmethod\n def _before_create(self, event):\n pass\n\n def map(self, volume):\n \"\"\"Maps the partition map to device nodes\n\n :param Volume volume: The volume the partition map resides on\n \"\"\"\n self.fsm.map(volume=volume)\n\n def _before_map(self, event):\n \"\"\"\n :raises PartitionError: In case a partition could not be mapped.\n \"\"\"\n volume = event.volume\n try:\n # Ask kpartx how the partitions will be mapped before actually attaching them.\n mappings = log_check_call(['kpartx', '-l', volume.device_path])\n import re\n regexp = re.compile(r'^(?P.+[^\\d](?P\\d+)) : '\n r'(?P\\d) (?P\\d+) '\n r'{device_path} (?P\\d+)$'\n .format(device_path=volume.device_path))\n log_check_call(['kpartx', '-as', volume.device_path])\n\n import os.path\n # Run through the kpartx output and map the paths to the partitions\n for mapping in mappings:\n match = regexp.match(mapping)\n if match is None:\n raise PartitionError('Unable to parse kpartx output: ' + mapping)\n partition_path = os.path.join('/dev/mapper', match.group('name'))\n p_idx = int(match.group('p_idx')) - 1\n self.partitions[p_idx].map(partition_path)\n\n # Check if any partition was not mapped\n for idx, partition in enumerate(self.partitions):\n if partition.fsm.current not in ['mapped', 'formatted']:\n raise PartitionError('kpartx did not map partition #' + 
str(partition.get_index()))\n\n except PartitionError:\n # Revert any mapping and reraise the error\n for partition in self.partitions:\n if partition.fsm.can('unmap'):\n partition.unmap()\n log_check_call(['kpartx', '-ds', volume.device_path])\n raise\n\n def unmap(self, volume):\n \"\"\"Unmaps the partition\n\n :param Volume volume: The volume to unmap the partition map from\n \"\"\"\n self.fsm.unmap(volume=volume)\n\n def _before_unmap(self, event):\n \"\"\"\n :raises PartitionError: If the a partition cannot be unmapped\n \"\"\"\n volume = event.volume\n # Run through all partitions before unmapping and make sure they can all be unmapped\n for partition in self.partitions:\n if partition.fsm.cannot('unmap'):\n msg = 'The partition {partition} prevents the unmap procedure'.format(partition=partition)\n raise PartitionError(msg)\n # Actually unmap the partitions\n log_check_call(['kpartx', '-ds', volume.device_path])\n # Call unmap on all partitions\n for partition in self.partitions:\n partition.unmap()\n","repo_name":"andsens/bootstrap-vz","sub_path":"bootstrapvz/base/fs/partitionmaps/abstract.py","file_name":"abstract.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"3"} +{"seq_id":"36053535255","text":"def Jun(j_cash, machineDuck):\n stock = 0 # 주식 수\n\n for day in machineDuck:\n if j_cash >= day:\n stock += j_cash // day\n j_cash -= (j_cash // day) * day\n\n print(\"준현이의 자산 : \", (stock * machineDuck[13]) + j_cash)\n return (stock * machineDuck[13]) + j_cash\n\n\ndef Sung(s_cash, machineDuck):\n stock = 0 # 주식 수\n up, down = 0, 0 # 주가 증감 카운트\n yesterday = 0 # 전일 주가\n\n for idx, today in enumerate(machineDuck):\n # 첫날은 건너뛰기\n if idx == 0:\n yesterday = today\n continue\n\n if today > yesterday:\n up += 1\n down = 0\n elif today < yesterday:\n up = 0\n down += 1\n else:\n up, down = 0, 0\n\n # 매수, >= : 다음날 더 떨어지면 또 살 수 있음\n if down >= 3:\n stock += s_cash // today # 누적 주식 수\n s_cash -= (s_cash // today) * today # 남은 현금 계산\n # 매도\n elif up == 3:\n s_cash += stock * today\n stock = 0\n\n yesterday = today\n\n print(\"성민이의 자산 : \", (stock * machineDuck[13]) + s_cash)\n return (stock * machineDuck[13]) + s_cash\n\n\nif __name__ == '__main__':\n cash = int(input())\n MachineDuck = list(map(int, input().split()))\n\n final_Jun = Jun(cash, MachineDuck) # 준현이의 최종 자산\n final_Sung = Sung(cash, MachineDuck) # 성민이의 최종 자산\n\n if final_Jun > final_Sung:\n print(\"BNP\")\n elif final_Jun < final_Sung:\n print(\"TIMING\")\n else:\n print(\"SAMESAME\")","repo_name":"Sangmin627/AlgoStudy2023","sub_path":"창재/백준/구현/week1/20546/[baekjoon]20546.py","file_name":"[baekjoon]20546.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17024878838","text":"\"\"\"Setuptools package definition\"\"\"\n\nfrom setuptools import setup\nfrom setuptools import find_packages\nimport os\n\n\n__version__ = None\nversion_file = \"pyaptly/version.py\"\nwith open(version_file) as f:\n code = compile(f.read(), version_file, 'exec')\n exec(code)\n\n\ndef find_data(packages, extensions):\n \"\"\"Finds data files along with source.\n\n :param packages: Look in these packages\n :param extensions: Look for these extensions\n \"\"\"\n data = {}\n for package in packages:\n package_path = package.replace('.', '/')\n for dirpath, _, filenames in os.walk(package_path):\n for filename in filenames:\n for extension in extensions:\n if 
filename.endswith(\".%s\" % extension):\n file_path = os.path.join(\n dirpath,\n filename\n )\n file_path = file_path[len(package) + 1:]\n if package not in data:\n data[package] = []\n data[package].append(file_path)\n return data\n\nwith open('README.rst', 'r') as f:\n README_TEXT = f.read()\n\nsetup(\n name = \"pyaptly\",\n version = __version__,\n packages = find_packages(),\n package_data=find_data(\n find_packages(), [\"yml\"]\n ),\n entry_points = {\n 'console_scripts': [\n \"pyaptly = pyaptly:main\",\n ]\n },\n install_requires = [\n \"pyyaml\",\n \"freeze\",\n \"six\"\n ],\n author = \"Adfinis-SyGroup\",\n author_email = \"https://adfinis-sygroup.ch/\",\n description = \"Aptly mirror/snapshot managment automation.\",\n long_description = README_TEXT,\n keywords = \"aptly mirror snapshot automation\",\n url = \"https://github.com/adfinis-sygroup/pyaptly\",\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n ]\n)\n","repo_name":"adfinis/pyaptly","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"7055117441","text":"# https://adventofcode.com/2015/day/18\nfrom __future__ import print_function\nfrom itertools import product\n\n\ndef sum_grid(grid):\n return sum(int(c == '#') for line in grid for c in line)\n\n\noffsets = list(product([-1, 0, 1], [-1, 0, 1]))\n\n\ndef get_state(grid, height, width, x, y):\n total = 0\n for dif_x, dif_y in offsets:\n neighbor_x = x + dif_x\n neighbor_y = y + dif_y\n if dif_x == 0 and dif_y == 0 \\\n or neighbor_x not in range(width) \\\n or neighbor_y not in range(height):\n continue\n total += grid[neighbor_y][neighbor_x] == '#'\n if total > 3:\n return '.'\n state = grid[y][x]\n if state == '#':\n return '#' if total == 2 or total == 3 else '.'\n else:\n return '#' if total == 3 else '.'\n\n\ndef grid_cycle(grid):\n height, width = len(grid), len(grid[0])\n new_grid = [[get_state(grid, height, width, x, y) for x in range(width)]\n for y in range(height)]\n return new_grid\n\n\ndef part1(grid, steps):\n for _ in range(steps):\n grid = grid_cycle(grid)\n return sum_grid(grid)\n\n\ndef set_corners_on(grid):\n grid[0][0] = '#'\n grid[0][-1] = '#'\n grid[-1][0] = '#'\n grid[-1][-1] = '#'\n\n\ndef part2(grid, steps):\n set_corners_on(grid)\n for _ in range(steps):\n grid = grid_cycle(grid)\n set_corners_on(grid)\n return sum_grid(grid)\n\n\ndef process(grid, steps):\n # part 1\n result = part1(grid, steps)\n print(\"part 1:\", result)\n # part 2\n result = part2(grid, steps)\n print(\"part 2:\", result)\n\n\ndef parse_line(line):\n return list(line)\n\n\ndef load_data(fileobj):\n return [parse_line(line.rstrip()) for line in fileobj]\n\n\ndef main(file, steps):\n print(file)\n with open(file) as f:\n process(load_data(f), steps)\n\n\nif __name__ == \"__main__\":\n # main(\"test.txt\", 5)\n main(\"input.txt\", 
100)\n","repo_name":"PetrPrazak/AdventOfCode","sub_path":"2015/18/aoc2015_18.py","file_name":"aoc2015_18.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5750174321","text":"import random\nclass perceptron(object):\n weights = []\n lc = 0\n def __init__(self,n, lc):\n for i in range(n):\n self.weights.append(random.choice([-1,1])) #add random weights\n self.c = lc\n \n# def train(inputs, desired):\n# guess = \n def train(self, inputs, desired):\n guess = perceptron.feedforward(self,inputs)\n error = desired - guess\n for i in range(len(self.weights)):\n self.weights[i] += self.lc * error * guess \n\n def feedforward(self,inputs):#checked this worked\n sum = 0\n for i in range(len(self.weights)):\n sum += inputs[i] * self.weights[i]\n return self.activate(sum)\n\n def activate(self,sum):\n if (sum > 0):\n return 1\n else:\n return -1\n\nweights = perceptron(3,0.1) # how to make perceptron (n,0.1)\nprint(perceptron.feedforward(weights,[2,1,3])) #this is how you use methods in class","repo_name":"a1noh/perceptron","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18833443936","text":"from hotel.celery import app\nfrom .service import send, sending\nfrom .views import *\n\n@app.task\ndef send_message(name, message, email, name_room = None):\n\tif name_room == None:\n\t\tname1 = 'name: '+name\n\t\ttext = name1 + '\\n'+ message\n\t\tsend(email, text)\n\telse:\n\t\tname1 = 'name: ' + name\n\t\ttext = name1 + '\\n' + message + '\\n' + 'Аренда: '+ name_room\n\t\tsending(email, text)","repo_name":"Alexsandr007/hotel","sub_path":"hotel_list/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44685312332","text":"from Indoor_GPS.marvelmind import MarvelmindHedge\nfrom Constants.Consts import *\nimport time\n\nclass GPS():\n\n def __init__(self, grid, pid):\n # create Marvel Mind Hedge thread\n # get USB port with ls /dev/tty.usb*\n # adr is the address of the hedgehog beacon!\n self.hedge = MarvelmindHedge(tty=tty, adr=hedge_addr, debug=False)\n # start thread\n self.hedge.start()\n # REQUIRED SLEEP TIME in order for thread to start and init_pos to be correct\n time.sleep(1)\n # data in array's [usnadr, x, y, z, timestamp]\n self.init_pos = self.hedge.position()\n self.init_pos = [self.init_pos[0], self.init_pos[2], self.init_pos[1]*-1, self.init_pos[3], self.init_pos[4]]\n self.location_buffer = []\n self.grid = grid\n self.pid = pid\n\n def update_loc(self, curr_tile):\n \"\"\"\n updates the current tile based on the GPS input\n \"\"\"\n # call indoor gps get location function\n # avgPosition = [0, 0]\n # for prev_pos in self.location_buffer:\n # if prev_pos is not None:\n # avgPosition[0] += prev_pos[0]\n # avgPosition[1] += prev_pos[1]\n\n # if len(self.location_buffer) >= 5:\n # try:\n # avgPosition[0] /= len(self.location_buffer)\n # avgPosition[1] /= len(self.location_buffer)\n [_, y, x, z, ang, time] = self.hedge.position()\n x = -x\n x1 = x\n y1 = y\n\n self.location_buffer.pop(0)\n self.location_buffer.append((x1, y1))\n # map the position to the correct frame of reference\n x = (x - self.init_pos[1]) * 10\n y = (y - self.init_pos[2]) * 10\n x = int(tile_num_width/2) + int(x * 100 / tile_size)\n y = 
int(tile_num_height/2) + int(y * 100 / tile_size)\n prev_tile = curr_tile\n curr_tile = self.grid.grid[x][y]\n self.pid.update_PID(curr_tile.x, curr_tile.y)\n\n return prev_tile, curr_tile\n # except:\n # print(\"GPS Reads robot position is off the screen!\")\n # self.update_loc(curr_tile)\n # else:\n # print(\"IN GPS ELSE \")\n # [_, y, x, z, ang, time] = self.hedge.position()\n # x = -x\n # x1 = x\n # y1 = y\n # self.location_buffer.append((x1, y1))\n #\n # return self.update_loc(curr_tile)\n","repo_name":"cornell-cup/C1C0_path_planning","sub_path":"Indoor_GPS/GPS.py","file_name":"GPS.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29061620087","text":"# -*- coding: utf-8 -*-\n\nfrom collections import namedtuple\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta as shift\n\nimport pytest\n\nfrom apikeys.tests_by_typical_flows import typical_flows as flow, plain_function as plain\nfrom apikeys.apikeys_utils import get_parameters, Person_type\nfrom btestlib import utils\nfrom btestlib.utils import aDict\n\n__author__ = 'ilya_knysh'\n\nBASE_DT = datetime.utcnow()\nSTART_PREVIOUS_MONTH, END_PREVIOUS_MONTH = utils.Date.previous_month_first_and_last_days(BASE_DT)\nSTART_CURRENT_MONTH, END_CURRENT_MONTH = utils.Date.current_month_first_and_last_days(BASE_DT)\n\nTariff = namedtuple('Tariff',\n ['tariff_cc', # Наименование Тарифа\n 'service_id', # Наименование Сервиса\n 'counters', # Наименование Счетчика (для некоторых продуктов их несколько\n 'unit' # Наименование Юнита\n ])\n\nnon_commercial_tariffs = [\n\n Tariff('routingmatrix_custom', 'routingmatrix', ['cells'], 'routingmatrix_cells_daily'),\n Tariff('apimaps_custom', 'apimaps', ['total'], 'apimaps_total_daily'),\n Tariff('mapkit_custom', 'mapkit', ['total'], 'mapkit_total_daily'),\n Tariff('staticmaps_custom', 'staticmaps', ['hits'], 'staticmaps_hits_daily'),\n Tariff('city_custom', 'city', ['hits'], 'city_hits_daily'),\n]\n\ngeneral_scenarios = [\n # Test-case 0\n {'description': u'[unlim]',\n 'base_dt': BASE_DT,\n 'stats': [\n {'completions': {'shift_limit': 1000000}, 'dt': BASE_DT},\n ],\n 'active_after_scenario': True,\n 'limit': -1},\n\n # Test-case 1\n {'description': u'[10000 limit +]',\n 'base_dt': BASE_DT,\n 'stats': [\n {'completions': {'shift_limit': 1}, 'dt': BASE_DT},\n ],\n 'active_after_scenario': False,\n 'limit': 10000},\n\n # Test-case 2\n {'description': u'[10000 limit equal]',\n 'base_dt': BASE_DT,\n 'stats': [\n {'completions': {'shift_limit': 0}, 'dt': BASE_DT},\n ],\n 'active_after_scenario': False,\n 'limit': 10000},\n\n # Test-case 3\n {'description': u'[10000 limit -]',\n 'base_dt': BASE_DT,\n 'stats': [\n {'completions': {'shift_limit': -1}, 'dt': BASE_DT},\n ],\n 'active_after_scenario': True,\n 'limit': 10000},\n\n # Test-case 4\n {'description': u'[multikeys 10000 limit +]',\n 'base_dt': BASE_DT,\n 'stats': [\n {'completions': {'shift_limit': -4999}, 'dt': BASE_DT, 'key': 1},\n {'completions': {'shift_limit': -5000}, 'dt': BASE_DT, 'key': 2},\n ],\n 'active_after_scenario': False,\n 'limit': 10000},\n\n # Test-case 5\n {'description': u'[multikeys 10000 limit -]',\n 'base_dt': BASE_DT,\n 'stats': [\n {'completions': {'shift_limit': -5001}, 'dt': BASE_DT, 'key': 1},\n {'completions': {'shift_limit': -5000}, 'dt': BASE_DT, 'key': 2},\n ],\n 'active_after_scenario': True,\n 'limit': 10000},\n\n # Test-case 6\n {'description': u'[limitless_duration]',\n 'base_dt': BASE_DT,\n 'stats': [\n 
{'completions': {'shift_limit': 1000000}, 'dt': BASE_DT},\n ],\n 'active_after_scenario': True,\n 'limit': -1,\n 'validity_period': 367,\n }\n]\n\n\n@pytest.mark.parametrize('tariff', get_parameters(non_commercial_tariffs), ids=lambda x: x.tariff_cc)\n@pytest.mark.parametrize(\n 'scenario', get_parameters(general_scenarios, )\n , ids=lambda x: x['description'])\n@pytest.mark.good\ndef test_non_commercial(scenario, tariff, free_passport):\n scenario_copy = aDict(deepcopy(scenario))\n scenario_copy.tariff = tariff.tariff_cc\n flow.LimitChecker.non_commercial(scenario_copy, free_passport, tariff.service_id, tariff)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests_by_typical_flows/limit_checker/test_non_commercial.py","file_name":"test_non_commercial.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40262427621","text":"from dataclasses import dataclass\nfrom typing import Optional, List\n\nfrom monitor.notifications.templates import (\n ALERT_STARTED_EMAIL,\n ALERT_STARTED_SMS,\n ALERT_STOPPED_EMAIL,\n ALERT_STOPPED_SMS,\n POWER_OUTAGE_STARTED_EMAIL,\n POWER_OUTAGE_STARTED_SMS,\n POWER_OUTAGE_STOPPED_EMAIL,\n POWER_OUTAGE_STOPPED_SMS,\n)\n\n\nclass NotificationType:\n ALERT_STARTED = \"alert_started\"\n ALERT_STOPPED = \"alert_stopped\"\n POWER_OUTAGE_STARTED = \"power_outage_started\"\n POWER_OUTAGE_STOPPED = \"power_outage_stopped\"\n\n\n@dataclass\nclass Notification:\n type: NotificationType\n id: int\n sensors: List[str]\n time: str\n retry: int = 0\n last_try: float = 0.0\n\n # True = sent, False = sending failed, None = no need to send (not subscribed)\n sms_sent: Optional[bool] = False\n email1_sent: Optional[bool] = False\n email2_sent: Optional[bool] = False\n\n def get_sms_template(self):\n mapping = {\n NotificationType.ALERT_STARTED: ALERT_STARTED_SMS,\n NotificationType.ALERT_STOPPED: ALERT_STOPPED_SMS,\n NotificationType.POWER_OUTAGE_STARTED: POWER_OUTAGE_STARTED_SMS,\n NotificationType.POWER_OUTAGE_STOPPED: POWER_OUTAGE_STOPPED_SMS,\n }\n\n try:\n return mapping[self.type]\n except KeyError:\n self._logger.error(\"Unknown notification type!\")\n\n def get_email_template(self):\n mapping = {\n NotificationType.ALERT_STARTED: ALERT_STARTED_EMAIL,\n NotificationType.ALERT_STOPPED: ALERT_STOPPED_EMAIL,\n NotificationType.POWER_OUTAGE_STARTED: POWER_OUTAGE_STARTED_EMAIL,\n NotificationType.POWER_OUTAGE_STOPPED: POWER_OUTAGE_STOPPED_EMAIL,\n }\n\n return mapping[self.type]\n\n def get_email_subject(self):\n mapping = {\n NotificationType.ALERT_STARTED: \"Alert started\",\n NotificationType.ALERT_STOPPED: \"Alert stopped\",\n NotificationType.POWER_OUTAGE_STARTED: \"Power outage started\",\n NotificationType.POWER_OUTAGE_STOPPED: \"Power outage stopped\",\n }\n\n return mapping[self.type]\n\n @property\n def processed(self):\n return (\n (self.sms_sent is None or self.sms_sent) and\n (self.email1_sent is None or self.email1_sent) and\n (self.email2_sent is None or self.email2_sent)\n )\n","repo_name":"ArPIHomeSecurity/arpi_server","sub_path":"src/monitor/notifications/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28792447848","text":"import os\r\nimport logging\r\nimport datetime\r\nfrom airflow.models import DAG\r\nfrom airflow.hooks import S3_hook, postgres_hook\r\nfrom airflow.contrib.hooks.aws_hook 
import AwsHook\r\nfrom airflow.operators.python_operator import PythonOperator\r\nfrom airflow.operators.postgres_operator import PostgresOperator\r\n\r\ndef log_data(msg):\r\n return logging.info(msg)\r\n\r\ndef iterate_directory(dir, filetype):\r\n files = []\r\n for filename in os.listdir(dir):\r\n if filename.endswith(filetype):\r\n files.append((dir+\"/\"+filename, filename))\r\n return files\r\n\r\ndef create_bucket(bucket_name, s3_connection=\"shinyui_s3_dend\"):\r\n hook = S3_hook.S3Hook(s3_connection)\r\n log_data(\"S3 is creating bucket: {}\".format(bucket_name))\r\n hook.create_bucket(bucket_name=bucket_name)\r\n log_data(\"Bucket: {} is created\".format(bucket_name))\r\n\r\ndef upload_file_to_s3(bucket_name, files, s3_connection=\"shinyui_s3_dend\"):\r\n hook = S3_hook.S3Hook(s3_connection)\r\n for idx, file in enumerate(files):\r\n file_path = file[0]\r\n file_name = file[1]\r\n log_data(\"Uploading file {file} to {bucket}\".format(file=file_name, bucket=bucket_name))\r\n hook.load_file(filename=file_path, key=file_name, bucket_name=bucket_name)\r\n log_data(\"{file} uploaded to {bucket}\".format(file=file_name, bucket=bucket_name))\r\n\r\ndef check_number_of_files(bucket_name, files, s3_connection=\"shinyui_s3_dend\"):\r\n hook = S3_hook.S3Hook(s3_connection)\r\n number_of_files_s3 = len(hook.list_keys(bucket_name=bucket_name))\r\n number_of_files_local = len(files)\r\n if number_of_files_s3 != number_of_files_local:\r\n raise Exception(\"The number of files on {bucket} isn't equal to the record\".format(bucket=bucket_name))\r\n else:\r\n log_data(\"The number of files on {bucket} is equal to the record\".format(bucket=bucket_name))\r\n\r\ndef copy_data_to_redshift(table, s3_files):\r\n aws_hook = AwsHook(\"shinyui_aws_credentials\") #aws_user_credentials\r\n credentials = aws_hook.get_credentials()\r\n redshift_hook = postgres_hook.PostgresHook(\"shinyui_redshift\") #aws_redshift_credentials\r\n sql_stmt = \"\"\"\r\n COPY {table}\r\n FROM '{s3}'\r\n ACCESS_KEY_ID '{id}'\r\n SECRET_ACCESS_KEY '{key}'\r\n IGNOREHEADER 1\r\n CSV\r\n DELIMITER ','\r\n \"\"\".format(table=table,\r\n s3=s3_files,\r\n id=credentials.access_key,\r\n key=credentials.secret_key)\r\n redshift_hook.run(sql_stmt)\r\n\r\ndef quality_check(table):\r\n redshift_hook = postgres_hook.PostgresHook(\"shinyui_redshift\") #aws_redshift_credentials\r\n sql_stmt = \"\"\"\r\n SELECT COUNT(*)\r\n FROM {table}\r\n \"\"\".format(table=table)\r\n number = redshift_hook.run(sql_stmt)\r\n if number <= 0:\r\n raise Exception(\"Table {table} quality test wasn't passed\".format(table=table))\r\n else:\r\n logging.info(\"Table {table} passed the quality test\".format(table=table))\r\n\r\n\r\n\r\ndag = DAG(\r\n dag_id=\"capstone_project_pipeline\",\r\n start_date=datetime.datetime(2019, 5, 26),\r\n schedule_interval=None,\r\n)\r\n\r\niterate_directory_task = PythonOperator(\r\n task_id=\"iterate_directory\",\r\n python_callable=iterate_directory,\r\n op_kwargs={\r\n \"dir\":\"/usr/local/airflow/data\",\r\n \"filetype\":\".csv\"\r\n },\r\n dag=dag\r\n)\r\n\r\ncreate_bucket_task = PythonOperator(\r\n task_id=\"create_bucket\",\r\n python_callable=create_bucket,\r\n op_kwargs={\r\n \"bucket_name\":\"shinyui-dend-capstone\"\r\n },\r\n dag=dag\r\n)\r\n\r\nupload_file_to_s3_task = PythonOperator(\r\n task_id=\"upload_file_to_s3\",\r\n python_callable=upload_file_to_s3,\r\n op_kwargs={\r\n \"bucket_name\":\"shinyui-dend-capstone\",\r\n \"files\":iterate_directory(dir=\"/usr/local/airflow/data\", filetype=\".csv\")\r\n },\r\n 
dag=dag\r\n)\r\n\r\ncheck_number_of_files_task = PythonOperator(\r\n task_id=\"check_number_of_files\",\r\n python_callable=check_number_of_files,\r\n op_kwargs={\r\n \"bucket_name\":\"shinyui-dend-capstone\",\r\n \"files\":iterate_directory(dir=\"/usr/local/airflow/data\", filetype=\".csv\")\r\n },\r\n dag=dag\r\n)\r\n\r\ncreate_hour_table_task = PostgresOperator(\r\n task_id=\"create_hour_table\",\r\n postgres_conn_id=\"shinyui_redshift\",\r\n sql=\"\"\"CREATE TABLE IF NOT EXISTS staging_eur_hour (\r\n date VARCHAR NOT NULL PRIMARY KEY,\r\n hour VARCHAR,\r\n bid_open REAL NOT NULL,\r\n bid_high REAL NOT NULL,\r\n bid_low REAL NOT NULL,\r\n bid_close REAL NOT NULL,\r\n bid_change REAL NOT NULL,\r\n ask_open REAL NOT NULL,\r\n ask_high REAL NOT NULL,\r\n ask_low REAL NOT NULL,\r\n ask_close REAL NOT NULL,\r\n ask_change REAL NOT NULL\r\n )\"\"\",\r\n dag=dag\r\n)\r\n\r\ncreate_minute_table_task = PostgresOperator(\r\n task_id=\"create_minute_table\",\r\n postgres_conn_id=\"shinyui_redshift\",\r\n sql=\"\"\"CREATE TABLE IF NOT EXISTS staging_eur_minute (\r\n date VARCHAR NOT NULL PRIMARY KEY,\r\n minute VARCHAR,\r\n bid_open REAL NOT NULL,\r\n bid_high REAL NOT NULL,\r\n bid_low REAL NOT NULL,\r\n bid_close REAL NOT NULL,\r\n bid_change REAL NOT NULL,\r\n ask_open REAL NOT NULL,\r\n ask_high REAL NOT NULL,\r\n ask_low REAL NOT NULL,\r\n ask_close REAL NOT NULL,\r\n ask_change REAL NOT NULL\r\n )\"\"\",\r\n dag=dag\r\n)\r\n\r\ncreate_news_table_task = PostgresOperator(\r\n task_id=\"create_news_table\",\r\n postgres_conn_id=\"shinyui_redshift\",\r\n sql=\"\"\"CREATE TABLE IF NOT EXISTS staging_eur_news (\r\n date VARCHAR NOT NULL PRIMARY KEY,\r\n title VARCHAR (65535),\r\n article VARCHAR (65535)\r\n )\"\"\",\r\n dag=dag\r\n)\r\n\r\nprint_msg_task = PythonOperator(\r\n task_id=\"log_info\",\r\n python_callable=log_data,\r\n op_kwargs={\r\n \"msg\":\"Table successfully created, ready to load data\"\r\n },\r\n dag=dag\r\n)\r\n\r\ncopy_data_to_hour_table_task = PythonOperator(\r\n task_id=\"copy_eur_hour\",\r\n python_callable=copy_data_to_redshift,\r\n op_kwargs={\r\n \"table\":\"staging_eur_hour\",\r\n \"s3_files\":\"s3://shinyui-dend-capstone/eurusd_hour.csv\"\r\n },\r\n dag=dag\r\n)\r\n\r\ncopy_data_to_minute_table_task = PythonOperator(\r\n task_id=\"copy_eur_minute\",\r\n python_callable=copy_data_to_redshift,\r\n op_kwargs={\r\n \"table\":\"staging_eur_minute\",\r\n \"s3_files\":\"s3://shinyui-dend-capstone/eurusd_minute.csv\"\r\n },\r\n dag=dag\r\n)\r\n\r\ncopy_data_to_news_table_task = PythonOperator(\r\n task_id=\"copy_eur_news\",\r\n python_callable=copy_data_to_redshift,\r\n op_kwargs={\r\n \"table\":\"staging_eur_news\",\r\n \"s3_files\":\"s3://shinyui-dend-capstone/eurusd_news.csv\"\r\n },\r\n dag=dag\r\n)\r\n\r\nhour_table_check_task = PythonOperator(\r\n task_id=\"quality_check_hour\",\r\n python_callable=quality_check,\r\n op_kwargs={\r\n \"table\":\"staging_eur_hour\"\r\n },\r\n dag=dag\r\n)\r\n\r\nminute_table_check_task = PythonOperator(\r\n task_id=\"quality_check_minute\",\r\n python_callable=quality_check,\r\n op_kwargs={\r\n \"table\":\"staging_eur_minute\"\r\n },\r\n dag=dag\r\n)\r\n\r\nnews_table_check_task = PythonOperator(\r\n task_id=\"quality_check_hour\",\r\n python_callable=quality_check,\r\n op_kwargs={\r\n \"table\":\"staging_eur_news\"\r\n },\r\n dag=dag\r\n)\r\n\r\ncreate_bucket_task >> iterate_directory_task >> upload_file_to_s3_task\r\nupload_file_to_s3_task >> check_number_of_files_task\r\ncheck_number_of_files_task >> [create_hour_table_task, 
create_minute_table_task, create_news_table_task] >> print_msg_task\r\nprint_msg_task >> [copy_data_to_hour_table_task, copy_data_to_minute_table_task, copy_data_to_news_table_task]\r\nhour_table_check_task >> minute_table_check_task >> news_table_check_task","repo_name":"Shinyui/Udacty-Data-Engineering-Project-Capstone-Project","sub_path":"dags/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36332491028","text":"#!/usr/bin/env python3\nimport os\nimport json\nimport argparse\n\nimport numpy as np\nimport pandas as pd\nfrom joblib import dump\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import confusion_matrix, precision_recall_fscore_support, \\\n f1_score, recall_score, precision_score, accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\n\nEXCLUDED_NEWSPAPERS = {'The New York Times'} # ,'The Washington Post'}\nSCORE_TYPE = \"macro\"\n\n\ndef main():\n \"\"\"\n Classifies a given corpus by political orientation.\n Part of the COPPOC project.\n\n The parameter values of the vectorizer and the classifier\n have been validated in a GridSearch setup by Leon\n on the 28th of October.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Parameters for the COP Political Orientation'\n ' classifier')\n # parser.add_argument('--train', help='file name for training data')\n # parser.add_argument('--test', help='file name for test data')\n parser.add_argument('-f', '--full', help=\"file name for complete\"\n \" data set\")\n args = parser.parse_args()\n\n # TODO add handling for specific test/training data\n if args.full:\n print(\"Loading data...\")\n with open(args.full, 'r') as F:\n data = json.load(F)\n else:\n print(\"No data source found! 
Specify which data the script\"\n \" should use.\")\n parser.print_help()\n exit()\n\n X, y = prepare_data(data)\n labels = np.unique(y)\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n random_state=42,\n test_size=0.27)\n print(\"Data has been split!\")\n\n vectorizer = TfidfVectorizer(analyzer='word', lowercase=False,\n ngram_range=(1, 2), norm='l2',\n strip_accents=None)\n classifier = LinearSVC(C=0.7, dual=True, fit_intercept=True, loss='hinge',\n multi_class='crammer_singer', penalty='l1')\n\n pipeline = Pipeline([\n ('vec', vectorizer),\n ('clf', classifier)\n ])\n\n print(\"Transforming data...\")\n features = [row[\"body\"] for row in X_train]\n pipeline.fit(features, y_train)\n\n print(\"Applying algorithm to test data...\")\n samples = [row[\"body\"] for row in X_test]\n y_guess = pipeline.predict(samples)\n print(\"Classification completed!\\n\")\n\n # Prints the scores.\n print(f\"Overall scores ({SCORE_TYPE}):\")\n print(\"Accuracy\", accuracy_score(y_true=y_test, y_pred=y_guess))\n print(\"Precision\", precision_score(y_true=y_test, y_pred=y_guess,\n average=SCORE_TYPE))\n print(\"Recall\", recall_score(y_true=y_test, y_pred=y_guess,\n average=SCORE_TYPE))\n print(\"F1-score\", f1_score(y_true=y_test, y_pred=y_guess,\n average=SCORE_TYPE))\n print()\n\n scores = precision_recall_fscore_support(y_test, y_guess, labels=labels)\n print(pd.DataFrame(scores, columns=labels,\n index=[\"Precision\", \"Recall\", \"F-score\",\n \"Support\"]).drop([\"Support\"]), '\\n')\n\n # Print confusion matrix.\n matrix = confusion_matrix(y_test, y_guess, labels=labels)\n print(pd.DataFrame(matrix, index=labels, columns=labels), '\\n')\n\n dump(pipeline, \"classification_pipeline.joblib\")\n\n results = pd.DataFrame(scores)\n # results = results.sort_values('mean_test_F1', ascending=False)\n results.to_excel(\"main_scores.xlsx\", engine='openpyxl')\n results.to_html(\"main_scores.html\")\n\n\ndef prepare_data(data):\n \"\"\"\n Prepares classification data\n by applying filters and splitting.\n :param data:\n :return:\n \"\"\"\n X, y = [], []\n for row in data:\n X.append({key: value for key, value in row.items()\n if key != \"political_orientation\"})\n y.append(row[\"political_orientation\"])\n\n for row in X:\n if row[\"newspaper\"] in EXCLUDED_NEWSPAPERS:\n index = X.index(row)\n del X[index]\n del y[index]\n\n return X, y\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"leonwetzel/Learning-from-Data","sub_path":"Project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17781535569","text":"# -*- coding: utf-8 -*-\n\nsayilar = [1,2,3,4,5]\n\n# sayilarKareli = []\n\n# bunun yerine mapping kullanmak\n# for sayi in sayilar:\n# sayilarKareli.append(sayi*sayi)\n\n\n\n# x**2 = x*x - x'in karesi\nsayilarKareli = list(map(lambda sayi : sayi**2, sayilar))\n\nprint(sayilarKareli)\n\n\n#--------------------------------------------------------------------------\n\n\nsayilarFiltreli = list(filter(lambda sayi : sayi>2, sayilar))\n\nprint(sayilarFiltreli) \n\n\n#--------------------------------------------------------------------------\n\n\nfrom functools import reduce\n\nsayilarFaktoriyel = reduce(lambda x,y : x*y, sayilar)\n\n# çalışma mantığı :\n# x y\n# 1 2\n# 1*2 3\n# 2*3 4\n# 6*4 5\n# 24*5 end ---> 120 
\n\nprint(sayilarFaktoriyel)\n\n","repo_name":"rzayevsahil/Python","sub_path":"Temel/20.map-filter-reduce.py","file_name":"20.map-filter-reduce.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"40654234189","text":"\"\"\"\nPerforms GEOJSON mission file reading, GPS-coordinates conversion to ENU system,\nCPP trajectory calculation, waypoints conversion back to GPS-coordinates,\ntrajectory JSON file generation.\n\n\"\"\"\nimport os\nfrom pathlib import Path\nimport json\nimport geojson\nimport pymap3d as pm\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nimport numpy as np\nfrom grid_map import GridMap\nfrom grid_based_sweep_coverage_path_planner import planning\n#from tools import define_polygon, polygon_contains_point\n\n\n\"\"\"\n#generate JSON data\ndata = {\n\t\"polygon\":[\n\t\t{ \"latitude\": 59.949379, \"longitude\": 30.618275 },\n\t\t{ \"latitude\": 59.949434, \"longitude\": 30.618637 },\n\t\t{ \"latitude\": 59.949301, \"longitude\": 30.618758 },\n\t\t{ \"latitude\": 59.949262, \"longitude\": 30.618321 }\n\t],\n\t\"central_point\":[\n\t\t{ \"latitude\": 59.949345, \"longitude\": 30.618524 }\n\t]\n}\njson_string = json.dumps(data,sort_keys=True, indent=4)\n\n# save JSON data\nwith open(\"mission.json\", \"w\") as outfile:\n\toutfile.write(json_string)\n\"\"\"\n\ndef export_polygon(filename):\n\tmission_data = []\n\tlat = []\n\tlon = []\n\tif os.path.exists(filename):\n\t\twith open(filename, \"r\") as f: \n\t\t\tmission_data = geojson.load(f) \n\t\t\tif type(mission_data['features'][0]['geometry']) == geojson.geometry.Polygon:\n\t\t\t\tmy_polygon = mission_data['features'][0]['geometry']['coordinates']\n\t\t\t\tfor i in range(len(my_polygon[0])):\n\t\t\t\t\tlat.append(my_polygon[0][i][1])\n\t\t\t\t\tlon.append(my_polygon[0][i][0])\n\t\t\t\tlat.pop(0)\n\t\t\t\tlon.pop(0)\n\t\t\tif type(mission_data['features'][1]['geometry']) == geojson.geometry.Polygon:\n\t\t\t\tmy_polygon = mission_data['features'][1]['geometry']['coordinates']\n\t\t\t\tprint(my_polygon[0])\n\t\t\t\tfor i in range(len(my_polygon[0])):\n\t\t\t\t\tlat.append(my_polygon[0][i][1])\n\t\t\t\t\tlon.append(my_polygon[0][i][0])\n\t\t\t\tlat.pop(0)\n\t\t\t\tlon.pop(0)\n\t\t\telse:\n\t\t\t\tprint(\"No polygon!\")\n\telse:\n\t\tprint(\"There is no file with mission!\")\n\tprint(\"poly\" , lat ,lon)\n\treturn lat, lon\n\n\ndef export_central_point(filename):\n\tpoint_data = []\n\tif os.path.exists(filename):\n\t\twith open(filename, \"r\") as f: \n\t\t\tpoint_data = geojson.load(f) \n\t\t\tif type(point_data['features'][0]['geometry']) == geojson.geometry.Point:\n\t\t\t\tmy_point = point_data['features'][0]['geometry']['coordinates']\n\t\t\t\tlat0 = my_point[1]\n\t\t\t\tlon0 = my_point[0]\n\t\t\tif type(point_data['features'][1]['geometry']) == geojson.geometry.Point:\n\t\t\t\tmy_point = point_data['features'][1]['geometry']['coordinates']\n\t\t\t\tlat0 = my_point[1]\n\t\t\t\tlon0 = my_point[0]\n\t\t\telse:\n\t\t\t\tprint(\"No central point!\")\n\telse:\n\t\tprint(\"There is no file with mission!\")\n\tprint(\"CP\", lat0, lon0)\n\treturn float(lat0), float(lon0)\n\t\n\ndef main():\n\n\tenu_coord = []\n\tx_enu_coord = []\n\ty_enu_coord = []\n\tpoly_mission_= []\n\tcpp_path = []\n\tcpp_path_gps = []\n\th0_ = 0 \n\th_ = 0 \n\n\tsweep_resolution = 10 #TODO: dynamic import through GUI\n\n\thome = str(Path.home())\n\tpath = home + 
'/agriculture_pest_exterminator/gui/coverage_path_planning/mission.geojson'\n\tlat0_, lon0_ = export_central_point(path)\t\t# to change filename/path to file\n\tlat_, lon_ = export_polygon(path)\t\t\t\t# to change filename/path to file\n\t\n\tfor i in range(len(lat_)):\n\t\tenu_coord.append(pm.geodetic2enu(lat_[i], lon_[i], h_, lat0_, lon0_, h0_))\n\t\n\tfor i in range(len(enu_coord)):\n\t\tx_enu_coord.append(enu_coord[i][0])\n\t\ty_enu_coord.append(enu_coord[i][1])\n\n\tpoly_mission = list(zip(x_enu_coord, y_enu_coord))\n\n\tfor i in range(len(x_enu_coord)):\n\t\tpoly_mission_.append(list(poly_mission[i]))\n\t\n\tflight_area_vertices = np.array(poly_mission_)\n\tox = flight_area_vertices[:,0].tolist() + [flight_area_vertices[0,0]]\n\toy = flight_area_vertices[:,1].tolist() + [flight_area_vertices[0,1]]\n\tgoal_x, goal_y = planning(ox, oy, sweep_resolution)\n\t\n\tfor a, b in zip(goal_x, goal_y):\n\t\tcpp_path.append([a, b])\n\n\tfor i in range(len(cpp_path)):\n\t\tcpp_path_gps.append(pm.enu2geodetic(goal_x[i], goal_y[i], h_, lat0_, lon0_, h0_))\n\n\tjson_string = json.dumps(list(cpp_path_gps))\n\tpath_out = home + '/agriculture_pest_exterminator/gui/coverage_path_planning/'\n\twith open(path_out + \"trajectory.json\", \"w\") as outfile:\n\t\toutfile.write(json_string)\n\t\n\tprint('Waypoints(ENU):')\n\tprint(cpp_path)\n\tplt.plot(goal_x, goal_y)\n\tx_enu_coord.append(x_enu_coord[0])\n\ty_enu_coord.append(y_enu_coord[0])\n\tplt.plot(x_enu_coord, y_enu_coord)\n\tplt.show()\n\t\nif __name__ == '__main__':\n\tmain()","repo_name":"IgorLebed/agriculture_pest_exterminator","sub_path":"gui/coverage_path_planning/cpp_track.py","file_name":"cpp_track.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"3614973283","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 2 14:52:41 2018\n\n@author: lankuohsing\n\"\"\"\n\n# In[]\nimport numpy as np\n# In[]\na=np.array([[[1],[2]],\n [[3],[4]],\n [[5],[6]]])\n\n# In[]\nb=a.reshape(a.shape[0]*a.shape[1],1)","repo_name":"lankuohsing/TensorFlow-Examples","sub_path":"RNN/timeseries/sin_prediction/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"3394880263","text":"# codigo del primer parcial \r\n\r\nimport numpy as np\r\nfrom numpy import linalg as LA\r\n\r\n#from condiciones import s_o_c, f_o_c\r\n#from derivadas import Grad, Hess, cuadrados\r\n#from wolfe import genera_alpha, is_pos_def, modificacion_hessiana\r\n\r\n#agregamos las funciones que nos ayudaran a correr nuestro algoritmo\r\n\r\ndef f_o_c(f,x, tol=1e-12):\r\n \"\"\"\r\n Función que calcula las condiciones de primer orden\r\n \"\"\"\r\n grad = np.array(Grad(f,x))\r\n if np.dot(grad, grad) < tol:\r\n return True\r\n else :\r\n return False\r\n\r\ndef s_o_c(f, x0, tol=1e-15):\r\n \"\"\"\r\n Inserten aqui código para condiciones de segundo orden \r\n \"\"\"\r\n hess = Hess(f, x0, tol)\r\n print(LA.eigvals(hess))\r\n if np.all(LA.eigvals(hess) > tol) :\r\n return True\r\n else :\r\n return False\r\n\r\ndef Grad(f, x0, h=1e-6, i=-1):\r\n \"\"\"\r\n Función que calcula el Grad de una función en un punto\r\n \"\"\"\r\n n = len(x0)\r\n if i in range(n):\r\n z = np.zeros(n)\r\n z[i] = h/2\r\n Grad = (f(x0 + z) - f(x0 - z))/h\r\n else:\r\n Grad=np.zeros(n)\r\n for j in range(n):\r\n z = np.zeros(n)\r\n z[j] = h/2\r\n Grad[j]= (f(x0 + z) - f(x0 - z))/h\r\n return 
Grad\r\n\r\n\r\ndef Hess(f, x0, h=1e-4, method = \"basic\"):\r\n \"\"\"\r\n Función que calcula la Hessiana de una función en un punto. \r\n f: función sobre la cual queremos calcular la hessiana.\r\n x0: Punto sobre el cual queremos hacer el cálculo\r\n h: nivel de precisión para hacer el cálculo\r\n method: Método por el cual se quiere hacer puede ser: 'basic', 'grad', 'centered', 'gradCentered'\r\n \"\"\"\r\n n = len(x0)\r\n Hess = np.matrix(np.zeros((n,n)))\r\n for i in range(n):\r\n for j in range(n):\r\n z_i = np.zeros(n)\r\n z_i[i] = h\r\n z_j = np.zeros(n)\r\n z_j[j] = h\r\n if method == \"basic\":\r\n Hess[i,j] = ( f(x0 + z_j +z_i) - f(x0 + z_i ) - f(x0+z_j) +f(x0)) / (h**2)\r\n elif method == \"grad\":\r\n Hess[i,j] = (Grad(f,x0+z_j,h,i) - Grad(f,x0,h,i) + \\\r\n Grad(f,x0+z_i,h,j) - Grad(f,x0,h,j))/(2*h)\r\n elif method == \"centered\":\r\n if i==j:\r\n Hess[i,j] = (-f(x0+2*z_i) + 16*f(x0+z_i) - 30*f(x0)+\\\r\n 16*f(x0-z_i) - f(x0-2*z_i)) / (12*h**2)\r\n else :\r\n Hess[i,j] = (f(x0+z_i+z_j) - f(x0 + z_i - z_j) - \\\r\n f(x0 - z_i + z_j) + f(x0-z_i-z_j))/(4*h**2)\r\n elif method == \"gradCentered\":\r\n Hess[i,j] = (Grad(f,x0+z_j,h)[i] - Grad(f, x0-z_j,h)[i] + \\\r\n Grad(f,x0+z_i,h)[j] - Grad(f,x0-z_i,h)[j])/(4*h)\r\n return Hess\r\n\r\ndef genera_alpha(f, x0, pk, c1=1e-4, tol=1e-5):\r\n \"\"\"\r\n Backtracking LS i.e. Algoritmo que encuentra una alpha que cumpla condiciones de wolfe. \r\n \"\"\"\r\n alpha, rho, c = 1, 4/5, c1\r\n while f(x0 + alpha*pk)>f(x0) + c*alpha*np.dot(Grad(f, x0),pk):\r\n alpha*=rho\r\n return alpha\r\n\r\n#definimos la función de Rosenbrock \r\n\r\n#argumento de la función es un vector\r\ndef Rosenbrock(x0): \r\n a=0\r\n b=100\r\n x=x0[0]\r\n y=x0[1]\r\n f = (a-x)**2 + b*(y-x**2)**2\r\n return f\r\n\r\n\r\n# probemos con los siguientes valores para x0\r\nx0=(1.8,2.3)\r\n\r\n#Probemos el algoritmo de Newton, es decir el paso alfa es completo=1 y la matriz B es la hessiana \r\n\r\ndef BusquedaLineal_alfa1(f, x0, metodo=\"maximo descenso\"):\r\n xk=x0\r\n if metodo == \"Newton\":\r\n while not (f_o_c(f,xk)) and (s_o_c(f,xk)):\r\n grad=Grad(f, xk)\r\n hess=Hess(f,xk)\r\n pk=LA.solve(hess,-grad)\r\n alpha = 1\r\n xk= xk + alpha*pk\r\n else:\r\n while not (f_o_c(f,xk)) and (s_o_c(f,xk)):\r\n grad=Grad(f,xk)\r\n pk = -grad\r\n alpha = 1\r\n xk = xk + alpha*pk\r\n return xk\r\n\r\nprint(\"Este es el resultado obtenido con el algoritmo de Newton:\")\r\nprint(BusquedaLineal_alfa1(Rosenbrock,x0,0.000001))\r\n\r\n#ahora probamos el algoritmo de BLS de Newton en especial\r\n\r\ndef BusquedaLineal_amplio(f, x0, metodo=\"maximo descenso\"):\r\n xk=x0\r\n if metodo == \"Newton\":\r\n while not (f_o_c(f,xk)) and (s_o_c(f,xk)):\r\n grad=Grad(f, xk)\r\n hess=Hess(f,xk)\r\n pk=LA.solve(hess,-grad)\r\n alpha = genera_alpha(f,x0,pk)\r\n xk= xk + alpha*pk\r\n else:\r\n while not (f_o_c(f,xk)) and (s_o_c(f,xk)):\r\n grad=Grad(f,xk)\r\n pk = -grad\r\n alpha = genera_alpha(f,xk,pk)\r\n xk = xk + alpha*pk\r\n return xk\r\n\r\n\r\nprint(\"Este es el resultado obtenido con el algoritmo BLS de Newton:\")\r\nprint(BusquedaLineal_amplio(Rosenbrock,x0,0.000001))","repo_name":"Skalas/Analisis-Aplicado-Spring2021","sub_path":"Alumnos/Rxmirxz13/codigo_rosenbrock.py","file_name":"codigo_rosenbrock.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20141660785","text":"\"\"\" \nclass Node:\n def __init__(self, data):\n\t\tself.data = data\n\t\tself.next = None\n This is method only 
submission.\n You only need to complete the method.\n\"\"\"\ndef count(head, search_for):\n # Code here\n count = 0 \n if not head: return 0 \n while head:\n if head.data == search_for:\n count+=1\n head = head.next \n return count","repo_name":"kathleenfwang/algorithms-and-data-structures","sub_path":"Daily Coding Problem/LinkedList/num_of_occurences.py","file_name":"num_of_occurences.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18771670682","text":"# python3 manage.py shell to load data to django\n# must place raw data in right folder\nimport pandas as pd\nfrom datacollector.models import Rental\n\ndf = pd.read_csv(\"datacollector/data/final_rental\")\n\nrentals = [\n Rental(\n id = df.at[row, 'id'],\n night_price = df.at[row, 'night_price'],\n num_of_baths = df.at[row, 'num_of_baths'],\n num_of_rooms = df.at[row, 'num_of_rooms'],\n name = df.at[row, 'name'],\n airbnb_neighborhood = df.at[row, 'airbnb_neighborhood'],\n capacity_of_people = df.at[row, 'capacity_of_people'],\n property_type = df.at[row, 'property_type'],\n reviews_count = df.at[row, 'reviews_count'],\n start_rating = df.at[row, 'start_rating'],\n created_at = df.at[row, 'created_at'],\n num_of_beds = df.at[row, 'num_of_beds'],\n lat = df.at[row, 'lat'],\n lon = df.at[row, 'lon']\n ) for row in range(0, 50)\n]\n\nRental.objects.bulk_create(rentals)","repo_name":"xsharonhe/open_door","sub_path":"utils/commands/datacollection.py","file_name":"datacollection.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"27815563753","text":"# Video IR\nimport cv2\nimport numpy as np\nimport imutils\n\ndef getScrews(img):\n\timg1 = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\tret,img2 = cv2.threshold(img1,140,255,cv2.THRESH_BINARY)\n\timg3 = cv2.medianBlur(img2,3)\n\timg4 = cv2.GaussianBlur(img3,(5,5),0)\n\timg5 = cv2.Canny(img4, 174,255)\n\n\tout = cv2.hconcat([img2,img3,img4,img5])\n\t# cv2.imshow('th1,medBlur,edged,mask',out)\n\t# cv2.waitKey(0)\n\n\tcnts1 = cv2.findContours(img5, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\tcnts1 = imutils.grab_contours(cnts1)\n\tcnts1 = sorted(cnts1, key = cv2.contourArea, reverse = True)[:6]\n\tcxList1 = []\n\tcyList1 = []\n\tcwList1 = []\n\tchList1 = []\t\n\tfor c1 in cnts1:\n\t\txFlag1 = False\n\t\tyFlag1 = False\n\t\tperi1 = cv2.arcLength(c1, True)\n\t\tapprox1 = cv2.approxPolyDP(c1, 0.05 * peri1, True)\n\t\txx1,yy1,ww1,hh1 = cv2.boundingRect(approx1)\n\t\taspect1 = ww1/hh1\n\t\t# print('cxList: ', cxList, \" cyList: \" , cyList)\n\t\t# print('x', xx, 'y', yy)\n\n\t\tfor i in cxList1:\n\t\t\tif abs(xx1-i)<5:\n\t\t\t\t# print('same')\n\t\t\t\txFlag1 = True\n\t\tfor j in cyList1:\n\t\t\tif abs(yy1-j)<5:\n\t\t\t\t# print('same')\n\t\t\t\tyFlag1 = True\n\n\t\t# print(\"x:\",xx1,\"y:\",yy1,\"vertices:\",len(approx1),\" area:\",cv2.contourArea(approx1),\"\\t aspect:\",round(aspect1,1) )\t\n\t\tif xFlag1==False and yFlag1==False and cv2.contourArea(approx1)>2 and cv2.contourArea(approx1)<200 and aspect1>0.8:\n\t\t\tprint(\"x:\",xx1,\"y:\",yy1,\"vertices:\",len(approx1),\" area:\",cv2.contourArea(approx1),\"\\t aspect:\",round(aspect1,1) )\t\n\t\t\tcxList1.append(xx1)\n\t\t\tcyList1.append(yy1)\t\t\n\t\t\tcwList1.append(ww1)\n\t\t\tchList1.append(hh1)\t\t\n\t\t\tcv2.rectangle(img,(xx1,yy1),(xx1+(ww1),yy1+(hh1)),(0,255,0),2)\n\t\t\tif cv2.contourArea(approx1)>130 and cv2.contourArea(approx1)<200 and 
aspect1>1.05 and aspect1<1.25:\n\t\t\t\timg6 = cv2.putText(img,'3',(xx1+ww1+3,yy1+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2, cv2.LINE_AA)\n\t\t\tif cv2.contourArea(approx1)>95 and cv2.contourArea(approx1)<130 and aspect1>1.15 and aspect1<1.45:\n\t\t\t\timg6 = cv2.putText(img,'2',(xx1+ww1+3,yy1+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2, cv2.LINE_AA)\n\t\t\tif cv2.contourArea(approx1)>70 and cv2.contourArea(approx1)<95 and aspect1>0.85 and aspect1<1.45:\n\t\t\t\timg6 = cv2.putText(img,'1',(xx1+ww1+3,yy1+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2, cv2.LINE_AA)\n\t\t\t# cv2.imshow('outScrews or img in func',img)\n\t\t\t# cv2.waitKey(0)\n\treturn img\n\n\n\n\n\n\n\n\n\ncap = cv2.VideoCapture('fuelProbe2.m4v')\ni = 0\nwhile(True): \n\n\tret, img = cap.read()\t\n\tif(ret==False):\n\t\tbreak\n\n\timage_color = cv2.resize(img,None,fx=0.6,fy=0.6)\n\timage_color_orig = image_color.copy()\n\timage_color_copy = image_color.copy()\n\t# cv2.imshow('orig',image_color)\n\t# cv2.waitKey(0)\n\n\tboundaries = [ ([155, 90, 110], [200, 200, 200]) ]\n\tfor (lower, upper) in boundaries:\n\t\tlower = np.array(lower, dtype = \"uint8\")\n\t\tupper = np.array(upper, dtype = \"uint8\")\n\t\tmask = cv2.inRange(image_color, lower, upper)\n\t\tout = cv2.bitwise_and(image_color, image_color, mask = mask)\t\t\n\n\t# cv2.imshow('out',out)\n\t# cv2.waitKey(0)\n\n\tkernel = np.ones((3, 3), np.uint8)\n\tmask = cv2.erode(out, kernel, iterations=2)\n\tmask = cv2.dilate(mask, kernel, iterations=5)\n\t# cv2.imshow('mask',mask)\n\t# cv2.waitKey(0)\n\n\tmedBlur = cv2.medianBlur(mask,3)\n\n\tedged = cv2.Canny(medBlur, 245,255)\n\t# cv2.imshow('edged',edged)\n\t# cv2.waitKey(0)\n\n\tgausBlur = cv2.GaussianBlur(medBlur,(5,5),0)\n\t# cv2.imshow('gblur',gausBlur)\n\t# cv2.waitKey(0)\n\n\n\tcnts = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\tcnts = imutils.grab_contours(cnts)\n\tcnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:1]\n\tfor c in cnts:\n\t\tperi = cv2.arcLength(c, True)\n\t\tapprox = cv2.approxPolyDP(c, 0.05 * peri, True)\n\t\tvertices = len(approx)\n\t\txx,yy,ww,hh = cv2.boundingRect(approx)\n\t\taspect = round(ww/hh,1)\n\t\tarea = cv2.contourArea(approx)\n\n\t\tprint(\"===================================\")\n\t\tprint(\"x:\",xx,\"y:\",yy,\"vertices:\",vertices,\" area:\",area,\"\\t aspect:\",aspect )\t\n\t\tprint(\"===================================\")\n\n\t\tif area>11000 and area<14000 and aspect>0.45 and aspect<0.75 and vertices==4:\n\t\t\t# print(\"x:\",xx,\"y:\",yy,\"vertices:\",vertices,\" area:\",area,\"\\t aspect:\",aspect )\t\n\t\t\t# cv2.rectangle(image_color_copy,(xx,yy),(xx+ww,yy+hh),(0,255,0),1)\t\t\t\n\t\t\t# cv2.imshow('output',image_color_copy)\n\t\t\t# cv2.waitKey(0)\n\n\t\t\toutScrews = getScrews(image_color_copy[yy+1:yy+hh,xx+1:xx+ww])\n\t\t\timage_color_orig[yy+1:yy+hh,xx+1:xx+ww] = outScrews\n\t\t\t\n\t\t\t# cv2.imshow('image_color_orig',image_color_orig)\n\n\t\t\tbreak\n\t\t\n\tcv2.imshow('image_color_orig',image_color_orig)\n\t\t\t\n\t# out = cv2.hconcat([image_color,image_color_copy])\n\t# cv2.imshow('out',out)\n\t# cv2.imshow('edged',edged)\n\n\n\n\tif cv2.waitKey(0) & 0xFF == ord('q'):\n\t\tbreak\n\n\n\n\n\n\n\n\n\n\t# for i in cxList:\n\t# \tif abs(xx-i)<5:\n\t# \t\t# print('same')\n\t# \t\txFlag = True\n\t# for j in cyList:\n\t# \tif abs(yy-j)<5:\n\t# \t\t# print('same')\n\t# \t\tyFlag = True\n\n\t# if xFlag==False and yFlag==False and cv2.contourArea(approx)>4 and cv2.contourArea(approx)<404:\n\t\t# cxList.append(xx)\n\t\t# cyList.append(yy)\n\t\t# imbox = 
image_color_copy[yy-30:yy+30,xx-10:xx+30]\t\t\n\t\t# avgrow1 = np.average(imbox, axis=0)\n\t\t# avg1 = np.average(avgrow1, axis=0)\n\t\t# if avg1[2]>130:\n\n\n\n","repo_name":"vcovasan/LIFFT","sub_path":"Image Recognition/screws-RGB-vid.py","file_name":"screws-RGB-vid.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"9761399940","text":"#!/usr/bin/python\n# arquivo: Usuarios.py\n\nfrom flask import Blueprint, jsonify, request\nfrom Models.APIModel import Usuarios as UsuariosModel\nimport json\n\nusuario_bp = Blueprint('usuarios',__name__)\n\n\n@usuario_bp.route(\"/usuarios/\")\ndef listar_usuarios():\n usuarios = UsuariosModel.objects().to_json()\n retorno = {\"usuarios\":json.loads(usuarios)}\n return jsonify(retorno)\n\n@usuario_bp.route(\"/usuarios/\",methods=[\"POST\"])\ndef cadastrar_usuario():\n novo = request.get_json()\n u = UsuariosModel()\n u.nome = novo.get(\"nome\")\n u.email = novo.get(\"email\")\n u.save()\n\n retorno = {\"message\":\"Usuario cadastrado com sucesso\"}\n return jsonify(retorno)\n\n@usuario_bp.route(\"/usuarios//\",methods=[\"PUT\"])\ndef atualizar_usuario(id):\n novo = request.get_json()\n usuario = UsuariosModel.objects(id=id).first()\n usuario.nome = novo.get(\"nome\")\n usuario.email = novo.get(\"email\")\n usuario.save()\n\n retorno = {\"message\":\"Usuarios ID {0} atualizado com sucesso\".format(id)}\n return jsonify(retorno)\n\n@usuario_bp.route(\"/usuarios//\",methods=[\"DELETE\"])\ndef deletar_usuario(id):\n usuario = UsuariosModel.objects(id=id).first()\n usuario.delete()\n\n retorno = {\"message\":\"Usuario ID {0} removido com sucesso\".format(id)}\n return jsonify(retorno)\n\n\n\n\n","repo_name":"MarceloSuetomi/DeployTool","sub_path":"HandsOn/Aula02/blueprints/Usuarios.py","file_name":"Usuarios.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24630716644","text":"from abc import ABCMeta, abstractproperty\n\nclass Menu(object):\n\n __metaclass__ = ABCMeta\n\n def __init__(self, logger, **kwargs):\n self.logger = logger\n for key in kwargs:\n setattr(self, key, kwargs[key])\n\n @abstractproperty\n def menus(self):\n return NotImplemented\n\n @abstractproperty\n def handlers(self):\n return NotImplemented\n\n def run(self, menu_key):\n choice = self._show(menu_key)\n return self._do(menu_key, choice)\n\n def _show(self, menu_key):\n menu = self.menus[menu_key]\n choices = menu['choices']\n print('\\n{}:'.format(menu['title']))\n for i in range(1, len(choices)):\n print('{}. {}'.format(i, choices[i]['desc']))\n print('\\n0. 
{}'.format(choices[0]['desc']))\n return int(input('\\nEnter selection: '))\n\n def _do(self, menu_key, choice):\n try:\n return self.menus[menu_key]['choices'][choice]['do']()\n except IndexError:\n print('Invalid selection')\n return True\n","repo_name":"aisthesis/opttrack","sub_path":"opttrack/lib/ui/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6475995341","text":"from Google_Sheets.google_sheets_client import GoogleSheetsClient\nfrom Showrunner.slack_notifier import SlackNotifier\nimport logging\nimport time\n\n\nclass AnnouncementManager:\n def __init__(self, google_sheets_client, slack_notifier):\n self.google_sheets_client = google_sheets_client\n self.slack_notifier = slack_notifier\n\n def get_column_letter(self, column_number):\n result = []\n while column_number > 0:\n column_number, remainder = divmod(column_number - 1, 26)\n result[:0] = chr(65 + remainder)\n return \"\".join(result)\n\n def process_announcements(self, run_of_show_sheet, run_of_show_range):\n while True:\n logging.debug(f\"Checking spreadsheet for new announcements...\")\n run_of_show_data = self.google_sheets_client.get_sheet_data(run_of_show_sheet, run_of_show_range)\n logging.debug(f\"run_of_show_data sample: {run_of_show_data}\")\n if run_of_show_data:\n headers, values = run_of_show_data\n for index, row_values in enumerate(values):\n row = dict(zip(headers, row_values))\n if row[\"Priority\"] == \"1\" and row[\"Approved\"] == \"Yes\" and row[\"Sent\"] == \"0\":\n logging.debug(f\"Sending message: {row['Message']}\")\n self.slack_notifier.upcoming_session(\n row[\"Channel\"], row[\"Message\"], row[\"Speakers\"], row[\"Datetime_to_be_sent\"], row[\"Stage\"]\n )\n sent_column_index = headers.index(\"Sent\") + 1\n cell_range = (\n f\"{run_of_show_range.split('!')[0]}!{self.get_column_letter(sent_column_index)}{index + 2}\"\n )\n self.google_sheets_client.update_cell_value(run_of_show_sheet, cell_range, \"1\")\n else:\n logging.debug(f\"Skipping message: {row['Message']}\")\n else:\n logging.warning(\"No data found in the run_of_show_data.\")\n time.sleep(10)\n","repo_name":"Nick-Harvey/fdcai_bot","sub_path":"Announcements/announcement_manager.py","file_name":"announcement_manager.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37375032725","text":"import json\n\nfrom flask import current_app\n\nfrom tests.util.servertestcase import ServerTestCase\nfrom walkoff.scheduler import InvalidTriggerArgs\nfrom walkoff.serverdb import db\nfrom walkoff.serverdb.scheduledtasks import ScheduledTask\n\n\nclass TestScheduledTask(ServerTestCase):\n\n def setUp(self):\n self.date_trigger = {'type': 'date', 'args': {'run_date': '2017-01-25 10:00:00'}}\n\n def tearDown(self):\n db.session.rollback()\n for task in db.session.query(ScheduledTask).all():\n db.session.delete(task)\n current_app.running_context.scheduler.scheduler.remove_all_jobs()\n current_app.running_context.scheduler.stop()\n db.session.commit()\n\n def assertSchedulerWorkflowsRunningEqual(self, workflows=None):\n if workflows is None:\n self.assertDictEqual(current_app.running_context.scheduler.get_all_scheduled_workflows(), {})\n else:\n scheduled_workflows = current_app.running_context.scheduler.get_all_scheduled_workflows()\n self.assertSetEqual(set(scheduled_workflows['None']), set(workflows))\n\n def 
assertJsonIsCorrect(self, task, expected):\n actual_json = task.as_json()\n actual_json['workflows'] = set(actual_json['workflows'])\n self.assertDictEqual(actual_json, expected)\n\n def assertStructureIsCorrect(self, task, name, description='', status='running', workflows=None,\n trigger_type='unspecified', trigger_args=None, expected_running_workflows=None):\n self.assertEqual(task.name, name)\n self.assertEqual(task.description, description)\n self.assertEqual(task.status, status)\n self.assertEqual(task.trigger_type, trigger_type)\n if workflows is not None:\n self.assertSetEqual({workflow.workflow_id for workflow in task.workflows}, workflows)\n else:\n self.assertSetEqual({workflow.workflow_id for workflow in task.workflows}, set())\n if trigger_args is not None:\n self.assertDictEqual(json.loads(task.trigger_args), trigger_args)\n else:\n self.assertEqual(task.trigger_args, '{}')\n self.assertSchedulerWorkflowsRunningEqual(expected_running_workflows)\n\n def test_init_default(self):\n task = ScheduledTask(name='test')\n self.assertStructureIsCorrect(task, 'test')\n\n def test_init_with_description(self):\n task = ScheduledTask(name='test', description='desc')\n self.assertStructureIsCorrect(task, 'test', description='desc')\n\n def test_init_with_invalid_status(self):\n task = ScheduledTask(name='test', status='invalid')\n self.assertStructureIsCorrect(task, 'test')\n\n def test_init_with_workflows(self):\n task = ScheduledTask(name='test', workflows=['id1', 'id2', 'id3', 'id4'])\n\n self.assertStructureIsCorrect(task, 'test', workflows={'id1', 'id2', 'id3', 'id4'})\n\n def test_init_with_trigger(self):\n task = ScheduledTask(name='test', task_trigger=self.date_trigger)\n self.assertStructureIsCorrect(task, 'test', trigger_type='date',\n trigger_args={'run_date': '2017-01-25 10:00:00'})\n\n def test_init_with_invalid_trigger(self):\n trigger = {'type': 'date', 'args': {'run_date': '2017-100-25 10:00:00'}}\n with self.assertRaises(InvalidTriggerArgs):\n ScheduledTask(name='test', task_trigger=trigger)\n\n def test_init_stopped(self):\n task = ScheduledTask(name='test', status='stopped')\n self.assertStructureIsCorrect(task, 'test', status='stopped')\n\n def test_init_with_status_with_trigger_with_workflows(self):\n workflows = ['id1', 'id2', 'id3', 'id4']\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, status='running', workflows=workflows)\n self.assertStructureIsCorrect(task, 'test', trigger_type='date',\n trigger_args={'run_date': '2017-01-25 10:00:00'},\n status='running', workflows=set(workflows), expected_running_workflows=workflows)\n\n def test_init_with_status_with_trigger_without_workflows(self):\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, status='running')\n self.assertStructureIsCorrect(task, 'test', trigger_type='date',\n trigger_args={'run_date': '2017-01-25 10:00:00'},\n status='running')\n\n def test_init_with_status_trigger_unspecified(self):\n workflows = ['id1', 'id2', 'id3', 'id4']\n task = ScheduledTask(name='test', status='running', workflows=['id1', 'id2', 'id3', 'id4'])\n self.assertStructureIsCorrect(task, 'test', status='running', workflows=set(workflows))\n\n def test_update_name_desc_only(self):\n task = ScheduledTask(name='test')\n update = {'name': 'updated_name', 'description': 'desc'}\n task.update(update)\n self.assertEqual(task.name, 'updated_name')\n self.assertEqual(task.description, 'desc')\n\n def test_update_workflows_none_existing_stopped(self):\n task = ScheduledTask(name='test', status='stopped')\n 
update = {'workflows': ['a', 'b', 'c']}\n task.update(update)\n self.assertListEqual([workflow.workflow_id for workflow in task.workflows], ['a', 'b', 'c'])\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_update_workflows_none_existing_running(self):\n workflows = ['a', 'b', 'c', 'd']\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, status='running')\n update = {'workflows': ['a', 'b', 'c']}\n task.update(update)\n self.assertListEqual([workflow.workflow_id for workflow in task.workflows], ['a', 'b', 'c'])\n self.assertSchedulerWorkflowsRunningEqual(['a', 'b', 'c'])\n\n def test_update_workflows_with_existing_workflows_stopped(self):\n task = ScheduledTask(name='test', workflows=['b', 'c', 'd'])\n update = {'workflows': ['a', 'b', 'c']}\n task.update(update)\n self.assertSetEqual({workflow.workflow_id for workflow in task.workflows}, {'a', 'b', 'c'})\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_update_workflows_with_existing_workflows_running_new_only(self):\n workflows = ['a', 'b', 'c', 'd']\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, workflows=['b', 'c', 'd'], status='running')\n update = {'workflows': workflows}\n task.update(update)\n self.assertSetEqual({workflow.workflow_id for workflow in task.workflows}, {'a', 'b', 'c', 'd'})\n self.assertSchedulerWorkflowsRunningEqual(workflows)\n\n def test_update_workflows_with_existing_workflows_running_remove_only(self):\n workflows = ['a', 'b', 'c', 'd']\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, workflows=workflows, status='running')\n update = {'workflows': ['b', 'c']}\n task.update(update)\n self.assertSetEqual({workflow.workflow_id for workflow in task.workflows}, {'b', 'c'})\n self.assertSchedulerWorkflowsRunningEqual(['b', 'c'])\n\n def test_update_workflows_with_existing_workflows_running_add_and_remove(self):\n workflows = ['a', 'b', 'c', 'd']\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, workflows=['b', 'c', 'd'], status='running')\n update = {'workflows': ['a', 'b']}\n task.update(update)\n self.assertSetEqual({workflow.workflow_id for workflow in task.workflows}, {'a', 'b'})\n self.assertSchedulerWorkflowsRunningEqual(['a', 'b'])\n\n def test_update_scheduler(self):\n task = ScheduledTask(name='test', task_trigger=self.date_trigger)\n update = {'task_trigger': {'type': 'interval', 'args': {'hours': 1, 'weeks': 4}}}\n task.update(update)\n self.assertEqual(task.trigger_type, 'interval')\n self.assertDictEqual(json.loads(task.trigger_args), {'hours': 1, 'weeks': 4})\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_update_scheduler_invalid_scheduler(self):\n task = ScheduledTask(name='test', task_trigger=self.date_trigger)\n update = {'name': 'renamed', 'task_trigger': {'type': 'interval', 'args': {'invalid': 1, 'weeks': 4}}}\n with self.assertRaises(InvalidTriggerArgs):\n task.update(update)\n self.assertEqual(task.name, 'test')\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_start_from_running(self):\n task = ScheduledTask(name='test', status='running')\n task.start()\n self.assertEqual(task.status, 'running')\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_start_from_stopped_unspecified_trigger(self):\n task = ScheduledTask(name='test')\n task.start()\n self.assertEqual(task.status, 'running')\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_start_from_stopped_with_trigger(self):\n workflows = 
['a', 'b', 'c', 'd']\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, workflows=['b', 'c', 'd'])\n task.start()\n self.assertEqual(task.status, 'running')\n self.assertSchedulerWorkflowsRunningEqual(['b', 'c', 'd'])\n\n def test_stop_from_running_no_workflows(self):\n task = ScheduledTask(name='test', status='running')\n task.stop()\n self.assertEqual(task.status, 'stopped')\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_stop_from_running_with_workflows(self):\n task = ScheduledTask(name='test', task_trigger=self.date_trigger, workflows=['b', 'c', 'd'])\n task.stop()\n self.assertEqual(task.status, 'stopped')\n self.assertSchedulerWorkflowsRunningEqual(workflows=None)\n\n def test_stop_from_stopped(self):\n task = ScheduledTask(name='test')\n task.stop()\n self.assertEqual(task.status, 'stopped')\n\n def test_as_json_name_desc_only(self):\n task = ScheduledTask(name='test', description='desc')\n expected = {'id': None,\n 'name': 'test',\n 'description': 'desc',\n 'status': 'running',\n 'workflows': set(),\n 'task_trigger': {'type': 'unspecified',\n 'args': {}}}\n self.assertJsonIsCorrect(task, expected)\n\n def test_as_json_with_workflows(self):\n task = ScheduledTask(name='test', workflows=['b', 'c', 'd'])\n expected = {'id': None,\n 'name': 'test',\n 'description': '',\n 'status': 'running',\n 'workflows': {'b', 'c', 'd'},\n 'task_trigger': {'type': 'unspecified',\n 'args': {}}}\n self.assertJsonIsCorrect(task, expected)\n\n def test_as_json_with_workflows_with_duplicates(self):\n task = ScheduledTask(name='test', workflows=['b', 'c', 'd', 'd', 'c', 'b'])\n expected = {'id': None,\n 'name': 'test',\n 'description': '',\n 'status': 'running',\n 'workflows': {'b', 'c', 'd'},\n 'task_trigger': {'type': 'unspecified',\n 'args': {}}}\n self.assertJsonIsCorrect(task, expected)\n\n def test_as_json_with_scheduler(self):\n task = ScheduledTask(name='test', task_trigger=self.date_trigger)\n expected = {'id': None,\n 'name': 'test',\n 'description': '',\n 'status': 'running',\n 'workflows': set(),\n 'task_trigger': self.date_trigger}\n self.assertJsonIsCorrect(task, expected)\n\n def test_as_json_running(self):\n task = ScheduledTask(name='test', status='stopped')\n expected = {'id': None,\n 'name': 'test',\n 'description': '',\n 'status': 'stopped',\n 'workflows': set(),\n 'task_trigger': {'type': 'unspecified',\n 'args': {}}}\n self.assertJsonIsCorrect(task, expected)\n","repo_name":"sgnls/WALKOFF","sub_path":"tests/test_scheduledtasks_database.py","file_name":"test_scheduledtasks_database.py","file_ext":"py","file_size_in_byte":12294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"36434167641","text":"import sys\nsys.path.append('../..')\nsys.path.append('..')\nsys.path.append('../../')\n\nfrom type_def import *\n\nfrom tqdm import tqdm\nimport networkx as nx\nimport numpy as np\nimport pickle\nfrom gensim.models import Word2Vec\n\nfrom utils import tools, dir_tools\nfrom utils.graph import node2vec\nfrom utils.graph.index_edit import IndexDict\nfrom utils.graph.tools import cascade2edges, get_cascade_idx\nfrom work.Cascade import cascade_settings\n\n\ndef parse_cascade_line(cascade_line: str) -> Dict[str, Any]:\n \"\"\"\n 解析一个cascade文件中的一行,\n\n 一行数据里面的不同信息按\\t隔开\n - parts[0]: 不确定,但是应该是一个id?\n - parts[1]: 是一些用户\n - parts[2]: 似乎是年份?\n - parts[3]: 不像是用户id,可能是时间?\n - parts[4]: cascade中的边的信息\n\n :param cascade_line:\n :return:\n \"\"\"\n parts = cascade_line.split('\\t')\n edge_strs = parts[4].split(\" 
\")\n node_to_edges = dict()\n edge_cnt = int(parts[3])\n edges = []\n if edge_cnt != 0:\n for elem_edge in edge_strs:\n edge_parts = elem_edge.split(\":\")\n src_node = int(edge_parts[0])\n tgt_node = int(edge_parts[1])\n edges.append((src_node, tgt_node))\n # node2edges: src node -> (tgt node, tgt degree)\n cascade = {\n \"id\": int(parts[0]), # str(int)\n \"parts_1\": parts[1],\n \"parts_2\": parts[2],\n \"edge_cnt\": edge_cnt, # int\n \"edges\": edges, # List[Tuple[int, int]]\n 'label': int(parts[5]), # int\n \"time\": list(map(int, parts[6].split(' '))) # List[int]\n }\n return cascade\n\n\ndef read_cascade(cascade_path: str) -> List[dict]:\n \"\"\"\n 读取一个cascade文件\n\n 一个cascade文件的格式如下:\n 每一行都是一个cascade,具体的行格式见parse_cascade_line函数的注释部分\n\n :param cascade_path:\n :return:\n \"\"\"\n cascade_lines = open(cascade_path, 'r', encoding='utf-8').read().strip().split('\\n')\n cascade_infos = list(parse_cascade_line(x) for x in cascade_lines)\n return cascade_infos\n\n\ndef read_global_graph(global_graph_path: str):\n \"\"\"\n 读取一个Global Graph文件\n\n 一个Global Graph文件的格式如下:\n todo\n :param global_graph_path:\n :return:edge2weight, node2degree\n \"\"\"\n lines = open(global_graph_path, 'r', encoding='utf-8').read().strip().split('\\n')\n edge2weight, node2degree = {}, {}\n for elem_line in lines:\n parts = elem_line.split('\\t\\t') # StrList, len==2\n source_node = int(parts[0]) # source node of current path?\n if parts[1] != 'null': # 当source node存在后继节点\n node_freq_strs = parts[1].split('\\t')\n # StrList, str be like target_node:weight\n for elem_node_freq in node_freq_strs:\n node_freq = elem_node_freq.split(':')\n weight = int(node_freq[1])\n target_node = int(node_freq[0])\n if cascade_settings.trans_type == 0:\n edge2weight[(source_node, target_node)] = weight\n degree = len(node_freq_strs)\n else:\n degree = 0\n node2degree[source_node] = degree\n return edge2weight, node2degree\n\n\ndef cascade_edges_to_graph(\n edges: List[Tuple[int, int]],\n edge_cnt: int,\n node2degree: dict,\n edge2weight: dict,\n trans_type: int,\n pseudo_count: float):\n \"\"\"\n 将edges转换为networkx graph\n :param edges:\n :return:\n \"\"\"\n node_to_edges = dict()\n if edge_cnt != 0:\n for (elem_src, elem_tgt) in edges:\n try:\n if not elem_src in node_to_edges:\n neighbors = list()\n node_to_edges[elem_src] = neighbors\n else:\n neighbors = node_to_edges[elem_src]\n neighbors.append((elem_tgt, node2degree.get(elem_tgt, 0)))\n except:\n pass\n nx_G = nx.DiGraph()\n for source, nbr_weights in node_to_edges.items():\n # 这是老版本的dict.items()吗\n for nbr_weight in nbr_weights:\n target = nbr_weight[0]\n\n if trans_type == 0: # trans_type不是string吗,怎么又012了\n edge_weight = pseudo_count + edge2weight.get((source, target), 0)\n weight = edge_weight\n elif trans_type == 1:\n target_nbrs = node_to_edges.get(target, None)\n local_degree = 0 if target_nbrs is None else len(target_nbrs)\n local_degree += pseudo_count\n weight = local_degree\n else:\n global_degree = nbr_weight[1] + pseudo_count\n weight = global_degree\n # 应该分别对于edge,deg,DEG\n nx_G.add_edge(source, target, weight=weight)\n # 这里就是为每一条边定义了weight,用来计算转移概率的\n return nx_G\n\ndef generate_random_walk(\n edge2weight: dict,\n node2degree: dict,\n cascades: List[dict],\n walks_per_graph: int = cascade_settings.walks_per_graph,\n trans_type: int = cascade_settings.trans_type,\n walk_length: int = cascade_settings.walk_length,\n pseudo_count: float = cascade_settings.pseudo_count,\n p: float = cascade_settings.node2vec_p,\n q: float = cascade_settings.node2vec_q):\n \"\"\"\n 
在一个global graph中,为每个cascade计算随机游走路径\n\n\n :param edge2weight:\n :param node2degree:\n :param cascades:\n :param walks_per_graph: 每一个cascade生成的随机游走序列的个数\n :param trans_type:\n :param walk_length:\n :param pseudo_count:\n :param p:\n :param q:\n :return:\n List[str] 每个str都是用\\t隔开的随机游走序列\n \"\"\"\n random_walk_lines = []\n random_walk_dict = {}\n # key = cascade_id\n # value = list of random walk list\n print('random walking...')\n for elem_cascade in tqdm(cascades):\n cascade_id = elem_cascade['id']\n edge_cnt = elem_cascade['edge_cnt']\n edges = elem_cascade['edges']\n nx_G = cascade_edges_to_graph(edges, edge_cnt, node2degree, edge2weight, trans_type, pseudo_count)\n\n # List of the starting nodes.\n roots = list()\n # List of the starting nodes excluding nodes without outgoing neighbors.\n roots_noleaf = list()\n # exclude?\n\n str_list = list()\n str_list.append(str(cascade_id))\n random_walk_dict[str(cascade_id)] = []\n\n probs = list()\n probs_noleaf = list()\n weight_sum_noleaf = 0.0\n weight_sum = 0.0\n\n # Obtain sampling probabilities of roots.\n for node, weight in nx_G.out_degree(weight=\"weight\"):\n org_weight = weight\n if weight == 0:\n weight += pseudo_count\n weight_sum += weight\n if org_weight > 0:\n weight_sum_noleaf += weight\n\n for node, weight in nx_G.out_degree(weight=\"weight\"):\n org_weight = weight\n if weight == 0:\n weight += pseudo_count\n roots.append(node)\n prob = weight / weight_sum\n probs.append(prob)\n if org_weight > 0:\n roots_noleaf.append(node)\n prob = weight / weight_sum_noleaf\n probs_noleaf.append(prob)\n\n sample_total = walks_per_graph\n first_time = True\n G = node2vec.Graph(nx_G, True, p, q)\n G.preprocess_transition_probs()\n\n while True:\n if first_time:\n first_time = False\n node_list = roots\n prob_list = probs\n else:\n node_list = roots_noleaf\n prob_list = probs_noleaf\n n_sample = min(len(node_list), sample_total)\n if n_sample <= 0:\n break\n sample_total -= n_sample\n\n sampled_nodes = np.random.choice(node_list, n_sample, replace=False, p=prob_list)\n walks = G.simulate_walks(len(sampled_nodes), walk_length, sampled_nodes)\n for idx in range(len(walks)):\n if len(walks[idx]) <= walk_length:\n walks[idx] = walks[idx] + [-1] * (walk_length - len(walks[idx]))\n random_walk_dict[str(cascade_id)].extend(walks)\n for walk in walks:\n str_list.append(' '.join(str(k) for k in walk))\n result = '\\t'.join(str_list)\n random_walk_lines.append(result)\n return random_walk_dict\n\n\ndef output_random_walks_txt(random_walks: dict, filename: str):\n \"\"\"\n 将random_walk_dict转换为旧的字符串格式,然后输出\n 这样做是为了兼容旧的代码\n :param random_walks:\n :param filename:\n :return:\n \"\"\"\n lines = []\n for key, value in random_walks.items():\n walk = [str(key)]\n for elem_walk in value:\n walk.append(' '.join(list(str(x) for x in elem_walk)))\n lines.append('\\t'.join(walk))\n f = open(filename, 'w', encoding='utf-8')\n for elem in lines:\n f.write(elem + '\\n')\n f.close()\n\n\ndef load_random_walks_txt(filename: str):\n \"\"\"\n 读取旧的字符串格式的random_walk文件,然后转换为dict格式\n :param filename:\n :return:\n \"\"\"\n lines = open(filename, 'r', encoding='utf-8').read().strip().split('\\n')\n random_walk_dict = {}\n for elem_line in lines:\n parts = elem_line.split('\\t')\n cascade_id = parts[0]\n walks = list(x.split(' ') for x in parts[1:])\n random_walk_dict[cascade_id] = walks\n return random_walk_dict\n\n\ndef generate_vocab_index(original_ids):\n \"\"\"\n 为每个id重新生成一段连续的整数id列表\n :param original_ids:\n :return:\n \"\"\"\n if -1 not in original_ids:\n if 
isinstance(original_ids, set):\n original_ids.add(-1)\n else:\n original_ids.append(-1)\n print('generating vocab index...')\n vocab_index = IndexDict(original_ids)\n return vocab_index\n\n\ndef convert_random_walk_to_original_ids(random_walk_dicts: dict):\n \"\"\"\n 根据随机游走序列获取所有包含的节点的id,转化成int类型,生成一个集合\n :param random_walk_dict:\n :return:\n \"\"\"\n vocab_set = set()\n for elem_walks in random_walk_dicts.values():\n for elem_walk in elem_walks:\n for elem_node in elem_walk:\n vocab_set.add(int(elem_node))\n return vocab_set\n\n\ndef convert_random_walk_lines_to_original_ids(random_walk_lines: List[str]):\n \"\"\"\n 根据随机游走序列获取所有包含的节点的id,生成一个集合\n :param random_walk_lines:\n :return:\n \"\"\"\n\n vocab_set = set()\n print('converting random walk to original ids...')\n for elem_line in random_walk_lines:\n walks = elem_line.split('\\t')\n for elem_walk in walks[1:]:\n for elem_node in elem_walk.split():\n vocab_set.add(int(elem_node))\n return vocab_set\n\n\ndef convert_cascade_to_original_ids(cascades: List[dict]):\n \"\"\"\n 与convert_random_walk_to_original_ids类似\n 读取所有cascade中包含的节点的id,生成一个id的集合\n :param cascades:\n :return:\n \"\"\"\n vocab_set = set()\n print('converting cascade to original ids')\n for elem_cascade in cascades:\n edges = elem_cascade['edges']\n for elem_edge in edges:\n node1, node2 = elem_edge\n vocab_set.add(int(node1))\n vocab_set.add(int(node2))\n return vocab_set\n\n\ndef generate_word2vec(\n random_walk_dicts: dict,\n embedding_size=cascade_settings.word2vec_dimensions,\n window_size=cascade_settings.word2vec_window_size,\n epochs=cascade_settings.word2vec_iter_epoch,\n workers=cascade_settings.word2vec_workers,\n min_count=cascade_settings.word2vec_min_count,\n sg=cascade_settings.word2vec_sg):\n \"\"\"\n 输入所有的非测试集的随机游走序列,为每一个id训练word2vec向量\n :param random_walk_dicts: 非测试集的随机游走序列\n :param embedding_size:\n :param window_size:\n :param epochs:\n :param workers:\n :param min_count:\n :param sg:\n :return:\n \"\"\"\n walks = []\n for elem_walks in random_walk_dicts.values():\n walks.extend(elem_walks)\n model = Word2Vec(\n walks,\n vector_size=embedding_size,\n window=window_size,\n min_count=min_count,\n sg=sg,\n workers=workers,\n epochs=epochs)\n return model\n\n\ndef resemble_word2vec(word2vec_model, vocab_index):\n \"\"\"\n 为word2vec词向量的每个node,用vocab_index分配新的index\n :param word2vec_model:\n :param vocab_index:\n :return:\n \"\"\"\n np.random.seed(13) # 在空白处填充随机值\n wv = word2vec_model.wv\n num_nodes, num_dims = wv.vectors.shape\n\n # 随机生成一个已有词汇表大小的词嵌入矩阵()\n node_vec = np.random.normal(size=(vocab_index.length(), num_dims))\n node_vec = node_vec.tolist()\n for i in tqdm(list(range(num_nodes))):\n vec = wv[i]\n vec_idx = int(wv.index_to_key[i])\n if vec_idx not in vocab_index.vocab_set:\n continue\n node_vec[vocab_index.new(vec_idx)] = vec\n return np.array(node_vec)\n\n\ndef generate_deepcas_data(\n cascade_directory: str = cascade_settings.cascade_directory,\n globalgraph_directory: str = cascade_settings.globalgraph_directory,\n output_directory: str = cascade_settings.output_directory,\n walks_per_graph: int = cascade_settings.walks_per_graph,\n walk_length: int = cascade_settings.walk_length):\n \"\"\"\n 生成一次DeepCas以及以deepcas为基础的模型训练所需的所有数据\n walks_per_graph与walk_length为核心参数,由于经常用到,所以也加入到参数中\n :param cascade_directory: cascade文件的存放路径。\n 具体的cascade文件名在settings中配置了\n :param globalgraph_directory: GlobalGraph文件的存放路径\n :param output_directory: 生成的random_walk与vocab_index的保存路径\n :param walks_per_graph:\n :param walk_length:\n :return:\n \"\"\"\n # 处理一下路径后缀\n if 
cascade_directory[-1] != '/':\n cascade_directory += '/'\n if globalgraph_directory[-1] != '/':\n globalgraph_directory += '/'\n if output_directory[-1] != '/':\n output_directory += '/'\n\n # 首先分别读取train、val、test的cascade文件,以及global_graph的信息\n train_cascades = read_cascade(cascade_directory + cascade_settings.cascade_train_file)\n val_cascades = read_cascade(cascade_directory + cascade_settings.cascade_val_file)\n test_cascades = read_cascade(cascade_directory + cascade_settings.cascade_test_file)\n edge2weight, node2degree = read_global_graph(globalgraph_directory + cascade_settings.global_graph_file)\n\n # 分别生成train、val、test的random_walk\n train_random_walks = generate_random_walk(edge2weight, node2degree, train_cascades, walks_per_graph=walks_per_graph, walk_length=walk_length)\n val_random_walks = generate_random_walk(edge2weight, node2degree, val_cascades, walks_per_graph=walks_per_graph, walk_length=walk_length)\n test_random_walks = generate_random_walk(edge2weight, node2degree, test_cascades, walks_per_graph=walks_per_graph, walk_length=walk_length)\n\n # 分别使用train、val、test生成original_ids,然后合并。这里不用random_walk生成,因为random_walk可能不包含某些node\n train_original_ids = convert_cascade_to_original_ids(train_cascades)\n val_original_ids = convert_cascade_to_original_ids(val_cascades)\n test_original_ids = convert_cascade_to_original_ids(test_cascades)\n original_ids = set()\n original_ids = original_ids.union(train_original_ids)\n original_ids = original_ids.union(val_original_ids)\n original_ids = original_ids.union(test_original_ids)\n\n # 用original_ids生成vocab_index\n vocab_index = generate_vocab_index(original_ids)\n\n # 使用训练集的random_walk训练word2vec嵌入矩阵\n original_word2vec = generate_word2vec(train_random_walks)\n word2vec = resemble_word2vec(original_word2vec, vocab_index)\n\n # 保存\n pickle.dump(vocab_index, open(output_directory + cascade_settings.vocab_index_file, 'wb'))\n pickle.dump(word2vec, open(output_directory + cascade_settings.word2vec_file, 'wb'))\n output_random_walks_txt(train_random_walks, output_directory + cascade_settings.random_walks_train_file)\n output_random_walks_txt(val_random_walks, output_directory + cascade_settings.random_walks_val_file)\n output_random_walks_txt(test_random_walks, output_directory + cascade_settings.random_walks_test_file)\n\n\nif __name__ == '__main__':\n # generate_deepcas_data(\n # cascade_directory='../../../DeepCas/data/other/weibo_data/预测T之后的增长/T_3_h',\n # globalgraph_directory='../../../DeepCas/data/other/weibo_data/预测T之后的增长',\n # output_directory='../../../DeepCas/data/other/weibo_data/预测T之后的增长/T_3_h_new'\n # )\n generate_deepcas_data(\n cascade_directory='../../data/cascade/APS/5',\n globalgraph_directory='../../data/cascade/APS',\n output_directory='../../data/cascade/APS/5'\n )","repo_name":"1170500820/DLtools","sub_path":"work/Cascade/cascade_utils.py","file_name":"cascade_utils.py","file_ext":"py","file_size_in_byte":17360,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"31616630595","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: LonJE\n\nimport click\nfrom gevent.pywsgi import WSGIServer\nfrom app import app\n\n@click.command()\n@click.option('--port', '-p', default=5000, help='listening port')\ndef run(port):\n # app.run(debug=True, port=port)\n http_server = WSGIServer(('127.0.0.1', port), app)\n http_server.serve_forever()\n\nif __name__ == '__main__':\n 
run()\n","repo_name":"chen787331608/lyric-search","sub_path":"web/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"39787578220","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom google.protobuf import text_format\n\nfrom tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto\nfrom tensorflow.contrib.tensor_forest.proto import tensor_forest_params_pb2 as _params_proto\nfrom tensorflow.contrib.tensor_forest.python import tensor_forest\nfrom tensorflow.contrib.tensor_forest.python.ops import model_ops\nfrom tensorflow.contrib.tensor_forest.python.ops import stats_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.platform import tf_logging as logging\n\n\n# Stores tuples of (leaf model type, stats model type)\nCLASSIFICATION_LEAF_MODEL_TYPES = {\n 'all_dense': (_params_proto.MODEL_DENSE_CLASSIFICATION,\n _params_proto.STATS_DENSE_GINI),\n 'all_sparse': (_params_proto.MODEL_SPARSE_CLASSIFICATION,\n _params_proto.STATS_SPARSE_GINI),\n 'sparse_then_dense':\n (_params_proto.MODEL_SPARSE_OR_DENSE_CLASSIFICATION,\n _params_proto.STATS_SPARSE_THEN_DENSE_GINI),\n}\nREGRESSION_MODEL_TYPE = (\n _params_proto.MODEL_REGRESSION,\n _params_proto.STATS_LEAST_SQUARES_REGRESSION,\n _params_proto.COLLECTION_BASIC)\n\nCOLLECTION_TYPES = {\n 'basic': _params_proto.COLLECTION_BASIC,\n 'graph_runner': _params_proto.GRAPH_RUNNER_COLLECTION\n}\n\nFINISH_TYPES = {\n 'basic': _params_proto.SPLIT_FINISH_BASIC,\n 'hoeffding': _params_proto.SPLIT_FINISH_DOMINATE_HOEFFDING,\n 'bootstrap': _params_proto.SPLIT_FINISH_DOMINATE_BOOTSTRAP\n}\nPRUNING_TYPES = {\n 'none': _params_proto.SPLIT_PRUNE_NONE,\n 'half': _params_proto.SPLIT_PRUNE_HALF,\n 'quarter': _params_proto.SPLIT_PRUNE_QUARTER,\n '10_percent': _params_proto.SPLIT_PRUNE_10_PERCENT,\n 'hoeffding': _params_proto.SPLIT_PRUNE_HOEFFDING,\n}\nSPLIT_TYPES = {\n 'less_or_equal': _tree_proto.InequalityTest.LESS_OR_EQUAL,\n 'less': _tree_proto.InequalityTest.LESS_THAN\n}\n\n\ndef build_params_proto(params):\n \"\"\"Build a TensorForestParams proto out of the V4ForestHParams object.\"\"\"\n proto = _params_proto.TensorForestParams()\n proto.num_trees = params.num_trees\n proto.max_nodes = params.max_nodes\n proto.is_regression = params.regression\n proto.num_outputs = params.num_classes\n proto.num_features = params.num_features\n\n proto.leaf_type = params.v4_leaf_model_type\n proto.stats_type = params.v4_stats_model_type\n proto.collection_type = params.v4_split_collection_type\n proto.pruning_type.type = params.v4_pruning_type\n proto.finish_type.type = params.v4_finish_type\n\n proto.inequality_test_type = params.v4_split_type\n\n proto.drop_final_class = False\n proto.collate_examples = params.v4_collate_examples\n proto.checkpoint_stats = params.v4_checkpoint_stats\n proto.use_running_stats_method = params.v4_use_running_stats_method\n proto.initialize_average_splits = params.v4_initialize_average_splits\n\n if params.v4_prune_every_samples:\n text_format.Merge(params.v4_prune_every_samples,\n proto.pruning_type.prune_every_samples)\n else:\n # Pruning half-way through split_after_samples seems like a decent default,\n # making it easy to select the number being pruned with v4_pruning_type\n # while 
not paying the cost of pruning too often. Note that this only holds\n # if not using a depth-dependent split_after_samples.\n if params.v4_split_after_samples:\n logging.error(\n 'If using depth-dependent split_after_samples and also pruning, '\n 'need to set v4_prune_every_samples')\n proto.pruning_type.prune_every_samples.constant_value = (\n params.split_after_samples / 2)\n\n if params.v4_finish_check_every_samples:\n text_format.Merge(params.v4_finish_check_every_samples,\n proto.finish_type.check_every_steps)\n else:\n # Checking for finish every quarter through split_after_samples seems\n # like a decent default. We don't want to incur the checking cost too often,\n # but (at least for hoeffding) it's lower than the cost of pruning so\n # we can do it a little more frequently.\n proto.finish_type.check_every_steps.constant_value = int(\n params.split_after_samples / 4)\n\n if params.v4_split_after_samples:\n text_format.Merge(params.v4_split_after_samples, proto.split_after_samples)\n else:\n proto.split_after_samples.constant_value = params.split_after_samples\n\n if params.v4_num_splits_to_consider:\n text_format.Merge(params.v4_num_splits_to_consider,\n proto.num_splits_to_consider)\n else:\n proto.num_splits_to_consider.constant_value = params.num_splits_to_consider\n\n proto.dominate_fraction.constant_value = params.dominate_fraction\n proto.min_split_samples.constant_value = params.split_after_samples\n\n if params.v4_param_file:\n with open(params.v4_param_file) as f:\n text_format.Merge(f.read(), proto)\n\n return proto\n\n\nclass V4ForestHParams(object):\n\n def __init__(self, hparams):\n for k, v in six.iteritems(hparams.__dict__):\n setattr(self, k, v)\n\n # How to store leaf models.\n model_name = getattr(self, 'v4_model_name', 'all_dense')\n self.v4_leaf_model_type = (\n REGRESSION_MODEL_TYPE[0] if self.regression else\n CLASSIFICATION_LEAF_MODEL_TYPES[model_name][0])\n\n # How to store stats objects.\n self.v4_stats_model_type = (\n REGRESSION_MODEL_TYPE[1] if self.regression else\n CLASSIFICATION_LEAF_MODEL_TYPES[model_name][1])\n\n split_collection_name = getattr(self, 'v4_split_collection_name',\n 'basic')\n self.v4_split_collection_type = (\n REGRESSION_MODEL_TYPE[2] if self.regression else\n COLLECTION_TYPES[split_collection_name])\n\n finish_name = getattr(self, 'v4_finish_name', 'basic')\n self.v4_finish_type = (\n _params_proto.SPLIT_FINISH_BASIC if self.regression else\n FINISH_TYPES[finish_name])\n\n pruning_name = getattr(self, 'v4_pruning_name', 'none')\n self.v4_pruning_type = PRUNING_TYPES[pruning_name]\n\n self.v4_collate_examples = getattr(self, 'v4_collate_examples', False)\n\n self.v4_checkpoint_stats = getattr(self, 'v4_checkpoint_stats', False)\n self.v4_use_running_stats_method = getattr(\n self, 'v4_use_running_stats_method', False)\n self.v4_initialize_average_splits = getattr(\n self, 'v4_initialize_average_splits', False)\n\n self.v4_param_file = getattr(self, 'v4_param_file', None)\n\n self.v4_split_type = getattr(self, 'v4_split_type',\n SPLIT_TYPES['less_or_equal'])\n\n # Special versions of the normal parameters, that support depth-dependence\n self.v4_num_splits_to_consider = getattr(self, 'v4_num_splits_to_consider',\n None)\n self.v4_split_after_samples = getattr(self, 'v4_split_after_samples',\n None)\n self.v4_finish_check_every_samples = getattr(\n self, 'v4_finish_check_every_samples', None)\n self.v4_prune_every_samples = getattr(\n self, 'v4_prune_every_samples', None)\n\n\nclass 
TreeTrainingVariablesV4(tensor_forest.TreeTrainingVariables):\n \"\"\"Stores tf.Variables for training a single random tree.\"\"\"\n\n def __init__(self, params, tree_num, training):\n if (not hasattr(params, 'params_proto') or\n not isinstance(params.params_proto,\n _params_proto.TensorForestParams)):\n params.params_proto = build_params_proto(params)\n\n params.serialized_params_proto = params.params_proto.SerializeToString()\n self.stats = None\n if training:\n # TODO(gilberth): Manually shard this to be able to fit it on\n # multiple machines.\n self.stats = stats_ops.fertile_stats_variable(\n params, '', self.get_tree_name('stats', tree_num))\n self.tree = model_ops.tree_variable(\n params, '', self.stats, self.get_tree_name('tree', tree_num))\n\n\nclass RandomTreeGraphsV4(tensor_forest.RandomTreeGraphs):\n \"\"\"Builds TF graphs for random tree training and inference.\"\"\"\n\n def tree_initialization(self):\n return control_flow_ops.no_op()\n\n def training_graph(self, input_data,\n input_labels,\n random_seed,\n data_spec,\n sparse_features=None,\n input_weights=None):\n if input_weights is None:\n input_weights = []\n\n sparse_indices = []\n sparse_values = []\n sparse_shape = []\n if sparse_features is not None:\n sparse_indices = sparse_features.indices\n sparse_values = sparse_features.values\n sparse_shape = sparse_features.dense_shape\n\n if input_data is None:\n input_data = []\n\n leaf_ids = model_ops.traverse_tree_v4(\n self.variables.tree,\n input_data,\n sparse_indices,\n sparse_values,\n sparse_shape,\n input_spec=data_spec.SerializeToString(),\n params=self.params.serialized_params_proto)\n\n update_model = model_ops.update_model_v4(\n self.variables.tree,\n leaf_ids,\n input_labels,\n input_weights,\n params=self.params.serialized_params_proto)\n\n finished_nodes = stats_ops.process_input_v4(\n self.variables.tree,\n self.variables.stats,\n input_data,\n sparse_indices,\n sparse_values,\n sparse_shape,\n input_labels,\n input_weights,\n leaf_ids,\n input_spec=data_spec.SerializeToString(),\n random_seed=random_seed,\n params=self.params.serialized_params_proto)\n\n with ops.control_dependencies([update_model]):\n return stats_ops.grow_tree_v4(\n self.variables.tree,\n self.variables.stats,\n finished_nodes,\n params=self.params.serialized_params_proto)\n\n def inference_graph(self, input_data, data_spec, sparse_features=None):\n sparse_indices = []\n sparse_values = []\n sparse_shape = []\n if sparse_features is not None:\n sparse_indices = sparse_features.indices\n sparse_values = sparse_features.values\n sparse_shape = sparse_features.dense_shape\n if input_data is None:\n input_data = []\n\n return model_ops.tree_predictions_v4(\n self.variables.tree,\n input_data,\n sparse_indices,\n sparse_values,\n sparse_shape,\n input_spec=data_spec.SerializeToString(),\n params=self.params.serialized_params_proto)\n\n def average_impurity(self):\n return constant_op.constant(0)\n\n def size(self):\n \"\"\"Constructs a TF graph for evaluating the current number of nodes.\n\n Returns:\n The current number of nodes in the tree.\n \"\"\"\n return model_ops.tree_size(self.variables.tree)\n\n def feature_usage_counts(self):\n return model_ops.feature_usage_counts(\n self.variables.tree, params=self.params.serialized_params_proto)\n\n\nclass RandomForestGraphsV4(tensor_forest.RandomForestGraphs):\n\n def __init__(self, params, tree_graphs=None, tree_variables_class=None,\n **kwargs):\n if not isinstance(params, V4ForestHParams):\n params = V4ForestHParams(params)\n 
super(RandomForestGraphsV4, self).__init__(\n params, tree_graphs=tree_graphs or RandomTreeGraphsV4,\n tree_variables_class=(tree_variables_class or TreeTrainingVariablesV4),\n **kwargs)\n","repo_name":"baidu-research/tensorflow-allreduce","sub_path":"tensorflow/contrib/tensor_forest/python/tensor_forest_v4.py","file_name":"tensor_forest_v4.py","file_ext":"py","file_size_in_byte":11405,"program_lang":"python","lang":"en","doc_type":"code","stars":372,"dataset":"github-code","pt":"3"} +{"seq_id":"43025127634","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport wtforms\n\nfrom warehouse import forms\nfrom warehouse.i18n import localize as _\n\n\nclass DeletePublisherForm(forms.Form):\n __params__ = [\"publisher_id\"]\n\n publisher_id = wtforms.StringField(\n validators=[\n wtforms.validators.InputRequired(message=_(\"Specify a publisher ID\")),\n wtforms.validators.UUID(message=_(\"Publisher must be specified by ID\")),\n ]\n )\n","repo_name":"pypi/warehouse","sub_path":"warehouse/oidc/forms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"} +{"seq_id":"9934404601","text":"def gcd(a, b):\n if a == 0:\n return b\n return gcd(b % a, a)\n\ndef phi(n):\n r = 1\n for i in range(2, n):\n if gcd(i,n) == 1:\n r += 1\n return r\n\nprint(phi(10))","repo_name":"RMzemog/DS-Algo","sub_path":"Practice/Euler’s Totient Function.py","file_name":"Euler’s Totient Function.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18669501797","text":"class Caeser_cipher:\n '''\n The Caeser cipher accepts two inputs: a key and a plaintext or input string. It lowercases the input string and transforms each word in accordance with the key, much like any other monoalphabetic encryption technique before returning ciphered plaintext in uppercase.\n '''\n\n def __init__(self, key, input_string):\n self.key = key\n self.input_string = input_string\n\n def encryption(self):\n encrypted_text = \"\"\n for word in self.input_string:\n encrypted_text += chr((ord(word)-97 + self.key) % 26 + 97)\n return encrypted_text.upper()\n\n def decryption(self):\n plaintext = \"\"\n for word in self.input_string:\n plaintext += chr((ord(word)-97 - self.key) % 26 + 97)\n return plaintext.lower()\n\n #get_user_input method takes plain text input and lowercases.\n @classmethod\n def get_user_input(self):\n try:\n key = int(input('Enter the key value: '))\n input_string = input('Enter the plaintext: ')\n return self(key, input_string.lower())\n except:\n print('Invalid input!')\n\n\ndef main():\n obj = Caeser_cipher.get_user_input()\n print(\"Note:\\n 1.) Encryption \\n 2.) 
Decryption\")\n choice = int(input(\"Enter your choice:\")) \n if choice == 1:\n print(\"Encrypted text : \" + str(obj.encryption()))\n elif choice== 2 : \n print(\"Decrypted text : \" + str(obj.decryption()))\n else:\n print(\"Invalid choice\")\nmain()","repo_name":"n-pradip/Cryptography","sub_path":"caeser_cipher/caeser_cipher.py","file_name":"caeser_cipher.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14593304367","text":"import os\nimport sqlite3\nimport re\nfrom flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__, static_folder=os.path.dirname(os.path.realpath(__file__)) + \"/static\")\n\ncolors = [\n\t\"#FF0029\",\"#377EB8\",\"#66A61E\",\"#984EA2\",\n\t\"#00D2D5\",\"#FF7F00\",\"#AF8D00\",\"#7F80CD\",\n\t\"#B3E900\",\"#C42E60\",\"#A65628\",\"#F781BF\",\n\t\"#8DD3C7\",\"#BEBADA\",\"#FB8072\",\"#80B1D3\",\n\t\"#FDB462\",\"#FCCDE5\",\"#BC80BD\",\"#FFED6F\"]\n\nDATABASE=\"/usr/local/bin/dmarchiver/dmarchiver.sqlite\"\n\n@app.route(\"/\")\ndef hello():\n\n\tlabels = []\n\tvalues = []\n\tpub_domain = []\n\tpub_domain_count = []\n\n\tconn = sqlite3.connect(DATABASE)\n\tcur = conn.cursor()\n\tcur.execute(\"SELECT org_name, COUNT(*) FROM dmarc_reports GROUP BY org_name\")\n\tconn.commit()\n\n\trows = cur.fetchall();\n\n\tfor i in rows:\n\t\tlabels.append(i[0])\n\t\tvalues.append(i[1])\n\n\tstr_labels = \"\"\n\tstr_values = \"\"\n\tstr_pub_domain_count = \"\"\n\n\tfor i in values:\n\t\tstr_values = str(str_values) + str(i) + \",\"\n\n\tcur.execute(\"SELECT pub_domain, COUNT(*) FROM dmarc_reports GROUP BY pub_domain\")\n\tconn.commit()\n\n\trows = cur.fetchall();\n\n\tfor i in rows:\n\t\tpub_domain.append(i[0])\n\t\tpub_domain_count.append(i[1])\n\n\tfor i in pub_domain_count:\n\t\tstr_pub_domain_count = str(str_pub_domain_count) + str(i) + \",\"\n\n\n\treturn render_template(\"test.html\",title='DMARC Report Overview', max=100, values=str_values, labels=labels, colors=colors, pub_domain=pub_domain, pub_domain_count=str_pub_domain_count)\n\n\n@app.route('/alignment')\ndef algiment():\n\treturn render_template(\"alignment.html\",title='DMARC Alignment Overview', max=100, colors=colors)\n\n\n\n\n#@app.route(\"/\")\n\n\n","repo_name":"mischmeister/dmarchiver","sub_path":"web/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42322593709","text":"# exchanges\nDEFAULT_DISTRICT_ID = 1\nEXCHANGE_ROUTING_KEY = ''\n\n# hot storage\nALARM_TAG_LIST = [\n 'controller.cabinet-door',\n 'controller.fire',\n 'controller.station-door',\n]\n\nALL = '__all__'\nEVENTS_KEY = 'ctrl:{}:events'\nEVENTS_ZCARD = 500\nSTATE_KEY = 'ctrl{}:{}:state'\n\n# listeners\nRECONNECT_TIMEOUT = 10\n","repo_name":"NilovAlexander/esb-worker","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14810812461","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Category\nfrom .serializers import CategorySerializerGet, CategorySerializerPost\n\n\ndef save_children(parent, children):\n child_object = {}\n for child in children:\n category = {\n 'name': child['name'],\n 'parent': parent.id\n }\n serializer = 
CategorySerializerPost(data=category)\n if serializer.is_valid(raise_exception=True):\n child_object = serializer.save()\n\n if child.get('children'):\n save_children(child_object, child['children'])\n\n\nclass CategoryView(APIView):\n def get(self, request, pk):\n try:\n categories = Category.objects.get(pk=pk)\n except Category.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = CategorySerializerGet(categories)\n return Response(serializer.data)\n\n def post(self, request):\n name = request.data.get('name')\n parent = None\n children = request.data.get('children')\n child_object = {}\n category = {\n 'name': name,\n 'parent': parent\n }\n serializer = CategorySerializerPost(data=category)\n if serializer.is_valid(raise_exception=True):\n parent = serializer.save()\n\n for child in children:\n category = {\n 'name': child['name'],\n 'parent': parent.id\n }\n serializer = CategorySerializerPost(data=category)\n if serializer.is_valid(raise_exception=True):\n child_object = serializer.save()\n\n if child['children']:\n save_children(child_object, child['children'])\n return Response({'success': 'Categories saved'})\n","repo_name":"den503/categories-api","sub_path":"categories/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41003781023","text":"import sqlite3\nimport os\nimport requests\n\nclass Database(object):\n def __init__(self):\n self.db = sqlite3.connect(\"database.db\")\n self.cursor = self.db.cursor()\n self.cursor.execute(\"CREATE TABLE IF NOT EXISTS Pedidos (id INTEGER PRIMARY KEY AUTOINCREMENT, cliente TEXT, fecha TEXT);\")\n self.cursor.execute(\"CREATE TABLE IF NOT EXISTS PedidosProductos (id INTEGER REFERENCES Pedidos(id), nombre TEXT, cantidad NUMERIC, precio NUMERIC);\")\n self.db.commit()\n def registrar_pedido(self, pedido):\n self.cursor.execute(\"INSERT INTO Pedidos(cliente, fecha) VALUES (?,?)\", (pedido['cliente'], pedido['fecha']))\n id_last=self.cursor.lastrowid\n for producto in pedido['productos']:\n self.cursor.execute(\"INSERT INTO PedidosProductos(id, nombre, cantidad, precio) VALUES (?,?,?,?)\", (id_last, producto['nombre'], producto['cantidad'], producto['precio']))\n #return {\"ok\": True, \"id\": id_last}\n self.db.commit()\n return {\"ok\": True, \"id\": id_last}\n def close(self):\n self.db.close()","repo_name":"Jovamih/distribuidos-pedidos-api","sub_path":"modula.py","file_name":"modula.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36054530632","text":"from aiogram import types, Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import CommandStart\nfrom aiogram.utils.markdown import hcode\n\nfrom tgbot.functions.gettext_func import get_start_text\nfrom tgbot.functions.minor_functions import get_date_order\nfrom tgbot.keyboards.reply import lang_cb, main_keyb, lang_keyb\nfrom tgbot.middlewares.lang_middleware import _, i18n\nfrom tgbot.misc.set_bot_commands import set_default_commands\n\n\nasync def bot_start(message: types.Message, state: FSMContext, session, db_commands):\n user = await db_commands.get_user(user_id=message.from_user.id)\n\n if user is None:\n config = message.bot.get('config')\n text = f\"New user: \\n\" \\\n f\"Full name: {message.from_user.get_mention(as_html=True)}\\n\" \\\n f\"Username: @{message.from_user.username}\\n\" \\\n 
f\"ID: {hcode(message.from_user.id)}\"\n await message.bot.send_message(chat_id=config.tg_bot.chat_id, text=text)\n\n await db_commands.add_user(user_id=message.from_user.id,\n first_name=message.from_user.first_name,\n last_name=message.from_user.last_name,\n username=message.from_user.username,\n lang_code=message.from_user.language_code,\n role='user'\n )\n if str(message.from_user.id) in config.tg_bot.admin_ids:\n await db_commands.set_admins(message.from_user.id)\n\n await session.commit()\n\n await message.answer(_(\"🌐 Выберите язык:\"), reply_markup=lang_keyb)\n await state.set_state(\"choosing_lang_start\")\n\n else:\n await message.answer(await get_start_text(message.from_user.full_name,\n message.from_user.id, db_commands))\n\n\nasync def choosing_language_start(call: types.CallbackQuery, state: FSMContext, callback_data: dict, db_commands,\n session):\n lang = callback_data.get(\"name\")\n i18n.ctx_locale.set(lang)\n await call.answer(text=_(\"🇷🇺 Вы выбрали русский язык\").format(lang=lang))\n await call.message.delete()\n await call.message.answer(await get_start_text(call.from_user.full_name,\n call.from_user.id, db_commands),\n reply_markup=main_keyb())\n await set_default_commands(call.bot)\n\n await db_commands.update_preferred_date_order(call.from_user.id, get_date_order(lang))\n await db_commands.update_language(user_id=call.from_user.id, lang=lang)\n await session.commit()\n\n await state.reset_state()\n\n\nasync def choosing_language_start_state(message: types.Message):\n await message.answer(_(\"🌐 Выберите язык:\"), reply_markup=lang_keyb)\n\n\ndef register_start(dp: Dispatcher):\n dp.register_message_handler(bot_start, CommandStart())\n dp.register_callback_query_handler(choosing_language_start, lang_cb.filter(), state=\"choosing_lang_start\")\n dp.register_message_handler(choosing_language_start_state, state=\"choosing_lang_start\")\n","repo_name":"abrikk/time-birthday-bot","sub_path":"tgbot/handlers/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36438326386","text":"from pwn import *\nimport string\n\nUP = string.ascii_uppercase[:26]\na = [0 for i in range(9)]\n\nCONN = remote(\"chals.tuctf.com\", 30002)\n\ndef get_dict(l, length=15):\n d = {}\n for i in range(26):\n p = remote(\"chals.tuctf.com\", 30002)\n level(p,l, chr(ord(\"A\")+i)*length)\n log.info(\"Getting \" + chr(ord(\"A\")+i))\n p.recvuntil(b\"encrypted is \")\n e = p.readline()[:-1].decode()\n a = [e[j*5:j*5+5] for j in range(int(len(e)/5))]\n d[chr(ord(\"A\")+i)] = a\n p.close()\n return d\n\ndef gen_length(length):\n s = UP[:length]\n return s\n\ndef get_pos(l, d):\n pos_dict = [0, 0]\n for i in range(2,18):\n pos = []\n p = remote(\"chals.tuctf.com\", 30002)\n text = gen_length(i)\n level(p,l, text)\n log.info(\"Getting \" + str(i))\n p.recvuntil(b\"encrypted is \")\n e = p.readline()[:-1].decode()\n t = [e[j*5:j*5+5] for j in range(int(len(e)/5))]\n for j in range(len(text)):\n pos.append(t.index(d[text[j]][0]))\n pos_dict.append(pos)\n p.close()\n return pos_dict\n\n\ndef solveX(l, d, pos_dict):\n level(CONN,l)\n for i in range(50):\n CONN.recvuntil(b\"Decrypt \")\n result = \"\"\n target = CONN.readline()[:-1].decode()\n log.info(\"Target: \" + target)\n length = int(len(target)/5)\n for i in range(int(len(target)/5)):\n pos = pos_dict[length][i]\n for j in d.keys():\n if (d[j][0]==target[pos*5:pos*5+5]):\n result+=j\n \n log.info(\"Decrypt: \" + result)\n 
CONN.sendline(result.encode())\n\ndef solve(l, d):\n level(CONN,l)\n for i in range(50):\n CONN.recvuntil(b\"Decrypt \")\n result = \"\"\n target = CONN.readline()[:-1].decode()\n log.info(\"Target: \" + target)\n for i in range(int(len(target)/5)):\n for key in d.keys():\n sub = d[key]\n if(sub[i]==target[5*i:5*i+5]):\n result += key\n\n log.info(\"Decrypt: \" + result)\n CONN.sendline(result.encode())\n\ndef level(p, x, text=\"a\"):\n p.sendlineafter(b\"What level? \",str(x).encode())\n p.sendlineafter(b\"Give me text:\",text.encode())\nfor i in range(10):\n log.info(\"Level: \" + str(i))\n if(i==7 or i==6 or i==9):\n d = get_dict(i,1)\n p_d = get_pos(i, d)\n solveX(i, d, p_d)\n else:\n d = get_dict(i,17)\n solve(i,d)\n\nCONN.interactive()\n","repo_name":"stewie2k2/CTF-writeup","sub_path":"tuctf2022/crypto/bacon.py","file_name":"bacon.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7391128123","text":"#:coding=utf8:\n\nimport logging\n\nfrom django.test import TestCase as DjangoTestCase\nfrom django.conf import settings\n\nfrom jogging.models import Log, jogging_init\n\nclass DatabaseHandlerTestCase(DjangoTestCase):\n \n def setUp(self):\n from jogging.handlers import DatabaseHandler, MockHandler\n import logging\n \n self.LOGGING = getattr(settings, 'LOGGING', None)\n \n settings.LOGGING = {\n 'database_test': {\n 'handler': DatabaseHandler(),\n 'level': logging.DEBUG,\n },\n 'multi_test': {\n 'handlers': [\n { 'handler': DatabaseHandler(), 'level': logging.DEBUG },\n { 'handler': MockHandler(), 'level': logging.DEBUG },\n ],\n },\n }\n \n jogging_init()\n \n def tearDown(self):\n import logging\n\n # clear out all handlers on loggers\n loggers = [logging.getLogger(\"\"), logging.getLogger(\"database_test\"), logging.getLogger(\"multi_test\")]\n for logger in loggers:\n logger.handlers = []\n \n # delete all log entries in the database\n for l in Log.objects.all():\n l.delete()\n \n if self.LOGGING:\n settings.LOGGING = self.LOGGING\n jogging_init()\n \n def test_basic(self):\n logger = logging.getLogger(\"database_test\")\n logger.info(\"My Logging Test\")\n log_obj = Log.objects.latest()\n self.assertEquals(log_obj.level, \"INFO\")\n self.assertEquals(log_obj.source, \"database_test\")\n self.assertEquals(log_obj.msg, \"My Logging Test\")\n self.assertTrue(log_obj.host)\n \n def test_multi(self):\n logger = logging.getLogger(\"multi_test\")\n logger.info(\"My Logging Test\")\n \n log_obj = Log.objects.latest()\n self.assertEquals(log_obj.level, \"INFO\")\n self.assertEquals(log_obj.source, \"multi_test\")\n self.assertEquals(log_obj.msg, \"My Logging Test\")\n self.assertTrue(log_obj.host)\n \n log_obj = settings.LOGGING[\"multi_test\"][\"handlers\"][1][\"handler\"].msgs[0]\n self.assertEquals(log_obj.levelname, \"INFO\")\n self.assertEquals(log_obj.name, \"multi_test\")\n self.assertEquals(log_obj.msg, \"My Logging Test\")\n\nclass DictHandlerTestCase(DjangoTestCase):\n\n def setUp(self):\n from jogging.handlers import MockHandler\n import logging\n \n self.LOGGING = getattr(settings, 'LOGGING', None)\n\n settings.LOGGING = {\n 'dict_handler_test': {\n 'handlers': [\n { 'handler': MockHandler(), 'level': logging.ERROR },\n { 'handler': MockHandler(), 'level': logging.INFO },\n ],\n },\n }\n \n jogging_init()\n \n def tearDown(self):\n import logging\n\n # clear out all handlers on loggers\n loggers = [logging.getLogger(\"\"), logging.getLogger(\"database_test\"), 
logging.getLogger(\"multi_test\")]\n for logger in loggers:\n logger.handlers = []\n \n # delete all log entries in the database\n for l in Log.objects.all():\n l.delete()\n \n if self.LOGGING:\n settings.LOGGING = self.LOGGING\n jogging_init()\n \n def test_basic(self):\n logger = logging.getLogger(\"dict_handler_test\")\n error_handler = settings.LOGGING[\"dict_handler_test\"][\"handlers\"][0][\"handler\"]\n info_handler = settings.LOGGING[\"dict_handler_test\"][\"handlers\"][1][\"handler\"]\n\n\n logger.info(\"My Logging Test\")\n # Make sure we didn't log to the error handler\n self.assertEquals(len(error_handler.msgs), 0)\n\n log_obj = info_handler.msgs[0]\n self.assertEquals(log_obj.levelname, \"INFO\")\n self.assertEquals(log_obj.name, \"dict_handler_test\")\n self.assertEquals(log_obj.msg, \"My Logging Test\")\n\nclass GlobalExceptionTestCase(DjangoTestCase):\n urls = 'jogging.tests.urls'\n \n def setUp(self):\n from jogging.handlers import DatabaseHandler, MockHandler\n import logging\n \n self.LOGGING = getattr(settings, 'LOGGING', None)\n self.GLOBAL_LOG_HANDLERS = getattr(settings, 'GLOBAL_LOG_HANDLERS', None)\n self.GLOBAL_LOG_LEVEL = getattr(settings, 'GLOBAL_LOG_LEVEL', None)\n \n loggers = [logging.getLogger(\"\")]\n for logger in loggers:\n logger.handlers = []\n \n settings.LOGGING = {}\n settings.GLOBAL_LOG_HANDLERS = [MockHandler()]\n settings.GLOBAL_LOG_LEVEL = logging.DEBUG\n \n jogging_init()\n \n def tearDown(self):\n import logging\n\n # clear out all handlers on loggers\n loggers = [logging.getLogger(\"\")]\n for logger in loggers:\n logger.handlers = []\n \n # delete all log entries in the database\n for l in Log.objects.all():\n l.delete()\n \n if self.LOGGING:\n settings.LOGGING = self.LOGGING\n if self.GLOBAL_LOG_HANDLERS:\n settings.GLOBAL_LOG_HANDLERS = self.GLOBAL_LOG_HANDLERS\n if self.GLOBAL_LOG_LEVEL:\n settings.GLOBAL_LOG_LEVEL = self.GLOBAL_LOG_LEVEL\n jogging_init()\n \n def test_exception(self):\n from views import TestException\n try:\n resp = self.client.get(\"/exception_view\")\n self.fail(\"Expected Exception\")\n except TestException:\n pass\n root_handler = logging.getLogger(\"\").handlers[0]\n\n log_obj = root_handler.msgs[0]\n self.assertEquals(log_obj.levelname, \"ERROR\")\n self.assertEquals(log_obj.name, \"root\")\n self.assertTrue(\"Traceback\" in log_obj.msg)\n","repo_name":"zain/jogging","sub_path":"jogging/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"3"} +{"seq_id":"42055460646","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\nimport yt\nfrom yt.visualization.api import Streamlines\n\n# Load the dataset\nds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n\n# Define c: the center of the box, N: the number of streamlines,\n# scale: the spatial scale of the streamlines relative to the boxsize,\n# and then pos: the random positions of the streamlines.\nc = ds.arr([0.5] * 3, \"code_length\")\nN = 30\nscale = ds.quan(15, \"kpc\").in_units(\"code_length\") # 15 kpc in code units\npos_dx = np.random.random((N, 3)) * scale - scale / 2.0\npos = c + pos_dx\n\n# Create the streamlines from these positions with the velocity fields as the\n# fields to be traced\nstreamlines = Streamlines(\n ds,\n pos,\n (\"gas\", \"velocity_x\"),\n (\"gas\", \"velocity_y\"),\n (\"gas\", \"velocity_z\"),\n 
length=1.0,\n)\nstreamlines.integrate_through_volume()\n\n# Create a 3D matplotlib figure for visualizing the streamlines\nfig = plt.figure()\nax = Axes3D(fig, auto_add_to_figure=False)\nfig.add_axes(ax)\n\n# Trace the streamlines through the volume of the 3D figure\nfor stream in streamlines.streamlines:\n stream = stream[np.all(stream != 0.0, axis=1)]\n\n # Make the colors of each stream vary continuously from blue to red\n # from low-x to high-x of the stream start position (each color is R, G, B)\n # can omit and just set streamline colors to a fixed color\n x_start_pos = ds.arr(stream[0, 0], \"code_length\")\n x_start_pos -= ds.arr(0.5, \"code_length\")\n x_start_pos /= scale\n x_start_pos += 0.5\n color = np.array([x_start_pos, 0, 1 - x_start_pos])\n\n # Plot the stream in 3D\n ax.plot3D(stream[:, 0], stream[:, 1], stream[:, 2], alpha=0.3, color=color)\n\n# Create a sphere object centered on the highest density point in the simulation\n# with radius = 1 Mpc\nsphere = ds.sphere(\"max\", (1.0, \"Mpc\"))\n\n# Identify the isodensity surface in this sphere with density = 1e-24 g/cm^3\nsurface = ds.surface(sphere, (\"gas\", \"density\"), 1e-24)\n\n# Color this isodensity surface according to the log of the temperature field\ncolors = yt.apply_colormap(np.log10(surface[(\"gas\", \"temperature\")]), cmap_name=\"hot\")\n\n# Render this surface\np3dc = Poly3DCollection(surface.triangles, linewidth=0.0)\ncolors = colors[0, :, :] / 255.0 # scale to [0,1]\ncolors[:, 3] = 0.3 # alpha = 0.3\np3dc.set_facecolors(colors)\nax.add_collection(p3dc)\n\n# Save the figure\nplt.savefig(\"streamlines_isocontour.png\")\n","repo_name":"yt-project/yt","sub_path":"doc/source/cookbook/streamlines_isocontour.py","file_name":"streamlines_isocontour.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":411,"dataset":"github-code","pt":"3"} +{"seq_id":"4309628048","text":"\nimport os\n\nfrom flask import Flask, render_template, request, redirect, url_for, flash, session\nfrom werkzeug.utils import secure_filename\n\nfrom model import Users\nfrom fy import fy\nfrom bdc import bdc\nfrom exts import db\nimport config\nfrom flask_login import \\\n login_user, login_required,\\\n LoginManager, current_user,\\\n logout_user\n\napp = Flask(__name__)\napp.config.from_object(config)\ndb.init_app(app)\napp.register_blueprint(fy)\napp.register_blueprint(bdc)\n# app.register_blueprint(app)\n\nloginmanager = LoginManager()\nloginmanager.session_protection = 'strong'\nloginmanager.login_view = 'login'\nloginmanager.init_app(app)\n\n@loginmanager.user_loader\ndef load_user(id):\n return Users.query.get(id)\n\n@app.route('/')\ndef home():\n try:\n uid = current_user.name\n if uid:\n # print(uid)\n return redirect(url_for(uid + '.hello'))\n except:\n return render_template('login.html')\n\n@app.route('/login',methods=['GET','POST'])\ndef login():\n if request.method == 'POST':\n uid = request.form['username']\n upwd = request.form['password']\n user = Users.query.filter_by(name = uid).first()\n if user and user.check_password(upwd):\n login_user(user)\n #法院0\n if user.userid == 0:\n return redirect(url_for('fy.hello'))\n if user.userid == 1:\n return redirect(url_for('bdc.hello'))\n else:\n flash('账号或者密码错误')\n return render_template('login.html')\n if request.method == 'GET':\n return redirect(url_for('home'))\n\n\n\n\n@app.route('/logout',methods=['GET','POST'])\n@login_required\ndef logout():\n logout_user()\n flash('已退出')\n return 
render_template('login.html')\n\n\n@app.route('/upload',methods=['GET','POST'])\ndef upload():\n if request.method == 'POST':\n f = request.files[\"file\"]\n upload_path = os.path.join(basedir, 'static/upload/')\n file_name = upload_path + secure_filename(f.filename)\n f.save(file_name)\n res = {\n \"code\" : 0,\n \"msg\" :\"附件上传成功\",\n \"src\" : f.filename\n\n }\n return res\n\n\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"jmj1993/fybdc","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32012848676","text":"import random\nimport socket\nimport threading\nimport time\n\nserver = socket.socket()\nport = 12349\nserver.bind(('localhost', port))\nserver.listen(5)\nconnection = True\n\n\ndef inject_error(msg):\n prob = random.randint(0, 9)\n if (prob <= 4):\n ch = msg[0:1]\n if ch == '1':\n ch = '0'\n else:\n ch = '1'\n msg = ch + msg[1::]\n\n return msg\n\n\ndef handle_connection(sender, reciever):\n msg = sender.recv(1024).decode()\n while msg != \"exit\":\n msg = inject_error(msg)\n print(msg)\n time.sleep(random.randint(0, 2))\n reciever.sendall(f'{msg}'.encode())\n acknowledgement = reciever.recv(1024)\n print(f\"Recieved {acknowledgement.decode()}\")\n sender.sendall(acknowledgement)\n print(\"Ack sent\")\n msg = sender.recv(1024).decode()\n\n reciever.send(msg.encode())\n print(\"Done\")\n sender.close()\n reciever.close()\n server.close()\n\n\nwhile connection:\n c, caddr = server.accept()\n print('Got connection from', caddr)\n r, raddr = server.accept()\n print('Got connection from', raddr)\n\n thread = threading.Thread(target=handle_connection, args=(c, r))\n thread.start()\n print(f\"Active Coonections is : {threading.active_count() - 1} \")\n\nserver.close()\n","repo_name":"Snehanjan2001/networks_sem5","sub_path":"Assignment 2/Stop_and_ Wait/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15572558592","text":"########################################################################################################################\n# MSc Artificial Intelligence - City, University of London #\n# INM707 - Garcia Plaza, Albert #\n########################################################################################################################\nimport numpy as np\nfrom scipy.interpolate import splprep, splev\nimport matplotlib.pyplot as plt\nimport os\n\n\ndef create_spline(state):\n \"\"\"\n Given a list with 4 pairs (x, y) location, create and return the cubit spline that passes along the four points and\n also along the anchored points.\n :param state: list with 4 movable points with shape [x1, y1, x2, y2, x3, y3, x4, y4].\n :return x_new: x cooordinales of the spline (201 points).\n :return y_new: y cooordinales of the spline (201 points).\n \"\"\"\n state = state.reshape((4, 2)) # reshape the input list\n state = np.array([[1., 0.], state[0], state[1], [0, 0], state[2], state[3], [1.0, -0.001]]) # include anchored pts\n tck, u = splprep(state.T, u=None, s=0.0)\n u_new = np.linspace(u.min(), u.max(), 201)\n x_new, y_new = splev(u_new, tck, der=0)\n x_new = normalize(x_new)\n\n return x_new, y_new\n\n\ndef normalize(x):\n \"\"\"\n Given the list with x coordinates of the spline, returns the same coordinates but with unitary length.\n :param x: list with x coordinates of the spline.\n 
:return x_norm: normalized list with max(x)-min(x)=1\n \"\"\"\n x_min = min(x)\n x_max = max(x)\n x_range = x_max - x_min\n x_norm = (x - x_min) / x_range\n\n return x_norm\n\n\ndef plot(x, y, title=\"Airfoil Shape\"):\n \"\"\"\n Plotting function to check the created splines. Used for the report and check the algorithm, not during the\n training process.\n \"\"\"\n plt.title(title)\n plt.xlim(-1, 2)\n plt.ylim(-1.5, 1.5)\n plt.xlabel('$x/c$')\n plt.ylabel('$y/c$')\n plt.plot(x, y)\n\n\ndef intersection_check(x, y):\n \"\"\"\n Checks if the generated spline cross itself. If so, returns True, otherwise returns False.\n :param x: x coordinates of the spline to be checked.\n :param y: y coordinates of the spline to be checked.\n :return y: boolean value -True is the spline cross itself, False otherwise.\n \"\"\"\n index = int(np.argwhere(x == min(x)))\n y_extrados = y[:index] # points belonging to the upper half of the spline\n y_intrados = y[index+1:] # points belonging to the bottom half of the spline\n\n for i in range(len(y_extrados)):\n # If at the same x coordinate, the theoretical bottom point if over the upper one, that means CROSSING curve\n if y_extrados[i] < y_intrados[i]:\n return True\n else:\n return False\n\n\ndef initial_state():\n \"\"\"\n Generates the starting shape (circle with unitary diameter).\n :return state: 8 elements list corresponding to the initial state.\n \"\"\"\n point_b = [0.75, 0.5]\n point_c = [0.25, 0.5]\n point_e = [0.25, -0.5]\n point_f = [0.75, -0.5]\n state = np.array([point_b, point_c, point_e, point_f]).flatten()\n\n return state\n\n\ndef apply_action(state, action):\n \"\"\"\n Given one state and one action, apply this last to generate the next state vector.\n :param state: current state.\n :param action: action applied (as a vector with 20 elements).\n :return state: new state after the action.\n \"\"\"\n # Obtain all 4 movable points\n point_b = state.reshape((4, 2))[0]\n point_c = state.reshape((4, 2))[1]\n point_e = state.reshape((4, 2))[2]\n point_f = state.reshape((4, 2))[3]\n\n if action in [0, 5, 10, 15]: # these action indexes mean stay at the same position\n movement = [0, 0]\n elif action in [1, 6, 11, 16]: # these action indexes mean move up\n movement = [0, 0.005]\n elif action in [2, 7, 12, 17]: # these action indexes mean move down\n movement = [0, -0.005]\n elif action in [3, 8, 13, 18]: # these action indexes mean move left\n movement = [-0.005, 0]\n else:\n movement = [0.005, 0] # other indexes mean move right\n\n # Finally, is selected which movable point will be displaced (first five elements of the action vector are referred\n # to point_b, next five points to point_c, and so on.\n if action < 5:\n point_b += movement\n elif 5 <= action < 10:\n point_c += movement\n elif 10 <= action < 15:\n point_e += movement\n else:\n point_f += movement\n\n state = np.array([point_b, point_c, point_e, point_f]).flatten() # convert the new state on the state vector shape\n\n return state\n\n\ndef check_if_exists(state):\n \"\"\"\n Check if the current state have been simulated and stored on the Numpy file.\n :param state: state to be checked.\n :return: if the state has been stored, return its lift coefficient. 
Otherwise, return False.\n \"\"\"\n if not os.path.exists('alreadySimulated.npy'): # if the Numpy file does not exist, return False (first simulation)\n return False\n\n else:\n entries = np.load('alreadySimulated.npy') # if the state has been previously stored on the file, returns it cl\n for entry in entries:\n if np.array_equal(state, entry[:-1]):\n return entry[-1]\n\n return False # if not, return also False\n\n\ndef save_simulation(state, cl):\n \"\"\"\n Store the tuple state and lift coefficient on the Numpy file.\n :param state: state vector.\n :param cl: state lift coefficient.\n \"\"\"\n new_entry = np.array([np.append(state, cl)]) # create the tuple to be stored\n if not os.path.exists('alreadySimulated.npy'): # if is the first element simulated, create the file\n np.save('alreadySimulated.npy', new_entry)\n\n else:\n # If the file already exists; open it, read it content and append the new state-cl array.\n old_entries = np.load('alreadySimulated.npy')\n new_entries = np.concatenate((old_entries, new_entry))\n np.save('alreadySimulated.npy', new_entries)\n","repo_name":"AlbertGarcia1991/MScArtificialIntelligence_AerodynamicShapeOptimization","sub_path":"DQN/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30709758876","text":"# Given an input string s, reverse the order of the words.\n# A word is defined as a sequence of non-space characters.\n# The words in s will be separated by at least one space.\n# Return a string of the words in reverse order concatenated by a single space.\n# Note that s may contain leading or trailing spaces or multiple spaces between two words.\n# The returned string should only have a single space separating the words.\n# Do not include any extra spaces.\n# Example 1:\n# Input: s = \"the sky is blue\"\n# Output: \"blue is sky the\"\n# Example 2:\n# Input: s = \" hello world \"\n# Output: \"world hello\"\n# Explanation: Your reversed string should not contain leading or trailing spaces.\n\ndef reverse_words(s: str):\n ls = s.split()\n # ls.reverse()\n # res = \"\"\n # for word in ls:\n # if word != ls[-1]:\n # res = res + word + \" \"\n # else:\n # res = res + word\n # return res\n rev_ls = ls[::-1]\n res = \" \".join(rev_ls)\n return res\n\n\nst = \" hello world \"\n\nprint(reverse_words(st))\n\n","repo_name":"pradeep-automation/Python-Practice","sub_path":"reverse_words.py","file_name":"reverse_words.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69931286481","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nrule = 60 # Number between 0 and 255\nsize = 100 # Number of cells\nsteps = 100 # Number of generations\n\n\ndef do_step(x, rb):\n \"\"\"Calculate a step of an cellular automaton.\"\"\"\n # The columns contain the L, C, R values of all cells.\n y = np.vstack((np.roll(x, 1), x, np.roll(x, -1))).astype(np.int8)\n # We get the LCR pattern numbers between 0 and 7.\n z = np.sum(y * np.array([[4], [2], [1]]), axis=0).astype(np.int8)\n # We get the patterns given by the rule.\n return rb[7 - z]\n\n\ndef initialize(r, sz, st):\n \"\"\"Create an elementary cellular automaton\"\"\"\n # Compute the binary representation of the rule\n rule_bin = np.array([int(b) for b in np.binary_repr(r, 8)], dtype=np.int8)\n # Prepare matrix\n x = np.zeros((st, sz), dtype=np.int8)\n # Random initial state (1st row)\n x[0, 
:] = np.random.rand(sz) < .5\n # Simulate by applying step function\n for i in range(st - 1):\n x[i + 1, :] = do_step(x[i, :], rule_bin)\n return x\n\n\nif __name__ == '__main__':\n axes = plt.gca()\n axes.imshow(initialize(rule, size, steps), interpolation='none', cmap=plt.cm.binary)\n axes.set_axis_off()\n axes.set_title(f\"Rule: {rule}\")\n\n plt.show()\n","repo_name":"ChrnyaevEK/cell-automa","sub_path":"cellular_automaton.py","file_name":"cellular_automaton.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1539261689","text":"# -*- coding: UTF-8 -*-\nimport sys\nfrom unittest import TestCase\n\nsys.path.append('../../../src')\nfrom core.tools.station import Station\n\n\nclass TestStation(TestCase):\n\n def test_get_station_value_by_key(self):\n station = Station()\n value = station.get_stations_value_by_key(\"VAP\")\n self.assertIsNotNone(value)\n self.assertEqual(value, \"北京北\")\n\n def test_get_station_key_by_values(self):\n station = Station()\n key = station.get_station_key_by_values(\"北京北\")\n self.assertIsNotNone(key)\n self.assertEqual(key, \"VAP\")\n\n def test_request_stations(self):\n stations = Station.request_stations()\n self.assertIsNotNone(stations)\n\n def test_has_this_station(self):\n station = Station()\n exist = station.has_this_station(\"上海\")\n self.assertTrue(exist)\n not_exist = station.has_this_station(\"不知道\")\n self.assertFalse(not_exist)\n","repo_name":"EasonAndLily/play_with_12306","sub_path":"src/test/tools/test_station.py","file_name":"test_station.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"12828242207","text":"# This file is part of MetaTB. MetaTB is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General\n# Public License as published by the Free Software Foundation, either\n# version 3 of the License, or (at your option) any later version.\n#\n# MetaTB is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public\n# License for more details.\n#\n# A copy of the GNU General Public License should be available\n# alongside this source in a file named gpl-3.0.txt. 
If not,\n# see .\n\nimport numpy as np\nfrom .pythtb import * \nfrom .lattice import Lattice\nimport matplotlib.pyplot as plt\n\nclass TBmodel:\n def __init__(self,lattice):\n self.lattice = lattice\n self.model = None\n self.subspacenum = lattice.getsubspacenum()\n self.graph = None\n\n def getpositions(self):\n return self.lattice.getpositions()\n\n def build(self,onsite=0,hopping=1.0,method='exp',width=1.0,tolerance=0.0,eps=1e-3):\n lats = self.lattice.getlattices()\n positions = self.lattice.getpositions()\n sites = self.lattice.getsites()\n graph = list()\n\n if len(lats)==1:\n lat = lats[0]\n orb = sites\n self.model=tb_model(2,2,lat,orb)\n\n lat = np.array(lat)\n N = len(positions)\n for i in range(N):\n for j in range(i,N):\n positioni = positions[i,:]\n positionj0 = positions[j,:]\n values = list()\n directions = list()\n for m in range(-1,2):\n for n in range(-1,2):\n if (i==j) and (m==0) and (n==0):\n continue\n positionj = positionj0 + m*lat[0] + n*lat[1]\n if method == 'guassian':\n t = hopping*np.exp(-(np.sqrt(np.sum(np.square(positioni-positionj)))/width)**2)\n else:\n t = hopping*np.exp(-(np.sqrt(np.sum(np.square(positioni-positionj)))/width))\n\n if (len(values) == 0) or (t - values[0] > hopping*eps) or (abs(t-values[0]) 0) and (t - values[0] > hopping*eps):\n values = list()\n directions = list()\n if [-m,-n] not in directions:\n values.append(t)\n directions.append([m,n])\n \n for m in range(len(values)):\n if values[m] < tolerance:\n continue\n self.model.set_hop(values[m], i, j, directions[m])\n mi,ni = directions[m]\n graph.append([positioni,positionj0 + mi*lat[0] + ni*lat[1],values[m]])\n #graph.append([positioni,positionj0,value])\n\n else:\n raise Exception(\"Not realize it yet!\")\n\n self.graph = graph\n return graph\n\n def bandstructure(self,ks,pointsnum=100):\n (k_vec,k_dist,k_node)=self.model.k_path(ks, pointsnum, report=False)\n evals=self.model.solve_all(k_vec)\n return k_vec,k_dist,k_node,evals\n\n def plotgraph(self,span=5,graph=None,filename=None,fileformat='pdf'):\n if graph is None:\n graph = self.graph\n\n fig = plt.figure()\n positions = self.lattice.supercell(span,span)\n\n lats = self.lattice.getlattices()\n if len(lats)==1:\n lat = np.array(lats[0])\n style = 'y--'\n xs = [0,lat[0,0]]\n ys = [0,lat[0,1]]\n plt.plot(xs,ys,style)\n xs = [0,lat[1,0]]\n ys = [0,lat[1,1]]\n plt.plot(xs,ys,style)\n xs = [lat[0,0],lat[0,0]+lat[1,0]]\n ys = [lat[0,1],lat[0,1]+lat[1,1]]\n plt.plot(xs,ys,style)\n xs = [lat[1,0],lat[0,0]+lat[1,0]]\n ys = [lat[1,1],lat[0,1]+lat[1,1]]\n plt.plot(xs,ys,style)\n\n plt.scatter(positions[:,0],positions[:,1],s=100)\n plt.xlim([np.min(positions[:,0]),np.max(positions[:,0])])\n plt.ylim([np.min(positions[:,1]),np.max(positions[:,1])])\n ls = list()\n for i in range(len(graph)):\n start,end,value = np.array(graph[i])\n xs = [start[0],end[0]]\n ys = [start[1],end[1]] \n plt.text(np.mean(xs), np.mean(ys), np.around(value,decimals=2), fontsize=8,color='m')\n plt.plot(xs,ys,c='r',alpha=value)\n\n cellpositions = self.lattice.getpositions()\n plt.scatter(cellpositions[:,0],cellpositions[:,1],c='black',s=100)\n if filename is None:\n filename = 'model'\n plt.savefig(filename+'.'+fileformat)\n\n def calcbandgap(self,nk=350,tolerance=1e-3):\n diracstatus = False\n diracenergies = list()\n degeneratepoints = list()\n\n ks = [[0,0],[0.5,0],[1.0,0.0],[0.5,0.5],[0,1],[0,0.5],[0,0],[0.5,0.5]]\n (k_vec,k_dist,k_node) = self.model.k_path(ks,nk,report=False)\n bands = self.model.solve_all(k_vec)\n N = np.size(bands,axis=0)\n maxvalues = np.zeros(N)\n 
minvalues = np.zeros(N)\n for i in range(N):\n maxvalues[i] = np.max(bands[i,:])\n minvalues[i] = np.min(bands[i,:])\n\n # Find Dirac point\n for i in range(N):\n for j in range(i+1,N):\n if (abs(maxvalues[i] - minvalues[j]) 0:\n startenergy = possets[start]*tolerance + minvalue\n endenergy = possets[end]*tolerance + minvalue\n center = (endenergy + startenergy)/2\n width = endenergy-startenergy\n bandgaps.append([center,width])\n\n bandgaps = np.array(bandgaps)\n return bandgaps,diracstatus,diracenergies","repo_name":"longyangking/MetaTB","sub_path":"metatb/tbmodel.py","file_name":"tbmodel.py","file_ext":"py","file_size_in_byte":7568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73948765201","text":"from flask import Flask\nfrom flask import render_template, make_response\nfrom flask import Response, request, jsonify, redirect\nfrom quiz_data import *\nimport uuid\nimport random\nimport tutorial_data\nimport time\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n\nquiz_questions = []\nuser_answers = ['X'] * 10\nmarked_questions = []\nend_time = 0.0\nquiz_duration = 10\n\n# data\ndata = tutorial_data.tutorial\ntutorial_collection_id = []\nintro_progress = 0\nregular_progress = set()\nwarn_progress = set()\n\nintro_total = 1.0\nregular_total = 18.0\nwarn_total = 25.0\n\n# ROUTES\n\n@app.route('/')\ndef index():\n return render_template('ud-index.html', title = '')\n\n@app.route('/mode')\ndef mode():\n return render_template('ud-mode.html', title = 'Select Mode')\n\n@app.route('/selectChapter')\ndef select():\n global intro_progress\n global regular_progress\n global warn_progress\n\n intro = 100 if intro_progress else 0\n regular = int(len(regular_progress)/regular_total * 100)\n warn = int(len(warn_progress)/warn_total * 100)\n return render_template('ud-selectChapter.html', intro=intro, regular=regular, warn=warn)\n\n@app.route('/intro')\ndef intro():\n return render_template('ud-intro.html')\n\n@app.route('/quiz/')\ndef generateQuiz(id = None):\n global quiz_questions\n global user_answers\n global marked_questions\n global end_time\n\n quiz_questions = random.sample(quiz_data, 10)\n user_answers = ['X'] * 10\n marked_questions = []\n end_time = time.time() + quiz_duration * 60\n\n return redirect(\"/quiz/1\", code=302)\n\n@app.route('/quiz/')\ndef quiz(num = None):\n try:\n num = int(num)\n except:\n return redirect(\"/quiz/1\", code=302)\n\n if quiz_questions == []:\n return redirect(\"/quiz/\", code=302)\n\n if num < 1 or num > 10:\n return redirect(\"/quiz/1\", code=302)\n\n return render_template('ud-quiz.html', title = 'Quiz', num = num, question = quiz_questions[num - 1], user_answers = user_answers, marked_questions = marked_questions, end_time = end_time)\n\n\n@app.route('/quizresult')\ndef quizresult():\n return render_template('ud-quizResult.html', title = 'Quiz', quiz_questions = quiz_questions, user_answers = user_answers)\n\n\n@app.route('/mark', methods=['POST'])\ndef mark():\n global marked_questions\n\n json_data = request.get_json()\n\n if json_data['operation'] == 'mark':\n marked_questions.append(int(json_data['num']))\n else:\n marked_questions.remove(int(json_data['num']))\n\n return jsonify({'status': 200, 'marked_questions': marked_questions})\n\n\n@app.route('/answer', methods=['POST'])\ndef answer():\n global user_answers\n\n json_data = request.get_json()\n user_answers[json_data['num'] - 1] = json_data['answer']\n\n return jsonify({'status': 200, 'user_answers': 
user_answers})\n\n@app.route('/learn/')\ndef learn(num = None):\n try:\n num = int(num)\n except:\n return redirect(\"/learn/1\", code=302)\n\n marked = False\n if num in tutorial_collection_id:\n marked = True\n\n if num < 21:\n return render_template('ud-learn.html', info=data[\"regulatory\"][num], num = num, marked=marked)\n\n return render_template('ud-learn.html', info=data[\"warning\"][num], num = num, marked=marked)\n\n@app.route('/mark_learn', methods=['GET', 'POST'])\ndef mark_learn():\n global tutorial_collection_id\n mark_id= request.get_json()\n mark = True\n\n if mark_id not in tutorial_collection_id:\n tutorial_collection_id.append(mark_id)\n else:\n tutorial_collection_id.remove(mark_id)\n mark = False\n\n return jsonify(mark=mark)\n\n@app.route('/complete_learn', methods=['POST'])\ndef complete_learn():\n global regular_progress\n global warn_progress\n\n id = request.get_json()\n \n if id > 20:\n warn_progress.add(id)\n else:\n regular_progress.add(id)\n\n return make_response(jsonify(message = \"success\"),200)\n\n@app.route('/complete_intro', methods=['GET','POST'])\ndef complete_intro():\n global intro_progress\n\n complete = request.get_json()\n intro_progress = complete\n\n return make_response(jsonify(message = \"success\"),200)\n\n@app.route('/learn/tutorialCollection')\ndef tutorial_collection():\n '''\n Route for tutorial collection page.\n Data entries are supposed to stored as int value represent the sign id.\n '''\n marked = []\n for id in tutorial_collection_id:\n if id<=20: # regulatory signs\n marked.append(data[\"regulatory\"][id])\n else: # warning signs\n marked.append(data[\"warning\"][id])\n\n return render_template('ud-tutorialCollection.html', data=marked)\n\n# Binding\nif __name__ == '__main__':\n app.run(debug = True, host=\"0.0.0.0\", port=\"2022\")\n","repo_name":"lyz9518/road_sign_tutorial","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"11596518013","text":"import config as cfg\n\n# Initialize boarding house\ndef initialize_boarding_house():\n for cls in cfg.classes:\n for food_pref in cfg.food_preferences:\n boarding_house = cls.upper() + food_pref.upper()\n cfg.boarding_house_allocation[boarding_house] = list()\n cfg.boarding_house_allocation[cfg.NOT_ALLOCATED] = list()\n\n# for getting each hostel capacity\ndef get_each_hostel_capacity():\n try:\n total_capacity = int(input(\"Please Insert Total Boarding Capacity: \"))\n if total_capacity>=0 and total_capacity%4 == 0: # to validate total capacity\n return int(total_capacity/4)\n else:\n print(\"Please provide initial capacity in multiplication of 4 and a positive number\")\n return get_each_hostel_capacity()\n except Exception as e:\n print(\"Please provide capacity in integer\")\n return get_each_hostel_capacity() # retrying until user will not give correct capacity value\n\n# validate and queue data received from user\ndef validate_and_queue_registration_data(registration_data):\n student_data = registration_data.split()\n if len(student_data) != 4:\n print(\"Number of arguments for registration is not correct, \"\n \"Please give input in mentioned format - reg roll_number class food_preference\")\n return\n if student_data[0].lower() != 'reg':\n print(\"Registration command is wrong please use (reg or Reg) for registration\")\n return\n try:\n if int(student_data[1]) > 9999 or int(student_data[1]) < 1:\n print(\"Roll number should be in range 
of 1 to 9999 and integer value only\")\n return\n except Exception as e:\n print(\"Roll number is not Integer type, Roll number should be in range of 1 to 9999\")\n return\n if student_data[2].lower() not in cfg.classes:\n print(\"Class Name should be A or B (case insensitive)\")\n return\n\n if student_data[3].lower() not in cfg.food_preferences:\n print(\"Food preferences should be V or NV (case insensitive)\")\n return\n\n if student_data[1] not in cfg.roll_number_list:\n cfg.registration_queue.append(student_data)\n cfg.roll_number_list.append(student_data[1])\n else:\n print(\"Student is already registered\")\n\n# receive input from user until user entered exit or fin\ndef get_input():\n print(\"Follow given format for student registration (reg roll_number class food_preference)\")\n # use space as a separator between attributes\n registration_data = input(\"Please Insert New Student Record: \")\n while registration_data not in ['fin', 'exit']:\n validate_and_queue_registration_data(registration_data)\n registration_data = input(\"Please Insert New Student Record: \")\n\n# to check boarding house is availability\ndef check_availability(boarding_house_capacity, boarding_house):\n if boarding_house_capacity == 0:\n return False\n if len(cfg.boarding_house_allocation.get(boarding_house)) < boarding_house_capacity:\n return True\n else:\n return False\n\n# returns boarding house name\ndef get_boarding_house_name(student_record):\n return student_record[2].upper() + student_record[3].upper()\n\n# pick students records from queue and allocate boarding house\ndef assign_boarding_house(boarding_house_capacity):\n for student_record in cfg.registration_queue:\n boarding_house = get_boarding_house_name(student_record)\n if check_availability(boarding_house_capacity, boarding_house):\n allocation_list = cfg.boarding_house_allocation.get(boarding_house)\n allocation_list.append(int(student_record[1]))\n cfg.boarding_house_allocation[boarding_house] = allocation_list\n\n else:\n allocation_list = cfg.boarding_house_allocation.get(cfg.NOT_ALLOCATED)\n allocation_list.append(int(student_record[1]))\n cfg.boarding_house_allocation[cfg.NOT_ALLOCATED] = allocation_list\n\n# for printing boarding allocation details\ndef show_boarding_allocation():\n print(\"\\nBoarding House Allocations : -\")\n for boarding_house, students_list in cfg.boarding_house_allocation.items():\n print(boarding_house,\":\",students_list)\n\n# entry point for boarding house allocation\ndef main():\n boarding_house_capacity = get_each_hostel_capacity()\n get_input()\n initialize_boarding_house()\n assign_boarding_house(boarding_house_capacity)\n show_boarding_allocation()\n\n# invoking main method to start the boarding process\nif __name__ == \"__main__\":\n main()\n","repo_name":"kapilkumar6305/BoardingHouseAllocation","sub_path":"HouseAllocator.py","file_name":"HouseAllocator.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34200259922","text":"from utils import timed\n\nimport re\n\n\nwith open('inputs/2020-19.txt') as f:\n lines = f.read().splitlines()\n rules = lines[:lines.index('')]\n msgs = lines[lines.index('') + 1:]\n\n\ndef parse_rules(rules, part):\n rules_dict = {}\n for r in rules:\n if part == 2:\n if r == '8: 42':\n r = '8: 42 | 42 8'\n elif r == '11: 42 31':\n r = '11: 42 31 | 42 11 31'\n rule_num, required_match = r.split(': ')\n parsed_match = []\n if '\"' in required_match:\n 
parsed_match.append([required_match.strip('\"')])\n else:\n matches = required_match.split(' | ')\n for match in matches:\n parsed_match.append(list(map(int, match.split(' '))))\n\n rules_dict[int(rule_num)] = parsed_match\n\n return rules_dict\n\n\nREPEATED_RECURSION = 0\n\ndef get_rule_pattern(rules, num):\n global REPEATED_RECURSION\n options = []\n for option in rules[num]:\n match = []\n for sub_rule in option:\n if sub_rule in ('a', 'b'):\n match.append(sub_rule)\n else:\n if sub_rule == num:\n REPEATED_RECURSION += 1\n if REPEATED_RECURSION > 5: # This can be adjusted until the output stops changing or the recursion limit gets hit\n match.append('')\n REPEATED_RECURSION = 0\n else:\n match.append(get_rule_pattern(rules, sub_rule))\n\n\n options.append(''.join(match))\n\n if len(options) == 1:\n return fr'{options[0]}'\n return fr\"({'|'.join(options)})\"\n\n@timed\ndef part_one(rules, msgs):\n rules = parse_rules(rules, 1)\n re_pattern = get_rule_pattern(rules, 0)\n\n valid_msgs = 0\n for msg in msgs:\n if re.fullmatch(re_pattern, msg):\n valid_msgs += 1\n\n return valid_msgs\n\n@timed\ndef part_two(rules, msgs):\n rules = parse_rules(rules, 2)\n re_pattern = get_rule_pattern(rules, 0)\n\n valid_msgs = 0\n for msg in msgs:\n if re.fullmatch(re_pattern, msg):\n valid_msgs += 1\n\n return valid_msgs\n\n\nprint(part_one(rules, msgs))\nprint(part_two(rules, msgs))\n","repo_name":"SharpBit/adventofcode","sub_path":"2020/2020-19.py","file_name":"2020-19.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5190546631","text":"'''\nCreated on Feb 2, 2020\n\n@author: Lutaaya\n'''\nimport logging\nlogging.basicConfig(level=logging.DEBUG, filename='pylog.log', filemode='a', format='%(asctime)s-%(process)d-%(name)s-%(levelname)s-%(message)s')\nlogging.warning(\"This is a warning message\")\n\n\ninputString = input('')\n\nf=open(\"discreet.txt\",\"w+\")\nf.write(inputString)\nlogging.info(\"discreet logging completed\")\nf.close()\n\n","repo_name":"BoobLutos/zimba","sub_path":"pyprojects/discreetlog.py","file_name":"discreetlog.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17759052555","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n if not root:\n return None\n \n res, q = [], []\n q.append(root)\n while q:\n level = []\n for i in range(len(q)):\n node = q.pop(0)\n level.append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n res.append(level)\n return res\n\n\"\"\"\nBINARY TREE\n- level order traversal means bfs\n- to do bfs use queue to store nodes to visit\n- if nodes have child continue to add to queue\n- keep going while there are nodes in queue\n- each level is defined by nodes in queue at each iteration\n\n- have result list\n- have outer while loop that continues as long as there are nodes in queue\n- create level list\n- have inner for loop that iterates len(queue) amount of times\n - dequeue node and add to level list\n - add nodes children to queue\n- add level list to 
result\n\"\"\"","repo_name":"tilwe28/leetcode","sub_path":"0102-binary-tree-level-order-traversal/0102-binary-tree-level-order-traversal.py","file_name":"0102-binary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17323186383","text":"from collections.abc import Iterable\nfrom email.mime.image import MIMEImage\nfrom pathlib import Path\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models import Q\nfrom django.template.loader import get_template\nfrom premailer import transform\n\nfrom trcustoms.celery import app as celery_app\nfrom trcustoms.levels.models import Level\nfrom trcustoms.reviews.models import Review\nfrom trcustoms.users.models import User\nfrom trcustoms.walkthroughs.models import Walkthrough\n\nFROM = \"admin@trcustoms.org\"\nPREFIX = \"[TRCustoms]\"\nSTATIC_DIR = Path(__file__).parent / \"common\" / \"static\"\n\n\ndef get_level_authors(level: Level) -> Iterable[tuple[str, str]]:\n q_obj = Q(uploaded_levels=level)\n if not level.is_pending_approval:\n q_obj |= Q(authored_levels=level)\n\n for item in (\n User.objects.filter(q_obj)\n .values(\"username\", \"email\")\n .distinct(\"email\")\n ):\n username = item[\"username\"]\n email = item[\"email\"]\n yield username, email\n\n\n@celery_app.task(autoretry_for=(Exception,), retry_backoff=2)\ndef send_mail(\n template_name: str,\n subject: str,\n recipients: list[str],\n context: dict[str, str],\n) -> None:\n plaintext = get_template(f\"{template_name}.txt\")\n html = get_template(f\"{template_name}.html\")\n\n logo_attachment_id = 100\n\n context.update(logo_url=f\"cid:{logo_attachment_id}\")\n\n msg_img = MIMEImage((STATIC_DIR / \"mail_logo.png\").read_bytes())\n msg_img.add_header(\"Content-ID\", f\"<{logo_attachment_id}>\")\n\n text_content = plaintext.render(context)\n html_content = transform(html.render(context))\n msg = EmailMultiAlternatives(subject, text_content, FROM, recipients)\n msg.attach_alternative(html_content, \"text/html\")\n msg.attach(msg_img)\n msg.send()\n\n\ndef send_email_confirmation_mail(user: User) -> None:\n if not user.email:\n return\n token = user.generate_email_token()\n link = f\"{settings.HOST_SITE}/email-confirmation/{token}\"\n send_mail.delay(\n template_name=\"email_confirmation\",\n subject=f\"{PREFIX} Confirm your registration\",\n recipients=[user.email],\n context={\n \"username\": user.username,\n \"link\": link,\n },\n )\n\n\ndef send_password_reset_mail(user: User) -> None:\n if not user.email:\n return\n token = user.generate_password_reset_token()\n link = f\"{settings.HOST_SITE}/password-reset/{token}\"\n send_mail.delay(\n template_name=\"password_reset\",\n subject=f\"{PREFIX} Password reset\",\n recipients=[user.email],\n context={\n \"username\": user.username,\n \"link\": link,\n },\n )\n\n\ndef send_welcome_mail(user: User) -> None:\n if not user.email:\n return\n send_mail.delay(\n template_name=\"welcome\",\n subject=f\"{PREFIX} Welcome to TRCustoms.org\",\n recipients=[user.email],\n context={\n \"username\": user.username,\n },\n )\n\n\ndef send_registration_rejection_mail(user: User, reason: str) -> None:\n if not user.email:\n return\n send_mail.delay(\n template_name=\"registration_rejection\",\n subject=f\"{PREFIX} Registration rejected\",\n recipients=[user.email],\n context={\n \"username\": user.username,\n \"reason\": reason,\n },\n )\n\n\ndef send_ban_mail(user: User, 
reason: str) -> None:\n if not user.email:\n return\n send_mail.delay(\n template_name=\"ban\",\n subject=f\"{PREFIX} Account banned\",\n recipients=[user.email],\n context={\n \"username\": user.username,\n \"reason\": reason,\n },\n )\n\n\ndef send_unban_mail(user: User) -> None:\n if not user.email:\n return\n send_mail.delay(\n template_name=\"unban\",\n subject=f\"{PREFIX} Account unbanned\",\n recipients=[user.email],\n context={\"username\": user.username},\n )\n\n\ndef send_level_submitted_mail(level: Level) -> None:\n if not level.uploader or not level.uploader.email:\n return\n link = f\"{settings.HOST_SITE}/levels/{level.id}\"\n send_mail.delay(\n template_name=\"level_submission\",\n subject=f\"{PREFIX} Level submitted\",\n recipients=[level.uploader.email],\n context={\n \"username\": level.uploader.username,\n \"level_name\": level.name,\n \"link\": link,\n },\n )\n\n\ndef send_level_approved_mail(level: Level) -> None:\n link = f\"{settings.HOST_SITE}/levels/{level.id}\"\n for username, email in get_level_authors(level):\n send_mail.delay(\n template_name=\"level_approval\",\n subject=f\"{PREFIX} Level approved\",\n recipients=[email],\n context={\n \"username\": username,\n \"level_name\": level.name,\n \"link\": link,\n },\n )\n\n\ndef send_level_rejected_mail(level: Level, reason: str) -> None:\n for username, email in get_level_authors(level):\n link = f\"{settings.HOST_SITE}/levels/{level.id}\"\n send_mail.delay(\n template_name=\"level_rejection\",\n subject=f\"{PREFIX} Level rejected\",\n recipients=[email],\n context={\n \"username\": username,\n \"level_name\": level.name,\n \"reason\": reason,\n \"link\": link,\n },\n )\n\n\ndef send_review_submission_mail(review: Review) -> None:\n link = f\"{settings.HOST_SITE}/levels/{review.level.id}\"\n for username, email in get_level_authors(review.level):\n send_mail.delay(\n template_name=\"review_submission\",\n subject=f\"{PREFIX} New review\",\n recipients=[email],\n context={\n \"username\": username,\n \"reviewer_username\": review.author.username,\n \"level_name\": review.level.name,\n \"link\": link,\n },\n )\n\n\ndef send_review_update_mail(review: Review) -> None:\n link = f\"{settings.HOST_SITE}/levels/{review.level.id}\"\n for username, email in get_level_authors(review.level):\n send_mail.delay(\n template_name=\"review_update\",\n subject=f\"{PREFIX} Review edited\",\n recipients=[email],\n context={\n \"username\": username,\n \"reviewer_username\": review.author.username,\n \"level_name\": review.level.name,\n \"link\": link,\n },\n )\n\n\ndef send_walkthrough_approved_mail(walkthrough: Walkthrough) -> None:\n link = f\"{settings.HOST_SITE}/walkthroughs/{walkthrough.id}\"\n if walkthrough.author:\n send_mail.delay(\n template_name=\"walkthrough_approval\",\n subject=f\"{PREFIX} Walkthrough approved\",\n recipients=[walkthrough.author.email],\n context={\n \"username\": walkthrough.author.username,\n \"level_name\": walkthrough.level.name,\n \"link\": link,\n },\n )\n\n\ndef send_walkthrough_rejected_mail(\n walkthrough: Walkthrough, reason: str\n) -> None:\n link = f\"{settings.HOST_SITE}/walkthroughs/{walkthrough.id}\"\n if walkthrough.author:\n send_mail.delay(\n template_name=\"walkthrough_rejection\",\n subject=f\"{PREFIX} Walkthrough rejected\",\n recipients=[walkthrough.author.email],\n context={\n \"username\": walkthrough.author.username,\n \"level_name\": walkthrough.level.name,\n \"reason\": reason,\n \"link\": link,\n },\n )\n\n\ndef send_walkthrough_submission_mail(walkthrough: Walkthrough) -> 
None:\n link = f\"{settings.HOST_SITE}/walkthroughs/{walkthrough.id}\"\n for username, email in get_level_authors(walkthrough.level):\n send_mail.delay(\n template_name=\"walkthrough_submission\",\n subject=f\"{PREFIX} New walkthrough\",\n recipients=[email],\n context={\n \"username\": username,\n \"author_username\": walkthrough.author.username,\n \"level_name\": walkthrough.level.name,\n \"link\": link,\n },\n )\n\n\ndef send_walkthrough_update_mail(walkthrough: Walkthrough) -> None:\n link = f\"{settings.HOST_SITE}/walkthroughs/{walkthrough.id}\"\n for username, email in get_level_authors(walkthrough.level):\n send_mail.delay(\n template_name=\"walkthrough_update\",\n subject=f\"{PREFIX} Walkthrough edited\",\n recipients=[email],\n context={\n \"username\": username,\n \"author_username\": walkthrough.author.username,\n \"level_name\": walkthrough.level.name,\n \"link\": link,\n },\n )\n","repo_name":"rr-/TRCustoms","sub_path":"backend/trcustoms/mails.py","file_name":"mails.py","file_ext":"py","file_size_in_byte":8817,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"21537612159","text":"import os\nimport numpy as np \n\n\nrootdir=os.path.join('/content/drive/My Drive/Project/logAnlysis/Annotation/')\nwrite_path=os.path.join('/content/drive/My Drive/Project/logAnlysis/convertedTXTlabel/')\n\nfor (dirpath,dirnames,filenames) in os.walk(rootdir):\n for filename in filenames:\n if os.path.splitext(filename)[1]=='.txt':\n with open(os.path.join(rootdir, filename),'r') as readTxt:\n print(\"\\nNow read \" + filename)\n with open(os.path.join(write_path, filename), 'w') as writeTxt:\n for line in readTxt.readlines():\n if 'Image size' in line:\n # print(line.split()[8:13])\n width = int(line.split()[8])\n height = int(line.split()[10])\n # print(\"hello world\")\n print(\"size Founded! 
--> \" + str(width) + \" \" + str(height))\n if 'Xmin, Ymin' in line:\n print(\"coordinate line Founded:\")\n dataLine = line.split()[12:17]\n xMin = int(dataLine[0].split('(')[1].split(',')[0])\n yMin = int(dataLine[1].split(')')[0])\n xMax = int(dataLine[3].split('(')[1].split(',')[0])\n yMax = int(dataLine[4].split(')')[0])\n print(xMin, yMin, xMax, yMax)\n writeTxt.write(str(5) + \" \" + str(((xMax-xMin)/2-1)/width) + \" \" + str(((yMax-yMin)/2-1)/height) + \" \" + str((xMax-xMin)/width) + \" \" + str((yMax-yMin)/height) + \"\\n\")\n","repo_name":"newjoy2018/MA_YOLOv4","sub_path":"Downloads/script/FudanPed_dataset_annotationConvert.py","file_name":"FudanPed_dataset_annotationConvert.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34666116814","text":"import os, unittest\n\nfrom pxr import Plug, Sdf\n\ndef DetachedLayerRulesEnvVarIsSet():\n return ('SDF_LAYER_INCLUDE_DETACHED' in os.environ or\n 'SDF_LAYER_EXCLUDE_DETACHED' in os.environ)\n\nclass TestSdfLayer(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # Register dso plugins.\n testRoot = os.path.join(os.path.dirname(__file__), 'SdfPlugins')\n testPluginsDso = testRoot + '/lib'\n testPluginsDsoSearch = testPluginsDso + '/*/Resources/'\n Plug.Registry().RegisterPlugins(testPluginsDsoSearch)\n\n def setUp(self):\n if not DetachedLayerRulesEnvVarIsSet():\n Sdf.Layer.SetDetachedLayerRules(Sdf.Layer.DetachedLayerRules())\n\n @unittest.skipIf(DetachedLayerRulesEnvVarIsSet(), 'rules env var is set')\n def test_DetachedLayerRules(self):\n rules = Sdf.Layer.DetachedLayerRules()\n self.assertFalse(rules.IsIncluded('some_layer.sdf'))\n self.assertFalse(rules.IsIncluded('some_layer_2.sdf'))\n self.assertFalse(rules.IsIncluded('other_layer.sdf'))\n\n rules.Include(['some_layer'])\n self.assertFalse(rules.IncludedAll())\n self.assertEqual(rules.GetIncluded(), ['some_layer'])\n self.assertEqual(rules.GetExcluded(), [])\n self.assertTrue(rules.IsIncluded('some_layer.sdf'))\n self.assertTrue(rules.IsIncluded('some_layer_2.sdf'))\n self.assertFalse(rules.IsIncluded('other_layer.sdf'))\n\n rules.Exclude(['some_layer_2'])\n self.assertFalse(rules.IncludedAll())\n self.assertEqual(rules.GetIncluded(), ['some_layer'])\n self.assertEqual(rules.GetExcluded(), ['some_layer_2'])\n self.assertTrue(rules.IsIncluded('some_layer.sdf'))\n self.assertFalse(rules.IsIncluded('some_layer_2.sdf'))\n self.assertFalse(rules.IsIncluded('other_layer.sdf'))\n\n rules.IncludeAll()\n self.assertTrue(rules.IncludedAll())\n self.assertEqual(rules.GetIncluded(), [])\n self.assertEqual(rules.GetExcluded(), ['some_layer_2'])\n self.assertTrue(rules.IsIncluded('some_layer.sdf'))\n self.assertFalse(rules.IsIncluded('some_layer_2.sdf'))\n self.assertTrue(rules.IsIncluded('other_layer.sdf'))\n\n @unittest.skipIf(DetachedLayerRulesEnvVarIsSet(), 'rules env var is set')\n def test_SettingRulesAffectsNewLayers(self):\n # Create a test layer to open. 
We don't care what's inside.\n Sdf.Layer.CreateAnonymous().Export('open_layer.test_streaming_format')\n\n # Creating a new layer or opening an existing layer that uses our test\n # streaming file format should produce a non-detached layer, since an\n # empty rules is currently set.\n l = Sdf.Layer.CreateNew('new_layer.test_streaming_format')\n self.assertTrue(l)\n self.assertFalse(l.IsDetached())\n del l\n\n l = Sdf.Layer.FindOrOpen('open_layer.test_streaming_format')\n self.assertTrue(l)\n self.assertFalse(l.IsDetached())\n del l\n\n # If a new layer that uses our test streaming file format and whose\n # identifier is in the rules is created or opened, we should get a\n # detached layer instead.\n Sdf.Layer.SetDetachedLayerRules(\n Sdf.Layer.DetachedLayerRules().Include(\n ['new_layer.test_streaming_format',\n 'open_layer.test_streaming_format']))\n\n l = Sdf.Layer.CreateNew('new_layer.test_streaming_format')\n self.assertTrue(l)\n self.assertTrue(l.IsDetached())\n del l\n\n l = Sdf.Layer.FindOrOpen('open_layer.test_streaming_format')\n self.assertTrue(l)\n self.assertTrue(l.IsDetached())\n del l\n\n @unittest.skipIf(DetachedLayerRulesEnvVarIsSet(), 'rules env var is set')\n def test_SettingRulesAffectsOpenLayers(self):\n # Create a test layer to open. We don't care what's inside.\n Sdf.Layer.CreateAnonymous().Export('open_layer.test_streaming_format')\n\n # With an empty rules set, this layer should open as not detached.\n l = Sdf.Layer.FindOrOpen('open_layer.test_streaming_format')\n self.assertTrue(l)\n self.assertFalse(l.IsDetached())\n\n # Setting rules that includes the layer should reload it so that it\n # is now detached.\n Sdf.Layer.SetDetachedLayerRules(\n Sdf.Layer.DetachedLayerRules().Include(\n ['open_layer.test_streaming_format']))\n self.assertTrue(l.IsDetached())\n\n # Excluding the layer from the rules should reload it again so that it\n # goes back to non-detached.\n Sdf.Layer.SetDetachedLayerRules(\n Sdf.Layer.DetachedLayerRules()\n .IncludeAll()\n .Exclude(['open_layer.test_streaming_format']))\n self.assertFalse(l.IsDetached())\n\n @unittest.skipIf(not DetachedLayerRulesEnvVarIsSet(),\n 'rules env var is not set')\n def test_InitializingRulesWithEnvVar(self):\n included = os.environ.get('SDF_LAYER_INCLUDE_DETACHED')\n if included:\n included = included.split(',')\n else:\n included = []\n\n excluded = os.environ.get('SDF_LAYER_EXCLUDE_DETACHED')\n if excluded:\n excluded = excluded.split(',')\n else:\n excluded = []\n\n rules = Sdf.Layer.GetDetachedLayerRules()\n \n if '*' in included:\n self.assertTrue(rules.IncludedAll())\n self.assertEqual(rules.GetIncluded(), [])\n else:\n self.assertEqual(set(included), set(rules.GetIncluded()))\n \n self.assertEqual(set(excluded), set(rules.GetExcluded()))\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n","repo_name":"PixarAnimationStudios/OpenUSD","sub_path":"pxr/usd/sdf/testenv/testSdfDetachedLayer.py","file_name":"testSdfDetachedLayer.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":5042,"dataset":"github-code","pt":"3"} +{"seq_id":"27537570915","text":"\"\"\"Base schemas.\"\"\"\nfrom __future__ import annotations\n\nfrom pydantic import BaseModel, HttpUrl\n\nfrom imagesecrets.constants import URL_KEY_ALIAS\n\n\nclass ModelSchema(BaseModel):\n \"\"\"Base schema for database models.\"\"\"\n\n class Config:\n \"\"\"Pydantic configuration.\"\"\"\n\n orm_mode = True\n\n\ndef pretty_key(key: str) -> str:\n \"\"\"Return a pretty key if specified alias for it exists, 
otherwise camelcase.\"\"\"\n return URL_KEY_ALIAS.get(key) or \"\".join(\n w.capitalize() for w in key.split(\"_\")\n )\n\n\nclass Info(BaseModel):\n \"\"\"Response model for home route.\"\"\"\n\n app_name: str\n swagger_url: HttpUrl\n redoc_url: HttpUrl\n github_url: HttpUrl\n\n class Config:\n \"\"\"Model configuration.\"\"\"\n\n alias_generator = pretty_key\n\n\nclass Message(BaseModel):\n \"\"\"Response model for a single detail field.\"\"\"\n\n detail: str\n\n\nclass Field(Message):\n \"\"\"Response model for invalid field value.\"\"\"\n\n field: str\n\n\nclass Conflict(Field):\n \"\"\"Response model for conflicting field value.\"\"\"\n\n value: str\n\n\nclass Token(BaseModel):\n \"\"\"Response model for access token.\"\"\"\n\n access_token: str\n token_type: str\n","repo_name":"kucera-lukas/imagesecrets","sub_path":"imagesecrets/schemas/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3852166652","text":"import spacy\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\nnlp = spacy.load('en_core_web_trf')\n\n# TERMINALBA:\n# export FLASK_APP=main.py\n# export FLASK_ENV=development\n# flask run vagy flask run --host=0.0.0.0\n\n\n@app.route('/')\ndef index():\n return 'Hello!'\n\ndef tagger(word, pos, dep):\n invoc = [\"MI6, robot, bot\"]\n get = [\"get\", \"bring\", \"fetch\", \"grab\", \"obtain\", \"give\"]\n build = [\"build\", \"construct\", \"erect\", \"assemble\", \"make\"]\n greeting = [\"hi\", \"hello\", \"bye\", \"greetings\", \"howdy\", \"welcome\"]\n item = [\"oak log\", \"birch log\", \"stone\", \"obsidian\", \"redstone\"]\n generic_item = [\"wood\", \"wool\"]\n structure = [\"house\", \"igloo\", \"building\"]\n location = [\"next\", \"front\", \"behind\", \"here\", \"there\"]\n quantity = [\"5\", \"6\", \"five\", \"six\"]\n generic_quantity = [\"some\", \"few\"]\n # LOCATIONS!! 
(next to, in front of, etc)\n\n if word in invoc and dep == \"nsubj\": return \"invoc\"\n if word in get: return \"get\"\n if word in build and (dep == \"xcomp\" or dep == \"conj\" or dep == \"ROOT\"): return \"build\"\n if word in greeting: return \"greeting\"\n if word in item: return \"item\"\n if word in generic_item: return \"gen_item\"\n if word in structure and pos == \"NOUN\": return \"structure\"\n if word in location: return \"location\"\n if word in quantity: return \"quant\"\n if word in generic_quantity: return \"gen_quant\"\n\n\nresponse = {\n \"message\": \"\", #\n \"command_type\": \"\",\n \"item\": \"\",\n \"quantity\": \"\",\n \"structure\": \"\",\n \"location\": \"\"\n}\n\n\ndef reset_response():\n response[\"message\"] = \"\"\n response[\"command_type\"] = \"\"\n response[\"item\"] = \"\"\n response[\"quantity\"] = \"\"\n response[\"structure\"] = \"\"\n response[\"location\"] = \"\"\n\n\n\n\n@app.route('//')\ndef api(id, command):\n print(id)\n doc = nlp(command)\n\n tagged_words = []\n i = 0\n for token in doc:\n temp = [token.text, token.pos_, token.dep_, token.head.text,\n tagger(token.lemma_.lower(), token.pos_, token.dep_)]\n tagged_words.append(temp)\n\n print(tagged_words)\n\n # for items in tagged_words:\n\n\n command_struct = []\n for word in tagged_words:\n if word[4] != None:\n command_struct.append(word)\n\n temp = []\n for command in command_struct:\n temp.append(command[4])\n print(temp)\n print()\n\n response_list = []\n\n for i in range(len(command_struct)):\n if command_struct[i][4] == \"build\":\n try:\n if command_struct[i + 1][4] == \"structure\":\n response[\"command_type\"] = command_struct[i][4]\n response[\"structure\"] = command_struct[i + 1][0]\n response[\"message\"] = \"I will build a(n) \" + command_struct[i + 1][0] + \".\"\n try:\n if command_struct[i + 2][4] == \"location\":\n response[\"location\"] = command_struct[i + 2][0]\n response[\"message\"] = \"I will build a(n) \" + command_struct[i + 1][0] + \" \" + command_struct[i + 2][0] + \".\"\n response_list.append(response.copy())\n reset_response()\n i += 2\n else:\n response[\"location\"] = \"?\"\n response_list.append(response.copy())\n reset_response()\n except:\n response[\"location\"] = \"?\"\n response_list.append(response.copy())\n reset_response()\n i += 1\n except:\n pass\n\n if command_struct[i][4] == \"get\":\n try:\n if (command_struct[i + 1][4] == \"quant\" or command_struct[i + 1][4] == \"gen_quant\") and \\\n (command_struct[i + 2][4] == \"item\" or command_struct[i + 2][4] == \"gen_item\"):\n response[\"command_type\"] = command_struct[i][4]\n response[\"quantity\"] = command_struct[i + 1][0]\n response[\"item\"] = command_struct[i + 2][0]\n response[\"message\"] = \"I will get you \" + command_struct[i + 1][0] + \" \" + command_struct[i + 2][0] + \".\"\n response_list.append(response.copy())\n reset_response()\n i += 2\n except:\n pass\n\n try:\n if command_struct[i + 1][4] == \"item\" or command_struct[i + 1][4] == \"gen_item\":\n response[\"command_type\"] = command_struct[i][4]\n response[\"quantity\"] = \"?\"\n response[\"item\"] = command_struct[i + 1][0]\n response[\"message\"] = \"I will get you \" + command_struct[i + 1][0] + \".\"\n response_list.append(response.copy())\n reset_response()\n i += 1\n except:\n pass\n\n if command_struct[i][4] == \"greeting\":\n response[\"command_type\"] = command_struct[i][4]\n response[\"message\"] = \"Hello!\"\n response_list.append(response.copy())\n reset_response()\n i += 1\n\n if not response_list:\n 
response[\"command_type\"] = \"Error\"\n response[\"message\"] = \"Sorry, I didn't understand that.\"\n response_list.append(response.copy())\n reset_response()\n\n print(response_list)\n return jsonify(response_list)\n","repo_name":"pandachann/gergo_project_testing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71759242323","text":"# video_entry.py\nfrom PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QVBoxLayout, QPushButton\nfrom PyQt5.QtGui import QPixmap, QFont\nfrom PyQt5.QtCore import pyqtSignal, Qt\nimport requests\nimport pyperclip\n\nclass VideoEntry(QWidget):\n clicked = pyqtSignal(str)\n\n def __init__(self, video):\n super().__init__()\n self.video = video\n self.initUI()\n\n def initUI(self):\n layout = QHBoxLayout()\n image_label = QLabel(self)\n response = requests.get(self.video.image_path)\n image_data = response.content\n pixmap = QPixmap()\n pixmap.loadFromData(image_data)\n image_label.setPixmap(pixmap)\n\n title_label = QLabel(self.video.title)\n title_label.setFont(QFont(\"Roboto\", 18))\n image_label.mousePressEvent = lambda event: (self.clicked.emit(self.video.video_id))\n\n layout.addWidget(image_label)\n layout.addWidget(title_label)\n self.setLayout(layout)\n","repo_name":"omaramkotb22/UNM-SQA-2023-24-5","sub_path":"Interface/video_entry.py","file_name":"video_entry.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20366445259","text":"from unittest.mock import MagicMock, Mock, patch\nfrom .mock_mongo import MockPyMongo\nimport json\nimport sys\nimport os\n\n### TEST SETUP ###\nEXPECTED_DATA_FRAME_FILENAME = os.path.join(os.path.dirname(__file__),\n 'expected_response.json')\nEXPECTED_DATA_FRAME = open(EXPECTED_DATA_FRAME_FILENAME).read()\nsys.modules['pymongo'] = MockPyMongo\n\ncloud_mock = MagicMock()\nsys.modules['google.cloud'] = cloud_mock\n##################\n\nfrom src.main import _get_data, _refresh_data # noqa: E402\n\ndef test_snapshot():\n expected = _pretty_print(EXPECTED_DATA_FRAME)\n observed = _pretty_print(f'{_get_data().to_json()}')\n assert expected == observed\n\ndef test_cold_cache_json():\n _blob().return_value.exists = lambda: False\n _blob().return_value.download_as_string.return_value = '{}'\n\n find = MockPyMongo.MongoClient().talkspace.messages.find\n find.reset_mock()\n\n _refresh_data('json')\n\n find.assert_called_once()\n\ndef test_cold_cache_csv():\n _blob().return_value.exists = lambda: False\n _blob().return_value.download_as_string.return_value = '{}'\n\n find = MockPyMongo.MongoClient().talkspace.messages.find\n find.reset_mock()\n\n _refresh_data('csv')\n\n find.assert_called_once()\n\ndef test_warm_cache_json():\n _blob().return_value.exists = lambda: True\n _blob().return_value.download_as_string.return_value = '{}'\n\n find = MockPyMongo.MongoClient().talkspace.messages.find\n find.reset_mock()\n\n _refresh_data('json')\n find.assert_not_called()\n\ndef test_warm_cache_csv():\n _blob().return_value.exists = lambda: True\n _blob().return_value.download_as_string.return_value = '{}'\n\n find = MockPyMongo.MongoClient().talkspace.messages.find\n find.reset_mock()\n\n _refresh_data('csv')\n find.assert_not_called()\n\ndef _pretty_print(json_string):\n json.dumps(\n json.loads(json_string),\n indent=2,\n sort_keys=True\n )\n\ndef _blob():\n return cloud_mock.storage\\\n 
.Client.return_value\\\n .bucket.return_value\\\n .blob\n","repo_name":"vaughn-johnson/talkspace-public-api","sub_path":"test/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19100603809","text":"import gym\n\n__all__ = ['SetPlayingMode']\n\ndef SetPlayingMode(target_mode):\n \"\"\" target mode can be 'algo' or 'human' \"\"\"\n\n class SetPlayingModeWrapper(gym.Wrapper):\n \"\"\"\n Doom wrapper to change playing mode 'human' or 'algo'\n \"\"\"\n def __init__(self, env):\n super(SetPlayingModeWrapper, self).__init__(env)\n if target_mode not in ['algo', 'human']:\n raise gym.error.Error('Error - The mode \"{}\" is not supported. Supported options are \"algo\" or \"human\"'.format(target_mode))\n self.unwrapped.mode = target_mode\n\n return SetPlayingModeWrapper","repo_name":"ppaquette/gym-super-mario","sub_path":"ppaquette_gym_super_mario/wrappers/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":285,"dataset":"github-code","pt":"3"} +{"seq_id":"18864814547","text":"def DarkTowerBackdrop0(nightlife_active, bouncer_happy, bouncer_has_seen_rose):\n scene_description = \"\"\n\n # Check if the nightlife is active and the Bouncer is happy\n if nightlife_active and bouncer_happy:\n # Description of the Dark Tower\n tower_description = [\"ominous\", \"imposing\", \"enigmatic\", \"shadowy\"]\n scene_description += \"As you glance beyond the vibrant nightlife, you catch sight of an {} structure in the distance. The Dark Tower stands tall and mysterious, seeming to pierce the very fabric of reality.\\n\".format(random.choice(tower_description))\n\n # Description of the Tower's aura\n tower_aura = [\"eerie glow\", \"crackling energy\", \"otherworldly aura\"]\n scene_description += \"The Dark Tower emits an {}, bathing its surroundings in an unsettling and captivating radiance.\\n\".format(random.choice(tower_aura))\n\n # The Tower's purpose and significance\n scene_description += \"Rumors abound about the Tower's purpose. Some say it holds the key to untold power and knowledge, while others believe it is a portal to other dimensions, where the boundaries of reality blur and merge.\\n\"\n\n # Check if the Bouncer has seen the rose\n if bouncer_has_seen_rose:\n scene_description += \"You notice a subtle shift in the Bouncer's demeanor, as if they carry a secret knowledge. 
Perhaps they have encountered the enigmatic Rose, a sight that can change anyone forever.\\n\"\n\n return scene_description\n\n# Example usage:\nnightlife_active = True\nbouncer_is_happy = True\nbouncer_has_seen_the_rose = True\n\nbackdrop_description = DarkTowerBackdrop0(nightlife_active, bouncer_is_happy, bouncer_has_seen_the_rose)\nif backdrop_description:\n print(backdrop_description)\n","repo_name":"txtatech/virtual-forest","sub_path":"virtual-forest/game-code/def DarkTowerBackdrop0.py","file_name":"def DarkTowerBackdrop0.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27425422131","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 17 10:34:28 2019\n\n@author: Lenovo\n\"\"\"\n\nimport sys\nimport pandas as pd\nimport numpy as np\nimport time\nfrom datetime import datetime, timedelta\nimport pymysql\nfrom sqlalchemy import create_engine\nimport logging.config\n\n#logging.config.fileConfig(\"LSTM_llyc.dataset\")\n#logger = logging.getLogger()\n#logger.info(\"开始计算当前交通流量值》》》》》》》》》》\")\n#def rq_zh(d):\n## today = datetime.now() #取今天\n# today = (datetime.now()- timedelta(days=14)) #取昨天\n# yeday = (today - timedelta(weeks=d)).strftime(\"%Y-%m-%d\")\n# return yeday\n#dats = []\n#for i in range(0,8):\n# dat = rq_zh(i)\n# dats.append(dat)\n#dates = tuple(dats)\n\ndef forecast_real():\n today = datetime.now() #取今天\n yeday = (today - timedelta(days=1)).strftime(\"%Y-%m-%d\") #取昨天,因为只能拿到昨天的数据\n dates = (yeday)\n con = pymysql.connect('52.1.123.6','root','123456','keenIts')\n sql = \"select * from t_transportation_flow WHERE point_number in ('#GS002','#GS004','#GS005','#GS006','#GS007',\\\n '#GS008','#GS009','#GS010','#GS011','#GS012','#GS017','#GS024','#GS031','#GS033','#GS035') and date in \\\n (\\\"%s\\\")\"%(dates)\n \n data = pd.read_sql(sql,con)\n con.close()\n \n xq = []\n tm = data['date']\n for i in tm:\n dt = pd.to_datetime(i)\n dt1 = dt.strftime('%A')\n xq.append(dt1)\n \n tm = [int(x[0:2]) for x in data['time_part']]\n data['Tm'] = tm\n data['xq'] = xq\n data['rq'] = data.date\n \n size_mapping = {\n 'Monday': str(1),\n 'Tuesday': 2,\n 'Wednesday': 3,\n 'Thursday':4,\n 'Friday':5,\n 'Saturday':6,\n 'Sunday':7}\n data['xq'] = data['xq'].map(size_mapping)\n \n data['flow_e'] = data['flow_e_l'] + data['flow_e_s'] + data['flow_e_r']\n data['flow_w'] = data['flow_w_l'] + data['flow_w_s'] + data['flow_w_r']\n data['flow_s'] = data['flow_s_l'] + data['flow_s_s'] + data['flow_s_r']\n data['flow_n'] = data['flow_n_l'] + data['flow_n_s'] + data['flow_n_r']\n \n #10分钟的交通流量\n sjd = [int(x[12:14]) for x in data['time_part']]\n data['sjd'] = sjd\n \n size_sjd = {\n 5:1,\n 10:1,\n 15:2,\n 20:2,\n 25:3,\n 30:3,\n 35:4,\n 40:4,\n 45:5,\n 50:5,\n 55:6,\n 0:6\n }\n data['sjd'] = data['sjd'].map(size_sjd)\n \n# Tm = datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n# th = Tm[11:13]\n# sd = Tm[14:16]\n# \n# if (int(th[0]==0 and int(th[1]==0))):\n# th = 24\n# else:\n# th = int(th)\n# \n# t = 0\n# if (int(sd[0]) == 0 and int(sd[1]) in range(5)) | (int(sd[0]) == 0 and int(sd[1]) in range(6,10)):\n# t += 1\n# if (int(sd[0]) == 1 and int(sd[1]) in range(5)) | (int(sd[0]) == 1 and int(sd[1]) in range(6,10)):\n# t += 2\n# if (int(sd[0]) == 2 and int(sd[1]) in range(5)) | (int(sd[0]) == 2 and int(sd[1]) in range(6,10)):\n# t += 3\n# if (int(sd[0]) == 3 and int(sd[1]) in range(5)) | (int(sd[0]) == 3 and int(sd[1]) in range(6,10)):\n# t += 4\n# if (int(sd[0]) == 4 and int(sd[1]) in range(5)) | (int(sd[0]) == 
4 and int(sd[1]) in range(6,10)):\n# t += 5\n# if (int(sd[0]) == 5 and int(sd[1]) in range(5)) | (int(sd[0]) == 5 and int(sd[1]) in range(6,10)):\n# t += 6\n#\n# data1 = data[(data['sjd']==t)&(data['time_hour']==th)|(data['time_hour']==th-1)]\n \n \n #取当前时间所在10分钟的前6个10分钟\n def rq_zh(d):\n # today = datetime.now() #取今天\n today = datetime.now()-timedelta(days=1)\n Tm = (today - timedelta(minutes=d)).strftime(\"%Y-%m-%d %H:%M\")\n th = Tm[11:13]\n sd = Tm[14:16]\n \n if (int(th[0]==0 and int(th[1]==0))):\n th = 24\n else:\n th = int(th)\n \n t = 0\n if (int(sd[0]) == 0 and int(sd[1]) in range(5)) | (int(sd[0]) == 0 and int(sd[1]) in range(6,10)):\n t += 1\n if (int(sd[0]) == 1 and int(sd[1]) in range(5)) | (int(sd[0]) == 1 and int(sd[1]) in range(6,10)):\n t += 2\n if (int(sd[0]) == 2 and int(sd[1]) in range(5)) | (int(sd[0]) == 2 and int(sd[1]) in range(6,10)):\n t += 3\n if (int(sd[0]) == 3 and int(sd[1]) in range(5)) | (int(sd[0]) == 3 and int(sd[1]) in range(6,10)):\n t += 4\n if (int(sd[0]) == 4 and int(sd[1]) in range(5)) | (int(sd[0]) == 4 and int(sd[1]) in range(6,10)):\n t += 5\n if (int(sd[0]) == 5 and int(sd[1]) in range(5)) | (int(sd[0]) == 5 and int(sd[1]) in range(6,10)):\n t += 6\n return th,t\n dats = []\n for i in [10,20,30,40,50,60]:\n dat = rq_zh(i)\n dats.append(dat)\n# data1 = data[(data['time_hour'].isin([i[0]+1 for i in dats]))&(data['sjd'].isin([i[1] for i in dats]))]\n data1 = [] \n for i in dats:\n dat1 = data[(data['time_hour'].isin([i[0],i[0]+1]))&(data['sjd']==i[1])]\n data1.append(dat1)\n data1 = pd.concat(data1)\n \n ds = []\n for r in set(data1['date']):\n d1 = data1[data1['date']==r]\n for lk in set(d1['point_number']):\n d2 = d1[d1['point_number']==lk]\n for t in set(d2['Tm']):\n d3 = d2[d2['Tm']==t]\n for sj in set(d3['sjd']):\n d4 = d3[d3['sjd']==sj]\n \n def zj(col):\n zz = d4[col].sum()\n return zz\n \n flow_all = zj('flow_all') \n flow_e_l = zj('flow_e_l')\n flow_e_s = zj('flow_e_s')\n flow_e_r = zj('flow_e_r')\n flow_w_l = zj('flow_w_l')\n flow_w_s = zj('flow_w_s')\n flow_w_r = zj('flow_w_r')\n flow_s_l = zj('flow_s_l')\n flow_s_s = zj('flow_s_s')\n flow_s_r = zj('flow_s_r')\n flow_n_l = zj('flow_n_l')\n flow_n_s = zj('flow_n_s')\n flow_n_r = zj('flow_n_r')\n fl_e = zj('flow_e')\n fl_w = zj('flow_w')\n fl_s = zj('flow_s')\n fl_n = zj('flow_n')\n \n dsum = round(pd.DataFrame([flow_all,flow_e_l,flow_e_s,flow_e_r,flow_w_l,flow_w_s,\n flow_w_r,flow_s_l,flow_s_s,flow_s_r,flow_n_l,\n flow_n_s,flow_n_r,fl_e,fl_w,fl_s,fl_n]))\n \n dsum1 = pd.DataFrame(dsum).T\n dsum1.columns = ['flow_all','flow_e_l','flow_e_s','flow_e_r','flow_w_l','flow_w_s',\n 'flow_w_r','flow_s_l','flow_s_s','flow_s_r','flow_n_l',\n 'flow_n_s','flow_n_r','flow_e','flow_w','flow_s','flow_n']\n dsum1['Tm'] = t\n dsum1['lk'] = lk\n dsum1['rq'] = r\n dsum1['sjd'] = sj\n ds.append(dsum1)\n \n try:\n lk = pd.concat(ds,ignore_index=True)\n except ValueError:\n print(\" raise ValueError('No objects to concatenate')\")\n else:\n print(\"objects to concatenate success\")\n \n\n lk['time_p'] = lk['Tm'].astype(str) + \":\" + lk['sjd'].astype(str) + str(0)\n\n lk['Time slice'] = lk['rq'].astype(str) + \":\" + lk['time_p']\n \n xq = []\n tm = lk['rq']\n for i in tm:\n dt = pd.to_datetime(i)\n dt1 = dt.strftime('%A')\n xq.append(dt1)\n \n lk['xq'] = xq\n \n size_mapping = {\n 'Monday': str(1),\n 'Tuesday': 2,\n 'Wednesday': 3,\n 'Thursday':4,\n 'Friday':5,\n 'Saturday':6,\n 'Sunday':7}\n lk['xq'] = lk['xq'].map(size_mapping)\n \n lk1 = lk\n lk1['sj'] = lk1['rq'].astype('str')+\" \"+lk1['time_p']\n #交换列顺序\n 
cols = list(lk)\n cols.insert(0,cols.pop(cols.index('lk')))\n cols.insert(1,cols.pop(cols.index('sj')))\n cols.insert(2,cols.pop(cols.index('sjd')))\n lk1 = lk1.loc[:,cols]\n tz = lk1[lk1.columns[:20]]\n\n \n tzz=[]\n for lk in set(tz['lk']):\n tz1=tz[tz['lk']==lk]\n tz2=round(pd.DataFrame(tz1[tz1.columns[3:]].mean()).T)\n tz2['date']=set(lk1['rq'])\n tz2['point_number']=lk\n tzz.append(tz2)\n tzz=pd.concat(tzz)\n #交换列顺序\n cols = list(tzz)\n cols.insert(0,cols.pop(cols.index('point_number')))\n cols.insert(1,cols.pop(cols.index('date')))\n tzz = tzz.loc[:,cols]\n tzz = tzz.sort_values(by=['point_number'])\n tzz.to_csv(r'F:\\GZ\\DM\\Spyder\\LSTM_realTmpredict_rhyc\\tzz.csv')\n print(\"当前交通流量值计算完毕》》》》》》》》》》》》\")\n time.sleep(5)\n sql1 = \"select * from flow_forecast_TimeSeries where point_number in ('#GS002','#GS004','#GS005','#GS006','#GS007','#GS008','#GS009','#GS010','#GS011','#GS012','#GS017','#GS024','#GS031','#GS033','#GS035') and date in (\\\"%s\\\")\"%(dates)\n con = pymysql.connect('52.1.123.6','root','123456','keenIts')\n dat1 = pd.read_sql(sql1,con)\n dat1 = dat1.sort_values(by=['point_number'])\n con.close()\n\n# dat1.to_csv(r'F:\\GZ\\DM\\Spyder\\Flow_realTmpredict_rhyc\\dat1.csv')\n \n print(\"开始计算预测误差率并存入数据库》》》》》》》\") \n time.sleep(1) \n if data.empty == True:\n forecast_error = pd.DataFrame(np.zeros((15,16)))\n forecast_error.columns = ['point_number','date', 'time_part', 'flow_all', 'flow_e_l', 'flow_e_s','flow_e_r', \n 'flow_w_l', 'flow_w_s', 'flow_w_r', 'flow_s_l', 'flow_s_s','flow_s_r', 'flow_n_l', 'flow_n_s', 'flow_n_r']\n else:\n# dat1=dat1[dat1['point_number'].isin(set(tzz['point_number']))] #除去无数据的点情况\n p_nms = [x for x in set(tzz['point_number']) if x in set(dat1['point_number'])]\n dat1=dat1[dat1['point_number'].isin(p_nms)] #除去无数据的点情况\n tzz = tzz[tzz['point_number'].isin(p_nms)]\n forecast_error = pd.DataFrame((dat1[dat1.columns[4:]].values-tzz[tzz.columns[2:15]].values)/tzz[tzz.columns[2:15]].values,dtype='str')\n forecast_error.columns = ['flow_all', 'flow_e_l', 'flow_e_s','flow_e_r', 'flow_w_l', 'flow_w_s', \n 'flow_w_r', 'flow_s_l', 'flow_s_s','flow_s_r', 'flow_n_l', 'flow_n_s', 'flow_n_r']\n forecast_error['point_number'] = tzz['point_number'].values\n forecast_error['date']=dat1['date'].values\n forecast_error['time_part']=dat1['time_part'].values\n cols = list(forecast_error)\n cols.insert(0,cols.pop(cols.index('point_number')))\n cols.insert(1,cols.pop(cols.index('date')))\n cols.insert(2,cols.pop(cols.index('time_part')))\n forecast_error = forecast_error.loc[:,cols]\n forecast_error[forecast_error==0]==np.nan\n forecast_error[forecast_error=='inf']==np.nan\n# forecast_error=forecast_error[forecast_error['point_number']!=np.NaN]\n forecast_error.to_csv('forecast_error.csv')\n print(forecast_error)\n\n #预测结果写入数据库\n engine=create_engine(\"mysql+pymysql://root:123456@52.1.123.6:3306/keenIts?charset=utf8\")\n forecast_error.to_sql(name='flow_forecast_TimeSeries_error',con=engine,if_exists='append',index=False,index_label=False)\n print(\"预测误差率计算完毕!!!!!!!!\")\n\n \n \nforecast_real()","repo_name":"18786683795/LSTM_realTmpredict_rhyc","sub_path":"real_error.py","file_name":"real_error.py","file_ext":"py","file_size_in_byte":11992,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"3"} +{"seq_id":"74279665042","text":"import pickle\nimport sys\nsys.path.insert(0,'../src/')\n\nfrom optimizers import *\nfrom environment import *\nfrom IRLalgorithms import *\nfrom MDPsolver import *\nfrom utils import *\nfrom plot import 
*\nimport argparse\nimport copy\n\n\ndef CompareAlphas_tdw_loader(dim, env_type, noiseL, low_alpha = False, alphas = None, end_title= \"ablation\", legend = True):\n lr = 0.5\n fix_start = False\n alphaE = \"1.0\"\n noisesE = [\"0.0\", \"0.05\", \"0.1\", \"0.15\", \"0.2\"]\n noiseL = str(noiseL)\n\n data = load_result( \"CompareAlphas_tdw_env\" + str(env_type) + \"size\" + str(dim)+\"lr\"+str(lr)+noiseL+\"alphaE\"+alphaE+\"fix_start\"+str(fix_start))\n Vs_to_plot = data[\"Vs_to_plot\"]\n sigma_Vs_to_plot = data[\"sigma_Vs\"]\n plot_label = data[\"labels\"]\n ###\n\n\n for i,enoise in enumerate(noisesE):\n noisesE[i] = float(enoise)\n if not low_alpha:\n if noiseL == \"0.0\":\n Vs_to_plot.pop(6) \n plot_label.pop(6)\n sigma_Vs_to_plot.pop(6)\n Vs_to_plot.pop(5) \n plot_label.pop(5)\n sigma_Vs_to_plot.pop(5)\n \n if not alphas == None:\n current_pos = 0\n for i, alpha in enumerate([\"1.0\", \"0.95\", \"0.9\", \"0.85\", \"0.8\"]):\n if alpha not in alphas:\n Vs_to_plot.pop(current_pos) \n plot_label.pop(current_pos)\n sigma_Vs_to_plot.pop(current_pos)\n else:\n current_pos += 1\n if end_title == \"presentation\":\n plot_label = [\"MCE\", \"Robust MCE : \" + str(alphas[-1]), \"expert\"]\n \n plot_lines_and_ranges( list_to_plot = Vs_to_plot,\n list_sigmas = sigma_Vs_to_plot,\n list_name = plot_label,\n axis_label = [\"Expert Noise\", \"Total Return \"],\n folder = \"\",\n title = \"NotebookCompare Alphas_tdw Env \"+str(env_type)+\" noise L \"+ noiseL+\"dim\"+str(dim)+\"alphaE\"+alphaE+\"fix_start\"+str(fix_start)+end_title,\n x_axis = noisesE,\n show = True,\n legend = legend,\n vertical = noiseL)\n","repo_name":"lviano/RobustMCE_IRL","sub_path":"robustIRLcode/CompareAlphas/CompareAlphas_tdw_loader.py","file_name":"CompareAlphas_tdw_loader.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"18325558481","text":"import sys\nimport math\nimport bisect\nfrom heapq import heapify, heappop, heappush\nfrom collections import deque, defaultdict, Counter\nfrom functools import lru_cache\nfrom itertools import accumulate, combinations, permutations\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\nMOD99 = 998244353\n\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\nSMI = lambda: input().split()\nSLI = lambda: list(SMI())\n\n\ndef inv_gcd(a, b):\n a = a % b\n if a == 0:\n return b, 0\n s = b\n t = a\n m0 = 0\n m1 = 1\n while t:\n u = s // t\n s -= t * u\n m0 -= m1 * u\n s, t = t, s\n m0, m1 = m1, m0\n if m0 < 0:\n m0 += b // s\n return s, m0\n\n\ndef inv_mod(x, m):\n assert 1 <= m\n z = inv_gcd(x, m)\n assert z[0] == 1\n return z[1]\n\n\ndef crt(r,m):\n # r: 余りのlist\n # m: modのlist\n assert len(r) == len(m)\n n = len(r)\n r0 = 0\n m0 = 1\n for i in range(n):\n assert 1 <= m[i]\n r1 = r[i] % m[i]\n m1 = m[i]\n if m0 < m1:\n r0, r1 = r1, r0\n m0, m1 = m1, m0\n if m0 % m1 == 0:\n if r0 % m1 != r1:\n return 0, 0\n continue\n g, im = inv_gcd(m0, m1)\n u1 = m1 // g\n if (r1 - r0) % g:\n return 0, 0\n x = (r1-r0) // g % u1 * im % u1\n r0 += x * m0\n m0 *= u1\n if r0 < 0:\n r0 += m0\n return r0,m0\n\n\n# 約数列挙(単体)\ndef divisors(x):\n res = set()\n for i in range(1, int(x**0.5) + 2):\n if x % i == 0:\n res.add(i)\n res.add(x//i)\n return res\n\n\ndef main():\n N = NI()\n D = divisors(2*N)\n ans = 10**20\n\n for x in D:\n y = 2 * N // x\n r, m = crt([0, -1], [x, y])\n if r > 0:\n ans = min(ans, 
r)\n\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"ACL1B.py","file_name":"ACL1B.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5691177242","text":"\"\"\"Provides various functions related to GROMACS. NOTE: Functions are only tested for specific applications. \"\"\"\n#TODO cleanup\n\nfrom math import floor\nfrom contextlib import suppress\nfrom bisect import bisect_left, bisect_right\nimport functools\nimport os\n\nimport core.system as system\n#import user.seq2itp_aminoacids as seq2itp\n\nTOP_HEADER_SECTION = \"header\"\nTOP_SYSTEM_SECTION = \"system\"\nTOP_MOLECULES_SECTION = \"molecules\"\nITP_MOLECULE_TYPE_SECTION = \"moleculetype\"\nITP_ATOMS_SECTION = \"atoms\"\n\nclass GROFileData:\n def __init__(self, header, atom_count, data, box_vector):\n self.header = header\n self.atom_count = atom_count\n self.data = data\n self.box_vector = box_vector\n\ndef TOP_get_system_charge(filepath_TOP, TOP_itp_directory_list=[]):\n def ITP_get_molecule_charges(filepath_ITP):\n class Molecule:\n def __init__(self, name):\n self.name = name\n self.charge = 0\n\n data = {}\n with open(filepath_ITP, 'rt') as file:\n state = None\n current_molecule = None\n for line in file:\n line = line.strip()\n if len(line) == 0 or line.startswith(\";\"):\n continue\n\n if line.startswith(\"[\"):\n if ITP_MOLECULE_TYPE_SECTION in line:\n state = ITP_MOLECULE_TYPE_SECTION\n elif ITP_ATOMS_SECTION in line:\n state = ITP_ATOMS_SECTION\n else:\n state = None\n if current_molecule is not None:\n data[current_molecule.name] = current_molecule.charge\n continue\n\n if state == ITP_MOLECULE_TYPE_SECTION:\n current_molecule = Molecule(line.split()[0])\n elif state == ITP_ATOMS_SECTION:\n current_molecule.charge += round(float(line.split()[6]))\n return data\n\n TOP_data = TOP_read(filepath_TOP)\n\n header_data = TOP_data[TOP_HEADER_SECTION]\n molecule_charge_data = {}\n TOP_itp_directory = \"\"\n for line in header_data:\n line = line.strip()\n if \"#include\" in line:\n itp_filename = line.split(\"\\\"\")[1].split(\"\\\"\")[0]\n if len(TOP_itp_directory_list) > 0:\n for dirname in TOP_itp_directory_list:\n full_path = os.path.join(dirname, itp_filename)\n if os.path.isfile(full_path):\n TOP_itp_directory = dirname\n break\n molecule_charge_data = {**ITP_get_molecule_charges(os.path.join(TOP_itp_directory, itp_filename)), **molecule_charge_data}\n\n total_charge_system = 0\n for line in TOP_data[TOP_MOLECULES_SECTION]:\n line = line.strip().split()\n if line[0] in molecule_charge_data:\n total_charge_system += molecule_charge_data[line[0]] * round(float(line[1]))\n return total_charge_system\n\ndef GRO_TOP_neutralize_system(filepath_input_GRO, filepath_input_TOP, filepath_output_GRO, filepath_output_TOP, name_Solvent, name_positive_ion, name_negative_ion, TOP_itp_directory_list=[]):\n system_charge = TOP_get_system_charge(filepath_input_TOP, TOP_itp_directory_list=TOP_itp_directory_list)\n data_TOP = TOP_read(filepath_input_TOP)\n data_GRO = GRO_read(filepath_input_GRO)\n\n if system_charge == 0:\n pass # No need to change the files\n else:\n #TOP#\n output_data_TOP = []\n molecules = data_TOP[TOP_MOLECULES_SECTION]\n for i in range(len(molecules)):\n molecule=molecules[i].strip().split()\n\n if molecule[0] == name_Solvent:\n if system_charge > 0:\n name_Ion = name_negative_ion\n elif system_charge < 0:\n name_Ion = name_positive_ion\n output_data_TOP.append(\"\" + 
name_Ion + \" \" + str(abs(system_charge)) + \"\\n\")\n\n new_count = int(molecule[1]) - abs(system_charge)\n output_data_TOP.append(\"\" + name_Solvent + \" \" + str(new_count) + \"\\n\")\n else:\n output_data_TOP.append(molecules[i])\n\n data_TOP[TOP_MOLECULES_SECTION] = output_data_TOP\n\n #GRO#\n count_ION = abs(system_charge)\n\n for i in range(len(data_GRO.data)):\n if count_ION <= 0:\n break\n\n if name_Solvent in data_GRO.data[i][5:10].strip():\n data_GRO.data[i] = data_GRO.data[i][0:5] + \"%-5s%5s\" % (name_Ion, name_Ion) + data_GRO.data[i][15:]\n count_ION -= 1\n \n with open(filepath_output_TOP, 'wt') as file:\n for line in data_TOP[TOP_HEADER_SECTION]:\n file.write(line)\n file.write(\"\\n\")\n file.write(\"[\" + TOP_SYSTEM_SECTION + \"]\\n\")\n for line in data_TOP[TOP_SYSTEM_SECTION]:\n file.write(line)\n file.write(\"\\n\")\n file.write(\"[\" + TOP_MOLECULES_SECTION + \"]\\n\")\n for line in data_TOP[TOP_MOLECULES_SECTION]:\n file.write(line)\n\n with open(filepath_output_GRO, 'wt') as file:\n for line in data_GRO.header:\n file.write(line)\n file.write(str(data_GRO.atom_count) + \"\\n\")\n for line in data_GRO.data:\n file.write(line)\n file.write(\" \".join(data_GRO.box_vector))\n\ndef GRO_read_box(filepath):\n \"\"\"\n Reads the box vector from GRO file.\n\n Args:\n filepath (string): Path to GRO file.\n Returns:\n (list): List of strings describing the box vector [v1(x), v2(y), v3(z), v1(y), v1(z), v2(x), v2(z), v3(x), v3(y)].\n \"\"\"\n box_string = None\n data = system.read_text_file(filepath)\n \n for line in reversed(data):\n line = line.strip()\n if not len(line) == 0:\n box_string = line.split()\n break\n\n return box_string\n\ndef GRO_read(filepath_GRO):\n \"\"\"\n Args:\n filepath_GRO (string): Path to GRO file.\n Returns:\n GROFileData object, with members:\n header (string): Title string of the file.\n atom_count (int): Number of atoms in file.\n data (list): List of strings, each string describing an atom. Strings contain line endings.\n box_vector (list): List of strings describing the box vector.\n \"\"\"\n file_data = system.read_text_file(filepath_GRO, strip=False)\n\n header = file_data[0]\n atom_count = int(file_data[1])\n data = [file_data[i] for i in range(2, 2 + atom_count)]\n box_vector = file_data[2 + atom_count].strip().split()\n\n return GROFileData(header, atom_count, data, box_vector)\n\ndef GRO_merge(filepath_GRO_base, filepath_GRO_insert, filepath_GRO_output):\n \"\"\"\n Combines two GRO files (base + insert) into one, keeping the header and box vector of filepath_GRO_base.\n \"\"\"\n data_base = system.read_text_file(filepath_GRO_base)\n data_insert = system.read_text_file(filepath_GRO_insert)\n \n # Strip box from data_base\n for i in reversed(range(0, len(data_base))):\n line = data_base[i].strip()\n if not len(line) == 0:\n del(data_base[i])\n break\n del(data_base[i])\n \n data_output = data_base + data_insert[2:]\n data_output[1] = str(len(data_output) - 3) + \"\\n\"\n system.write_text_file(filepath_GRO_output, data_output, add_newline=False)\n\ndef TOP_read(filepath):\n \"\"\"\n Returns:\n (dict): Each key represents a section of the TOP/ITP file (e.g. 
[ atoms ] becomes key \"atoms\").\n The corresponding value is a list of strings containing the lines belonging to that section.\n \"\"\"\n data_sections = {}\n with open(filepath, 'rt') as file:\n state = TOP_HEADER_SECTION\n data_sections[state] = []\n for row in file:\n row_stripped = row.strip()\n if len(row_stripped) == 0 or row_stripped.startswith(\";\"):\n continue\n if row_stripped.startswith(\"[\"):\n state = row_stripped.split(\"[\", 1)[-1].split(\"]\")[0].strip()\n if state not in data_sections:\n data_sections[state] = []\n continue\n\n data_sections[state].append(row)\n\n return data_sections\n\ndef TOP_merge(filepath_TOP_base, filepath_TOP_insert, filepath_TOP_output, sections_list, sections_list_append):\n \"\"\"\n Combines two TOP files into one (base + insert).\n \n Args:\n sections_list (list): List of strings describing which sections of 'filepath_TOP_base' will be written to the output file.\n sections_list_append (list): List of strings describing which sections of 'filepath_TOP_insert' are appended \n to the sections of 'filepath_TOP_base' in the output file.\n Only sections also present in 'sections_list' are included.\n \"\"\"\n data_sections_base = TOP_read(filepath_TOP_base)\n data_sections_insert = TOP_read(filepath_TOP_insert)\n header_union = None\n\n if TOP_HEADER_SECTION in sections_list and TOP_HEADER_SECTION in sections_list_append:\n header_combined = data_sections_base[TOP_HEADER_SECTION] + data_sections_insert[TOP_HEADER_SECTION]\n for i in range(len(header_combined)):\n include_filename = header_combined[i].split(\"\\\"\", 1)[-1].split(\"\\\"\")[0]\n header_combined[i] = \"#include \\\"\" + include_filename + \"\\\"\\n\"\n header_union = list(set(header_combined)) # Removes duplicates. Removes original order of list as well.\n\n #Ensure headers are in order in which they appear in header_combined\n i_sorted = []\n for i in range(len(header_union)):\n i_sorted.append(header_combined.index(header_union[i]))\n \n i_sorted = sorted(i_sorted)\n for i in range(len(i_sorted)):\n header_union[i] = header_combined[i_sorted[i]]\n\n with open(filepath_TOP_output, 'wt') as file:\n for section in sections_list:\n if section == TOP_HEADER_SECTION and header_union is not None:\n for line in header_union:\n file.write(line)\n continue\n elif not section == TOP_HEADER_SECTION:\n file.write(\"\\n[\" + section + \"]\\n\")\n else:\n continue\n for row in data_sections_base[section]:\n file.write(row)\n if section in sections_list_append:\n for row in data_sections_insert[section]:\n file.write(row)\n\n# def ITP_convert_to_GRO(filepath_ITP, filepath_GRO):\n# \"\"\"\n# Similar to old_ITP_convert_to_GRO, except that atoms are placed according to hard coded amino acid structures. \n# This helps prevent possible complications during steeping. 
Otherwise same in- and output as old_ITP_convert_to_GRO.\n# \"\"\"\n\n# data_ITP = TOP_read(filepath_ITP)[\"atoms\"]\n# seq2itp.seq2itp_ITP_to_GRO(data_ITP, filepath_GRO)\n\ndef ITP_constraints_to_bonds(filepath_ITP, filepath_ITP_noConstraints, constraint_forceConstant=5000):\n \"\"\"\n Creates a copy of 'filepath_ITP' where constraints have been replaced with harmonic bonds.\n NOTE: Only tested on ITP files containing a single molecule.\n\n Args:\n filepath_ITP (string): Path to input ITP file.\n filepath_ITP_noConstraints (string): Path to output ITP file.\n constraint_forceConstant (int): Force constant of the new harmonic bonds.\n \"\"\"\n data = []\n constraints = []\n with open(filepath_ITP, 'rt') as file:\n state_constraints = False\n for row in file:\n row = row.strip()\n if(row.startswith(\"[\")):\n if \"constraints\" in row:\n state_constraints = True\n continue\n else:\n state_constraints = False\n if state_constraints:\n constraints.append(row.split(\";\")[0])\n else:\n data.append(row)\n \n with open(filepath_ITP_noConstraints, 'wt') as file:\n for row in data:\n file.write(row + \"\\n\")\n if(row.startswith(\"[\") and \"bonds\" in row):\n file.write(\"; Constraints converted to bonds for energy minimization purposes\")\n for row_constraints in constraints:\n if not (len(row_constraints) == 0 or row_constraints.startswith(\";\")):\n row_constraints = row_constraints + \" \" + constraint_forceConstant\n file.write(row_constraints + \"\\n\")\n continue\n\ndef NDX_write(filepath_NDX, index_dict):\n \"\"\"\n Args:\n filepath_NDX (string): Path to output NDX file.\n index_dict (dict(string, int)): Dictionary with keys describing the index group names \n and values describing which atoms belong to that group.\n \"\"\"\n with open(filepath_NDX, 'wt') as file:\n for group, indices in index_dict.items():\n count = 0\n file.write(\"[ \" + group + \" ]\\n\")\n for index in indices:\n file.write(str(index) + \"\\t\")\n count+=1\n if count >= 15:\n file.write(\"\\n\")\n count = 0\n file.write(\"\\n\")\n\ndef NDX_read(filepath_NDX):\n \"\"\"Returns index_dict, see NDX_write\"\"\"\n\n NDX_dict = {}\n state = None\n with open(filepath_NDX, 'rt') as file:\n for line in file:\n line = line.strip()\n if len(line) == 0:\n continue\n\n if line.startswith(\"[\"):\n state = line.split(\"[\")[1].split(\"]\")[0].strip()\n if state not in NDX_dict:\n NDX_dict[state] = []\n continue\n \n if state is None:\n continue\n else:\n indices = line.split()\n for index in indices:\n NDX_dict[state].append(int(index))\n continue\n \n return NDX_dict\n\ndef NDX_from_GRO(filepath_GRO, filepath_NDX_out, resnm_to_group_dict):\n \"\"\"\n Produces an NDX file from a GRO file according to a reference dictionary.\n \n Args:\n resnm_to_group_dict (dict(string, string/tuple)): Dict with keys describing residue names, \n and values describing to which index group they belong.\n \"\"\"\n data = GRO_read(filepath_GRO).data\n NDX_dict = {}\n\n for i in range(len(data)):\n resnm = data[i][5:10].strip()\n groups = resnm_to_group_dict[resnm]\n\n if not isinstance(groups, tuple):\n groups = (groups,)\n\n for group in groups:\n if group not in NDX_dict:\n NDX_dict[group] = []\n NDX_dict[group].append(str(i+1)) # atom index counts from 1 instead of 0\n\n NDX_write(filepath_NDX_out, NDX_dict)\n\ndef NDX_split_group(filepath_NDX_input, filepath_NDX_output, resname, atoms_per_residue, resname_split):\n \"\"\"\n Split an index group into multiple smaller groups, keeping the original group intact.\n\n Args:\n resname (string): Index 
group to split\n atoms_per_residue (int): Number of indices that go into each new group\n resname_split (string): Name for the new groups, this name will be appended by an index number (e.g. \"CL\" -> \"CL1\" \"CL2\" \"CL3\" ...)\n \"\"\"\n NDX_dict = NDX_read(filepath_NDX_input)\n\n if resname not in NDX_dict:\n raise KeyError(\"Error: resname (\" + resname + \") not found in index file (\" + filepath_NDX_input + \")\")\n\n indices = NDX_dict[resname]\n\n count = 0\n index_new_residue = 1\n new_residue = []\n for index in indices:\n new_residue.append(index)\n count += 1\n\n if count >= atoms_per_residue:\n NDX_dict[resname_split + str(index_new_residue)] = new_residue\n\n index_new_residue += 1\n count = 0\n new_residue = []\n \n NDX_write(filepath_NDX_output, NDX_dict)\n\ndef GMX_average_ENERGY_output(filepath_GMX_ENERGY_output):\n # Returns dict containing energy selections and average\n\n output_column_template = [\"Energy\", \"Average\", \"Err.Est.\"]\n\n with open(filepath_GMX_ENERGY_output, 'rt') as file:\n state = False\n for line in file:\n line = line.strip()\n\n if state:\n if line.startswith('-'):\n continue\n columns = line.split()\n return float(columns[1]), float(columns[2])\n continue\n\n if line.startswith(\"Energy\"):\n columns = line.split()\n if columns[0:3] == output_column_template:\n state = True\n continue\n\ndef XVG_read(filepath_XVG):\n \"\"\"Returns a dictionary containing:\n\n \"x\": list of x data points\n\n \"y\": list of y data point lists,\n first index indicates data column\n\n \"title\": string\n\n \"legend\": list of strings matching the data columns in the file\n\n \"xlabel\": string\n\n \"ylabel\": string\n \"\"\"\n XVG_dict = {}\n\n x = []\n ylist = None\n legend = []\n title = \"\"\n xlabel = \"\"\n ylabel = \"\"\n\n with open(filepath_XVG, \"rt\") as file:\n for line in file:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n\n if line.startswith(\"@\"):\n splt = line.split()\n if splt[1] == \"title\":\n title = \" \".join(splt[2:])\n elif splt[1] == \"xaxis\":\n xlabel = \" \".join(splt[3:])\n elif splt[1] == \"yaxis\":\n ylabel = \" \".join(splt[3:])\n elif len(splt) > 3 and splt[2] == \"legend\":\n legend.append(\" \".join(splt[3:]))\n continue\n\n data = line.split()\n if ylist is None:\n ylist = [[] for i in range(len(data)-1)]\n\n x.append(float(data[0]))\n data = data[1:]\n for i, value in enumerate(data):\n ylist[i].append(float(value))\n\n XVG_dict[\"x\"] = x\n XVG_dict[\"y\"] = ylist\n XVG_dict[\"title\"] = title\n XVG_dict[\"legend\"] = legend\n XVG_dict[\"xlabel\"] = xlabel\n XVG_dict[\"ylabel\"] = ylabel\n\n return XVG_dict\n\ndef XVG_ymax(filepath_XVG, x_range=None, column_index=0):\n XVG_dict = XVG_read(filepath_XVG)\n x_data = XVG_dict[\"x\"]\n y_data = XVG_dict[\"y\"][column_index]\n\n if x_range:\n idx_range = (bisect_left(x_data, x_range[0]), bisect_right(x_data, x_range[1]))\n y_data = y_data[idx_range[0]:idx_range[1]]\n\n ymax = y_data[0]\n for value in y_data:\n if value > ymax:\n ymax = value\n\n return ymax\n\n","repo_name":"nvanhilten/Evo-MD_curvature_sensing","sub_path":"core/gromacs.py","file_name":"gromacs.py","file_ext":"py","file_size_in_byte":18962,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"5953949805","text":"from python_OOP import Advay\n\n\nclass Inherit(Advay):\n\n e = 500\n\n def __init__(self, j, k):\n self.j = j\n self.k = k\n Advay.__init__(self, 200, 200)\n print(\"My name is Rupesh\")\n\n def methodone(self):\n 
print(\"I am Rupesh\")\n\n def methodtwo(self):\n return self.e + self.firstNumber + self.secondNumber + self.j + self.k\n\n\nnewobj = Inherit(300, 300)\nnewobj.function()\nnewobj.methodone()\nprint(newobj.methodtwo())\n\n\n\n","repo_name":"rupeshmodak/Automation_Testing","sub_path":"Python_Basics/Inheritance.py","file_name":"Inheritance.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27412612421","text":"# https://www.meta-chart.com/share/untitled-chart-12319\n# import matplotlib\n# from matplotlib import pylab\n# import profile\n\n\ndef mains():\n data = None\n with open(\"values6.txt\") as f:\n data = f.read().splitlines()\n\n # establish min and max coordinates\n coords = []\n max_x = -5000\n min_x = 5000\n max_y = -5000\n min_y = 5000\n\n for line in data:\n j = line.split(', ')\n\n k = [int(x) for x in line.split(', ')]\n min_x = min(min_x, k[0])\n min_y = min(min_y, k[1])\n max_x = max(max_x, k[0])\n max_y = max(max_y, k[1])\n coords.append(k)\n\n width = max_x - min_x\n height = max_y - min_y\n\n # printing grid\n locations = []\n points = 0\n areas = [0 for x in range(len(coords))]\n for x in range(width):\n # grid.append([])\n for y in range(height):\n location = []\n\n for c in coords:\n absX = abs((min_x + x) - c[0])\n absY = abs((min_y + y) - c[1])\n d = absX + absY\n location.append(d)\n minDist = min(location)\n\n print(sum(location))\n locations.append(sum(location))\n if sum(location) < 10000:\n points += 1\n # else:\n # grid[y].append('*')# def idx(x,y):\n # return (x + y) * width\n\n # con = map(lambda x: abs(x - 43), grid[1])\n # print con\n # print sum(con)\n\n print(min(locations))\n print(\"points\", points)\n # largest = max(areas)\n # largestOwner = areas.index(largest)\n\n # print(largestOwner, coords[largestOwner], 'has most area: ', largest)\n\n\nmains()\n","repo_name":"jwin4740/adventOfCode2018","sub_path":"day6_2.py","file_name":"day6_2.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29251450697","text":"import pytest\n\nfrom maps_adv.export.lib.core.enum import CampaignType, CreativeType, ImageType\nfrom maps_adv.export.lib.pipeline.resolver.styles import styles_resolver\n\npytestmark = [pytest.mark.asyncio]\n\n\nasync def test_returns_expected_style_for_campaign_type_of_category(config):\n namespace = config.AVATARS_NAMESPACE\n campaign_id = 1\n campaigns = [\n dict(\n id=campaign_id,\n campaign_type=CampaignType.CATEGORY,\n creatives={\n CreativeType.ICON: dict(\n text=\"icon text\",\n position=5,\n title=\"icon title\",\n search_text=\"icon search text\",\n images=[\n dict(\n type=ImageType.CATEGORY,\n image_name=\"image-name\",\n group_id=\"group-id\",\n alias_template=\"category_{zoom}\",\n )\n ],\n )\n },\n )\n ]\n\n await styles_resolver(campaigns)\n\n creatives = campaigns[0][\"creatives\"]\n icon = creatives[CreativeType.ICON][\"style\"]\n\n assert icon == f\"{namespace}--group-id--image-name\"\n\n\n@pytest.mark.parametrize(\n \"campaign_type\", list(set(CampaignType) - {CampaignType.CATEGORY})\n)\nasync def test_no_exists_style_field_for_any_campaign_types_with_the_exception_of_category_and_pin_search( # noqa: E501\n campaign_type,\n):\n campaigns = [dict(campaign_type=campaign_type, creatives={})]\n\n await styles_resolver(campaigns)\n\n assert \"style\" not in 
campaigns[0]\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/pipeline/resolver/test_styles.py","file_name":"test_styles.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28088972575","text":"import pytest\nfrom pytest_embedded import Dut\n\n\n@pytest.mark.supported_targets\n@pytest.mark.generic\ndef test_pthread(dut: Dut) -> None:\n\n # Note: this test doesn't really confirm anything, except that threads are created\n # and stdout is not being corrupted by multiple threads printing ot it.\n dut.expect(r'Created thread 0x[\\da-f]+')\n dut.expect(r'Created larger stack thread 0x[\\da-f]+')\n dut.expect(r'Threads have exited')\n dut.expect(r'Created thread 0x[\\da-f]+ with new default config')\n dut.expect('Thread has exited')\n","repo_name":"espressif/esp-idf","sub_path":"examples/system/pthread/pytest_pthread.py","file_name":"pytest_pthread.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":11541,"dataset":"github-code","pt":"3"} +{"seq_id":"22516193726","text":"import copy\n\nimport pytest\nimport torch\n\nfrom ai_traineree.agents.sac import SACAgent\nfrom ai_traineree.types import DataSpace\nfrom conftest import deterministic_interactions, feed_agent\n\nfloat_space = DataSpace(dtype=\"float\", shape=(5,), low=-2, high=2)\naction_space = DataSpace(dtype=\"float\", shape=(4,), low=-1, high=2)\n\n\ndef test_sac_seed(float_1d_space):\n # Assign\n agent_0 = SACAgent(float_1d_space, float_1d_space, device=\"cpu\") # Reference\n agent_1 = SACAgent(float_1d_space, float_1d_space, device=\"cpu\")\n agent_2 = copy.deepcopy(agent_1)\n\n # Act\n # Make sure agents have the same networks\n zip_agent_actors = zip(agent_1.actor.layers, agent_2.actor.layers)\n zip_agent_critics = zip(agent_1.double_critic.critic_1.layers, agent_2.double_critic.critic_1.layers)\n assert all([sum(sum(l1.weight - l2.weight)) == 0 for l1, l2 in zip_agent_actors])\n assert all([sum(sum(l1.weight - l2.weight)) == 0 for l1, l2 in zip_agent_critics])\n\n agent_0.seed(32167)\n actions_0 = deterministic_interactions(agent_0)\n agent_1.seed(0)\n actions_1 = deterministic_interactions(agent_1)\n agent_2.seed(0)\n actions_2 = deterministic_interactions(agent_2)\n\n # Assert\n # First we check that there's definitely more than one type of action\n assert actions_1[0] != actions_1[1]\n assert actions_2[0] != actions_2[1]\n\n # All generated actions need to identical\n assert any(a0 != a1 for (a0, a1) in zip(actions_0, actions_1))\n for idx, (a1, a2) in enumerate(zip(actions_1, actions_2)):\n assert a1 == pytest.approx(a2, 1e-4), f\"Action mismatch on position {idx}: {a1} != {a2}\"\n\n\ndef test_sac_from_state():\n # Assign\n agent = SACAgent(float_space, action_space)\n agent_state = agent.get_state()\n\n # Act\n new_agent = SACAgent.from_state(agent_state)\n\n # Assert\n assert id(agent) != id(new_agent)\n # assert new_agent == agent\n assert isinstance(new_agent, SACAgent)\n assert new_agent.hparams == agent.hparams\n assert all([torch.all(x == y) for (x, y) in zip(agent.actor.parameters(), new_agent.actor.parameters())])\n assert all([torch.all(x == y) for (x, y) in zip(agent.policy.parameters(), new_agent.policy.parameters())])\n assert all(\n [torch.all(x == y) for (x, y) in zip(agent.double_critic.parameters(), new_agent.double_critic.parameters())]\n )\n assert all(\n [\n torch.all(x == y)\n for (x, y) in 
zip(agent.target_double_critic.parameters(), new_agent.target_double_critic.parameters())\n ]\n )\n assert new_agent.buffer == agent.buffer\n\n\ndef test_sac_from_state_network_state_none():\n # Assign\n agent = SACAgent(float_space, action_space)\n agent_state = agent.get_state()\n agent_state.network = None\n\n # Act\n new_agent = SACAgent.from_state(agent_state)\n\n # Assert\n assert id(agent) != id(new_agent)\n # assert new_agent == agent\n assert isinstance(new_agent, SACAgent)\n assert new_agent.hparams == agent.hparams\n assert new_agent.buffer == agent.buffer\n\n\ndef test_sac_from_state_buffer_state_none():\n # Assign\n agent = SACAgent(float_space, float_space)\n agent_state = agent.get_state()\n agent_state.buffer = None\n\n # Act\n new_agent = SACAgent.from_state(agent_state)\n\n # Assert\n assert id(agent) != id(new_agent)\n # assert new_agent == agent\n assert isinstance(new_agent, SACAgent)\n assert new_agent.hparams == agent.hparams\n assert all([torch.all(x == y) for (x, y) in zip(agent.actor.parameters(), new_agent.actor.parameters())])\n assert all([torch.all(x == y) for (x, y) in zip(agent.policy.parameters(), new_agent.policy.parameters())])\n assert all(\n [torch.all(x == y) for (x, y) in zip(agent.double_critic.parameters(), new_agent.double_critic.parameters())]\n )\n assert all(\n [\n torch.all(x == y)\n for (x, y) in zip(agent.target_double_critic.parameters(), new_agent.target_double_critic.parameters())\n ]\n )\n\n\ndef test_sac_from_state_one_updated():\n # Assign\n agent = SACAgent(float_space, float_space)\n feed_agent(agent, 2 * agent.batch_size) # Feed 1\n agent_state = agent.get_state()\n feed_agent(agent, 100) # Feed 2 - to make different\n\n # Act\n new_agent = SACAgent.from_state(agent_state)\n\n # Assert\n assert id(agent) != id(new_agent)\n # assert new_agent == agent\n assert isinstance(new_agent, SACAgent)\n assert new_agent.hparams == agent.hparams\n assert all([torch.any(x != y) for (x, y) in zip(agent.actor.parameters(), new_agent.actor.parameters())])\n assert all([torch.all(x != y) for (x, y) in zip(agent.policy.parameters(), new_agent.policy.parameters())])\n assert all(\n [torch.any(x != y) for (x, y) in zip(agent.double_critic.parameters(), new_agent.double_critic.parameters())]\n )\n assert all(\n [\n torch.any(x != y)\n for (x, y) in zip(agent.target_double_critic.parameters(), new_agent.target_double_critic.parameters())\n ]\n )\n assert new_agent.buffer != agent.buffer\n","repo_name":"laszukdawid/ai-traineree","sub_path":"tests/agents/test_sac.py","file_name":"test_sac.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"10322375232","text":"from pyksburden.ksburden import KSBurden\nimport os\nimport logging\nimport argparse\n\nlogging.basicConfig(level=logging.INFO)\nlg = logging.getLogger(__name__)\n\n\npars = argparse.ArgumentParser(description='KS-Burden Test')\n\npars.add_argument('--bfile', type=str, required=True,\n help='Path to plink file stem')\npars.add_argument('--pheno', type=str, required=True,\n help='Path to pheno file (no header)')\npars.add_argument('--variants', type=str, required=True,\n help='Path to variant file (no header)')\npars.add_argument('--th', type=int, default=1,\n help='Number of threads')\npars.add_argument('--tests', default='ks,burden,cmc', type=str,\n help='comma separated list of tests to run')\npars.add_argument('--iter', default=1000, type=int,\n help='max. 
number of iteration for MC')\npars.add_argument('--out', default='ksburden_results',\n type=str, help='output path')\n\nargs = pars.parse_args()\n\nif __name__ == '__main__':\n work_dir = os.getcwd()\n lg.info('Starting KS-Burden')\n lg.debug('Currently working dir: %s', work_dir)\n\n models = args.tests.split(',')\n lg.info('Running %s tests: %s', len(models), args.tests)\n\n gene_runner = KSBurden(args.bfile, args.pheno, args.variants)\n\n lg.info('Starting')\n output = gene_runner.run_models(args.th, n_iter=args.iter)\n lg.info('Finished tests')\n output.to_csv(args.out+'.tsv', sep='\\t')\n","repo_name":"rmporsch/pyksburden","sub_path":"bin/ksburden.py","file_name":"ksburden.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19081031445","text":"# -*- coding: utf-8 -*-\n\nimport regex\nfrom tldextract import extract\nimport ssl\nimport socket\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport whois\nimport datetime\nimport ipaddress\n\ndef url_having_ip(url):\n try:\n ipaddress.ip_address(url)\n return(-1)\n except:\n return(1)\n\n\ndef url_length(url):\n length=len(url)\n if(length<54):\n return -1\n elif(54<=length<=75):\n return 0\n else:\n return 1\n\n\ndef url_short(url):\n match=regex.search('bit\\.ly|goo\\.gl|shorte\\.st|go2l\\.ink|x\\.co|ow\\.ly|t\\.co|tinyurl|tr\\.im|is\\.gd|cli\\.gs|'\n 'yfrog\\.com|migre\\.me|ff\\.im|tiny\\.cc|url4\\.eu|twit\\.ac|su\\.pr|twurl\\.nl|snipurl\\.com|'\n 'short\\.to|BudURL\\.com|ping\\.fm|post\\.ly|Just\\.as|bkite\\.com|snipr\\.com|fic\\.kr|loopt\\.us|'\n 'doiop\\.com|short\\.ie|kl\\.am|wp\\.me|rubyurl\\.com|om\\.ly|to\\.ly|bit\\.do|t\\.co|lnkd\\.in|'\n 'db\\.tt|qr\\.ae|adf\\.ly|goo\\.gl|bitly\\.com|cur\\.lv|tinyurl\\.com|ow\\.ly|bit\\.ly|ity\\.im|'\n 'q\\.gs|is\\.gd|po\\.st|bc\\.vc|twitthis\\.com|u\\.to|j\\.mp|buzurl\\.com|cutt\\.us|u\\.bb|yourls\\.org|'\n 'x\\.co|prettylinkpro\\.com|scrnch\\.me|filoops\\.info|vzturl\\.com|qr\\.net|1url\\.com|tweez\\.me|v\\.gd|tr\\.im|link\\.zip\\.net',url)\n if match:\n return(-1)\n else:\n return(1)\n\ndef having_at_symbol(url):\n symbol=regex.findall(r'@',url)\n if(len(symbol)==0):\n return -1\n else:\n return 1\n\ndef doubleSlash(url):\n list=[x.start(0) for x in regex.finditer('//', url)]\n if list[len(list)-1]>6:\n return(-1)\n else:\n return(1)\n\ndef prefix_suffix(url):\n subDomain, domain, suffix = extract(url)\n if(domain.count('-')):\n return 1\n else:\n return -1\n\ndef sub_domain(url):\n subDomain, domain, suffix = extract(url)\n if(subDomain.count('.')==0):\n return -1\n elif(subDomain.count('.')==1):\n return 0\n else:\n return 1\n\ndef SSLfinal_State(url):\n try:\n#check wheather contains https\n if(regex.search('^https',url)):\n usehttps = 1\n else:\n usehttps = 0\n#getting the certificate issuer to later compare with trusted issuer\n #getting host name\n subDomain, domain, suffix = extract(url)\n host_name = domain + \".\" + suffix\n context = ssl.create_default_context()\n sct = context.wrap_socket(socket.socket(), server_hostname = host_name)\n sct.connect((host_name, 443))\n certificate = sct.getpeercert()\n issuer = dict(x[0] for x in certificate['issuer'])\n certificate_Auth = str(issuer['commonName'])\n certificate_Auth = certificate_Auth.split()\n if(certificate_Auth[0] == \"Network\" or certificate_Auth == \"Deutsche\"):\n certificate_Auth = certificate_Auth[0] + \" \" + certificate_Auth[1]\n else:\n certificate_Auth = certificate_Auth[0]\n trusted_Auth = 
['Comodo','Symantec','GoDaddy','GlobalSign','DigiCert','StartCom','Entrust','Verizon','Trustwave','Unizeto','Buypass','QuoVadis','Deutsche Telekom','Network Solutions','SwissSign','IdenTrust','Secom','TWCA','GeoTrust','Thawte','Doster','VeriSign']\n#getting age of certificate\n startingDate = str(certificate['notBefore'])\n endingDate = str(certificate['notAfter'])\n startingYear = int(startingDate.split()[3])\n endingYear = int(endingDate.split()[3])\n Age_of_certificate = endingYear-startingYear\n\n#checking final conditions\n if((usehttps==1) and (certificate_Auth in trusted_Auth) and (Age_of_certificate>=1) ):\n return -1 #legitimate\n elif((usehttps==1) and (certificate_Auth not in trusted_Auth)):\n return 0 #suspicious\n else:\n return 1 #phishing\n\n except Exception as e:\n\n return 1\n\ndef domain_registration(url):\n try:\n w = whois.whois(url)\n updated = w.updated_date\n exp = w.expiration_date\n length = (exp[0]-updated[0]).days\n if(length<=365):\n return 1\n else:\n return -1\n except:\n return 0\n\ndef favicon(url):\n #ongoing\n return 0\n\ndef port(url):\n domain = regex.findall(r\"://([^/]+)/?\", url)[0]\n if regex.match(r\"^www.\",domain):\n\t domain = domain.replace(\"www.\",\"\")\n try:\n port = domain.split(\":\")[1]\n if port:\n return(-1)\n else:\n return(1)\n except:\n return(1)\n\ndef https_token(url):\n subDomain, domain, suffix = extract(url)\n host =subDomain +'.' + domain + '.' + suffix\n if(host.count('https')): #attacker can trick by putting https in domain part\n return 1\n else:\n return -1\n\ndef request_url(url):\n try:\n subDomain, domain, suffix = extract(url)\n websiteDomain = domain\n\n opener = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(opener, 'lxml')\n imgs = soup.findAll('img', src=True)\n total = len(imgs)\n\n linked_to_same = 0\n avg =0\n for image in imgs:\n subDomain, domain, suffix = extract(image['src'])\n imageDomain = domain\n if(websiteDomain==imageDomain or imageDomain==''):\n linked_to_same = linked_to_same + 1\n vids = soup.findAll('video', src=True)\n total = total + len(vids)\n\n for video in vids:\n subDomain, domain, suffix = extract(video['src'])\n vidDomain = domain\n if(websiteDomain==vidDomain or vidDomain==''):\n linked_to_same = linked_to_same + 1\n linked_outside = total-linked_to_same\n if(total!=0):\n avg = linked_outside/total\n\n if(avg<0.22):\n return -1\n elif(0.22<=avg<=0.61):\n return 0\n else:\n return 1\n except:\n return 0\n\n\ndef url_of_anchor(url):\n try:\n subDomain, domain, suffix = extract(url)\n websiteDomain = domain\n\n opener = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(opener, 'lxml')\n anchors = soup.findAll('a', href=True)\n total = len(anchors)\n linked_to_same = 0\n avg = 0\n for anchor in anchors:\n subDomain, domain, suffix = extract(anchor['href'])\n anchorDomain = domain\n if(websiteDomain==anchorDomain or anchorDomain==''):\n linked_to_same = linked_to_same + 1\n linked_outside = total-linked_to_same\n if(total!=0):\n avg = linked_outside/total\n\n if(avg<0.31):\n return -1\n elif(0.31<=avg<=0.67):\n return 0\n else:\n return 1\n except:\n return 0\n\ndef Links_in_tags(url):\n try:\n opener = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(opener, 'lxml')\n\n no_of_meta =0\n no_of_link =0\n no_of_script =0\n anchors=0\n avg =0\n for meta in soup.find_all('meta'):\n no_of_meta = no_of_meta+1\n for link in soup.find_all('link'):\n no_of_link = no_of_link +1\n for script in soup.find_all('script'):\n no_of_script = no_of_script+1\n for anchor in 
soup.find_all('a'):\n anchors = anchors+1\n total = no_of_meta + no_of_link + no_of_script+anchors\n tags = no_of_meta + no_of_link + no_of_script\n if(total!=0):\n avg = tags/total\n\n if(avg<0.25):\n return -1\n elif(0.25<=avg<=0.81):\n return 0\n else:\n return 1\n except:\n return 0\n\ndef sfh(url):\n #ongoing\n return 0\n\ndef email_submit(url):\n try:\n opener = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(opener, 'lxml')\n if(soup.find('mailto:')):\n return 1\n else:\n return -1\n except:\n return 0\n\ndef abnormal_url(url):\n #ongoing\n return 0\n\ndef redirect(url):\n #ongoing\n return 0\n\ndef on_mouseover(url):\n #ongoing\n return 0\n\ndef rightClick(url):\n #ongoing\n return 0\n\ndef popup(url):\n #ongoing\n return 0\n\ndef iframe(url):\n #ongoing\n return 0\n\ndef age_of_domain(url):\n try:\n w = whois.whois(url)\n start_date = w.creation_date\n current_date = datetime.datetime.now()\n age =(current_date-start_date[0]).days\n if(age>=180):\n return -1\n else:\n return 1\n except Exception as e:\n #print(e)\n return 0\n\ndef dns(url):\n #ongoing\n return 0\n\ndef web_traffic(url):\n #ongoing\n return 0\n\ndef page_rank(url):\n #ongoing\n return 0\n\ndef google_index(url):\n #ongoing\n return 0\n\n\ndef links_pointing(url):\n #ongoing\n return 0\n\ndef statistical(url):\n #ongoing\n return 0\n\ndef main(url):\n\n # Converts the given URL into standard format\n if not regex.match(r\"^https?\", url):\n url = \"http://\" + url\n\n\n check = [[url_having_ip(url),url_length(url),url_short(url),having_at_symbol(url),\n doubleSlash(url),prefix_suffix(url),sub_domain(url),SSLfinal_State(url),\n domain_registration(url),favicon(url),port(url),https_token(url),request_url(url),\n url_of_anchor(url),Links_in_tags(url),sfh(url),email_submit(url),abnormal_url(url),\n redirect(url),on_mouseover(url),rightClick(url),popup(url),iframe(url),\n age_of_domain(url),dns(url),web_traffic(url),page_rank(url),google_index(url),\n links_pointing(url),statistical(url)]]\n\n\n #print(check)\n return check\n","repo_name":"guptatrisha97/Phishing-URL-Detector","sub_path":"inputScript.py","file_name":"inputScript.py","file_ext":"py","file_size_in_byte":9520,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"24620021205","text":"import pandas as pd\nimport numpy as np\nimport sys \n\n\nif __name__ == \"__main__\":\n\tyear = sys.argv[1]\n\tyear2 = year[2:]\n\tint_year = int(year2)\n\tprev_year = int_year -1\n\n\t# reading in file\n\tinput_file = 'original_data/{}-{}/RC{}U.csv'.format(prev_year, year2, year2)\n\tdf = pd.read_csv(input_file)\n\n\t# including only schools in the chicago district\n\tdf = df.loc[df[\"DISTRICT NAME\"].str.contains(\"299\") == True]\n\n\tpd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n\n\trm = [\"READ\", \"MATH\"]\n\tgr3 = pd.DataFrame()\n\tgr4 = pd.DataFrame()\n\tgr5 = pd.DataFrame()\n\tgr6 = pd.DataFrame()\n\tgr7 = pd.DataFrame()\n\tgr8 = pd.DataFrame()\n\n\tdbs = [0, 0, 0, gr3, gr4, gr5, gr6, gr7, gr8]\n\n\n\tmerged = pd.DataFrame()\n\n\tfor i in range(3, 9):\n\t\tgrade = str(i)\n\t\tif i == 3 or i==6 or i==8:\n\t\t\t#do read, write, math\n\t\t\tfor subject in rm:\n\t\t\t\tnot_meet = 'DONOTMEET GOALS GR{} {} SCHOOL'.format(grade, subject)\n\t\t\t\tmeet = 'MEET GOALS GR{} {} SCHOOL'.format(grade, subject)\n\t\t\t\texceed = 'EXCEED GOALS GR{} {} SCHOOL'.format(grade, subject)\n\n\t\t\t\tnot_title = '{}-notmeet'.format(subject)\n\t\t\t\tmet_title = '{}-meet'.format(subject)\n\t\t\t\tex_title = 
'{}-exceed'.format(subject)\n\t\t\t\t\n\t\t\t\t# populate df, only include schools that teach specific grade of interest\n\t\t\t\tdbs[i][\"SCHOOL ID\"] = df[\"SCHOOL ID R-C-D-S\"].loc[df['GRADES IN SCHOOL'].str.contains(grade)==True]\n\t\t\t\tdbs[i][\"School Name\"] = df[\"SCHOOL NAME\"].loc[df['GRADES IN SCHOOL'].str.contains(grade)==True]\n\t\t\t\tdbs[i][\"Grade\"] = i\n\t\t\t\tdbs[i][\"Year\"] = year\n\t\t\t\tdbs[i][not_title] = df[not_meet].loc[df['GRADES IN SCHOOL'].str.contains(grade)==True]\n\t\t\t\tdbs[i][met_title] = df[meet].loc[df['GRADES IN SCHOOL'].str.contains(grade)==True]\n\t\t\t\tdbs[i][ex_title] = df[exceed].loc[df['GRADES IN SCHOOL'].str.contains(grade)==True]\n\n\n\t# merge dfs, sort by school ID and grade \n\tdbs_toappend = [dbs[6], dbs[8]]\n\tmerged = dbs[3].append(dbs_toappend)\n\tmerged.sort(columns = [\"SCHOOL ID\", \"Grade\"], inplace = True)\n\n\toutput_file = 'merged_{}.csv'.format(year)\n\tmerged.to_csv(output_file, index=False)\n\n\n\n\n\n","repo_name":"cernhofer/ISBE-Report-Card","sub_path":"extract_subjects_97-98.py","file_name":"extract_subjects_97-98.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17447300379","text":"from django.shortcuts import render, redirect\nfrom django.core.mail import send_mail\nfrom listings.models import Band, Listing\nfrom listings.forms import ContactUsForm, BandForm, ItemForm\n\n\ndef band_list(request):\n bands = Band.objects.all()\n return render(request, \"listings/band_list.html\", {\"bands\": bands})\n\n\ndef band_detail(request, id):\n band = Band.objects.get(id=id)\n return render(request, \"listings/band_detail.html\", {\"band\": band})\n\n\ndef band_create(request):\n if request.method == \"POST\":\n form = BandForm(request.POST)\n if form.is_valid():\n band = form.save()\n return redirect(\"band-detail\", band.id)\n else:\n form = BandForm()\n\n return render(request, \"listings/band_create.html\", {\"form\": form})\n\n\ndef band_update(request, id):\n band = Band.objects.get(id=id)\n if request.method == \"POST\":\n form = BandForm(request.POST, instance=band)\n if form.is_valid():\n form.save()\n return redirect(\"band-detail\", band.id)\n else:\n form = BandForm(instance=band)\n\n return render(request, \"listings/band_update.html\", {\"form\": form})\n\n\ndef band_delete(request, id):\n band = Band.objects.get(id=id)\n if request.method == \"POST\":\n band.delete()\n return redirect(\"band-list\")\n\n return render(request, \"listings/band_delete.html\", {\"band\": band})\n\n\ndef about(request):\n return render(request, \"listings/about.html\")\n\n\ndef item_list(request):\n items = Listing.objects.all()\n return render(request, \"listings/item_list.html\", {\"items\": items})\n\n\ndef item_detail(request, id):\n item = Listing.objects.get(id=id)\n return render(request, \"listings/item_detail.html\", {\"item\": item})\n\n\ndef item_create(request):\n if request.method == \"POST\":\n form = ItemForm(request.POST)\n if form.is_valid():\n item = form.save()\n return redirect(\"item-detail\", item.id)\n\n else:\n form = ItemForm()\n\n return render(request, \"listings/item_create.html\", {\"form\": form})\n\n\ndef item_update(request, id):\n item = Listing.objects.get(id=id)\n if request.method == \"POST\":\n form = ItemForm(request.POST, instance=item)\n if form.is_valid():\n form.save()\n return redirect(\"item-detail\", item.id)\n else:\n form = ItemForm(instance=item)\n\n return render(request, 
\"listings/item_update.html\", {\"form\": form})\n\n\ndef item_delete(request, id):\n item = Listing.objects.get(id=id)\n if request.method == \"POST\":\n item.delete()\n return redirect(\"item-list\")\n\n return render(request, \"listings/item_delete.html\", {\"item\": item})\n\n\ndef contact(request):\n if request.method == \"POST\":\n # créer une instance de notre formulaire et le remplir\n # avec les données POST\n form = ContactUsForm(request.POST)\n\n if form.is_valid():\n send_mail(\n subject=f\"\"\"Message from {form.cleaned_data[\"name\"]\n or \"anonyme\"} via MerchEx Contact Us form\"\"\",\n message=form.cleaned_data[\"message\"],\n from_email=form.cleaned_data[\"email\"],\n recipient_list=[\"admin@merchex.xyz\"],\n )\n return redirect(\"email-sent\")\n # si le formulaire n'est pas valide, nous laissons l'exécution\n # continuer jusqu'au return\n # ci-dessous et afficher à nouveau le formulaire (avec des erreurs).\n else:\n form = ContactUsForm()\n\n return render(request, \"listings/contact.html\", {\"form\": form})\n\n\ndef contact_success(request):\n return render(request, \"listings/contact_success.html\")\n","repo_name":"julienjego/django-web-app-OCR","sub_path":"merchex/listings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23547905653","text":"import numba as nb\r\nimport numpy as np\r\n\r\nimport Util.aLENS as am\r\nimport Util.HDF5_Wrapper as h5\r\n\r\n# overwrite existing file\r\nh5filename = 'OrderLinkS.hdf5'\r\nh5.newFile(h5filename)\r\n\r\n\r\ndef calcOrder(pairs, orients):\r\n\r\n N = orients.shape[0] # number of rods\r\n Npair = pairs.shape[0] # number of pairs\r\n print(N, Npair)\r\n\r\n nbMat = am.getAdjacencyMatrixFromPairs(pairs, N)\r\n\r\n order = np.zeros((N, 2))\r\n for id in nb.prange(N):\r\n # get the column indices of the nnz in row id of nbMat\r\n nnz = nbMat.getrow(id).nonzero()\r\n neighbors = np.append(nnz[1], id)\r\n PList_local = orients[neighbors]\r\n\r\n S = am.calcNematicS(PList_local)\r\n order[id, 0] = S # nematic order S\r\n order[id, 1] = len(neighbors) # number of rods averaged\r\n\r\n return order\r\n\r\n\r\ndef calcLinkOrderS(TList, PList):\r\n centers, orients = am.calcCenterOrient(TList)\r\n pairs = PList[:, -2:]\r\n # find neighbors for each rod\r\n order = calcOrder(pairs, orients)\r\n\r\n return order\r\n\r\n\r\ndef main():\r\n SylinderFileList = am.getFileListSorted('./result*-*/SylinderAscii_*.dat')\r\n\r\n for file in SylinderFileList:\r\n frame = am.FrameAscii(file, readProtein=True, sort=True, info=True)\r\n order = calcLinkOrderS(frame.TList, frame.PList)\r\n print(order)\r\n path = am.get_basename(frame.filename)\r\n h5.saveData(h5filename, order, path, 'OrderLinkS', float)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"flatironinstitute/aLENS_analysis","sub_path":"alens_analysis/scripts/OrderLinkS.py","file_name":"OrderLinkS.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16439798381","text":"import matplotlib as plt\nimport numpy as np\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\n\nplt.use('TkAgg')\n\n#Adapted for python3 and contec data graphing from\n# https://stackoverflow.com/questions/43114508/can-a-pyqt-embedded-matplotlib-graph-be-interactive\n\nimport tkinter as tk \nfrom 
tkinter import ttk \n\nclass My_GUI:\n\n def __init__(self,master):\n self.master=master\n master.title(\"Data from Contex\")\n data = np.loadtxt(\"contec_data.csv\", skiprows = 1, delimiter=',')\n time, spo2, pulse = data[:,0], data[:,2], data[:,3]\n minutes = time/60.\n f = Figure(figsize=(8,5), dpi=100)\n ax1 = f.add_subplot(111)\n\n # plot the spO2 values in red\n color = 'tab:red'\n ax1.set_xlabel('time (minutes)')\n ax1.set_ylabel('spO2', color=color)\n ax1.plot(minutes,spo2,label='spO2',color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n ax1.set_ylim(85,100)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n # plot the heart rate values in blue\n color = 'tab:blue'\n ax2.set_ylabel('Pulse', color=color) # we already handled the x-label with ax1\n ax2.plot(minutes, pulse, color=color, label = 'pulse', picker=False)\n ax2.tick_params(axis='y', labelcolor=color, color=color)\n ax2.set_xlabel('Time (sec)')\n ax2.set_ylim(30,120)\n\n\n canvas1=FigureCanvasTkAgg(f,master)\n canvas1.draw()\n canvas1.get_tk_widget().pack(side=\"top\",fill='x',expand=True)\n f.canvas.mpl_connect('pick_event',self.onpick)\n\n toolbar=NavigationToolbar2Tk(canvas1,master)\n toolbar.update()\n toolbar.pack(side='top',fill='x')\n\n # Set \"picker=True\" in call to plot() to enable pick events\n def onpick(self,event):\n #do stuff\n print(\"My OnPick Event Worked!\")\n return True\n\nroot=tk.Tk()\ngui=My_GUI(root)\nroot.mainloop()\n","repo_name":"headrotor/contec_data","sub_path":"graph_data.py","file_name":"graph_data.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"34386394312","text":"##############################Importing modules\nimport re\nimport os \nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport tensorflow as tf\nimport keras.backend as K\nimport matplotlib.pyplot as plt\nimport glob\nimport warnings \nimport tqdm\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img\n\n\n\n#####taking note of NON GPU ENABLED DEVICES\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\nif tf.test.gpu_device_name():\n print('GPU found')\nelse:\n print(\"No GPU found\")\n \nwarnings.filterwarnings(\"ignore\")\n\nclass Preprocessing():\n def __init__(self):\n ################## DATA INFORMATION\n # Data directory of the project\n self.DATA_DIR = '/mnt/d6cf3633-27d8-4655-8a8c-7fc0daf3bb07/1/Documents/machine_learning/Pneumonia detection/Dataset/data-task1'\n\n # dataset respective directory\n self.TRAIN_DIR = os.path.join(self.DATA_DIR, \"train\")\n self.TEST_DIR = os.path.join(self.DATA_DIR, \"test\")\n self.VAL_DIR = os.path.join(self.DATA_DIR, \"val\")\n\n\n ################# HYPERPARAMETER TUNING\n self.IMAGE_MIN_DIM = 224\n self.IMAGE_MAX_DIM = 224\n self.EPOCHS = 4\n self.BATCH_SIZE = 30\n\n def plot_random_dataset_images(self) -> None:\n '''\n Arguments:None\n Purpose: helps plot our classes of images for our train,test and val datasets\n Return : None\n '''\n fig, ax = plt.subplots(2, 3, figsize=(15, 7))\n ax = ax.ravel()\n plt.tight_layout()\n\n #plotting random non-pneumonia and pneumonia images from our train, test, and val to see how much different they look to our eye\n for i, dir_ in enumerate(glob.glob(f'{self.DATA_DIR}/*')):\n ax[i].imshow(plt.imread(dir_ + '/no_pneumonia/' +next(os.walk(dir_+'/no_pneumonia/'))[2][0]))\n ax[i].set_title(f'Set: {dir_.split(\"/\")[-1]}, Condition: no_pneumonia')\n\n 
ax[i+3].imshow(plt.imread(dir_ + '/pneumonia/' +next(os.walk(dir_+'/pneumonia/'))[2][0]))\n ax[i+3].set_title(f'Set: {dir_.split(\"/\")[-1]}, Condition: pneumonia')\n\n\n def basic_descriptive_of_images(self):\n '''\n '''\n #parsing and storing datasets into variable\n non_pneumonia_train = glob.glob(f'{self.TRAIN_DIR}/no_pneumonia/*.png')\n pneumonia_train = glob.glob(f'{self.TRAIN_DIR}/no_pneumonia/*.png')\n\n\n #Basic EDA -Descriptive\n for i in glob.glob(f'{self.DATA_DIR}/*'):\n non_pneumonia_images_count = len(glob.glob(i+'/no_pneumonia/*.png'))\n pneumonia_images_count = len(glob.glob(i+'/pneumonia/*.png'))\n # print(f'{i.split(\"/\")[-1]} Dataset \\n Non-pneumonia images: {non_pneumonia_images_count} \\n pneumonia images: {pneumonia_images_count} \\n')\n\n x = ['pneumonia', 'Non-pneumonia']\n y = [pneumonia_images_count, non_pneumonia_images_count]\n plt.bar(x, y)\n plt.title(f'Set:{i.split(\"/\")[-1]} pneumonia vs non pneumonia')\n plt.ylabel('Count')\n plt.xlabel('Classes')\n plt.show()\n\n def data_augmentation(self):\n '''\n argument:None\n purpose:Augmentation expands the size of the dataset by creating a modified version of the existing training set images that helps to increase dataset variation and ultimately improve the ability of the model to predict new images.\n return: train_image_gen, test_image_gen\n '''\n train_image_gen = ImageDataGenerator(\n horizontal_flip = True,\n vertical_flip = False, \n rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.05,\n height_shift_range = 0.02,\n width_shift_range = 0.02,\n rotation_range = 3,\n fill_mode = 'nearest'\n )\n test_image_gen = ImageDataGenerator(\n rescale = 1./255\n )\n\n return train_image_gen, test_image_gen\n\n \n def dataset_splitting(self, train_image_gen, test_image_gen):\n '''\n argument:augmented train images and augmented test images\n purpose:get images batches into respective variable sets\n return: train_images, test_images, validation_images\n '''\n train_set = train_image_gen.flow_from_directory(\n self.TRAIN_DIR,\n target_size=(self.IMAGE_MIN_DIM, self.IMAGE_MAX_DIM),\n batch_size = self.BATCH_SIZE,\n class_mode = 'binary',\n # shuffle=True,\n # color_mode=\"grayscale\"\n )\n\n test_set = test_image_gen.flow_from_directory(\n self.TEST_DIR,\n target_size=(self.IMAGE_MIN_DIM, self.IMAGE_MAX_DIM),\n batch_size = self.BATCH_SIZE-10,\n class_mode = 'binary',\n # shuffle=True,\n # color_mode=\"grayscale\"\n )\n valid_set = train_image_gen.flow_from_directory(\n self.VAL_DIR,\n target_size=(self.IMAGE_MIN_DIM, self.IMAGE_MAX_DIM),\n batch_size = self.BATCH_SIZE,\n class_mode = 'binary',\n # shuffle=True,\n # color_mode=\"grayscale\"\n )\n return train_set, test_set, valid_set\n\n ","repo_name":"thoth2357/Transfer-And-NonTransfer-Learning-Models-for-Pneumonia-detection","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35995869896","text":"import copy\nimport random\nimport time\n\n\n\nclass MyPlayer():\n\n \"\"\"\n AHHHH(battle cry)\n \"\"\"\n\n \"\"\"\n Main player class for the AI.\n\n The algorithm utilizes MiniMax with Alpha-Beta pruning to look ahead certain amounts of moves, in this case, 3.\n Once reached at the leaf nodes, the heuristic functions generates a score for that particular node, that score\n gets passed back through recrusive means and further leaf node scores are generated and so on.\n\n The final output value is a {tuple} 
generated from the move function, that represents the current best move.\n \n \"\"\"\n\n def __init__(self, my_color, opponent_color, board_size=8):\n self.name = 'alimoha1'\n self.my_color = my_color\n self.opponent_color = opponent_color\n self.board_size = board_size\n self.min = float(\"-inf\")\n self.max = float(\"inf\")\n\n def move(self, board):\n\n \"\"\"\n This function runs through the allowed moves and determines the best choice.\n\n :param board: The current state of the board.\n :return: a tuple; the best possible co-ordinates to move to.\n \"\"\"\n valid_moves = self.get_all_valid_moves(self.my_color, board)\n if valid_moves == None:\n return None\n n = self.board_size\n Best_choice = [0,self.min] # ID, score\n # Scan through the legal moves available and replace Best_choice variable with the highest score possible.\n for move in valid_moves:\n choice = self.algorithm(move, board,self.my_color, 3, self.min, self.max, True)\n if choice > Best_choice[1]:\n Best_choice[1] = choice\n Best_choice[0] = move\n return Best_choice[0]\n\n\n def algorithm(self, move, board, player, depth, alpha, beta, maximizingPlayer):\n \"\"\"\n The main algorithm for \"predicting\" the future. This algorithm helps the main heuristic function by\n looking forward into the game and determining if the choice chosen is truly the best or not.\n\n :param move: {list} of the co-ordinates of the position to evaluate.\n :param board: {nested_list} of the current state of the board.\n :param player: {int} value representing a player's color.\n :param depth: {int} value representing how many moves ahead to look to.\n :param alpha: {int} or {float} value representing alpha value.\n :param beta: {int} or {float} value representing beta value.\n :param maximizingPlayer: {bool} True if maximizing player, False if not.\n :return: the score of the move given.\n \"\"\"\n if depth == 0 or not self.is_it_bounded(move[0],move[1]):\n # if algorithm has reached the leaf values, return the score.\n return self.bad_heuristic(board)\n valid_moves = self.get_all_valid_moves(player, board)\n if valid_moves == None:\n # This turn is invalid; return initial value.\n return self.bad_heuristic(board)\n\n if maximizingPlayer: # maximizing the AI\n v = self.min\n for move in valid_moves:\n boardTemp = copy.deepcopy(board)\n boardTemp = self.play_move(boardTemp, move, player)\n v = max(v, self.algorithm(move, boardTemp, player, depth - 1, alpha, beta, False)) # recursive call.\n alpha = max(alpha, v)\n if beta <= alpha:\n break\n\n return v\n\n else: # minimizing opponent\n v = self.max\n for move in valid_moves:\n boardTemp = copy.deepcopy(board)\n boardTemp = self.play_move(boardTemp, move,player)\n v = min(v, self.algorithm(move, boardTemp, player, depth - 1, alpha, beta, True))\n beta = min(beta, v)\n if beta <= alpha:\n break\n\n\n return v\n\n def bad_heuristic(self, board):\n \"\"\"\n Generates heuristic value for the given board.\n Faster but poor quality.\n :param board: current state of the board\n :return: Score\n \"\"\"\n b = self.board_size\n Score = 0\n for x in range(b):\n for y in range(b):\n if board[x][y] == self.my_color:\n # Corner check\n if [x,y] == [0,0] or [x,y] == [0,b-1] or [x,y] == [b-1,b-1] or [x,y] == [b-1,0]:\n Score += 4\n # General ownership.\n else:\n Score += 1\n\n return Score\n\n\n def good_heuristic(self, board):\n \"\"\"\n Using this heuristic crosses the time limit, as such, could not use it.\n\n The main heuristic function of the AI. 
Determines the worth of a position.\n The score calculated is relative to max player (the AI).\n Good score = better choice for AI (max)\n Bad score = better choice for Opponent (min)\n There are three factors that will be taken into account, them being:\n 1) Corners - 60%\n 2) Mobility - 35%\n 3) Parity - 5%\n The MaxScore calculates the score for the AI\n The MinSCore for the opponent\n Final score = MaxScore - MinScore\n\n :param board:\n :return: integer; the score of the position.\n \"\"\"\n\n n = self.board_size\n MaxCor, MaxMob, MaxPar = 0,0,0\n MinCor, MinMob, MinPar = 0,0,0\n Corners = [[0,0],[0,n-1],[n-1,n-1],[n-1,0]]\n for x in range(n):\n for y in range(n):\n if board[x][y] == self.my_color:\n if [x,y] in Corners:\n MaxCor += 1\n MaxPar += 1\n else:\n MaxPar += 1\n elif board[x][y] == self.opponent_color:\n if [x,y] in Corners:\n MinCor += 1\n MinPar += 1\n else:\n MinPar += 1\n else: # Empty space; check for mobility.\n if self.__is_correct_move([x, y], board, self.my_color):\n MaxMob += 1\n if self.__is_correct_move([x, y], board, self.opponent_color):\n MaxMob += 1\n\n MaxScore = (60/100)*MaxCor + (35/100)*MaxMob +(5/100)*MaxPar\n MinScore = (60/100)*MinCor + (35/100)*MinMob + (5/100)*MinPar\n\n Final_score = MaxScore - MinScore\n\n return Final_score\n\n########## Auxillary functions (for simulations & assistance) #########################################################\n\n\n def get_all_valid_moves(self, players_color, board):\n '''\n Generates a list of legal moves.\n\n :param players_color: {int} of player color, board: {list} of the current state of the board.\n :return: {list} of valid moves\n '''\n valid_moves = []\n for x in range(self.board_size):\n for y in range(self.board_size):\n if ((board[x][y] == -1) and\n self.__is_correct_move([x, y], board, players_color)):\n valid_moves.append((x, y))\n\n if len(valid_moves) <= 0:\n return None\n return valid_moves\n\n def __is_correct_move(self, move, board, players_color):\n '''\n :param move: {list} of {int} for position\n :param players_color: {int}\n :return: {bool}\n '''\n if board[move[0]][move[1]] == -1:\n dx = [-1, -1, -1, 0, 1, 1, 1, 0]\n dy = [-1, 0, 1, 1, 1, 0, -1, -1]\n for i in range(len(dx)):\n if self.__confirm_direction(move, dx[i], dy[i], players_color, board):\n return True\n\n return False\n\n def __confirm_direction(self, move, dx, dy, players_color, board):\n '''\n Looks into direction [dx,dy] to find if the move in this direction\n is correct. 
This means that first stone in the direction is oponents\n and last stone is players.\n :param move: position where the move is made [x,y]\n :param dx: x direction of the search\n :param dy: y direction of the search\n :param player: player that made the move\n :return: True if move in this direction is correct\n '''\n\n if players_color == self.my_color:\n opponents_color = self.opponent_color\n else:\n opponents_color = self.my_color\n posx = move[0] + dx\n posy = move[1] + dy\n if self.is_it_bounded(posx, posy):\n if board[posx][posy] == opponents_color:\n while self.is_it_bounded(posx, posy):\n posx += dx\n posy += dy\n if self.is_it_bounded(posx, posy):\n if board[posx][posy] == -1:\n return False\n if board[posx][posy] == players_color:\n return True\n\n return False\n\n def is_it_bounded(self, posx, posy):\n '''\n Check if position is in the limits of the board and non-zero.\n :param posx: {int}\n :param posy: {int}\n :return: {bool}\n '''\n return ((posx >= 0) and\n (posx < self.board_size) and\n (posy >= 0) and\n (posy < self.board_size))\n\n\n def play_move(self, board, move, players_color):\n '''\n :param move: {list} of {int} for position where the move is made\n :param players_color: {int} color of who made the move\n :return: {None}\n '''\n board[move[0]][move[1]] = players_color\n dx = [-1, -1, -1, 0, 1, 1, 1, 0]\n dy = [-1, 0, 1, 1, 1, 0, -1, -1]\n for i in range(len(dx)):\n if self.__confirm_direction(move, dx[i], dy[i], players_color, board):\n self.__change_stones_in_direction(board, move, dx[i], dy[i], players_color)\n return board\n\n\n def can_play(self, players_color):\n '''\n :param players_color: {int} of player color\n :return: True if there is a possible move for player\n '''\n for x in range(self.board_size):\n for y in range(self.board_size):\n if self.is_correct_move([x, y], players_color):\n return True\n\n return False\n\n\n def __change_stones_in_direction(self, board, move, dx, dy, players_color):\n '''\n :param move: position as a {list} of {int}\n :param dx: {int}\n :param dy: {int}\n :param players_color: {int} of player color\n :return: {None}\n '''\n posx = move[0]+dx\n posy = move[1]+dy\n while (not(board[posx][posy] == players_color)):\n board[posx][posy] = players_color\n posx += dx\n posy += dy\n\n\n\n\n","repo_name":"ali207715/Reversi-playing-AI-agent","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":10810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9380870017","text":"import ROOT as r\nimport random\n\nhist = r.TH1D('hist','hist',100,0,1)\nfor i in range(int(1e6)) : hist.Fill(random.gauss(0.5,0.1))\n\nx = r.RooRealVar('x','x',0,1)\n\nroohist = r.RooDataHist('roohist','roohist',r.RooArgList(x),hist)\npdf = r.RooHistPdf('pdf','pdf',r.RooArgSet(x), roohist, 0)\n\ny = r.RooRealVar('y','y',0,20)\npois = r.RooPoisson('pois','pois',y, r.RooFit.RooConst(5.5))\npois2 = r.RooPoisson('pois2','pois2',y, r.RooFit.RooConst(6))\npois3 = r.RooPoisson('pois3','pois3',y, r.RooFit.RooConst(5))\n\npl = x.frame()\npdf.plotOn(pl)\npl.Draw()\n\nraw_input()\n\nplot = y.frame()\n\npois.plotOn(plot)\npois2.plotOn(plot,r.RooFit.LineColor(r.kRed))\npois3.plotOn(plot,r.RooFit.LineColor(r.kGreen))\nplot.Draw()\n\n\n\nraw_input()\n","repo_name":"betchart/statsTA","sub_path":"unused/test/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} 
+{"seq_id":"34059478685","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n# [ 1, 2, 3, 4, 5]\n# c\n# cnt 1 2\n# ind 0\n\nclass Solution:\n def middleNode(self, head: ListNode) -> ListNode:\n # initiate\n cur = head\n cnt = 0\n # get the length of ListNode\n while cur != None:\n cnt += 1\n cur = cur.next\n # get the middle_index\n middle_index = cnt // 2\n # get the index and return middle point\n cur = head\n index = 0\n while cur != None:\n if index == middle_index:\n return cur\n cur = cur.next\n index += 1","repo_name":"TinaCXu/Leetcode","sub_path":"redo-876.py","file_name":"redo-876.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26226357396","text":"from django.shortcuts import render, HttpResponse, HttpResponseRedirect, redirect\nfrom django.contrib.auth import login, authenticate\nfrom django.urls import reverse\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom app.forms import LoginForm\nfrom app.models import Post, PostCategory\n\n# index (login form)\ndef index(request):\n # if request.session.get('user', True):\n # return redirect('app:posts')\n \n form = LoginForm()\n context = {\n 'css': [\n 'app/css/index.css'\n ],\n 'js': [\n 'app/js/index.js'\n ],\n 'form': form\n }\n\n return render(request, 'app/index.html', context) \n\n# posts\ndef posts(request):\n post_list = Post.objects.all().order_by('-created_date').values(\n 'id',\n 'title',\n 'created_date',\n 'category_id',\n 'category__display_name'\n )\n paginator = Paginator(post_list, 20)\n page = request.GET.get('page') if request.GET.get('page') else 1 \n posts = paginator.get_page(page)\n context = {\n 'css': [\n 'app/css/posts.css'\n ],\n 'js': [\n 'app/js/posts.js'\n ],\n 'breadcrumbs': [\n {\n 'display': '',\n 'url': 'app:posts',\n 'class': '',\n },\n {\n 'display': '포스트',\n 'url': '#',\n 'class': 'active',\n }\n ],\n 'posts': posts\n }\n return render(request, 'app/posts.html', context)\n\ndef post_detail(request, pk):\n post = Post.objects.values(\n 'id',\n 'author__username',\n 'title',\n 'content',\n 'category__display_name',\n 'created_date',\n ).get(id=pk)\n context = {\n 'css': [\n 'app/css/post_detail.css'\n ],\n 'js': [\n 'app/js/post_detail.js'\n ],\n 'breadcrumbs': [\n {\n 'display': '',\n 'url': 'app:posts',\n 'class': '',\n },\n {\n 'display': '포스트',\n 'url': 'app:posts',\n 'class': '',\n },\n {\n 'display': '상세',\n 'url': '#',\n 'class': 'active',\n }\n ],\n 'post': post\n }\n return render(request, 'app/post_detail.html', context)\n\ndef post_write(request):\n post_categories = PostCategory.objects.all().values('id', 'display_name')\n context = {\n 'css': [\n 'app/css/post_write.css'\n ],\n 'js': [\n 'app/js/post_write.js'\n ],\n 'breadcrumbs': [\n {\n 'display': '',\n 'url': 'app:posts',\n 'class': '',\n },\n {\n 'display': '포스트',\n 'url': 'app:posts',\n 'class': '',\n },\n {\n 'display': '글쓰기',\n 'url': '#',\n 'class': 'active',\n }\n ],\n 'post_categories': post_categories\n }\n\n return render(request, 'app/post_write.html', context)\n\n","repo_name":"leejinseok/leejinseok.io","sub_path":"back-end/myblog/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"4818815459","text":"import math\r\nimport sys\r\nfrom collections import Counter, 
defaultdict\r\nfrom typing import List\r\n\r\nSTART_TAG = \"\"\r\nEND_TAG = \"\"\r\n\r\ninput_file = \"NLP6320_POSTaggedTrainingSet-Windows.txt\"\r\nwith open(input_file) as f:\r\n lines = f.readlines()\r\n tag_bigrams = Counter()\r\n tag_unigrams = Counter()\r\n word_tag_freq = Counter()\r\n\r\n for sentence in lines:\r\n sentence = f\"{START_TAG} {sentence} {END_TAG}\"\r\n tags = []\r\n for word in sentence.split():\r\n if word:\r\n sp = word.split('_')\r\n tags.append(sp[0] if len(sp) == 1 else sp[1])\r\n \r\n unigrams = zip(*[tags[_i:] for _i in range(1)])\r\n tag_unigram_list = [\" \".join(unigram) for unigram in unigrams]\r\n\r\n bigrams = zip(*[tags[_i:] for _i in range(2)])\r\n tag_bigram_list = [\" \".join(bigram) for bigram in bigrams]\r\n \r\n word_tag = [word for word in sentence.split() if word]\r\n\r\n tag_bigrams += Counter(tag_bigram_list)\r\n tag_unigrams += Counter(tag_unigram_list)\r\n word_tag_freq += Counter(word_tag)\r\n\r\nbigram_prob = defaultdict(float)\r\nfor word_tag in word_tag_freq:\r\n s = word_tag.split(\"_\")\r\n if len(s) == 1:\r\n tag = s[0]\r\n else:\r\n tag = s[1]\r\n bigram_prob[word_tag] = word_tag_freq[word_tag] / tag_unigrams[tag]\r\n\r\ndef pos_tag(input_str):\r\n tagged_input = ''\r\n max_prob = 0\r\n max_tag = ''\r\n prev_max_tag = START_TAG\r\n prob = 1\r\n for input_word in input_str.split()[:-1]:\r\n for tag in tag_unigrams:\r\n tag_prob = bigram_prob[f\"{input_word}_{tag}\"] * tag_bigrams[f\"{prev_max_tag} {tag}\"]\r\n if tag_prob > max_prob:\r\n max_prob = tag_prob\r\n max_tag = tag\r\n prob *= max_prob\r\n tagged_input += f\"{input_word}_{max_tag} \"\r\n prev_max_tag = max_tag\r\n max_prob = 0\r\n input_word = input_str.split()[-1]\r\n for tag in tag_unigrams:\r\n tag_prob = bigram_prob[f\"{input_word}_{tag}\"] * tag_bigrams[f\"{prev_max_tag} {tag}\"] * tag_bigrams[f\"{tag} \"]\r\n if tag_prob > max_prob:\r\n max_prob = tag_prob\r\n max_tag = tag\r\n prob *= max_prob \r\n tagged_input += f\"{input_word}_{max_tag}\"\r\n print(\"Given Sentence: \", input_str)\r\n print(\"POS Tagged: \", tagged_input)\r\n\r\ninput_line = input(\"Enter the test sentence: \")\r\npos_tag(input_line)\r\n","repo_name":"Xnkr/NLP-Project","sub_path":"assignments/3/pos_tagger.py","file_name":"pos_tagger.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23426317563","text":"import cam5procs, time, sensor, colour, image\n\ndef runIt ():\n print (\"track_eye\");\n cam5procs.logLine(\"track_eye\")\n clock = time.clock()\n framecount = 0\n ledState = 0\n ledCounter = 0\n sensor.set_framesize(sensor.HQVGA)\n sensor.set_pixformat(sensor.GRAYSCALE)\n face_cascade = image.HaarCascade(\"frontalface\", stages=25)\n eyes_cascade = image.HaarCascade(\"eye\", stages=24)\n #print(face_cascade,eyes_cascade)\n while not cam5procs.receive_packet():\n ledCounter += 1\n if ((ledCounter % 5) == 0 ):\n if ( ledState == 0 ):\n ledState = 1\n cam5procs.ledShowColour([0,0,255])\n else:\n ledState = 0\n cam5procs.ledShowColour([0,0,0])\n clock.tick()\n img = sensor.snapshot() # Capture snapshot\n # Find faces.\n # Note: Lower scale factor scales-down the image more and detects smaller objects.\n # Higher threshold results in a higher detection rate, with more false positives.\n faces = img.find_features(face_cascade, threshold=0.75, scale=1.35)\n # Draw objects\n framecount +=1\n for face in faces[:4]:\n img.draw_rectangle(face)\n # Now find eyes within each face.\n # Note: Use a higher 
threshold here (more detections) and lower scale (to find small objects)\n eyes = img.find_features(eyes_cascade, threshold=0.5, scale=1.2, roi=face)\n for e in eyes[:2]:\n img.draw_rectangle(e)\n tracked = [framecount&0xff,0, 0,e[0],e[1],e[0]+e[2],e[1]+e[3]]\n #print(\"fps: \", clock.fps(),tracked)\n cam5procs.send_packet(tracked,7, cam5procs.TRK_BLOB)\n # Print FPS.\n # Note: Actual FPS is higher, streaming the FB makes it slower.\n return True\n\n\n","repo_name":"mindsensors/NXTCamView5","sub_path":"qt-creator/share/qtcreator/examples/NXTCamv5-defaults/track_eye.py","file_name":"track_eye.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"26776728789","text":"import pytest\nfrom selenium.webdriver import Chrome\n\nfrom pages.GoogleSearchPage import GoogleSearchPage\nfrom pages.GoogleResultsPage import GoogleResultPage\n\n@pytest.fixture\ndef browser():\n chromeDriver = Chrome()\n chromeDriver.implicitly_wait(10)\n yield chromeDriver\n chromeDriver.quit()\n\ndef test_search(browser):\n searchPage = GoogleSearchPage(browser)\n resultPage = GoogleResultPage(browser)\n keyword = 'test'\n\n #Display google homepage\n searchPage.load()\n searchPage.search(keyword)\n\n #verify that the result tab title contains the word \"test\"\n assert keyword in resultPage.title()\n\n #verify that the search box contain the keyword in result page\n assert keyword == resultPage.SearchInputValue()\n\n #verify that results contain at least one result having \"test' in their title\n assert resultPage.ResultCountForKeyword(keyword)>0","repo_name":"AshrakatA/Ashrakat-Hefny","sub_path":"tests/test_search_functionality.py","file_name":"test_search_functionality.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24581677869","text":"import os\nimport torch\nimport pickle\nimport collections\nfrom torch.autograd import Variable\n\nDUMMY_RELATION = 'PAD'\nSTART_RELATION = 'DUMMY_START_RELATION'\nNO_OP_RELATION = 'NO_OP'\nUNK_RELATION = 'UNK'\nDUMMY_ENTITY = 'PAD'\nNO_OP_ENTITY = 'UNK'\n\nDUMMY_RELATION_ID = 0\nSTART_RELATION_ID = 1\nNO_OP_RELATION_ID = 2\nUNK_RELATION_ID = 3\nDUMMY_ENTITY_ID = 0\nNO_OP_ENTITY_ID = 1\n\n\ndef get_train_path(data_dir):\n if 'NELL' in data_dir:\n train_path = os.path.join(data_dir, 'train.dev.large.triples')\n else:\n train_path = os.path.join(data_dir, 'train.triples')\n return train_path\n\n\ndef load_triples(data_path, entity_index_path, relation_index_path, add_reverse_relations=False,\n seen_entities=None, group_examples_by_query=False, verbose=False, inverse_triple=False):\n entity2id, _ = load_index(entity_index_path)\n relation2id, _ = load_index(relation_index_path)\n\n # for e2 in label: y[e2] = 1.0\n\n def triple2ids(e1, e2, r):\n return entity2id[e1], entity2id[e2], relation2id[r]\n\n triples = []\n inv_triples = []\n # sr2o = collections.defaultdict(set)\n if group_examples_by_query:\n triple_dict = {}\n inv_triple_dict = {}\n with open(data_path) as f:\n num_skipped = 0\n for line in f:\n e1, e2, r = line.strip().split()\n if seen_entities and (not e1 in seen_entities or not e2 in seen_entities):\n num_skipped += 1\n if verbose:\n print('Skip triple ({}) with unseen entity: {}'.format(num_skipped, line.strip()))\n continue\n if group_examples_by_query:\n e1_id, e2_id, r_id = triple2ids(e1, e2, r)\n if e1_id not in triple_dict:\n triple_dict[e1_id] = {}\n if r_id not in 
triple_dict[e1_id]:\n triple_dict[e1_id][r_id] = set()\n triple_dict[e1_id][r_id].add(e2_id)\n if add_reverse_relations:\n r_inv = '_' + r\n e2_id, e1_id, r_inv_id = triple2ids(e2, e1, r_inv)\n if e2_id not in triple_dict:\n triple_dict[e2_id] = {}\n if r_inv_id not in triple_dict[e2_id]:\n triple_dict[e2_id][r_inv_id] = set()\n triple_dict[e2_id][r_inv_id].add(e1_id)\n # To predict head entity\n if inverse_triple:\n r_inv = '_' + r\n e2_id, e1_id, r_inv_id = triple2ids(e2, e1, r_inv)\n if e2_id not in inv_triple_dict:\n inv_triple_dict[e2_id] = {}\n if r_inv_id not in inv_triple_dict[e2_id]:\n inv_triple_dict[e2_id][r_inv_id] = set()\n inv_triple_dict[e2_id][r_inv_id].add(e1_id)\n else:\n triples.append(triple2ids(e1, e2, r))\n if add_reverse_relations:\n triples.append(triple2ids(e2, e1, '_' + r))\n if inverse_triple:\n inv_triples.append(triple2ids(e2, e1, '_' + r))\n\n # sub, obj, rel = triple2ids(e1, e2, r)\n # sr2o[(sub, rel)].add(obj)\n # sub, obj, rel = triple2ids(e2, e1, '_' + r)\n # sr2o[(sub, rel)].add(obj)\n\n if group_examples_by_query:\n for e1_id in triple_dict:\n for r_id in triple_dict[e1_id]:\n triples.append((e1_id, list(triple_dict[e1_id][r_id]), r_id))\n if inverse_triple:\n for e1_id in inv_triple_dict:\n for r_id in inv_triple_dict[e1_id]:\n inv_triples.append((e1_id, list(inv_triple_dict[e1_id][r_id]), r_id))\n print('{} triples loaded from {}'.format(len(triples), data_path))\n return triples, inv_triples\n\n\ndef load_index(input_path):\n index, rev_index = {}, {}\n with open(input_path) as f:\n for i, line in enumerate(f.readlines()):\n v, _ = line.strip().split()\n index[v] = i\n rev_index[i] = v\n return index, rev_index\n\n\ndef load_seen_entities(adj_list_path, entity_index_path):\n _, id2entity = load_index(entity_index_path)\n with open(adj_list_path, 'rb') as f:\n adj_list = pickle.load(f)\n seen_entities = set()\n for e1 in adj_list:\n seen_entities.add(id2entity[e1])\n for r in adj_list[e1]:\n for e2 in adj_list[e1][r]:\n seen_entities.add(id2entity[e2])\n print('{} seen entities loaded...'.format(len(seen_entities)))\n return seen_entities\n\n\ndef format_batch(batch_data, num_labels=-1, num_tiles=1, isMul=False):\n def convert_to_binary_multi_subject(e1):\n e1_label = zeros_var_cuda([len(e1), num_labels])\n for i in range(len(e1)):\n e1_label[i][e1[i]] = 1\n return e1_label\n\n def convert_to_binary_multi_object(e2):\n e2_label = zeros_var_cuda([len(e2), num_labels])\n for i in range(len(e2)):\n e2_label[i][e2[i]] = 1\n return e2_label\n\n def tile_along_beam(v, beam_size, dim=0):\n if dim == -1:\n dim = len(v.size()) - 1\n v = v.unsqueeze(dim + 1)\n v = torch.cat([v] * beam_size, dim=dim + 1)\n new_size = []\n for i, d in enumerate(v.size()):\n if i == dim + 1:\n new_size[-1] *= d\n else:\n new_size.append(d)\n return v.view(new_size)\n\n batch_e1, batch_e2, batch_r = [], [], []\n for i in range(len(batch_data)):\n e1, e2, r = batch_data[i]\n batch_e1.append(e1)\n if isMul:\n batch_e2.append(e2[0])\n else:\n batch_e2.append(e2)\n batch_r.append(r)\n batch_e1 = var_cuda(torch.LongTensor(batch_e1), requires_grad=False)\n batch_r = var_cuda(torch.LongTensor(batch_r), requires_grad=False)\n if type(batch_e2[0]) is list:\n batch_e2 = convert_to_binary_multi_object(batch_e2)\n elif type(batch_e1[0]) is list:\n batch_e1 = convert_to_binary_multi_subject(batch_e1)\n else:\n batch_e2 = var_cuda(torch.LongTensor(batch_e2), requires_grad=False)\n # Rollout multiple times for each example\n if num_tiles > 1:\n batch_e1 = tile_along_beam(batch_e1, num_tiles)\n 
batch_r = tile_along_beam(batch_r, num_tiles)\n batch_e2 = tile_along_beam(batch_e2, num_tiles)\n return batch_e1, batch_e2, batch_r\n\n\ndef prepare_kb_envrioment(train_path, dev_path, test_path, add_reverse_relations=True):\n data_dir = os.path.dirname(train_path)\n\n def get_type(e_name):\n if e_name == DUMMY_ENTITY:\n return DUMMY_ENTITY\n if 'nell-995' in data_dir.lower():\n if '_' in e_name:\n return e_name.split('_')[1]\n else:\n return 'numerical'\n else:\n return 'entity'\n\n def hist_to_vocab(_dict):\n return sorted(sorted(_dict.items(), key=lambda x: x[0]), key=lambda x: x[1], reverse=True)\n\n # Create entity and relation indices\n entity_hist = collections.defaultdict(int)\n relation_hist = collections.defaultdict(int)\n type_hist = collections.defaultdict(int)\n with open(train_path) as f:\n train_triples = [l.strip() for l in f.readlines()]\n with open(dev_path) as f:\n dev_triples = [l.strip() for l in f.readlines()]\n with open(test_path) as f:\n test_triples = [l.strip() for l in f.readlines()]\n\n # if test_mode:\n # keep_triples = train_triples + dev_triples\n # removed_triples = test_triples\n # else:\n keep_triples = train_triples\n removed_triples = dev_triples + test_triples\n\n # Index entities and relations\n for line in set(keep_triples + removed_triples):\n e1, e2, r = line.strip().split()\n entity_hist[e1] += 1\n entity_hist[e2] += 1\n if 'nell-995' in data_dir.lower():\n t1 = e1.split('_')[1] if '_' in e1 else 'numerical'\n t2 = e2.split('_')[1] if '_' in e2 else 'numerical'\n else:\n t1 = get_type(e1)\n t2 = get_type(e2)\n type_hist[t1] += 1\n type_hist[t2] += 1\n relation_hist[r] += 1\n if add_reverse_relations:\n # inv_r = r + '_inv'\n inv_r = '_' + r\n relation_hist[inv_r] += 1\n # Save the entity and relation indices sorted by decreasing frequency\n with open(os.path.join(data_dir, 'entity2id.txt'), 'w') as o_f:\n o_f.write('{}\\t{}\\n'.format(DUMMY_ENTITY, DUMMY_ENTITY_ID))\n o_f.write('{}\\t{}\\n'.format(NO_OP_ENTITY, NO_OP_ENTITY_ID))\n for e, freq in hist_to_vocab(entity_hist):\n o_f.write('{}\\t{}\\n'.format(e, freq))\n with open(os.path.join(data_dir, 'relation2id.txt'), 'w') as o_f:\n o_f.write('{}\\t{}\\n'.format(DUMMY_RELATION, DUMMY_RELATION_ID))\n o_f.write('{}\\t{}\\n'.format(START_RELATION, START_RELATION_ID))\n o_f.write('{}\\t{}\\n'.format(NO_OP_RELATION, NO_OP_RELATION_ID))\n o_f.write('{}\\t{}\\n'.format(UNK_RELATION, UNK_RELATION_ID))\n for r, freq in hist_to_vocab(relation_hist):\n o_f.write('{}\\t{}\\n'.format(r, freq))\n with open(os.path.join(data_dir, 'type2id.txt'), 'w') as o_f:\n for t, freq in hist_to_vocab(type_hist):\n o_f.write('{}\\t{}\\n'.format(t, freq))\n print('{} entities indexed'.format(len(entity_hist)))\n print('{} relations indexed'.format(len(relation_hist)))\n print('{} types indexed'.format(len(type_hist)))\n entity2id, id2entity = load_index(os.path.join(data_dir, 'entity2id.txt'))\n relation2id, id2relation = load_index(os.path.join(data_dir, 'relation2id.txt'))\n type2id, id2type = load_index(os.path.join(data_dir, 'type2id.txt'))\n\n removed_triples = set(removed_triples)\n adj_list = collections.defaultdict(collections.defaultdict)\n entity2typeid = [0 for i in range(len(entity2id))]\n num_facts = 0\n for line in set(keep_triples):\n e1, e2, r = line.strip().split()\n triple_signature = '{}\\t{}\\t{}'.format(e1, e2, r)\n e1_id = entity2id[e1]\n e2_id = entity2id[e2]\n t1 = get_type(e1)\n t2 = get_type(e2)\n t1_id = type2id[t1]\n t2_id = type2id[t2]\n entity2typeid[e1_id] = t1_id\n entity2typeid[e2_id] = 
t2_id\n if not triple_signature in removed_triples:\n r_id = relation2id[r]\n if not r_id in adj_list[e1_id]:\n adj_list[e1_id][r_id] = set()\n if e2_id in adj_list[e1_id][r_id]:\n print('Duplicate fact: {} ({}, {}, {})!'.format(\n line.strip(), id2entity[e1_id], id2relation[r_id], id2entity[e2_id]))\n adj_list[e1_id][r_id].add(e2_id)\n num_facts += 1\n if add_reverse_relations:\n # inv_r = r + '_inv'\n inv_r = '_' + r\n inv_r_id = relation2id[inv_r]\n if not inv_r_id in adj_list[e2_id]:\n adj_list[e2_id][inv_r_id] = set([])\n if e1_id in adj_list[e2_id][inv_r_id]:\n print('Duplicate fact: {} ({}, {}, {})!'.format(\n line.strip(), id2entity[e2_id], id2relation[inv_r_id], id2entity[e1_id]))\n adj_list[e2_id][inv_r_id].add(e1_id)\n num_facts += 1\n print('{} facts processed'.format(num_facts))\n # Save adjacency list\n adj_list_path = os.path.join(data_dir, 'adj_list.pkl')\n with open(adj_list_path, 'wb') as o_f:\n pickle.dump(dict(adj_list), o_f)\n with open(os.path.join(data_dir, 'entity2typeid.pkl'), 'wb') as o_f:\n pickle.dump(entity2typeid, o_f)\n\n\ndef load_graph_data(data_dir):\n # Load indices\n entity2id, id2entity = load_index(os.path.join(data_dir, 'entity2id.txt'))\n print('Sanity check: {} entities loaded'.format(len(entity2id)))\n # type2id, id2type = load_index(os.path.join(data_dir, 'type2id.txt'))\n # print('Sanity check: {} types loaded'.format(len(type2id)))\n # with open(os.path.join(data_dir, 'entity2typeid.pkl'), 'rb') as f:\n # entity2typeid = pickle.load(f)\n relation2id, id2relation = load_index(os.path.join(data_dir, 'relation2id.txt'))\n print('Sanity check: {} relations loaded'.format(len(relation2id)))\n return entity2id, id2entity, len(entity2id), relation2id, id2relation, len(relation2id)\n\n\ndef load_all_answers(data_dir, add_reversed_edges=False):\n def add_subject(e1, e2, r, d):\n if not e2 in d:\n d[e2] = {}\n if not r in d[e2]:\n d[e2][r] = set()\n d[e2][r].add(e1)\n\n def add_object(e1, e2, r, d):\n if not e1 in d:\n d[e1] = {}\n if not r in d[e1]:\n d[e1][r] = set()\n d[e1][r].add(e2)\n\n def triple2ids(e1, e2, r):\n return entity2id[e1], entity2id[e2], relation2id[r]\n\n def get_inv_relation_id(r_id):\n return relation2id['_' + id2relation[r_id]]\n\n entity_index_path = os.path.join(data_dir, 'entity2id.txt')\n relation_index_path = os.path.join(data_dir, 'relation2id.txt')\n entity2id, _ = load_index(entity_index_path)\n relation2id, id2relation = load_index(relation_index_path)\n\n # store subjects for all (rel, object) queries and\n # objects for all (subject, rel) queries\n train_subjects, train_objects = {}, {}\n dev_subjects, dev_objects = {}, {}\n all_subjects, all_objects = {}, {}\n # include dummy examples\n de = dummy_e()\n dr = dummy_r()\n add_subject(de, de, dr, train_subjects)\n add_subject(de, de, dr, dev_subjects)\n add_subject(de, de, dr, all_subjects)\n add_object(de, de, dr, train_objects)\n add_object(de, de, dr, dev_objects)\n add_object(de, de, dr, all_objects)\n for file_name in ['train.triples', 'dev.triples', 'test.triples']:\n if 'NELL' in data_dir and file_name == 'train.triples':\n continue\n with open(os.path.join(data_dir, file_name)) as f:\n for line in f:\n e1, e2, r = line.strip().split()\n e1, e2, r = triple2ids(e1, e2, r)\n if file_name in ['train.triples']:\n add_subject(e1, e2, r, train_subjects)\n add_object(e1, e2, r, train_objects)\n if add_reversed_edges:\n add_subject(e2, e1, get_inv_relation_id(r), train_subjects)\n add_object(e2, e1, get_inv_relation_id(r), train_objects)\n if file_name in ['train.triples', 
'dev.triples']:\n add_subject(e1, e2, r, dev_subjects)\n add_object(e1, e2, r, dev_objects)\n if add_reversed_edges:\n add_subject(e2, e1, get_inv_relation_id(r), dev_subjects)\n add_object(e2, e1, get_inv_relation_id(r), dev_objects)\n add_subject(e1, e2, r, all_subjects)\n add_object(e1, e2, r, all_objects)\n if add_reversed_edges:\n add_subject(e2, e1, get_inv_relation_id(r), all_subjects)\n add_object(e2, e1, get_inv_relation_id(r), all_objects)\n return dev_objects, all_objects\n\n\ndef ones_var_cuda(s, requires_grad=False):\n return Variable(torch.ones(s), requires_grad=requires_grad).cuda()\n\n\ndef zeros_var_cuda(s, requires_grad=False):\n return Variable(torch.zeros(s), requires_grad=requires_grad).cuda()\n\n\ndef int_fill_var_cuda(s, value, requires_grad=False):\n return int_var_cuda((torch.zeros(s) + value), requires_grad=requires_grad)\n\n\ndef int_var_cuda(x, requires_grad=False):\n return Variable(x, requires_grad=requires_grad).long().cuda()\n\n\ndef var_cuda(x, requires_grad=False):\n return Variable(x, requires_grad=requires_grad).cuda()\n\n\ndef var_to_numpy(x):\n return x.data.cpu().numpy()\n\n\ndef self_edge():\n return NO_OP_RELATION_ID\n\n\ndef self_e():\n return NO_OP_ENTITY_ID\n\n\ndef dummy_r():\n return DUMMY_RELATION_ID\n\n\ndef dummy_e():\n return DUMMY_ENTITY_ID\n\n\ndef dummy_start_r():\n return START_RELATION_ID\n","repo_name":"wanghaobit/KGE-Models","sub_path":"src/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":15959,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"73448854800","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport codecs\nimport collections\nimport numpy as np\n\n\nclass Vocab:\n\n def __init__(self, token2index=None, index2token=None):\n self._token2index = token2index or {}\n self._index2token = index2token or []\n\n def feed(self, token):\n if token not in self._token2index:\n # allocate new index for this token\n index = len(self._token2index)\n self._token2index[token] = index\n self._index2token.append(token)\n\n return self._token2index[token]\n\n @property\n def size(self):\n return len(self._token2index)\n\n @property\n def token2index(self):\n return self._token2index\n\n def token(self, index):\n return self._index2token[index]\n\n def __getitem__(self, token):\n index = self.get(token)\n if index is None:\n raise KeyError(token)\n return index\n\n def get(self, token, default=None):\n return self._token2index.get(token, default)\n\n def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump((self._token2index, self._index2token), f, pickle.HIGHEST_PROTOCOL)\n\n @classmethod\n def load(cls, filename):\n with open(filename, 'rb') as f:\n token2index, index2token = pickle.load(f)\n\n return cls(token2index, index2token)\n\n\ndef load_data(data_dir, max_doc_length=10, max_sent_length=50):\n\n word_vocab = Vocab()\n word_vocab.feed(' ')\n word_vocab.feed('{')\n word_vocab.feed('}')\n\n actual_max_doc_length = 0\n\n word_tokens = collections.defaultdict(list)\n labels = collections.defaultdict(list)\n\n for fname in ('train', 'valid', 'test'):\n print('reading', fname)\n pname = os.path.join(data_dir, fname)\n for dname in os.listdir(pname):\n\n with codecs.open(os.path.join(pname, dname), 'r', 'utf-8') as f:\n lines = f.read().split('\\n\\n')\n word_doc = []\n label_doc = []\n\n for line in lines[1].split('\\n'):\n line = line.strip()\n line = line.replace('}', '').replace('{', 
'').replace('|', '')\n line = line.replace('', ' | ')\n\n sent, label = line.split('\\t\\t\\t')\n label_doc.append(label)\n sent = sent.split(' ')\n\n if len(sent) > max_sent_length - 2: # space for 'start' and 'end' words\n sent = sent[:max_sent_length-2]\n\n word_array = [word_vocab.feed(c) for c in ['{'] + sent + ['}']]\n word_doc.append(word_array)\n\n if len(word_doc) > max_doc_length:\n word_doc = word_doc[:max_doc_length]\n label_doc = label_doc[:max_doc_length]\n\n actual_max_doc_length = max(actual_max_doc_length, len(word_doc))\n\n word_tokens[fname].append(word_doc)\n labels[fname].append(label_doc)\n\n assert actual_max_doc_length <= max_doc_length\n\n print()\n print('actual longest document length is:', actual_max_doc_length)\n print('size of word vocabulary:', word_vocab.size)\n print('number of tokens in train:', len(word_tokens['train']))\n print('number of tokens in valid:', len(word_tokens['valid']))\n print('number of tokens in test:', len(word_tokens['test']))\n\n # now we know the sizes, create tensors\n word_tensors = {}\n label_tensors = {}\n for fname in ('train', 'valid', 'test'):\n word_tensors[fname] = np.zeros([len(word_tokens[fname]), actual_max_doc_length, max_sent_length], dtype=np.int32)\n label_tensors[fname] = np.zeros([len(labels[fname]), actual_max_doc_length], dtype=np.int32)\n\n for i, word_doc in enumerate(word_tokens[fname]):\n for j, word_array in enumerate(word_doc):\n word_tensors[fname][i][j][0:len(word_array)] = word_array\n\n for i, label_doc in enumerate(labels[fname]):\n label_tensors[fname][i][0:len(label_doc)] = label_doc\n\n return word_vocab, word_tensors, actual_max_doc_length, label_tensors\n\n\nclass DataReader:\n\n def __init__(self, word_tensor, label_tensor, batch_size):\n\n length = word_tensor.shape[0]\n\n doc_length = word_tensor.shape[1]\n sent_length = word_tensor.shape[2]\n\n # round down length to whole number of slices\n\n clipped_length = int(length / batch_size) * batch_size\n word_tensor = word_tensor[:clipped_length]\n label_tensor = label_tensor[:clipped_length]\n\n x_batches = word_tensor.reshape([batch_size, -1, doc_length, sent_length])\n y_batches = label_tensor.reshape([batch_size, -1, doc_length])\n\n x_batches = np.transpose(x_batches, axes=(1, 0, 2, 3))\n y_batches = np.transpose(y_batches, axes=(1, 0, 2))\n\n self._x_batches = list(x_batches)\n self._y_batches = list(y_batches)\n assert len(self._x_batches) == len(self._y_batches)\n self.length = len(self._y_batches)\n self.batch_size = batch_size\n self.max_sent_length = sent_length\n\n def iter(self):\n\n for x, y in zip(self._x_batches, self._y_batches):\n yield x, y\n\n\ndef load_data_abs(data_dir, max_doc_length=10, max_sent_length=50, max_output_length=100, use_abs=True):\n '''\n data loader for generation models\n use_abs: When it is set to True, we use the human summaries as target;\n otherwise we use the sentences labeled with 1 as target.\n '''\n\n word_vocab = Vocab()\n word_vocab.feed(' ')\n word_vocab.feed('{')\n word_vocab.feed('}')\n\n abs_vocab = Vocab()\n abs_vocab.feed(' ')\n abs_vocab.feed('{')\n abs_vocab.feed('}')\n\n actual_max_doc_length = 0\n actual_max_ext_length = 0\n actual_max_abs_length = 0\n\n word_tokens = collections.defaultdict(list)\n ext_output = collections.defaultdict(list)\n abs_output = collections.defaultdict(list)\n\n for fname in ('train', 'valid', 'test'):\n print('reading', fname)\n pname = os.path.join(data_dir, fname)\n for dname in os.listdir(pname):\n\n with codecs.open(os.path.join(pname, dname), 'r', 'utf-8') 
as f:\n lines = f.read().split('\\n\\n')\n word_doc = []\n ext_doc = []\n\n for line in lines[1].split('\\n'):\n line = line.strip()\n line = line.replace('}', '').replace('{', '').replace('|', '')\n line = line.replace('', ' | ')\n\n sent, label = line.split('\\t\\t\\t')\n sent = sent.split(' ')\n\n if len(sent) > max_sent_length - 2: # space for 'start' and 'end' words\n sent = sent[:max_sent_length-2]\n\n word_array = [word_vocab.feed(c) for c in ['{'] + sent + ['}']]\n\n word_doc.append(word_array)\n\n if label == '1':\n ext_doc.extend(word_array[1:-1])\n\n if len(word_doc) == max_doc_length:\n break\n\n actual_max_doc_length = max(actual_max_doc_length, len(word_doc))\n\n word_tokens[fname].append(word_doc)\n\n if len(ext_doc) > max_output_length - 2:\n ext_doc = ext_doc[:max_output_length-2]\n\n ext_doc = [word_vocab['{']] + ext_doc + [word_vocab['}']]\n ext_output[fname].append(ext_doc)\n\n actual_max_ext_length = max(actual_max_ext_length, len(ext_doc))\n\n abs_doc = lines[2].replace('\\n', ' ')\n abs_doc = abs_doc.split(' ')\n if len(abs_doc) > max_output_length - 2:\n abs_doc = abs_doc[:max_output_length-2]\n\n abs_doc = [abs_vocab.feed(c) for c in ['{'] + abs_doc + ['}']]\n abs_output[fname].append(abs_doc)\n\n actual_max_abs_length = max(actual_max_abs_length, len(abs_doc))\n\n assert actual_max_doc_length <= max_doc_length\n\n print()\n print('actual longest document length is:', actual_max_doc_length)\n print('size of word vocabulary:', word_vocab.size)\n print('number of tokens in train:', len(word_tokens['train']))\n print('number of tokens in valid:', len(word_tokens['valid']))\n print('number of tokens in test:', len(word_tokens['test']))\n\n # now we know the sizes, create tensors\n word_tensors = {}\n target_tensors = {}\n target_vocab = word_vocab\n actual_max_target_length = actual_max_ext_length\n\n if use_abs:\n target_vocab = abs_vocab\n actual_max_target_length = actual_max_abs_length\n\n for fname in ('train', 'valid', 'test'):\n word_tensors[fname] = np.zeros([len(word_tokens[fname]), actual_max_doc_length, max_sent_length], dtype=np.int32)\n target_tensors[fname] = np.zeros([len(ext_output[fname]), max_output_length], dtype=np.int32)\n\n for i, word_doc in enumerate(word_tokens[fname]):\n for j, word_array in enumerate(word_doc):\n word_tensors[fname][i][j][0:len(word_array)] = word_array\n\n if use_abs:\n for i, abs_doc in enumerate(abs_output[fname]):\n target_tensors[fname][i][0:len(abs_doc)] = abs_doc\n else:\n for i, ext_doc in enumerate(ext_output[fname]):\n target_tensors[fname][i][0:len(ext_doc)] = ext_doc\n\n return word_vocab, word_tensors, actual_max_doc_length, target_vocab, target_tensors, actual_max_target_length\n\n\nclass DataReader_abs:\n\n def __init__(self, word_tensor, target_tensor, batch_size):\n\n length = word_tensor.shape[0]\n\n doc_length = word_tensor.shape[1]\n sent_length = word_tensor.shape[2]\n\n output_length = target_tensor.shape[1]\n # round down length to whole number of slices\n\n clipped_length = int(length / batch_size) * batch_size\n word_tensor = word_tensor[:clipped_length]\n target_tensor = target_tensor[:clipped_length]\n\n x_batches = word_tensor.reshape([batch_size, -1, doc_length, sent_length])\n y_batches = target_tensor.reshape([batch_size, -1, output_length])\n\n x_batches = np.transpose(x_batches, axes=(1, 0, 2, 3))\n y_batches = np.transpose(y_batches, axes=(1, 0, 2))\n\n self._x_batches = list(x_batches)\n self._y_batches = list(y_batches)\n assert len(self._x_batches) == len(self._y_batches)\n self.length = 
len(self._y_batches)\n self.batch_size = batch_size\n self.max_sent_length = sent_length\n\n def iter(self):\n\n for x, y in zip(self._x_batches, self._y_batches):\n yield x, y\n\n\n\nif __name__ == '__main__':\n\n vocab, word_tensors, max_length, label_tensors = load_data('data/demo', 5, 10)\n\n count = 0\n for x, y in DataReader(word_tensors['valid'], label_tensors['valid'], 6).iter():\n count += 1\n print (x.shape, y.shape)\n if count > 0:\n break\n\n vocab, word_tensors, max_length, target_vocab, target_tensors, max_length_target = load_data_abs('data/demo', 5, 50, 150, use_abs=False)\n count = 0\n for x, y in DataReader_abs(word_tensors['valid'], target_tensors['valid'], 6).iter():\n count += 1\n print (x.shape, y.shape, max_length_target)\n if count > 0:\n break\n","repo_name":"kata-ai/indosum","sub_path":"neuralsum/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":11384,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"3"} +{"seq_id":"26542109593","text":"import logging\n\nfrom sagemaker_ssh_helper.log import SSHLog\nfrom sagemaker_ssh_helper.proxy import SSMProxy\n\nlogger = logging.getLogger('sagemaker-ssh-helper')\n\n\ndef test_sagemaker_studio(request):\n kernel_gateway_name = request.config.getini('kernel_gateway_name')\n\n studio_ids = SSHLog().get_studio_kgw_ssm_instance_ids(kernel_gateway_name, retry=30)\n studio_id = studio_ids[0]\n\n ssm_proxy = SSMProxy(10022)\n ssm_proxy.connect_to_ssm_instance(studio_id)\n\n services_running = ssm_proxy.run_command_with_output(\"sm-ssh-ide status\")\n services_running = services_running.decode('latin1')\n\n ssm_proxy.disconnect()\n\n assert \"127.0.0.1:8889\" in services_running\n assert \"127.0.0.1:5901\" in services_running\n","repo_name":"Krishlogic/sagemaker","sub_path":"tests/test_ide.py","file_name":"test_ide.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31065280035","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/2/11 16:30\n# @Author : jinjie\n# @File : HJ17_坐标移动.py\n\n\"\"\"\n开发一个坐标计算工具, A表示向左移动,D表示向右移动,W表示向上移动,S表示向下移动。从(0,0)点开始移动,从输入字符串里面读取一些坐标,并将最终输入结果输出到输出文件里面。\n输入:\n合法坐标为A(或者D或者W或者S) + 数字(两位以内)\n坐标之间以;分隔。\n非法坐标点需要进行丢弃。如AA10; A1A; $%$; YAD; 等。\n下面是一个简单的例子 如:\nA10;S20;W10;D30;X;A1A;B10A11;;A10;\n\"\"\"\n\"\"\"\n处理过程:\n起点(0,0)\n+ A10 = (-10,0)\n+ S20 = (-10,-20)\n+ W10 = (-10,-10)\n+ D30 = (20,-10)\n+ x = 无效\n+ A1A = 无效\n+ B10A11 = 无效\n+ 一个空 不影响\n+ A10 = (10,-10)\n结果 (10, -10)\n\"\"\"\n\nstr_in = list(input().split(\";\"))\n\nx,y = 0,0\n\nfor i in str_in:\n i_direct = i[:1]\n i_step = i[1:]\n #print(i_direct,i_step)\n if i_step.isdigit():\n i_step = int(i_step)\n if i_direct == \"A\":\n x -= i_step\n elif i_direct == \"D\":\n x += i_step\n elif i_direct == \"W\":\n y += i_step\n elif i_direct == \"S\":\n y -= i_step\n else:\n continue\n\nprint(f\"{x},{y}\")","repo_name":"JiinJie/Python_algorithm_exercise","sub_path":"算法汇总/牛客hw/2_中等/HJ17_坐标移动.py","file_name":"HJ17_坐标移动.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36294403235","text":"# Write a Python program to add two objects if both objects are an integer type.\n\ndef add_numbers(a, b):\n if not (isinstance(a, int) and isinstance(b, int)):\n raise TypeError(\"Inputs must be integers\")\n return a+b\n\n\nprint(add_numbers(10, 20))\n\n\n\n# Write a Python program to display your details 
like name, age, address in three different lines.\ndef personal_details():\n name, age = \"rumon\", 19\n address = \"Bangladesh , india, Napel\"\n print(\"Name: {}\\nAge:{}\\nAddress: {}\".format(name, age, address))\npersonal_details()\n\n\n\n\n# Write a Python program to solve (x + y) * (x + y).\nx, y = 2, 3\n\nresult = x * x + 2 * x * y + y * y\nprint(\"(({} + {}) ^ 2)={} \".format(x, y, result))\n\n\n# Write a Python program to compute the distance between the points (x1, y1) and (x2, y2).\n\nimport math\np1 = [4, 0]\np2 = [6, 6]\ndistance = math.sqrt(((p1[0]-p2[0])**2)+((p1[1]-p2[1])**2))\n\nprint(distance)\n","repo_name":"islamrumon/PythonProblems","sub_path":"FindNumbers.py","file_name":"FindNumbers.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7056096601","text":"# https://adventofcode.com/2020/day/13\nfrom __future__ import print_function\nfrom math import prod, gcd\n\n\ndef find_bus(timestamp, buses):\n mind_wait = min(buses)[0]\n for t in range(mind_wait):\n for b, _ in buses:\n if (timestamp + t) % b == 0:\n return t * b\n return None\n\n\n# from https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6\ndef mul_inv(a, b):\n b0 = b\n x0, x1 = 0, 1\n if b == 1:\n return 1\n while a > 1:\n q = a // b\n a, b = b, a % b\n x0, x1 = x1 - q * x0, x0\n if x1 < 0:\n x1 += b0\n return x1\n\n\n# parameter: list of (module, remainder) tuples\ndef chinese_remainder(factors):\n \"\"\"\n The Chinese Remainder Theorem supposes that given the\n integers n_1...n_k that are pairwise co-prime, then for\n any sequence of integers a_1...a_k there exists an integer\n x that solves the system of linear congruences:\n\n x === a_1 (mod n_1)\n ...\n x === a_k (mod n_k)\n \"\"\"\n assert gcd(*[n for n, _ in factors]) == 1\n prd = prod(n for n, _ in factors)\n suma = 0\n for n_i, rem_i in factors:\n p = prd // n_i\n suma += rem_i * mul_inv(p, n_i) * p\n return suma % prd\n\n\ndef process(data):\n # part 1\n result = find_bus(*data)\n print(\"part 1:\", result)\n # part 2\n factors = [(b, (b - id) % b) for b, id in data[1]]\n result = chinese_remainder(factors)\n print(\"part 2:\", result)\n\n\ndef load_data(fileobj):\n lines = fileobj.readlines()\n timestamp = int(lines[0])\n buses = [(int(wait), i)\n for i, wait in enumerate(lines[1].rstrip().split(','))\n if wait != 'x']\n return timestamp, buses\n\n\ndef main(file):\n print(file)\n with open(file) as f:\n process(load_data(f))\n\n\nif __name__ == \"__main__\":\n main(\"test.txt\")\n main(\"input.txt\")\n","repo_name":"PetrPrazak/AdventOfCode","sub_path":"2020/13/aoc2020_13.py","file_name":"aoc2020_13.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22548772310","text":"\"\"\"\nChicago Heatmap Generation Main Module.\n\nCrawls crime data from database table and converts it to a heatmap.\n\"\"\"\n\n__author__ = 'Udo Schlegel'\n\nimport numpy as np\nimport pandas as pd\n\nfrom sqlalchemy import create_engine, Column, String, Integer\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.dialects.postgresql import BYTEA\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.schema import UniqueConstraint\n\n\nDEBUG = False\n\nengine = create_engine(\n \"\", # Database connection\n connect_args={'sslmode': 'require'}\n)\n\nBase = declarative_base(engine)\n\n\ndef get_table(table_name):\n \"\"\"Crawl the table 
information from the DB.\"\"\"\n class Crimes(Base):\n \"\"\"Class for the Crimes table.\"\"\"\n\n __tablename__ = table_name\n __table_args__ = {\"autoload\": True}\n\n return Crimes\n\n\ndef loadSession():\n \"\"\"Load database session to crawl table info.\"\"\"\n Session = sessionmaker(bind=engine)\n session = Session()\n return session\n\n\ndef create_image_table(name):\n \"\"\"Crawl the table information from the DB.\"\"\"\n class CrimesImages(Base):\n \"\"\"Class for the Crimes Images table.\"\"\"\n\n __tablename__ = name\n\n id = Column(Integer, primary_key=True)\n image = Column(BYTEA)\n woy = Column(Integer)\n year = Column(Integer)\n crime = Column(String)\n\n __table_args__ = (UniqueConstraint('woy', 'year', 'crime'),)\n\n return CrimesImages\n\n\nsession = loadSession()\n\ntable_name = \"chicago_all_crimes_images_weeks_grayscale\"\n\nCrimesImages_ = create_image_table(table_name)\nBase.metadata.create_all(engine)\n\n\ndef createHeatmaps(table):\n \"\"\"\n Create heatmaps with the data from the table.\n\n Args:\n table - table to get the data from\n \"\"\"\n print(\"-\"*5, \"Started\", \"-\"*5)\n\n sql_ = \"\"\"\n SELECT\n to_char(date, 'YYYY') as year,\n to_char(date, 'MM') as month,\n to_char(date, 'DD') as day,\n to_char(date, 'WW') as woy,\n x_coordinate,\n y_coordinate\n FROM {}\n ORDER BY year, month, day;\"\"\".format(table)\n df = pd.read_sql(sql_, engine)\n\n df = df[(df != 0.0).all(1)]\n df = df.dropna()\n\n data_x_max = df[\"x_coordinate\"].max()\n data_x_min = df[\"x_coordinate\"].min()\n\n data_y_max = df[\"y_coordinate\"].max()\n data_y_min = df[\"y_coordinate\"].min()\n\n granularity = 32\n image_array = np.zeros((granularity, granularity))\n\n old_woy = -1\n old_year = -1\n\n count = 0\n\n for index, row in df.iterrows():\n\n if old_woy == -1:\n old_woy = row[\"woy\"]\n if old_year == -1:\n old_year = row[\"year\"]\n if old_woy != row[\"woy\"]:\n if DEBUG:\n print(\"Week of the year:\", row[\"woy\"], \"/\", row[\"year\"],\n \"Amount of crimes:\", count)\n\n image_array = np.ravel(image_array)\n\n new_image = CrimesImages_(image=image_array, woy=old_woy,\n year=old_year)\n session.add(new_image)\n\n image_array = np.zeros((granularity, granularity))\n old_year = row[\"year\"]\n old_woy = row[\"woy\"]\n count = 0\n\n pos_n = int((row[\"x_coordinate\"] - data_x_min) /\n (data_x_max - data_x_min) * (granularity - 1))\n pos_m = int((row[\"y_coordinate\"] - data_y_min) /\n (data_y_max - data_y_min) * (granularity - 1))\n\n image_array[pos_n, pos_m] = image_array[pos_n, pos_m] + 1\n count += 1\n\n print(\"-\"*5, \"Finished\", \"-\"*5)\n\n session.commit()\n\n\nif __name__ == \"__main__\":\n\n table = \"chicago_crimes\"\n createHeatmaps(table)\n","repo_name":"merowech/Towards-Crime-Forecasting-Using-Deep-Learning","sub_path":"generateHeatmaps/create_chicago_heatmaps.py","file_name":"create_chicago_heatmaps.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8804058605","text":"import threading, time\n\nsem = threading.BoundedSemaphore(value=5)\n\ndef access(number):\n print(f\"{number} is accessing\")\n sem.acquire()\n print(f\"{number} acquired access\")\n time.sleep(2)\n print(f\"{number} is now releasing\")\n sem.release()\n\nfor num in range(1,11):\n t= threading.Thread(target=access, args=(num,))\n t.start()\n 
time.sleep(1)\n","repo_name":"iand66/scratch","sub_path":"semaphore.py","file_name":"semaphore.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71514077202","text":"from qgl2.qgl2 import qgl2decl, qreg, pulse, sequence, QRegister\n\nfrom qgl2.qgl1 import MEAS, Id, X, AC, clifford_seq, Y90m, X90\n\n# RB uses Cliffords. Importing all of QGL.Cliffords forces QGL2\n# to read more of QGL than we want.\n# So instead we redo (some of) Cliffords in QGL2 with QGL1 stub for clifford_seq\n# TODO: Redo more of Cliffords as QGL2. See issues 51-53 that make this hard.\n\n# from QGL.Cliffords import clifford_seq, clifford_mat, inverse_clifford\nfrom qgl2.Cliffords import clifford_mat, inverse_clifford\n\nfrom qgl2.basic_sequences.helpers import create_cal_seqs, measConcurrently, cal_descriptor, delay_descriptor\n\nfrom qgl2.util import init\n\nfrom csv import reader\nfrom functools import reduce\nimport operator\nimport os\n\nimport numpy as np\n\n# This is not pulses, just math; so this is just the original\ndef create_RB_seqs(numQubits, lengths, repeats=32, interleaveGate=None, recovery=True):\n \"\"\"\n Create a list of lists of Clifford gates to implement RB.\n \"\"\"\n # This function seems to just give lists of numbers. So leave it intact\n\n # Sample output:\n # create_RB_seqs(2,[3,3]):\n # [[8697,5492,1910], [num, num, num], .... for 64 such lists of 3 #s\n # create_RB_seqs(2,[2,2]) gives 64 lists of 2 #s\n # create_RB_seqs(1,[2,2]) gives 64 lists of 2 #s that are smaller\n # create_RB_seqs(1,[2,3]) gives 64 lists, 32 of 2 #s, followed by 32 of 3 #s\n\n if numQubits == 1:\n cliffGroupSize = 24\n elif numQubits == 2:\n cliffGroupSize = 11520\n else:\n raise Exception(\"Can only handle one or two qubits.\")\n\n # Create lists of random integers \n # Subtract one from length for recovery gate\n seqs = []\n for length in lengths:\n seqs += np.random.randint(0, cliffGroupSize,\n size=(repeats, length-1)).tolist()\n\n # Possibly inject the interleaved gate\n if interleaveGate:\n newSeqs = []\n for seq in seqs:\n newSeqs.append(np.vstack((np.array(\n seq, dtype=np.int), interleaveGate*np.ones(\n len(seq), dtype=np.int))).flatten(order='F').tolist())\n seqs = newSeqs\n\n if recovery:\n # Calculate the recovery gate\n for seq in seqs:\n if len(seq) == 1:\n mat = clifford_mat(seq[0], numQubits)\n else:\n mat = reduce(lambda x,y: np.dot(y,x), [clifford_mat(c, numQubits) for c in seq])\n seq.append(inverse_clifford(mat))\n\n return seqs\n\n@qgl2decl\ndef SingleQubitRB(qubit: qreg, seqs, purity=False, add_cals=True):\n \"\"\"\n Single qubit randomized benchmarking using 90 and 180 generators. 
\n\n Parameters\n ----------\n qubit : logical channel to implement sequence (LogicalChannel)\n seqs : list of lists of Clifford group integers\n \"\"\"\n # Original:\n # seqsBis = []\n # op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)]\n # for ct in range(3 if purity else 1):\n # for seq in seqs:\n # seqsBis.append(reduce(operator.add, [clifford_seq(c, qubit) for c in seq]))\n\n # #append tomography pulse to measure purity\n # seqsBis[-1].append(op[ct])\n # # Add the measurement to all sequences\n # seqsBis[-1].append(MEAS(qubit))\n\n # # Tack on the calibration sequences\n # if add_cals:\n # seqsBis += create_cal_seqs((qubit,), 2)\n\n# axis_descriptor = [{\n# 'name': 'length',\n# 'unit': None,\n# 'points': list(map(len, seqs)),\n# 'partition': 1\n# }]\n# metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs})\n\n\n # seqs are result of create_RB_seqs: list of lists of integers\n # clifford_seq() returns a sequence of pulses itself\n # [clifford_seq() for c in seq]\n # gives a list of len(seq) sequences\n # reduce(operator.add, listOfSequences)\n # gives a single sequence of all the elements in listOfSequences\n # So the first for loop creates a single list of sequences\n\n ops = [Id]\n if purity:\n ops = [Id, Y90m, X90]\n for op in ops:\n for seq in seqs:\n init(qubit)\n for c in seq:\n clifford_seq(c, qubit)\n # append tomography pulse to measure purity\n if op == Id:\n op(qubit, length=0)\n else:\n op(qubit)\n # append measurement\n MEAS(qubit)\n\n if add_cals:\n # Tack on calibration sequences\n create_cal_seqs(qubit, 2)\n\n@qgl2decl\ndef TwoQubitRB(q1: qreg, q2: qreg, seqs, add_cals=True):\n \"\"\"\n Two qubit randomized benchmarking using 90 and 180 single qubit generators and ZX90 \n\n Parameters\n ----------\n q1,q2 : logical channels to implement sequence (LogicalChannel)\n seqs : list of lists of Clifford group integers\n \"\"\"\n\n # Original:\n # seqsBis = []\n # for seq in seqs:\n # seqsBis.append(reduce(operator.add, [clifford_seq(c, q1, q2) for c in seq]))\n\n # # Add the measurement to all sequences\n # for seq in seqsBis:\n # seq.append(MEAS(q1, q2))\n\n # # Tack on the calibration sequences\n # if add_cals:\n # seqsBis += create_cal_seqs((q1,q2), 2)\n\n# axis_descriptor = [{\n# 'name': 'length',\n# 'unit': None,\n# 'points': list(map(len, seqs)),\n# 'partition': 1\n# }]\n# metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, suffix = suffix, extra_meta = {'sequences':seqs})\n\n bothQs = QRegister(q1, q2)\n for seq in seqs:\n init(bothQs)\n for c in seq:\n clifford_seq(c, q2, q1)\n measConcurrently(bothQs)\n\n # Tack on the calibration sequences\n if add_cals:\n create_cal_seqs((q1, q2), 2)\n\n@qgl2decl\ndef SingleQubitRB_AC(qubit: qreg, seqs, purity=False, add_cals=True):\n \"\"\"\n Single qubit randomized benchmarking using atomic Clifford pulses. 
\n\n Parameters\n ----------\n qubit : logical channel to implement sequence (LogicalChannel)\n seqFile : file containing sequence strings\n \"\"\"\n\n # Original:\n # seqsBis = []\n # op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)]\n # for ct in range(3 if purity else 1):\n # for seq in seqs:\n # seqsBis.append([AC(qubit, c) for c in seq])\n # #append tomography pulse to measure purity\n # seqsBis[-1].append(op[ct])\n # #append measurement\n # seqsBis[-1].append(MEAS(qubit))\n\n # # Tack on the calibration sequences\n # if add_cals:\n # seqsBis += create_cal_seqs((qubit,), 2)\n\n# axis_descriptor = [{\n# 'name': 'length',\n# 'unit': None,\n# 'points': list(map(len, seqs)),\n# 'partition': 1\n# }]\n# metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs})\n\n # AC() gives a single pulse on qubit\n\n op = [Id, Y90m, X90]\n for ct in range(3 if purity else 1):\n for seq in seqs:\n init(qubit)\n for c in seq:\n AC(qubit, c)\n # append tomography pulse to measure purity\n # See issue #: 53\n func = op[ct]\n if ct == 0:\n func(qubit, length=0)\n else:\n func(qubit)\n # append measurement\n MEAS(qubit)\n\n if add_cals:\n # Tack on calibration sequences\n create_cal_seqs(qubit, 2)\n\n@qgl2decl\ndef SingleQubitRB_DiAC(qubit, seqs, compiled=True, purity=False, add_cals=True):\n \"\"\"Single qubit randomized benchmarking using diatomic Clifford pulses.\n\n Parameters\n ----------\n qubit : logical channel to implement sequence (LogicalChannel)\n seqFile : file containing sequence strings\n compiled : if True, compile Z90(m)-X90-Z90(m) to Y90(m) pulses\n purity : measure ,, of final state, to measure purity. See J.J.\n Wallman et al., New J. Phys. 17, 113020 (2015)\n \"\"\"\n op = [Id, Y90m, X90]\n for ct in range(3 if purity else 1):\n for seq in seqs:\n init(qubit)\n for c in seq:\n DiAC(qubit, c, compiled)\n # append tomography pulse to measure purity\n if ct == 0:\n op[ct](qubit, length=0)\n else:\n op[ct](qubit)\n # append measurement\n MEAS(qubit)\n\n# axis_descriptor = [{\n# 'name': 'length',\n# 'unit': None,\n# 'points': list(map(len, seqs)),\n# 'partition': 1\n# }]\n\n # Tack on the calibration sequences\n if add_cals:\n for _ in range(2):\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n for _ in range(2):\n init(qubit)\n X90(qubit)\n X90(qubit)\n MEAS(qubit)\n# axis_descriptor.append(cal_descriptor((qubit,), 2))\n\n# metafile = compile_to_hardware(seqsBis, 'RB_DiAC/RB_DiAC', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs})\n\n\n@qgl2decl\ndef doACPulse(qubit: qreg, cliffNum) -> sequence:\n if cliffNum == 24:\n cliffNum = 0\n if cliffNum > 24:\n raise Exception(\"Max cliffNum 24, got %d\" % cliffNum)\n AC(qubit, cliffNum)\n\n@qgl2decl\ndef getPulseSeq(qubit: qreg, pulseSeqStr) -> sequence:\n init(qubit)\n for pulseStr in pulseSeqStr:\n doACPulse(qubit, int(pulseStr))\n MEAS(qubit)\n\ndef readSeqFile(seqFile):\n pulseSeqStrs = []\n if seqFile is None:\n raise ValueError(\"Missing file of sequences\")\n # This next block as QGL2 gives warnings\n with open(seqFile, 'r') as FID:\n fileReader = reader(FID)\n # each line in the file is a sequence, but I don't know how many that is\n for pulseSeqStr in fileReader:\n pulseSeqStrs.append(pulseSeqStr)\n return pulseSeqStrs\n\n@qgl2decl\ndef SingleQubitIRB_AC(qubit: qreg, seqFile):\n \"\"\"\n Single qubit interleaved randomized benchmarking using atomic Clifford pulses. 
\n\n Parameters\n ----------\n qubit : logical channel to implement sequence (LogicalChannel)\n seqFile : file containing sequence strings\n \"\"\"\n\n # Original:\n # # Setup a pulse library\n # pulseLib = [AC(qubit, cliffNum) for cliffNum in range(24)]\n # pulseLib.append(pulseLib[0])\n # measBlock = MEAS(qubit)\n\n # with open(seqFile,'r') as FID:\n # fileReader = reader(FID)\n # seqs = []\n # for pulseSeqStr in fileReader:\n # seq = []\n # for pulseStr in pulseSeqStr:\n # seq.append(pulseLib[int(pulseStr)])\n # seq.append(measBlock)\n # seqs.append(seq)\n\n # # Hack for limited APS waveform memory and break it up into multiple files\n # # We've shuffled the sequences so that we loop through each gate length on the inner loop\n # numRandomizations = 36\n # for ct in range(numRandomizations):\n # chunk = seqs[ct::numRandomizations]\n # chunk1 = chunk[::2]\n # chunk2 = chunk[1::2]\n # # Tack on the calibration scalings\n # chunk1 += [[Id(qubit), measBlock], [X(qubit), measBlock]]\n # fileNames = compile_to_hardware(chunk1, 'RB/RB', suffix='_{0}'.format(2*ct+1))\n # chunk2 += [[Id(qubit), measBlock], [X(qubit), measBlock]]\n # fileNames = compile_to_hardware(chunk2, 'RB/RB', suffix='_{0}'.format(2*ct+2))\n\n # Issue #54:\n # FIXME: If the helper here raises an error, we get a QGL2 compiler error like:\n # error: ast eval failure [readSeqFile(seqFile)]: type Missing file of sequences\n # error: failed to evaluate assignment [pulseSeqStrs___ass_006 = readSeqFile(seqFile)]\n pulseSeqStrs = readSeqFile(seqFile)\n numSeqs = len(pulseSeqStrs)\n\n # Hack for limited APS waveform memory and break it up into multiple files\n # We've shuffled the sequences so that we loop through each gate length on the inner loop\n numRandomizations = 36\n fileNames = []\n for ct in range(numRandomizations):\n doCt = ct\n isOne = True\n while doCt < numSeqs:\n getPulseSeq(qubit, pulseSeqStrs[doCt])\n\n # Tack on calibration scalings\n if isOne:\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n init(qubit)\n X(qubit)\n MEAS(qubit)\n else:\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n init(qubit)\n X(qubit)\n MEAS(qubit)\n\n # Now write these sequences\n # FIXME: Then magically get the sequences here....\n # This needs to get refactored....\n # We need to split creating seqs from c_to_h\n# fileNames = compile_to_hardware([], 'RB/RB',\n# suffix='_{0}'.format(2*ct+1+1*(not\n# isOne)),\n# qgl2=True)\n\n doCt += numRandomizations\n isOne = not isOne\n\n# NOTE: This one not expected to work\n@qgl2decl\ndef SingleQubitRBT(qubit: qreg, seqFileDir, analyzedPulse: pulse, add_cals=True):\n \"\"\"\tSingle qubit randomized benchmarking tomography using atomic Clifford pulses.\n\n This relies on specific sequence files and is here for historical purposes only.\n\n Parameters\n ----------\n qubit : logical channel to implement sequence (LogicalChannel)\n seqFile : file containing sequence strings\n analyzedPulse : specific pulse to analyze\n \"\"\"\n\n # Original:\n # # Setup a pulse library\n # pulseLib = [AC(qubit, cliffNum) for cliffNum in range(24)]\n # pulseLib.append(analyzedPulse)\n # measBlock = MEAS(qubit)\n\n # seqs = []\n # for ct in range(10):\n # fileName = 'RBT_Seqs_fast_{0}_F1.txt'.format(ct+1)\n # tmpSeqs = []\n # with open(os.path.join(seqFileDir, fileName),'r') as FID:\n # fileReader = reader(FID)\n # for pulseSeqStr in fileReader:\n # seq = []\n # for pulseStr in pulseSeqStr:\n # seq.append(pulseLib[int(pulseStr)-1])\n # seq.append(measBlock)\n # tmpSeqs.append(seq)\n # seqs += tmpSeqs[:12]*12 + tmpSeqs[12:-12] + 
tmpSeqs[-12:]*12\n\n # seqsPerFile = 100\n # numFiles = len(seqs)//seqsPerFile\n\n # for ct in range(numFiles):\n # chunk = seqs[ct*seqsPerFile:(ct+1)*seqsPerFile]\n # # Tack on the calibration scalings\n # if add_cals:\n # numCals = 4\n # chunk += [[Id(qubit), measBlock]]*numCals + [[X(qubit), measBlock]]*numCals\n # fileNames = compile_to_hardware(chunk, 'RBT/RBT', suffix='_{0}'.format(ct+1))\n\n pulseSeqStrs = []\n for ct in range(10):\n fileName = 'RBT_Seqs_fast_{0}_F1.txt'.format(ct+1)\n tmpSeqs = []\n with open(os.path.join(seqFileDir, fileName),'r') as FID:\n fileReader = reader(FID)\n for pulseSeqStr in fileReader:\n tmpSeqs.append(pulseSeqStr)\n pulseSeqStrs = tmpSeqs[:12]*12 + tmpSeqs[12:-12] + tmpSeqs[-12:]*12\n\n numSeqs = len(pulseSeqStrs)\n seqsPerFile = 100\n numFiles = numSeqs//seqsPerFile\n numCals = 4\n\n for ct in range(numFiles):\n for s in range(seqsPerFile):\n init(qubit)\n seqStr = pulseSeqStrs[ct*seqsPerFile+s]\n getPulseSeq(qubit, seqStr)\n if add_cals:\n # Add numCals calibration scalings\n for _ in range(numCals):\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n\n init(qubit)\n X(qubit)\n MEAS(qubit)\n# # FIXME: Then magically get the sequences here....\n# # This needs to get refactored....\n# # We need to split creating seqs from c_to_h\n# fileNames = compile_to_hardware([], 'RBT/RBT',\n# suffix='_{0}'.format(ct+1), qgl2=True)\n\n@qgl2decl\ndef SimultaneousRB_AC(qubits: qreg, seqs, add_cals=True):\n \"\"\"\n Simultaneous randomized benchmarking on multiple qubits using atomic Clifford pulses. \n\n Parameters\n ----------\n qubits : QRegister of logical channels to implement seqs on\n seqs : a tuple of sequences created for each qubit in qubits\n\n Example\n -------\n >>> q1 = QubitFactory('q1')\n >>> q2 = QubitFactory('q2')\n >>> seqs1 = create_RB_seqs(1, [2, 4, 8, 16])\n >>> seqs2 = create_RB_seqs(1, [2, 4, 8, 16])\n >>> qr = QRegister(q1, q2)\n >>> SimultaneousRB_AC(qr, (seqs1, seqs2))\n \"\"\"\n # Original:\n # seqsBis = []\n # for seq in zip(*seqs):\n # seqsBis.append([reduce(operator.__mul__, [AC(q,c) for q,c in zip(qubits,\n # pulseNums)]) for pulseNums in zip(*seq)])\n\n # # Add the measurement to all sequences\n # for seq in seqsBis:\n # seq.append(reduce(operator.mul, [MEAS(q) for q in qubits]))\n\n# axis_descriptor = [{\n# 'name': 'length',\n# 'unit': None,\n# 'points': list(map(len, seqs)),\n# 'partition': 1\n# }]\n\n # # Tack on the calibration sequences\n # if add_cals:\n # seqsBis += create_cal_seqs((qubits), 2)\n # axis_descriptor.append(cal_descriptor((qubits), 2))\n\n # metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs})\n\n for seq in zip(*seqs):\n # Start sequence\n init(qubits)\n for pulseNums in zip(*seq):\n Barrier(qubits)\n for q, c in zip(qubits, pulseNums):\n AC(q, c)\n # Measure at end of each sequence\n measConcurrently(qubits)\n\n if add_cals:\n # Tack on calibration\n create_cal_seqs(qubits, 2)\n\n# A main for running the sequences here with some typical argument values\n# Here it runs all of them; could do a parse_args like main.py\ndef main():\n from pyqgl2.qreg import QRegister\n import pyqgl2.test_cl\n from pyqgl2.main import compile_function, qgl2_compile_to_hardware\n import numpy as np\n import random\n\n toHW = True\n plotPulses = False\n pyqgl2.test_cl.create_default_channelLibrary(toHW, True)\n\n# # To turn on verbose logging in compile_function\n# from pyqgl2.ast_util import NodeError\n# from pyqgl2.debugmsg import DebugMsg\n# NodeError.MUTE_ERR_LEVEL = 
NodeError.NODE_ERROR_NONE\n# DebugMsg.set_level(0)\n\n # Now compile the QGL2 to produce the function that would generate the expected sequence.\n # Supply the path to the QGL2, the main function in that file, and a list of the args to that function.\n # Can optionally supply saveOutput=True to save the qgl1.py\n # file,\n # and intermediate_output=\"path-to-output-file\" to save\n # intermediate products\n\n # Pass in QRegister(s) NOT real Qubits\n q1 = QRegister(\"q1\")\n q2 = QRegister(\"q2\")\n qr = QRegister(q1, q2)\n\n # FIXME: See issue #44: Must supply all args to qgl2main for now\n\n # Functions here have some extra code to run before running the compiled QGL2,\n # so define functions for those; random number seeding\n\n def beforeSingleRB():\n np.random.seed(20152606) # set seed for create_RB_seqs()\n random.seed(20152606) # set seed for random.choice()\n # SingleQubitRB(q1, create_RB_seqs(1, 2**np.arange(1,7)))\n\n # The original unit test had this comment:\n \"\"\" Fails on APS1, APS2, and Tek7000 due to:\n File \"QGL/PatternUtils.py\", line 129, in add_gate_pulses\n if has_gate(chan) and not pulse.isZero and not (chan.gate_chan\n AttributeError: 'CompositePulse' object has no attribute 'isZero'\n \"\"\"\n def beforeTwoRB():\n np.random.seed(20152606) # set seed for create_RB_seqs()\n # TwoQubitRB(q2, q1, create_RB_seqs(2, [2, 4, 8, 16, 32], repeats=16))\n\n def beforeSimRBAC():\n np.random.seed(20151709) # set seed for create_RB_seqs\n #seqs1 = create_RB_seqs(1, 2**np.arange(1,7))\n #seqs2 = create_RB_seqs(1, 2**np.arange(1,7))\n # SimultaneousRB_AC((q1, q2), (seqs1, seqs2))\n\n def beforeSingleRBAC():\n np.random.seed(20152606) # set seed for create_RB_seqs\n # SingleQubitRB_AC(q1,create_RB_seqs(1, 2**np.arange(1,7)))\n\n# FIXME: Add test of SingleQubitRB_DiAC\n\n sqCSeqs = create_RB_seqs(1, 2**np.arange(1,7))\n tqCSeqs = create_RB_seqs(2, [2, 4, 8, 16, 32], repeats=16)\n simCSeqs = (sqCSeqs, sqCSeqs)\n tAddCals = True\n def getSingleQubitRBAD(seqs, add_cals):\n ad = [{\n 'name': 'length',\n 'unit': None,\n 'points': list(map(len, seqs)),\n 'partition': 1\n }]\n if add_cals:\n ad.append(cal_descriptor(('qubit',), 2))\n return ad\n\n def getTwoQubitRBAD(seqs, add_cals):\n axis_descriptor = [{\n 'name': 'length',\n 'unit': None,\n 'points': list(map(len, seqs)),\n 'partition': 1\n }]\n if add_cals:\n axis_descriptor.append(cal_descriptor(('q1', 'q2'), 2))\n return axis_descriptor\n\n def getSingleQubitRBAC_AD(seqs, add_cals):\n axis_descriptor = [{\n 'name': 'length',\n 'unit': None,\n 'points': list(map(len, seqs)),\n 'partition': 1\n }]\n if add_cals:\n axis_descriptor.append(cal_descriptor(('qubit',), 2))\n return axis_descriptor\n\n def getSimRBACAD(seqs, add_cals, qubits):\n axis_descriptor = [{\n 'name': 'length',\n 'unit': None,\n 'points': list(map(len, seqs)),\n 'partition': 1\n }]\n if add_cals:\n axis_descriptor.append(cal_descriptor((qubits), 2))\n return axis_descriptor\n\n# FIXME: SingleQubitIRB_AC filenames are more complex\n# FIXME: SingleQubitRBT has complex suffix it should pass to compile_to_hardware\n\n# for func, args, label, beforeFunc, axisDesc, cseqs in [(\"SingleQubitRB\", (q1, sqCSeqs), \"RB\", beforeSingleRB, getSingleQubitRBAD(sqCSeqs, tAddCals), sqCSeqs),\n# (\"TwoQubitRB\", (q1, q2, tqCSeqs), \"RB\", beforeTwoRB, getTwoQubitRBAD(tqCSeqs, tAddCals), tqCSeqs),\n# (\"SingleQubitRB_AC\", (q1,sqCSeqs), \"RB\", beforeSingleRBAC, getSingleQubitRBAC_AD(sqCSeqs, tAddCals), sqCSeqs),\n# (\"SimultaneousRB_AC\", (qr, simCSeqs), \"RB\", beforeSimRBAC, 
getSimRBACAD(simCSeqs, tAddCals, qr), simCSeqs),\n# (\"SingleQubitIRB_AC\", (q1,''), \"RB\", None, None, None),\n# Comment says this next one relies on a specific file, so don't bother running\n# (\"SingleQubitRBT\", (q1,'', fixmePulse), \"RBT\", None, None, None),\n# ]:\n\n for func, args, label, beforeFunc, axisDesc, cseqs in [(\"SingleQubitRB\", (q1, sqCSeqs, False, tAddCals), \"RB\", beforeSingleRB, getSingleQubitRBAD(sqCSeqs, tAddCals), sqCSeqs),\n (\"TwoQubitRB\", (q1, q2, tqCSeqs, tAddCals), \"RB\", beforeTwoRB, getTwoQubitRBAD(tqCSeqs, tAddCals), tqCSeqs),\n (\"SingleQubitRB_AC\", (q1,sqCSeqs, False, tAddCals), \"RB\", beforeSingleRBAC, getSingleQubitRBAC_AD(sqCSeqs, tAddCals), sqCSeqs),\n# Warning: This next one is slow....\n (\"SimultaneousRB_AC\", (qr, simCSeqs, tAddCals), \"RB\", beforeSimRBAC, getSimRBACAD(simCSeqs, tAddCals, qr), simCSeqs),\n# This next one relies on a file of sequence strings, which I don't have\n# (\"SingleQubitIRB_AC\", (q1,None), \"RB\", None, None, None),\n# Comment says this next one relies on a specific file, so don't bother running\n# # (\"SingleQubitRBT\", (q1,'', fixmePulse, True), \"RBT\", None, None, None),\n ]:\n\n print(f\"\\nRun {func}...\")\n\n # This is typically setting random seed\n if beforeFunc is not None:\n beforeFunc()\n\n # Here we know the function is in the current file\n # You could use os.path.dirname(os.path.realpath(__file)) to find files relative to this script,\n # Or os.getcwd() to get files relative to where you ran from. Or always use absolute paths.\n resFunc = compile_function(__file__, func, args)\n # Run the QGL2. Note that the generated function takes no arguments itself\n seq = resFunc()\n if toHW:\n import QGL\n print(f\"Compiling {func} sequences to hardware\\n\")\n # QGL.Compiler.set_log_level()\n em = None\n if cseqs:\n em = {'sequences':cseqs}\n fileNames = qgl2_compile_to_hardware(seq, filename=f'{label}/{label}', axis_descriptor=axisDesc, extra_meta = em)\n print(f\"Compiled sequences; metafile = {fileNames}\")\n if plotPulses:\n from QGL.PulseSequencePlotter import plot_pulse_files\n # FIXME: As called, this returns a graphical object to display\n plot_pulse_files(fileNames)\n else:\n print(f\"\\nGenerated {func} sequences:\\n\")\n from QGL.Scheduler import schedule\n\n scheduled_seq = schedule(seq)\n from IPython.lib.pretty import pretty\n print(pretty(scheduled_seq))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BBN-Q/pyqgl2","sub_path":"src/python/qgl2/basic_sequences/RB.py","file_name":"RB.py","file_ext":"py","file_size_in_byte":25219,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"11640736159","text":"\"\"\"\nLeetcode 973: K Closest Points to Origin\nhttps://leetcode.com/problems/k-closest-points-to-origin/\n\"\"\"\nfrom typing import List\nfrom math import sqrt\nfrom heapq import heapify, heappop, nsmallest\n\n\ndef kClosest(points: List[List[int]], k: int) -> List[List[int]]:\n \"\"\"\n Time: O(klogn)\n Space: O(k)\n Approach: Use a min heap to keep track of the k closest points.\n - Use nsmallest to get the k closest points.\n \"\"\"\n def get_distance(coordinates: List[List[int]]):\n \"\"\"\n - Get the distance of a point from the origin.\n \"\"\"\n x, y = coordinates\n return sqrt(pow(x, 2) + pow(y, 2))\n return nsmallest(k, points, key=get_distance)\n\n\ndef kClosestTwo(points: List[List[int]], k: int) -> List[List[int]]:\n \"\"\"\n - Detailed approach\n - Time: O(klogn)\n - Space: O(n)\n - Approach: Use a min heap to keep 
track of the k closest points.\n - Push all points onto the heap.\n - Pop k times from the heap.\n \"\"\"\n res, min_heap = [], []\n for x, y in points:\n distance = sqrt(pow(x, 2) + pow(y, 2))\n min_heap.append([distance, x, y])\n heapify(min_heap)\n\n while k:\n _, x, y = heappop(min_heap)\n res.append([x, y])\n k -= 1\n return res\n","repo_name":"ngugimuchangi/coding_challenges","sub_path":"leetcode/heap_and_priority_queues/kth_closest_point_to_origin.py","file_name":"kth_closest_point_to_origin.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20326186643","text":"import random\r\nfrom pyvis.network import Network\r\nfrom constants import *\r\nfrom PIL import Image, ImageColor, ImageDraw\r\n\r\n\r\ndef random_genome() -> str:\r\n \"\"\"Create random genome\"\"\"\r\n return \" \".join(\r\n \"%06x\" % random.randrange(16 ** GENE_LENGTH) for _ in range(GENOME_LENGTH)\r\n )\r\n\r\n\r\ndef is_in_world(x: int, y: int) -> bool:\r\n \"\"\"Return true if the given coordinates are within the boundaries of the world\"\"\"\r\n return (0 <= y < WORLD_HEIGHT) and (0 <= x < WORLD_WIDTH)\r\n\r\n\r\ndef get_brain_visualization(neurons: dict[str, dict]) -> Network:\r\n \"\"\"Return a pyvis Network of the neurons\"\"\"\r\n net = Network(bgcolor=\"#222222\", font_color=\"#dddddd\", directed=True)\r\n net.toggle_physics(True)\r\n net.set_edge_smooth(\"dynamic\")\r\n\r\n for t in neurons:\r\n for n in neurons[t]:\r\n net.add_node(n, color=NEURON_COLORS[t])\r\n for t in [\"sensory\", \"internal\"]:\r\n for n in neurons[t]:\r\n for cc in neurons[t][n].connected_to:\r\n x = int((cc[\"weight\"] + MAX_WEIGHT) * 255 / (MAX_WEIGHT * 2))\r\n net.add_edge(\r\n n,\r\n cc[\"neuron\"].name,\r\n color=(\"#%02x%02x%02x\" % (255 - x, x, 0)),\r\n width=4,\r\n )\r\n net.set_options(\r\n \"\"\"\r\n var options = {\r\n \"physics\": {\r\n \"barnesHut\": {\r\n \"centralGravity\": 0.2,\r\n \"springLength\": 250,\r\n \"springConstant\": 0.03,\r\n \"damping\": 0.5\r\n },\r\n \"maxVelocity\": 25,\r\n \"minVelocity\": 0.75\r\n }\r\n }\r\n \"\"\"\r\n )\r\n\r\n return net\r\n\r\n\r\ndef draw_world(world) -> Image:\r\n \"\"\"Return an image of the world\"\"\"\r\n im = Image.new(\"RGB\", (IMAGE_WIDTH, IMAGE_HEIGHT), (255, 255, 255))\r\n gridx = IMAGE_WIDTH // WORLD_WIDTH\r\n gridy = IMAGE_HEIGHT // WORLD_HEIGHT\r\n draw = ImageDraw.Draw(im)\r\n for y in range(WORLD_HEIGHT):\r\n for x in range(WORLD_WIDTH):\r\n if world[y][x] != 0:\r\n draw.rectangle(\r\n (gridx * x, gridy * y, gridx * (x + 1), gridy * (y + 1)),\r\n fill=ImageColor.getcolor(world[y][x].color, \"RGB\"),\r\n )\r\n return im\r\n","repo_name":"Harrelix/biosim4py","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"28972454658","text":"from pygsheets import authorize\nfrom pygsheets.cell import Cell\nfrom pygsheets.client import Client\nfrom pygsheets.spreadsheet import Spreadsheet\nfrom pygsheets.worksheet import Worksheet\n\n\nclass GoogleSheet:\n def __init__(\n self,\n credential_file: str = \"\",\n googlesheet_file_key: str = \"\",\n ) -> None:\n self.credential_file = credential_file\n self.googlesheet_file_key = googlesheet_file_key\n\n def search_subscription(\n self,\n data: str,\n search_col: int = 1,\n balance_col: int = 4,\n end_date_col: int = 5,\n ) -> (list[str | int]):\n gc: Client = 
authorize(service_account_file=self.credential_file)\n sh: Spreadsheet = gc.open_by_key(self.googlesheet_file_key)\n wk1: Worksheet = sh.sheet1\n try:\n find_cell: Cell = wk1.find(\n data, matchEntireCell=True, cols=(search_col, search_col)\n )[0]\n except Exception:\n return []\n find_cell_row = find_cell.row\n end_date = wk1.get_value((find_cell_row, end_date_col))\n balance = wk1.get_value((find_cell_row, balance_col))\n return [str(end_date), int(balance)]\n","repo_name":"redboo/python-telegram-bots","sub_path":"0001/googlesheet.py","file_name":"googlesheet.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29027736847","text":"from decimal import Decimal\nfrom math import isclose\n\nimport pytest\n\nfrom staff.lib.testing import RoomFactory, FloorFactory\n\nfrom staff.workspace_management.usage_report import bc_usage_model, offices_area\nfrom staff.workspace_management.tests.factories import (\n BusinessUnitFactory,\n OfficeAreaFactory,\n RoomSharePieFactory,\n ShareFactory,\n)\n\n\n@pytest.mark.django_db\ndef test_bc_usage_model_for_single_bc():\n # given\n office_area = OfficeAreaFactory(office_area=Decimal(1000))\n office = office_area.office\n\n bu1 = BusinessUnitFactory()\n bu2 = BusinessUnitFactory()\n\n room1 = RoomFactory(floor=FloorFactory(office=office))\n room1_share_pie = RoomSharePieFactory(room=room1, room_area=Decimal(300))\n ShareFactory(room_share_pie=room1_share_pie, business_unit=bu1, share_value=Decimal(66.6666))\n ShareFactory(room_share_pie=room1_share_pie, business_unit=bu2, share_value=Decimal(33.3333))\n\n room2 = RoomFactory(floor=FloorFactory(office=office))\n room2_share_pie = RoomSharePieFactory(room=room2, room_area=Decimal(400))\n ShareFactory(room_share_pie=room2_share_pie, business_unit=bu1, share_value=Decimal(50))\n ShareFactory(room_share_pie=room2_share_pie, business_unit=bu2, share_value=Decimal(25))\n\n # when\n result = list(bc_usage_model(offices_area()))\n\n # then\n assert len(result) == 1\n row = result[0]\n assert row.office_name == office.name\n assert row.office_name_en == office.name_en\n assert row.bc_rent_area == office_area.office_area\n assert row.work_area == room1_share_pie.room_area + room2_share_pie.room_area\n expected_public_area = office_area.office_area - (room1_share_pie.room_area + room2_share_pie.room_area)\n assert row.public_area == expected_public_area\n expected_bu_work_area = room1_share_pie.room_area + room2_share_pie.room_area * Decimal(0.75)\n assert isclose(row.bu_work_area, expected_bu_work_area, abs_tol=0.001)\n expected_free_area = office_area.office_area - expected_public_area - expected_bu_work_area\n assert isclose(row.free_area, expected_free_area, abs_tol=0.001)\n\n\n@pytest.mark.django_db\ndef test_bc_usage_model_do_not_fall_on_empty_shares():\n # given\n office_area = OfficeAreaFactory(office_area=Decimal(1000))\n office = office_area.office\n room1 = RoomFactory(floor=FloorFactory(office=office))\n room1_share_pie = RoomSharePieFactory(room=room1, room_area=Decimal(300))\n\n # when\n result = list(bc_usage_model(offices_area()))\n\n # then\n assert len(result) == 1\n row = result[0]\n assert row.office_name == office.name\n assert row.office_name_en == office.name_en\n assert row.bc_rent_area == office_area.office_area\n assert row.work_area == room1_share_pie.room_area\n expected_public_area = office_area.office_area - room1_share_pie.room_area\n assert row.public_area == expected_public_area\n 
assert row.bu_work_area == 0\n assert row.free_area == room1_share_pie.room_area\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/test_bc_usage_model.py","file_name":"test_bc_usage_model.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12011358984","text":"import rpc_pb2 as ln\nimport rpc_pb2_grpc as lnrpc\nimport grpc\nimport os\nimport codecs\n\nwith open(os.path.expanduser('/Users/paulcote/Downloads/stuff/admin.macaroon'), 'rb') as f:\n\n\tmacaroon_bytes = f.read()\n\tmacaroon = codecs.encode(macaroon_bytes, 'hex')\n\nos.environ[\"GRPC_SSL_CIPHER_SUITES\"] = 'HIGH+ECDSA'\n\ncert = open(os.path.expanduser('/Users/paulcote/Downloads/stuff/tls.cert'), 'rb').read()\ncreds = grpc.ssl_channel_credentials(cert)\nchannel = grpc.secure_channel('72.137.117.210:10009', creds)\nstub = lnrpc.LightningStub(channel)\n\nsatoshi_amt=1000\n\nln_request = stub.AddInvoice(ln.Invoice(value=satoshi_amt,memo=\"Test\"), metadata=[('macaroon', macaroon)])\n\nln_response=[]\nln_response.insert(0,str(ln_request.payment_request))\nln_response.insert(1,ln_request.r_hash)\nln_response[1] = codecs.encode(ln_response[1], 'base64')\nln_response[1] = ln_response[1].decode('utf-8')\n\nprint(ln_response)","repo_name":"TheRebelOfBabylon/obilisk","sub_path":"tests/misc_tests/test_lnd.py","file_name":"test_lnd.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17294500313","text":"# https://leetcode.com/problems/find-peak-element/\n\nclass Solution(object):\n def findPeakElement(self, nums):\n l, r = 1, len(nums)\n nums.append(-float('inf'))\n nums.insert(0, -float('inf'))\n while l < r:\n mid = (l + r) // 2\n if nums[mid] > nums[mid-1] and nums[mid] > nums[mid+1]:\n return mid-1\n if nums[mid+1] > nums[mid]:\n l = mid + 1\n else:\n r = mid\n return l-1\n\n ","repo_name":"eldor-galiev/LeetcodeTasks","sub_path":"Linked List/160.py","file_name":"160.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33895821771","text":"# 부분수열의 합\n\n# n, s=5, 0\n# nums=[-7, -3, -2, 5, 8]\n\nn, s=map(int, input().split())\nnums=list(map(int, input().split()))\n\ndef dfs(n, s, nums, check, depth, hap):\n ans=0\n if depth>0 and hap==s:\n ans+=1\n if depth==n:\n return ans\n for i in range(n):\n if check[i]: continue\n check[i]=True\n ans+=dfs(n, s, nums, check, depth+1, hap+nums[i])\n for j in range(i+1, n):\n check[j]=False\n return ans\n\ncheck=[False]*n\nans=dfs(n, s, nums, check, 0, 0)\nprint(ans)","repo_name":"Greek-and-Roman-God/Athena","sub_path":"codingtest/week12/sum_of_partial_sequence.py","file_name":"sum_of_partial_sequence.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23177578641","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import clone_model\n\n\ndef params_conversion_weights(weights):\n shapes = [w.shape for w in weights]\n flatten_dim = [np.multiply(*s) if len(s) > 1 else s[0] for s in shapes]\n\n ind = np.concatenate([w.flatten() for w in weights]).reshape(1, -1)\n params = {\n 'shapes': shapes,\n 'flatten_dim': flatten_dim\n }\n return ind, params\n\n\ndef reconstruct_weights(ind, params):\n shapes, flatten_dim = params['shapes'], 
params['flatten_dim']\n reconstruct = []\n ind = ind.reshape(-1, )\n flatten_dim = np.cumsum(flatten_dim)\n flatten_dim = np.insert(flatten_dim, 0, 0)\n for i in range(len(shapes)):\n reconstruct.append(ind[flatten_dim[i]:flatten_dim[i + 1]].reshape(shapes[i]))\n\n return reconstruct\n\n\ndef get_last_layer_weights(model, layer_name='moo_layer'):\n relevant_layers = [l for l in model.layers if layer_name in l.name]\n if len(relevant_layers) > 1:\n raise Exception('More than one layer found')\n else:\n last_layer = relevant_layers[0]\n return last_layer.get_weights(), last_layer\n\n\ndef get_moo_layer(model, layer_name='moo_layer'):\n relevant_layers = []\n for i, l in enumerate(model.layers):\n if layer_name in l.name:\n relevant_layers.append((i, l))\n if len(relevant_layers) > 1:\n raise Exception('More than one layer found')\n else:\n i, last_layer = relevant_layers[0]\n # return {'weights': last_layer.get_weights(), 'ix': i, 'last_layer': last_layer}\n return last_layer.get_weights(), last_layer\n\n\ndef batch_array(arr, batch_size=None):\n if batch_size is None:\n return [arr]\n\n batches = []\n for i in range((arr.shape[0] // batch_size) + 1):\n batches.append(arr[i * batch_size:min(i * batch_size + batch_size, arr.shape[0]), Ellipsis])\n\n return batches\n\n\ndef batch_from_list_or_array(input_, batch_size=None):\n if isinstance(input_, list):\n batched_arrs = [batch_array(arr, batch_size) for arr in input_]\n batched = []\n for i in range(len(batched_arrs[0])):\n batched.append([batch[i] for batch in batched_arrs])\n else:\n batched = batch_array(input_, batch_size)\n return batched\n\n\ndef predict_from_batches(model,\n batches,\n to_numpy=True,\n concat_output=True,\n use_gpu=True):\n with tf.device('/device:GPU:0' if use_gpu else \"/cpu:0\"):\n outputs = []\n for batch in batches:\n pred = model(batch)\n if isinstance(pred, list):\n outputs.append([(out.numpy() if to_numpy else out) for out in pred])\n else:\n outputs.append(pred.numpy() if to_numpy else pred)\n\n if concat_output:\n if isinstance(outputs[0], list):\n concat_outputs = []\n for i in range(len(outputs[0])):\n concat_outputs.append(np.concatenate([out[i] for out in outputs], axis=0) if to_numpy\n else tf.concat([out[i] for out in outputs], axis=0))\n else:\n concat_outputs = np.concatenate(outputs, axis=0) if to_numpy else tf.concat(outputs, axis=0)\n return concat_outputs\n else:\n return outputs\n\n\ndef get_one_output_model(model, output_layer_name):\n return tf.keras.Model(inputs=model.inputs,\n outputs=model.get_layer(output_layer_name).output)\n\n\ndef split_model(model, intermediate_layers):\n base_model = tf.keras.Model(inputs=model.inputs,\n outputs=[model.get_layer(l).output for l in intermediate_layers])\n\n trainable_model = tf.keras.Model(inputs=[model.get_layer(l).output for l in intermediate_layers],\n outputs=model.outputs, )\n\n base_model.compile()\n trainable_model.compile()\n return {'base_model': base_model,\n 'trainable_model': trainable_model}\n","repo_name":"samlopezruiz/stochastic-directed-search-moo","sub_path":"src/sds/nn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30199767241","text":"from google.cloud import storage\nfrom google.cloud import bigquery\nfrom google.cloud.exceptions import NotFound\nimport os\nimport json\nfrom datetime import datetime\n\n\nclass CentralConfig:\n def __init__(self, project, config_bucket, config_file, date_query, 
run_date):\n self.project = project\n self.config_bucket = config_bucket\n self.config_file = config_file\n self.date_query = date_query\n self.run_date = run_date\n self._bucket = storage.Client(self.project).get_bucket(self.config_bucket)\n\n def get_full_conf(self):\n env_config = self._get_env_specific_config()\n if self.run_date == 'Current':\n env_config['run_date'] = datetime.now().strftime(\"%Y-%m-%d\")\n else:\n env_config['run_date'] = self.run_date\n row = self._get_date_values(env_config['run_date'])\n env_config['ace_date'] = row.get(\"ace_date\").strftime(\"%Y-%m-%d\")\n env_config['wers_date'] = row.get(\"wers_date\").strftime(\"%Y-%m-%d\")\n env_config['wips_date'] = row.get(\"wips_date\").strftime(\"%Y-%m-%d\")\n env_config['cmms3_date'] = row.get(\"cmms3_date\").strftime(\"%Y-%m-%d\")\n return env_config\n\n def _get_date_values(self, run_date):\n try:\n result = self._run_query(run_date)\n rows = [row for row in result]\n return rows[0] # Result only has 1\n except IndexError:\n print(\"Index error in initial dag loading or don't have ace/wips/wers data for current date\")\n\n def _run_query(self, run_date):\n try:\n raw_conf = self._get_env_specific_config()\n client = bigquery.Client(raw_conf['project'])\n query_bytes = self._download_central_file(self.date_query)\n query_text = query_bytes.decode(\"utf-8\").format(partition_date=run_date,\n project=raw_conf['project'],\n loading_dataset=raw_conf['bigquery']['loading_dataset'])\n\n query_job = client.query(query=query_text)\n return query_job.result()\n except NotFound as Error:\n print(f\"Table Not found{Error}\")\n\n def _get_env_specific_config(self):\n env = os.getenv(\"APP_ENV\", \"development\") or \"development\"\n env_config = self._parse_central_file(self.config_file)[env]\n if self.run_date == 'Current':\n env_config['run_date'] = datetime.now().strftime(\"%Y-%m-%d\")\n else:\n env_config['run_date'] = self.run_date\n return env_config\n\n def _parse_central_file(self, file):\n parsed_file = json.loads(self._download_central_file(file))\n return parsed_file\n\n def _download_central_file(self, file):\n blob = self._bucket.get_blob(file)\n file = blob.download_as_bytes()\n return file\n","repo_name":"atanejajlr/vehicle-sim-practice","sub_path":"sharedlib/helpers/central_config.py","file_name":"central_config.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70201291601","text":"#!/usr/bin/python3\n\"\"\"\nBase models\n\"\"\"\nimport uuid\nfrom datetime import datetime\nfrom models import storage\n\n\nclass BaseModel:\n \"\"\"\n BaseModel that defines all common attributes/methods for other classes\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialising instance\n \"\"\"\n if kwargs is not None and len(kwargs) != 0:\n if '__class__' in kwargs:\n del kwargs['__class__']\n kwargs['created_at'] = datetime.fromisoformat(kwargs['created_at'])\n kwargs['updated_at'] = datetime.fromisoformat(kwargs['updated_at'])\n self.__dict__.update(kwargs)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n storage.new(self)\n\n def __str__(self):\n \"\"\"The string representation of the instance\"\"\"\n return \"[{}] ({}) {}\".format(\n type(self).__name__, self.id, self.__dict__\n )\n\n def save(self):\n \"\"\"Updated the public instance attribute at updated_at\"\"\"\n\n self.updated_at = datetime.now()\n storage.save()\n\n def to_dict(self):\n 
\"\"\"Returns all the dictionary attributes of an instance\"\"\"\n\n in_dict = dict(self.__dict__)\n in_dict.update({\n \"__class__\": type(self).__name__,\n \"updated_at\": self.updated_at.isoformat(),\n \"id\": self.id,\n \"created_at\": self.created_at.isoformat()\n })\n return in_dict\n","repo_name":"damtrix/AirBnB_clone","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73362383760","text":"import sys\nimport threading\nfrom pathlib import Path\n\n# Third party imports.\ntry:\n import cv2\n import numpy as np\n import matplotlib\n import matplotlib.backends.backend_tkagg as backend\n import tkinter as tk\n from matplotlib import pyplot as plt\nexcept (ImportError, ModuleNotFoundError) as import_err:\n print('*** OpenCV, Numpy, Matplotlib or tkinter (tk/tcl) was not found or needs an update:\\n\\n'\n 'To install: from the current folder, run this command'\n ' for the Python package installer (PIP):\\n'\n ' python3 -m pip install -r requirements.txt\\n\\n'\n 'Alternative command formats (system dependent):\\n'\n ' py -m pip install -r requirements.txt (Windows)\\n'\n ' pip install -r requirements.txt\\n\\n'\n 'A package may already be installed, but needs an update;\\n'\n ' this may be the case when the error message (below) is a bit cryptic\\n'\n ' Example update command:\\n'\n ' python3 -m pip install -U matplotlib\\n'\n 'On Linux, if tkinter is the problem, then you may need to run:\\n'\n ' sudo apt-get install python3-tk\\n'\n ' See also: https://tkdocs.com/tutorial/install.html \\n\\n'\n f'Error message:\\n{import_err}')\n sys.exit(1)\n\n# Local application imports\nfrom contour_modules import (vcheck, utils, constants as const)\n\n\n# noinspection PyUnresolvedReferences\nclass ProcessImage:\n __slots__ = ('clahe_img', 'clahe_mean', 'clahe_sd', 'clip_limit',\n 'gray_img', 'input_img', 'input_mean',\n 'input_sd', 'settings_txt',\n 'settings_win', 'tile_size',\n 'fig', 'ax1', 'ax2')\n\n def __init__(self):\n\n # The np.ndarray arrays for images to be processed.\n self.input_img = None\n self.gray_img = None\n self.clahe_img = None\n\n # Matplotlib plotting with live updates.\n plt.style.use(('bmh', 'fast'))\n self.fig, (self.ax1, self.ax2) = plt.subplots(\n nrows=2,\n num='Histograms', # Provide a window title to replace 'Figure 1'.\n sharex='all',\n sharey='all',\n clear=True\n )\n # Note that plt.ion() needs to be called\n # AFTER subplots(), otherwise\n # a \"Segmentation fault (core dumped)\" error is raised.\n # plt.ion() is used with fig.canvas.start_event_loop(0.1);\n # it is not needed if fig.canvas.draw_idle() is used.\n # matplotlib.get_backend()\n plt.ion()\n\n # Image processing parameters amd metrics.\n self.clip_limit = 2.0 # Default trackbar value.\n self.tile_size = (8, 8) # Default trackbar value.\n self.input_sd = 0\n self.input_mean = 0\n self.clahe_sd = 0\n self.clahe_mean = 0\n\n self.settings_txt = ''\n self.settings_win = ''\n\n # Note: This order of calls in Linus and Windows is\n # needed for histograms to display at start; it's an\n # event issue (where setup_trackbars() triggers an event\n # that prompts drawing of histogram window).\n self.manage_input()\n self.setup_canvas_window()\n self.show_input_histogram()\n self.setup_trackbars()\n\n def manage_input(self):\n \"\"\"\n Reads input images, creates grayscale image and its flattened\n array, adjusts displayed image size, displays input and 
grayscale\n side-by-side in one window.\n\n Returns: None\n \"\"\"\n\n # utils.args_handler() has verified image path, so read from it.\n self.input_img = cv2.imread(arguments['input'])\n self.gray_img = cv2.imread(arguments['input'], cv2.IMREAD_GRAYSCALE)\n\n cv2.namedWindow(const.WIN_NAME['input+gray'],\n flags=cv2.WINDOW_GUI_NORMAL)\n\n # Need to scale only images to display, not those to be processed.\n # Default --scale arg is 1.0, so no scaling when option not used.\n input_img_scaled = utils.scale_img(self.input_img, arguments['scale'])\n gray_img_scaled = utils.scale_img(self.gray_img, arguments['scale'])\n side_by_side = cv2.hconcat(\n [input_img_scaled, cv2.cvtColor(gray_img_scaled, cv2.COLOR_GRAY2RGB)])\n cv2.imshow(const.WIN_NAME['input+gray'], side_by_side)\n\n @staticmethod\n def setup_canvas_window() -> None:\n \"\"\"\n A tkinter window for the Matplotlib figure canvas.\n \"\"\"\n\n # histogram_window is the Tk mainloop defined in if __name__ == \"__main__\".\n canvas_window.title('Histograms')\n canvas_window.resizable(False, False)\n\n canvas_window.bind_all('', utils.quit_keys)\n canvas_window.bind('', utils.quit_keys)\n\n canvas = backend.FigureCanvasTkAgg(plt.gcf(), canvas_window)\n toolbar = backend.NavigationToolbar2Tk(canvas, canvas_window)\n\n # Need to remove navigation button.\n # Source: https://stackoverflow.com/questions/59155873/\n # how-to-remove-toolbar-button-from-navigationtoolbar2tk-figurecanvastkagg\n # Remove all tools from toolbar because the Histograms window is\n # non-responsive while in event_loop.\n for child in toolbar.children:\n toolbar.children[child].pack_forget()\n\n # Now display remaining widgets in histogram_window.\n # NOTE: toolbar must be gridded BEFORE canvas to prevent\n # FigureCanvasTkAgg from preempting window geometry with its pack().\n toolbar.grid(row=1, column=0,\n padx=5, pady=(0, 5), # Put a border around toolbar.\n sticky=tk.NSEW,\n )\n canvas.get_tk_widget().grid(row=0, column=0,\n ipady=10, ipadx=10,\n padx=5, pady=5, # Put a border around plot.\n sticky=tk.NSEW,\n )\n\n def setup_trackbars(self) -> None:\n \"\"\"\n All trackbars that go in a separate window of image processing\n settings.\n\n Returns: None\n \"\"\"\n\n self.settings_win = \"cv2.createCLAHE settings (dbl-click text to save)\"\n\n # Move the control window away from the processing windows.\n # Place window at right edge of screen by using an excessive x-coordinate.\n cv2.namedWindow(self.settings_win, flags=cv2.WINDOW_AUTOSIZE)\n cv2.moveWindow(self.settings_win, 800, 35)\n\n cv2.setMouseCallback(self.settings_win,\n self.save_with_click)\n\n clip_tb_name = 'Clip limit\\n10X'\n tile_tb_name = 'Tile size (N, N)\\n'\n\n cv2.createTrackbar(clip_tb_name,\n self.settings_win,\n 20,\n 50,\n self.clip_selector)\n cv2.setTrackbarMin(clip_tb_name,\n self.settings_win,\n 1)\n\n cv2.createTrackbar(tile_tb_name,\n self.settings_win,\n 8,\n 200,\n self.tile_selector)\n cv2.setTrackbarMin(tile_tb_name,\n self.settings_win,\n 1)\n\n def save_with_click(self, event, *args):\n \"\"\"\n Double-click on the namedWindow calls module that saves the image\n and settings.\n Calls utils.save_img_and_settings.\n Called by cv2.setMouseCallback event.\n\n Args:\n event: The implicit mouse event.\n *args: Return values from setMouseCallback(); not used here.\n\n Returns: *event* as a formality.\n\n \"\"\"\n\n if event == cv2.EVENT_LBUTTONDBLCLK:\n utils.save_img_and_settings(self.clahe_img,\n self.settings_txt,\n f'{Path(__file__).stem}')\n return event\n\n def clip_selector(self, 
c_val) -> None:\n \"\"\"\n The \"CLAHE clip limit (10X)\" trackbar handler. Limits tile_size\n to greater than zero.\n\n Args:\n c_val: The integer value passed from trackbar.\n Returns: None\n \"\"\"\n\n self.clip_limit = c_val / 10\n\n self.apply_clahe()\n\n def tile_selector(self, t_val) -> None:\n \"\"\"\n The \"CLAHE tile size\" trackbar handler. Limits tile_size\n to greater than zero.\n\n Args:\n t_val: The integer value passed from trackbar.\n Returns: None\n \"\"\"\n\n self.tile_size = t_val, t_val\n\n self.apply_clahe()\n\n def apply_clahe(self) -> None:\n \"\"\"\n Applies CLAHE adjustments to image and calculates pixel values\n for reporting.\n\n Returns: None\n \"\"\"\n\n clahe = cv2.createCLAHE(clipLimit=self.clip_limit,\n tileGridSize=self.tile_size,\n )\n self.clahe_img = clahe.apply(self.gray_img)\n\n self.input_sd = int(self.gray_img.std())\n self.input_mean = int(self.gray_img.mean())\n self.clahe_sd = int(self.clahe_img.std())\n self.clahe_mean = int(self.clahe_img.mean())\n\n self.show_clahe_histogram()\n self.show_settings()\n\n cv2.namedWindow(const.WIN_NAME['clahe'],\n flags=cv2.WINDOW_GUI_NORMAL)\n clahe_img_scaled = utils.scale_img(self.clahe_img, arguments['scale'])\n cv2.imshow(const.WIN_NAME['clahe'], clahe_img_scaled)\n\n def show_input_histogram(self) -> None:\n \"\"\"\n Allows a one-time rendering of the input histogram, thus\n providing a faster response for updating the histogram Figure\n with CLAHE Trackbar changes.\n Called from __init__().\n\n Returns: None\n \"\"\"\n\n # hist() returns tuple of (counts(n), bins(edges), patches(artists)\n # histtype='step' draws a line, 'stepfilled' fills under the line;\n # both are patches.Polygon artists that provide faster rendering\n # than the default 'bar', which is a BarContainer object of\n # Rectangle artists.\n # Need to match these parameters with those for ax2.hist().\n self.ax1.hist(self.gray_img.ravel(),\n bins=255,\n range=[0, 256],\n color='blue',\n alpha=0.4,\n histtype='stepfilled',\n )\n self.ax1.set_ylabel(\"Pixel count\")\n self.ax1.set_title('Input (grayscale)')\n\n def show_clahe_histogram(self) -> None:\n \"\"\"\n Updates CLAHE adjusted histogram plot with Matplotlib from\n trackbar changes. Called from apply_clahe().\n\n Returns: None\n \"\"\"\n\n # Need to clear prior histograms before drawing new ones.\n self.ax2.clear()\n\n self.ax2.hist(self.clahe_img.ravel(),\n bins=255,\n range=[0, 256],\n color='orange',\n histtype='stepfilled', # 'step' draws a line.\n # linewidth=1.2\n )\n self.ax2.set_title('CLAHE adjusted')\n self.ax2.set_xlabel(\"Pixel value\")\n self.ax2.set_ylabel(\"Pixel count\")\n\n # From: https://stackoverflow.com/questions/28269157/\n # plotting-in-a-non-blocking-way-with-matplotlib\n # and, https://github.com/matplotlib/matplotlib/issues/11131\n # Note that start_event_loop is needed for live updates of clahe histograms.\n self.fig.canvas.start_event_loop(0.1)\n\n def show_settings(self) -> None:\n \"\"\"\n Display name of file and processing parameters in contour_tb_win\n window. 
Displays real-time parameter changes.\n Calls module utils.text_array() in contour_modules directory.\n\n Returns: None\n \"\"\"\n\n the_text = (\n f'Input image: {arguments[\"input\"]}\\n'\n f'Input grayscale pixel value: mean {self.input_mean},'\n f' stdev {self.input_sd}\\n'\n f'cv2.createCLAHE cliplimit={self.clip_limit}, tileGridSize{self.tile_size}\\n'\n f'CLAHE grayscale pixel value: mean {self.clahe_mean},'\n f' stdev {self.clahe_sd}'\n )\n\n # Put text into contoured_txt for printing and saving to file.\n self.settings_txt = the_text\n\n # Need to set the dimensions of the settings area to fit all text.\n # Font style parameters are set in constants.py module.\n settings_img = utils.text_array((150, 500), the_text)\n\n cv2.imshow(self.settings_win, settings_img)\n\n\nif __name__ == \"__main__\":\n # Program exits here if system platform or Python version check fails.\n vcheck.minversion('3.7')\n\n # All checks are good, so grab as a 'global' the dictionary of\n # command line argument values.\n arguments = utils.args_handler()\n\n # Need to not set up tk canvas to display Histograms b/c\n # generates a fatal memory allocation error. It has something\n # to do with the start_event_loop function.\n # Run the Matplotlib histogram plots in a tkinter window.\n canvas_window = tk.Tk()\n\n PI = ProcessImage()\n print(f'{Path(__file__).name} is now running...\\n',\n 'Quit program with Esc or Q key, or Ctrl-C from Terminal.\\n')\n\n# Set infinite loop with sigint handler to monitor \"quit\" keystrokes.\n quit_thread = threading.Thread(\n target=utils.quit_keys(), daemon=True)\n quit_thread.start()\n\n canvas_window.mainloop()\n","repo_name":"csecht/opencv-contour-utility","sub_path":"equalize_tk.py","file_name":"equalize_tk.py","file_ext":"py","file_size_in_byte":13549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71268618961","text":"import disnake\nfrom disnake.ext import commands\nfrom .modules.host import host\n\nclass SalmonCommand(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.slash_command(name=\"しゃけ\")\n async def salmon(self,\n inter: disnake.ApplicationCommandInteraction,\n at: str = commands.Param(default=\"\", name=\"at\", choices=([\"1-3\",\"1-2\"] + [str(i) for i in reversed(range(1, 3 + 1))])),\n hour: int = commands.Param(default=-1, name=\"時\", ge=-1, le=23, choices=[i for i in range(0,24,1)]),\n min: int = commands.Param(default=-1, name=\"分\", ge=-1, le=59, choices=[i for i in range(0,60,10)]),\n description: str = commands.Param(name=\"備考\", default=\"\")):\n \"\"\"\n サーモンランの募集を行います\n\n Parameters\n ----------\n at: 募集人数\n hour: 開始時刻(時)\n min: 開始時刻(分)\n description: 補足事項\n \"\"\"\n at = f'@{at}' if at else ''\n \n await host(\n bot = self.bot,\n inter = inter,\n genre = 'splatoon',\n content = f'サーモンラン',\n at = at,\n color = 0xff7500,\n hour = hour,\n min = min,\n description = description\n )\n\ndef setup(bot: commands.Bot):\n bot.add_cog(SalmonCommand(bot))\n","repo_name":"ikanoasi10/ikastool","sub_path":"cogs/host/salmon.py","file_name":"salmon.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33249920424","text":"\"\"\"Logged-in page routes.\"\"\"\nfrom flask import Blueprint, render_template, redirect, url_for, flash\nfrom flask_login import current_user, login_required\n\nfrom my_stuff import db, make_random_gradient\nfrom my_stuff.models.all_models import 
(\n User,\n Space,\n Container,\n ContainerCategory,\n Item,\n Tag\n)\n\nfrom my_stuff.forms.search_form import SearchForm\n\n\n# Blueprint Configuration\nsearch_bp = Blueprint(\n 'search_bp', __name__,\n template_folder='templates',\n static_folder='static'\n)\n\n\n@search_bp.route('/search', methods=['POST'])\n@login_required\ndef search():\n\n # spaces, containers, items = None, None, None\n\n form = SearchForm()\n\n search_txt = form.search_txt.data\n\n # Get all SPACES for this user\n spaces = Space.query.join(Space.users).filter_by(id=current_user.id).all()\n\n # See if any space matches this search text\n selected_space = None\n for space in spaces:\n if space.name.lower() == search_txt.lower():\n selected_space = space\n\n # See if any containers have this name\n space_id_list = [s.uid for s in spaces]\n all_containers = Container.query.filter(Container.space_id.in_(space_id_list)).all()\n\n selected_containers = []\n for container in all_containers:\n if container.name.lower() == search_txt.lower() \\\n or search_txt.lower() in container.name.lower():\n selected_containers.append(container)\n\n if len(selected_containers) == 0:\n selected_containers = None\n\n # See if any items have this name\n selected_items = []\n for container in all_containers:\n for item in container.items:\n # Perfect match?\n if item.name.lower() == search_txt.lower() \\\n or search_txt.lower() in item.name.lower():\n selected_items.append(item)\n\n if len(selected_items) == 0:\n selected_items = None\n\n # See if any tags match this search\n search_txt_as_tag = search_txt.replace(\" \", \"-\").lower()\n\n tag = Tag.query.filter_by(\n name=search_txt_as_tag\n ).first()\n\n # If there's a matching tag, get all items with that tag\n if tag:\n tagged_items = Item.query.join(Item.tags).filter_by(name=search_txt_as_tag).all()\n else:\n tagged_items = None\n\n if selected_space is None and selected_containers is None \\\n and selected_items is None and tag is None \\\n and tagged_items is None:\n result_txt = f'Nothing matched the search \"{search_txt}\"'\n else:\n result_txt = \"\"\n\n item_count = 0\n if tagged_items:\n item_count += len(tagged_items)\n if selected_items:\n item_count += len(selected_items)\n\n return render_template(\n 'search.html',\n make_random_gradient=make_random_gradient,\n search_txt=search_txt,\n selected_space=selected_space,\n selected_containers=selected_containers,\n selected_items=selected_items,\n tag=tag,\n search_form=SearchForm(),\n tagged_items=tagged_items,\n result_txt=result_txt,\n item_count=item_count\n )\n\n","repo_name":"aaronfraint/where-is-my-stuff","sub_path":"my_stuff/routes/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13560519295","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\n\n#定义常数\nr = 0.03 # 无风险收益率\nS0 = 100 # 股票价格\nK = 90 # 行权价\nT = 1 # 时间\n\n#设定波动率的范围���以及步长\nsigma_min, sigma_max, h = 0.01, 0.30, 0.01\n\n#生成波动率的序列\nsigma_list = np.arange(sigma_min, sigma_max + h, h)\n\n#建立空的列表,用于存放隐含波动率\nimplied_volatilities = []\n\n#计算隐含波动率\nfor sigma in sigma_list:\n d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n d2 = (np.log(S0 / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n implied_volatility = sigma * np.sqrt(2 * np.pi / T) * np.exp(-d1 ** 2 / 2) / (S0 * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2))\n 
implied_volatilities.append(implied_volatility)\n\n#绘制波动率锥\nplt.title('Implied Volatility Cone')\nplt.plot(sigma_list, implied_volatilities, 'b-')\nplt.xlabel('sigma')\nplt.ylabel('implied volatility')\nplt.xlim(0, 0.3)\nplt.ylim(0, 0.3)\nplt.gca().set_aspect('equal', adjustable='box')\nplt.show()","repo_name":"aktiger/option_tools","sub_path":"impliedVolatilityCones.py","file_name":"impliedVolatilityCones.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"18655029590","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('',views.homePage,name='home'),\n path('home', views.homePage, name='home'),\n path('login',views.loginPage,name='login'),\n path('register',views.registerPage,name='register'),\n path('landing',views.landingPage,name='landing'),\n path('message',views.messagePage,name='message'),\n path('form',views.application,name=\"form\"),\n]","repo_name":"usshaa/BankProject","sub_path":"Bank/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37866358617","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 6 10:53:19 2018\n\n@author: Alexis Martin\n\"\"\"\n\nimport numpy as np\nfrom computeElasticPrincipalGraph import computeElasticPrincipalGraph\n\n\ndef computeElasticPrincipalCurve(data, NumNodes, newDim=None, drawPCAview=True,\n drawAccuracyComplexity=True, drawEnergy=True,\n Lambda=0.01, Mu=0.1, InitNodeP=None,\n InitEdges=None, ComputeMSEP=False,\n MaxBlockSize=100000, TrimmingRadius=np.inf,\n MaxNumberOfIterations=10, eps=0.01,\n verbose=True):\n return computeElasticPrincipalGraph(data, NumNodes, newDim, drawPCAview,\n drawAccuracyComplexity, drawEnergy,\n Lambda, Mu, InitNodeP, InitEdges,\n np.array([[\"bisectedge\"]]),\n np.array([]), ComputeMSEP,\n MaxBlockSize, TrimmingRadius,\n MaxNumberOfIterations, eps, verbose)\n","repo_name":"AlexiMartin/ElPiGraph.P","sub_path":"computeElasticPrincipalCurve.py","file_name":"computeElasticPrincipalCurve.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"22418226700","text":"import pytest\nfrom selenium import webdriver\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--b\",\n action=\"store\",\n default=\"firefox\",\n help=\"Chose your browser\"\n )\n parser.addoption(\n \"--url\",\n action=\"store\",\n default=\"http://192.168.0.101\",\n help=\"Insert yout URL\"\n )\n parser.addoption(\n \"--path\",\n action=\"store\",\n default=\"/\",\n help=\"Url path\"\n )\n\n\n@pytest.fixture\ndef browser(request):\n param = request.config.getoption(\"--b\")\n if param == \"chrome\":\n wd = webdriver.Chrome()\n elif param == \"firefox\":\n wd = webdriver.Firefox()\n else:\n raise (\"Browser is not supported\")\n wd.implicitly_wait(5)\n wd.get(request.config.getoption(\"--url\") + request.config.getoption(\"--path\"))\n return wd\n","repo_name":"maslovaleksandr/selenium_tests","sub_path":"test_page_object/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38425469660","text":"#coding=utf-8\n#Version:python3.5.2\n#Tools:Pycharm\n#Date:\n__author__ = \"Colby\"\n#coding=utf-8\n#Version:python3.5.2\n#Tools:Pycharm\n#Date:\n__author__ = 
\"Colby\"\nimport pymongo\nimport pymysql\n\n\n#--------------------------数据库启动函数------------------------------\ndef start_MySQL():\n conn = pymysql.connect(host='localhost', user='root', passwd='root', db='youboy', charset='utf8')\n cur = conn.cursor()\n myConn_list = [conn, cur]\n print('success',myConn_list)\n return myConn_list\n#---------------------------------------------------------------------\n\n#--------------------------关闭数据库--------------------------------\ndef close_MySQL(cur,conn):\n cur.close()\n conn.commit()\n conn.close()\n#------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n user='xlh_data_branch'\n pwd='xlh_data_branch'\n server='43.254.149.60'\n port='27017'\n dbName='xlh_yrtong'\n url = 'mongodb://' + user + ':' + pwd + '@' + server + ':' + port + '/' + dbName\n client = pymongo.MongoClient(url)\n c_enterprise_cash_flow = client[dbName]['enterprise_cash_flow']\n print('c_enterprise_cash_flow',c_enterprise_cash_flow)\n for temple in c_enterprise_cash_flow.find():\n print('data',temple)\n # myConn_list = start_MySQL()\n # cur = myConn_list[1]\n # conn = myConn_list[0]\n #\n # sqli = \"replace into ods_mongodb_enterprise(\" \\\n # \"_id\" \\\n # \",catagory_1_Name\" \\\n # \",catagory_1_Url\" \\\n # \",catagory_2_Name\" \\\n # \",catagory_2_Url\" \\\n # \",catagory_3_Name\" \\\n # \",catagory_3_Url\" \\\n # \",cityName,cityUrl\" \\\n # \",contactPerson\" \\\n # \",enterpriseAddr\" \\\n # \",enterpriseFax\" \\\n # \",enterpriseMobile\" \\\n # \",enterpriseName\" \\\n # \",enterprisePhone\" \\\n # \",enterpriseUrl\" \\\n # \",provinceName\" \\\n # \",url) \" \\\n # \"values(%s,%s,%s,%s,%s\" \\\n # \",%s,%s,%s,%s,%s\" \\\n # \",%s,%s,%s,%s,%s\" \\\n # \",%s,%s,%s)\"\n # #print('sqli',sqli)\n # dataList=[]\n # for temple in enterprise_collect.find():\n # print(temple['_id'])\n # #print('temple',temple)\n # data=(str(temple['_id']),\n # temple['catagory_1_Name'],\n # temple['catagory_1_Url'],\n # temple['catagory_2_Name'],\n # temple['catagory_2_Url'],\n # temple['catagory_3_Name'],\n # temple['catagory_3_Url'],\n # temple['cityName'],\n # temple['cityUrl'],\n # temple['contactPerson'],\n # temple['enterpriseAddr'],\n # temple['enterpriseFax'],\n # temple['enterpriseMobile'],\n # temple['enterpriseName'],\n # temple['enterprisePhone'],\n # temple['enterpriseUrl'],\n # temple['provinceName'],\n # temple['url'])\n # dataList.append(data)\n # #print('dataList', dataList)\n #\n # cur.executemany(sqli,dataList)\n # #conn.commit()\n # close_MySQL(cur, conn)","repo_name":"13661892653/workspace","sub_path":"pyCode/MongoToMysql/ff.py","file_name":"ff.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5614256693","text":"import json\nimport logging\nimport boto3\n\nsqs = boto3.client('sqs')\n\ndef lambda_handler(event, context):\n sqs_qs = sqs.list_queues()\n sqs_q = str(sqs_qs['QueueUrls'][0])\n logging.info(f\"widget-request-handler sqs queue: {sqs_q}.\")\n body = event['body']\n if(len(body) > 0):\n request_string_for_sqs = body.replace(\"'\", \"\\\"\")\n body_json = json.loads(request_string_for_sqs)\n \n if 'type' not in body_json:\n logging.info(\"no type in body in widget-request-handler.\")\n return {\n 'statusCode': 499,\n 'body': \"Request not processed due to no type of request.\"\n }\n req_type = body_json['type']\n logging.info(f\"the type of the request is: {req_type}.\")\n\n try:\n sqs.send_message(QueueUrl=sqs_q, 
MessageBody=request_string_for_sqs)\n logging.info(\"Put request into queue.\")\n except Exception:\n logging.info(\"Could not put request into SQS from lambda.\")\n raise Exception\n \n return_msg = f\"{req_type} request created: {json.dumps(body)}\"\n return {\n 'statusCode': 200,\n 'body': return_msg\n }\n \n else:\n logging.info(\"Request was empty.\")\n return {\n 'statusCode': 499,\n 'body': \"Request not processed due to no valid request (empty body).\"\n }\n","repo_name":"TheWifflebrain/cs5260","sub_path":"widget-request-handler.py","file_name":"widget-request-handler.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5124948219","text":"from __future__ import annotations\n\n\nimport asyncio\nimport torch\nimport pytest\nimport typing as t\nfrom transformers.modeling_outputs import ImageClassifierOutput\n\nif t.TYPE_CHECKING:\n import PIL.Image\n from pytest_mock import MockerFixture\n\n\ndef test_preprocess(xray_image: tuple[PIL.Image.Image, str]):\n import service\n\n im, _ = xray_image\n\n res = service.preprocess(im)\n assert res.mode == \"RGB\"\n\n\n@pytest.mark.parametrize(\n \"result_tensor,expected\",\n [\n (torch.tensor([[-1.2620, 1.3168]]), \"PNEUMONIA\"),\n (torch.tensor([[1.4293, -1.5865]]), \"NORMAL\"),\n ],\n)\ndef test_output_from_result(result_tensor: torch.Tensor, expected: int):\n import service\n\n assert expected == service.Output.from_result(result_tensor).class_name\n\n\n@pytest.mark.asyncio\nasync def test_classify(mocker: MockerFixture, xray_image: tuple[PIL.Image.Image, str]):\n import service\n\n im, _ = xray_image\n\n e_runner = mocker.patch(\"service.extractor\")\n e_runner.async_run = e_runner.object(service.extractor, \"async_run\")\n future = asyncio.Future()\n future.set_result({\"pixel_values\": torch.Tensor([1.0])})\n e_runner.async_run.return_value = future\n\n m_runner = mocker.patch(\"service.model\")\n m_runner.async_run = m_runner.object(service.model, \"async_run\")\n future = asyncio.Future()\n future.set_result(\n ImageClassifierOutput(\n logits=torch.tensor([[-1.2620, 1.3168]], dtype=torch.float32)\n )\n )\n m_runner.async_run.return_value = future\n\n res = await service.classify(im)\n assert res.class_name == \"PNEUMONIA\"\n","repo_name":"bentoml/Pneumonia-Detection-Demo","sub_path":"tests/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"42854118877","text":"from src.balancing import oversampling, undersampling, multiclass_resampling\n\nfrom src.balancing.data_controller import DataController\nimport numpy as np\n\n\nclass Resampler:\n def __init__(self, method_name: str, filepath_source: str):\n self.unbalanced_dataset = DataController.read_categorized_criteo(filepath_source)\n self.X_unbalanced, self.y_unbalanced = DataController.split_data_on_x_y(self.unbalanced_dataset)\n self.resampling_method_object = self.__method_selector(method_name)\n\n def __method_selector(self, balancing_type: str):\n if balancing_type == 'ros':\n return oversampling.random_over_sampler_optimized()\n elif balancing_type == 'smotenc':\n max_samples = 10000 # it just cant handle more than 10k samples because of ram\n if self.unbalanced_dataset.shape[0] > max_samples:\n self.X_unbalanced = self.X_unbalanced.head(max_samples)\n self.y_unbalanced = self.y_unbalanced.head(max_samples)\n return 
oversampling.smotenc_optimized(self.X_unbalanced)\n elif balancing_type == 'rus':\n return undersampling.random_under_sampler_optimized()\n elif balancing_type == 'nearmiss':\n return undersampling.nearmiss_optimized()\n elif balancing_type == 'enn':\n return undersampling.edited_nearest_neighbours_optimized()\n elif balancing_type == 'renn':\n return undersampling.repeated_edited_nearest_neighbours_optimized()\n elif balancing_type == 'allknn':\n return undersampling.allknn_optimized()\n elif balancing_type == 'onesided':\n return undersampling.one_sided_selection_optimized()\n elif balancing_type == 'ncr':\n return undersampling.neighbourhood_cleaning_rule_optimized()\n elif balancing_type == 'iht':\n return undersampling.instance_hardness_threshold_optimized()\n elif balancing_type == 'globalcs':\n return multiclass_resampling.global_cs_optimized()\n elif balancing_type == 'soup':\n return multiclass_resampling.soup_optimized()\n else:\n raise ValueError(\"Incorrect resampler type: \" + balancing_type)\n\n def get_name(self) -> str:\n return self.resampling_method_object.__str__().title().split(\"(\")[0]\n\n def set_params(self, **params) -> None:\n self.resampling_method_object.set_params(**params)\n\n def get_params(self) -> str:\n print(self.resampling_method_object.get_params())\n return str(self.resampling_method_object.get_params())\n\n def resample_to_ndarray(self) -> (np.ndarray, np.ndarray):\n X_resampled, y_resampled = self.resampling_method_object.fit_resample(self.X_unbalanced, self.y_unbalanced)\n return X_resampled.values, y_resampled.values.ravel()\n\n def resample_and_write_to_csv(self, filepath_destination: str, name: str = None) -> str:\n if name is None:\n name = self.resampling_method_object.__str__().split(\"(\")[0]\n\n X_resampled, y_resampled = self.resampling_method_object.fit_resample(self.X_unbalanced, self.y_unbalanced)\n\n balanced_df = X_resampled\n balanced_df[\"Sales\"] = y_resampled\n\n filepath_balanced = f\"{filepath_destination}/{name}.csv\"\n\n balanced_df.to_csv(filepath_balanced, index=False)\n print(\"Balanced:\", name, DataController.count_classes_size(y_resampled))\n\n return filepath_balanced\n","repo_name":"Zerkles/SRUDA","sub_path":"src/balancing/resampler.py","file_name":"resampler.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22667530362","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport os\nimport time\n\nfrom twitter.common import app\nfrom twitter.common.recordio import RecordIO, ThriftRecordReader\n\nfrom apache.thermos.common.ckpt import CheckpointDispatcher\n\nfrom gen.apache.thermos.ttypes import ProcessState, RunnerCkpt, RunnerState, TaskState\n\n\n@app.command\n@app.command_option(\"--simple\", default=False, dest='simple', action='store_true',\n help=\"Only print the checkpoint records, do not replay them.\")\ndef read(args, options):\n \"\"\"Replay a thermos 
checkpoint.\n\n Usage: thermos read [options] checkpoint_filename\n \"\"\"\n if len(args) != 1:\n app.error('Expected one checkpoint file, got %s' % len(args))\n if not os.path.exists(args[0]):\n app.error('Could not find %s' % args[0])\n\n dispatcher = CheckpointDispatcher()\n state = RunnerState(processes={})\n with open(args[0], 'r') as fp:\n try:\n for record in ThriftRecordReader(fp, RunnerCkpt):\n if not options.simple:\n dispatcher.dispatch(state, record)\n else:\n print('CKPT: %s' % record)\n except RecordIO.Error as err:\n print(\"Failed to recover from %s: %s\" % (fp.name, err))\n return\n\n if not options.simple:\n if state is None or state.header is None:\n print('Checkpoint stream CORRUPT or outdated format')\n return\n print('Recovered Task Header:')\n print(' id: %s' % state.header.task_id)\n print(' user: %s' % state.header.user)\n print(' host: %s' % state.header.hostname)\n print(' sandbox: %s' % state.header.sandbox)\n if state.header.ports:\n print(' ports: %s' % ' '.join(\n '%s->%s' % (name, port) for (name, port) in state.header.ports.items()))\n print('Recovered Task States:')\n for task_status in state.statuses:\n print(' %s [pid: %d] => %s' % (\n time.asctime(time.localtime(task_status.timestamp_ms / 1000.0)),\n task_status.runner_pid,\n TaskState._VALUES_TO_NAMES[task_status.state]))\n print('Recovered Processes:')\n for process, process_history in state.processes.items():\n print(' %s runs: %s' % (process, len(process_history)))\n for k in reversed(range(len(process_history))):\n run = process_history[k]\n print(' %2d: pid=%d, rc=%s, finish:%s, state:%s' % (\n k,\n run.pid,\n run.return_code if run.return_code is not None else '',\n time.asctime(time.localtime(run.stop_time)) if run.stop_time else 'None',\n ProcessState._VALUES_TO_NAMES.get(run.state, 'Unknown')))\n","repo_name":"apache/aurora","sub_path":"src/main/python/apache/thermos/cli/commands/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":630,"dataset":"github-code","pt":"3"} +{"seq_id":"24849755278","text":"from datetime import datetime\n\n\nclass SensorRetentionPolicy:\n def __init__(self, m: int, initial_trust_points: int, max_trust_points: int):\n self.K = 0 # number of sensors in the cluster\n self.theta = self.K # threshold for manual investigation, dependent on K\n self.m = m # number of consecutive clean data entries to increment a sensor's trust points\n self.sensors_stats = {}\n self.initial_trust_points = initial_trust_points\n self.max_trust_points = max_trust_points\n\n # TODO: Implement this.\n # number of times theta condition must be met during the last 10 evaluations\n # before going into manual investigation\n # self.phi = phi\n\n # self.consec_eval = 0\n # self.consec_manual_inves = 0\n\n def register_sensor(self, sensor_id: str) -> None:\n self.sensors_stats[sensor_id] = {\n \"consecutive_clean\": 0,\n \"trust_points\": self.initial_trust_points\n }\n self.K += 1\n self.theta = self.K\n\n def unregister_sensor(self, sensor_id: str) -> None:\n self.sensors_stats.pop(sensor_id)\n self.K -= 1\n self.theta = self.K\n\n def evaluate_sensors(self, classification_res, curr_date: datetime):\n print(self.sensors_stats)\n self.curr_date = curr_date\n malicious_amount = sum([\n 1 for is_malicious in list(classification_res.values()) if is_malicious\n ])\n\n if self.K > 1 and malicious_amount >= self.theta:\n return self._do_manual_investigation()\n else:\n return self._update_trust_points(classification_res)\n 
\n def _do_manual_investigation(self):\n removed_sensors = []\n print('Commence manual investigation...')\n investigation_res = '' \n \n while True:\n investigation_res = input(\"Result of manual investigation ('s' for legitimate spike/dip in sensor readings and 'h' for hacking of sensors): \")\n if investigation_res.lower() in ['s', 'h']:\n investigation_res = investigation_res.lower()\n break\n\n if investigation_res == 's':\n message_back = 'Retrain classifier from scratch'\n return (removed_sensors, message_back)\n elif investigation_res == 'h':\n invalid_input = True\n while invalid_input:\n hacked_sensors = input('List the ids of all the hacked sensors (format: ,,...): ')\n hacked_sensors = hacked_sensors.split(',')\n\n sensors = list(self.sensors_stats.keys())\n for sensor in hacked_sensors:\n if sensor not in sensors:\n print('{} is not in the list of sensors.'.format(sensor))\n break\n else:\n for sensor_id in hacked_sensors:\n self.unregister_sensor(sensor_id)\n removed_sensors.append(sensor_id)\n\n print(\"{}: '{}' is removed from sensors since it was hacked!\".format(self.curr_date, sensor_id))\n \n for sensor_id in self.sensors_stats:\n self._is_not_malicious_action(sensor_id)\n\n message_back = 'Hacked sensors'\n return (removed_sensors, message_back)\n return ([], 'Cannot be reached')\n\n def _update_trust_points(self, classification_res):\n removed_sensors = []\n for sensor_id, is_malicious in classification_res.items():\n if is_malicious:\n self._is_malicious_action(sensor_id, removed_sensors)\n else:\n self._is_not_malicious_action(sensor_id)\n\n message_back = 'Updated trust points'\n return (removed_sensors, message_back)\n \n def _is_malicious_action(self, sensor_id, removed_sensors):\n self.sensors_stats[sensor_id]['consecutive_clean'] = 0\n self.sensors_stats[sensor_id]['trust_points'] -= 1\n\n if self.sensors_stats[sensor_id]['trust_points'] == 0:\n self.unregister_sensor(sensor_id)\n removed_sensors.append(sensor_id)\n self.K -= 1\n self.theta = self.K\n\n print(\"{}: '{}' is removed from the cluster since its trust points have reached 0!\".format(self.curr_date, sensor_id))\n \n def _is_not_malicious_action(self, sensor_id):\n self.sensors_stats[sensor_id]['consecutive_clean'] += 1\n \n if self.sensors_stats[sensor_id]['consecutive_clean'] == self.m:\n self.sensors_stats[sensor_id]['consecutive_clean'] = 0\n self.sensors_stats[sensor_id]['trust_points'] = min(\n self.sensors_stats[sensor_id]['trust_points'] + 1, \n self.max_trust_points\n )\n\n# NOTE: Minodify ko muna yung code mo para maging compatible sa codebase.\n# import socket\n# from sensor import SocketlessSensor\n\n# # Notes:\n\n# # For SRP_sensor\n# # SRP_sensor is a subclass of the Sensor class from sensor.py\n# # I just added three new attributes for SRP\n\n# # For SensorRetentionPolicy\n# # Based sa pagkakaintindi ko sa gateway.py\n# # Diba si process_data() ay function na ang input ay one piece of data lang\n# # Tapos sa loob ni process_data() ang SRP diba\n# # So that means one piece of data lang din input kay SRP\n# # So ayun assumption sa gawa ko so far\n\n# # Next, I created two special attributes kay SRP\n# # The first ay si __message_to_gateway\n# # Dito ko iistore yung mga commands ni SRP kay gateway\n# # The second is __message_from_gateway\n# # Dito naman pwede ilagay ni gateway message niya to gateway\n# # by doing this (assuming that the variable my_srp is an instance of the class SensorRetentionPolicy):\n# # my_srp.message_from_gateway = \"legit spike\"\n\n# class 
SRP_sensor(SocketlessSensor):\n# def __init__(self,id,station):\n# super().__init__(id,station)\n# self.__consecutive_clean = 0\n# self.__trust_points = 5\n# self.__isMalicious = False\n \n# @property\n# def consecutive_clean(self):\n# return self.__consecutive_clean\n# @consecutive_clean.setter\n# def consecutive_clean(self,new_value):\n# self.__consecutive_clean = new_value\n# @property\n# def trust_points(self):\n# return self.__trust_points\n# @trust_points.setter\n# def trust_points(self,new_value):\n# self.__trust_points = new_value\n# @property\n# def isMalicious(self):\n# return self.__isMalicious\n# @isMalicious.setter\n# def isMalicious(self,new_status: bool):\n# self.__isMalicious = new_status\n\n# class SensorRetentionPolicy:\n# # bale list na to nung tuple ng mga sensors\n# def __init__(self,K):\n# self.__K = K\n# self.__theta = self.__K//2\n# self.__malicious_sensors = dict()\n# self.__clean_sensors = dict()\n# self.__message_to_gateway = \"\"\n# self.__message_from_gateway = \"\"\n# self.__initial_classification_results: dict[str,bool] = dict()\n \n# @property\n# def K(self):\n# return self.__K\n# @property\n# def theta(self):\n# return self.__theta\n# @property\n# def status(self):\n# return self.__status\n# @property\n# def message_to_gateway(self):\n# return self.__message_to_gateway\n# @property\n# def message_from_gateway(self):\n# return self.__message_from_gateway\n# @property\n# def initial_classification_results(self):\n# return self.__initial_classification_results\n\n# @initial_classification_results.setter\n# def initial_classification_results(self,new_results):\n# self.__initial_classification_results = new_results\n\n# @message_from_gateway.setter\n# def message_from_gateway(self,new_message):\n# self.__message_from_gateway = new_message\n\n# @status.setter\n# def status(self,new_status):\n# self.__status = new_status\n\n# def add_clean_sensor(self,SENSOR: SocketlessSensor):\n# temp_sensor = SRP_sensor(SENSOR.id,SENSOR.station)\n# self.__clean_sensors[SENSOR.id] = temp_sensor\n# temp_sensor.isMalicious = False\n\n# def add_malicious_sensor(self,SENSOR: SocketlessSensor):\n# temp_sensor = SRP_sensor(SENSOR.id,SENSOR.station)\n# self.__malicious_sensors[SENSOR.id] = temp_sensor\n# temp_sensor.isMalicious = True\n\n# def successful_send(self,sensorID):\n# # increment consecutive successful send counter\n# self.__clean_sensors[sensorID].consecutive_clean += 1\n# if self.__clean_sensors[sensorID].consecutive_clean == 5:\n# self.__clean_sensors[sensorID].trust_points += 1\n# # reset consecutive count\n# self.__clean_sensors[sensorID].consecutive_clean = 0\n \n# def store_to_array(self,isDataMalicious,sensorID):\n# if isDataMalicious == False:\n# self.add_clean_sensor(sensorID)\n# # announce message to store data to blockchain\n# self.__message_to_gateway = \"SEND TO BLOCKCHAIN\"\n# # increment consecutive successful send counter\n# # call successful_send() sa gateway kapag nakapag send sa blockchain\n# else:\n# self.add_malicious_sensor(sensorID)\n\n# def store_initial_results(self):\n# for sensorID in self.__initial_classification_results:\n# self.store_to_array(self.__initial_classification_results[sensorID],sensorID)\n\n# def no_manual_investigation(self):\n# if len(self.__malicious_sensors >= self.__theta):\n# # announce manual investigation\n# # tell gateway to cache upcoming sensor data\n# self.__message_to_gateway = \"START_MANUAL_INVESTIGATION\"\n# return self.__message_to_gateway\n# else:\n# for id in self.__malicious_sensors:\n# 
self.__malicious_sensors[id].trust_points -= 1\n# to_remove = list(filter(lambda x: x.trust_points == 0, self.__malicious_sensors))\n# # tells gateway to remove the sensors in the list called to_remove\n# self.__message_to_gateway = (\"remove_from_cluster\",to_remove)\n# return (\"remove_from_cluster\",to_remove)\n# def legit_invalid_sensors(self):\n# for id in self.__malicious_sensors:\n# self.__malicious_sensors[id].trust_points -= 1\n# to_remove = list(filter(lambda x: x.trust_points == 0, self.__malicious_sensors))\n# # tells gateway to remove the sensors in the list called to_remove\n# self.__message_to_gateway = (\"remove_from_cluster\",to_remove)\n# return (\"remove_from_cluster\",to_remove)","repo_name":"vinocastello/CS-199","sub_path":"simulation-classes/srp.py","file_name":"srp.py","file_ext":"py","file_size_in_byte":10752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"5635924481","text":"\nimport mlflow\nimport time\n\nclass MlFlow:\n    def __init__(self) -> None:\n        mlflow.set_experiment(experiment_name ='aula_GPS')\n\n    def log_result(self,paramters,metrics):\n\n        with mlflow.start_run(run_name = paramters[\"model_name\"]+\"-\"+str(time.time())):\n            print(paramters)\n            mlflow.log_params(paramters)\n            mlflow.log_metrics(metrics)","repo_name":"faustinothiagos/Pipiline","sub_path":"modules/log_experiments.py","file_name":"log_experiments.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19164149753","text":"def solution(n):\n    namuji = []\n    answer = ''\n    pt = ['1','2','4']\n    # *** I thought this was the same as base 3, with the digits 0 1 2 simply renamed to 1 2 4, but there is a difference.\n    # 10 in base 3 is 101, i.e. a three-digit number, but here it is only two digits.\n    # It seems everything is shifted by one because 0 is never represented.\n    # 3 should be 10 (two digits), but here it is a single digit.\n    # So the numbers only stay in sync if we compute with n minus 1. A really annoying problem.\n\n    # *Remember: base X means repeatedly dividing the number N by X and joining the remainders into a string from bottom to top.\n    while True:\n        # as in the earlier base-3 conversion problem, *guard against values smaller than 3 at the start\n        if n-1 // 3 == 0:\n            break # stop\n        namuji.append(pt[(n-1)%3]) # divide by 3 and store the remainder, mapped through the 1/2/4 pattern\n        n = n-1\n        n = n//3\n    for i in namuji:\n        answer += i\n    return answer[::-1]","repo_name":"jea0902/Algorithm-Python","sub_path":"프로그래머스/lv2/12899. 
124 나라의 숫자/124 나라의 숫자.py","file_name":"124 나라의 숫자.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19810599792","text":"import torch\n\nfrom leaspy.io.realizations.realization import Realization\nfrom .abstract_personalize_algo import AbstractPersonalizeAlgo\nfrom ..samplers.gibbs_sampler import GibbsSampler\nfrom ..samplers.hmc_sampler import HMCSampler\nfrom ...io.outputs.individual_parameters import IndividualParameters\n\n\n# import time\n\n\nclass ModeReal(AbstractPersonalizeAlgo):\n\n def __init__(self, settings):\n\n # Algorithm parameters\n super().__init__(settings)\n\n # TODO cloned --> factorize in a utils ???\n def _initialize_samplers(self, model, data):\n infos_variables = model.random_variable_informations()\n self.samplers = dict.fromkeys(infos_variables.keys())\n for variable, info in infos_variables.items():\n if info[\"type\"] == \"individual\":\n if self.algo_parameters['sampler_ind'] == 'Gibbs':\n self.samplers[variable] = GibbsSampler(info, data.n_individuals)\n else:\n self.samplers[variable] = HMCSampler(info, data.n_individuals, self.algo_parameters['eps'])\n else:\n if self.algo_parameters['sampler_pop'] == 'Gibbs':\n self.samplers[variable] = GibbsSampler(info, data.n_individuals)\n else:\n self.samplers[variable] = HMCSampler(info, data.n_individuals, self.algo_parameters['eps'])\n\n def _initialize_annealing(self):\n if self.algo_parameters['annealing']['do_annealing']:\n if self.algo_parameters['annealing']['n_iter'] is None:\n self.algo_parameters['annealing']['n_iter'] = int(self.algo_parameters['n_iter'] / 2)\n\n self.temperature = self.algo_parameters['annealing']['initial_temperature']\n self.temperature_inv = 1 / self.temperature\n\n def _get_individual_parameters(self, model, data):\n\n # Initialize realizations storage object\n realizations_history = []\n\n # Initialize samplers\n self._initialize_samplers(model, data)\n\n # Initialize annealing\n self._initialize_annealing()\n\n # initialize realizations\n realizations = model.get_realization_object(data.n_individuals)\n realizations.initialize_from_values(data.n_individuals, model)\n # TODO: remove method ``realizations.initialize_from_values`` and add scale_individual parameter to\n # ``model.get_realization_object`` to be passed to ``realization.initialize``\n\n # Gibbs sample n_iter times\n for i in range(self.algo_parameters['n_iter']):\n for key in realizations.reals_ind_variable_names:\n self.samplers[key].sample(data, model, realizations, self.temperature_inv)\n\n # Append current realizations if burn in is finished\n if i > self.algo_parameters['n_burn_in_iter']:\n realizations_history.append(realizations.copy())\n\n # Get for each patient the realization that best fit\n attachments = torch.stack(\n [model.compute_individual_attachment_tensorized(data, model.get_param_from_real(realizations), \"MCMC\") for\n realizations in realizations_history])\n\n # Regularity\n regularity = []\n for realizations in realizations_history:\n regularity_ind = 0\n for var_ind in model.get_individual_variable_name():\n regularity_ind += model.compute_regularity_realization(realizations[var_ind]).sum(dim=1)\n regularity.append(regularity_ind)\n regularity = torch.stack(regularity)\n\n\n # Indices min\n indices_min = torch.min(attachments+regularity, dim=0)\n\n # Compute mode of n_iter realizations for each individual variable\n mode_output = {}\n\n ind_var_names = model.get_individual_variable_name()\n infos 
= model.random_variable_informations()\n\n        for ind_var_name in ind_var_names:\n            mode_output[ind_var_name] = Realization.from_tensor(\n                ind_var_name,\n                infos[ind_var_name][\"shape\"],\n                \"individual\",\n                torch.stack(\n                    [realizations_history[indices_min[1][i]][ind_var_name].tensor_realizations[i].clone() for i, idx in\n                     enumerate(data.indices)]))\n\n        ind_parameters = model.get_param_from_real(\n            mode_output)  # TODO ordering between the ind variables, should not be the case\n\n        ### TODO : The following was added for the conversion from Results to IndividualParameters. Everything should be changed\n\n        individual_parameters = IndividualParameters()\n        p_names = list(ind_parameters.keys())\n        n_sub = len(ind_parameters[p_names[0]])\n\n        for i in range(n_sub):\n            p_dict = {k: ind_parameters[k][i].numpy() for k in p_names}\n            p_dict = {k: v[0] if v.shape[0] == 1 else v.tolist() for k, v in p_dict.items()}\n            individual_parameters.add_individual_parameters(str(i), p_dict)\n\n        return individual_parameters\n","repo_name":"bsauty/leaspy_fork","sub_path":"leaspy/algo/personalize/mode_realisations.py","file_name":"mode_realisations.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10711478510","text":"\"\"\"\nTask 1.\n\nWork with variables: create a few of them,\nprint them to the screen, ask the user for several numbers and\nstrings, store them in variables, and print them to the screen.\n\nExample:\nEnter your name: Vasily\nEnter your password: vas\nEnter your age: 45\nYour account login details: name - Vasily, password - vas, age - 45\n\"\"\"\na = input(\"enter your first name: \")\nb = input(\"enter your last name: \")\nc = int(input(\"enter your age: \"))\n\"\"\"\nhow can I write it ( if c ???) so that if the age is not a number, the user is asked to enter a number again?\n\"\"\"\nd = input(\"enter a login: \")\ne = input(\"create a password: \")\n\nif c >= 18:\n    print(\"your login:\", d, \"your password:\", e)\nelse:\n    print(\"Age restriction 18+\")","repo_name":"LyudmilaGit1/python","sub_path":"lesson1.py","file_name":"lesson1.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9509704871","text":"from tkinter import *\r\n\r\nwindow = Tk()\r\nwindow.title(\"Mile to km Converter\")\r\nwindow.config(padx=20, pady=20)\r\n\r\ninput = Entry(width=10)\r\ninput.grid(column=1, row=0)\r\n\r\nmile_text = Label(text=\"Miles\", font=(\"Arial\", 16))\r\nmile_text.grid(column=2, row=0)\r\nmile_text.config(padx=10)\r\n\r\nequal_text = Label(text=\"is equal to\", font=(\"Arial\", 16))\r\nequal_text.grid(column=0, row=1)\r\n\r\nresult = Label(text=\"\")\r\nresult.grid(column=1, row=1)\r\n\r\nkm_text = Label(text=\"Km\", font=(\"Arial\", 16))\r\nkm_text.grid(column=2, row=1)\r\nkm_text.config(padx=10)\r\n\r\n\r\ndef button_clicked():\r\n    miles = float(input.get())\r\n    km = miles * 1.609  # one mile is approximately 1.609 km\r\n    result.config(text=f\"{km}\")\r\n\r\n\r\n# Button\r\nbutton = Button(text=\"Calculate\", command=button_clicked)\r\nbutton.grid(column=1, row=2)\r\n\r\nwindow.mainloop()\r\n","repo_name":"Robprogram2002/one_hundred_code_challenge_course","sub_path":"days_16_40_intermediate/Day27/miles_to_km.py","file_name":"miles_to_km.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8890243606","text":"import time\nstart = time.time()\nx = 1\ndef 
factor(n):\n\tnumber = 0\n\tfor i in range(1,int(n**0.5)+1):\n\t\tif n % i == 0:\n\t\t\t# count i and n//i as a pair, but do not count the square root twice\n\t\t\tnumber += 1 if i * i == n else 2\n\treturn number\nfor i in range(2,100000):\n\tx += i\n\tif factor(x) >= 500:\n\t\tprint (x)\n\t\tbreak\nelapsed = time.time() - start\nprint (elapsed)\n","repo_name":"simpleman182/Project_Euler","sub_path":"project12.py","file_name":"project12.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6159874775","text":"#! /usr/bin/env python3\n#coding:utf-8\n\n#require bitstring\nimport bitstring\nimport math\nimport sys\n\nfilename = sys.argv[1]\npacket_length = 188\n#packet_length = 204\nsyncbyte = 0x47\nf = bitstring.ConstBitStream(filename=filename)\n\ncount = 0\nfailcount = 0\nsummary = {}\nfirst = True\n\npidNames = {\n    0x0: 'PAT*', #list of PMT PIDs\n    0x1: 'CAT*', #CAS (conditional access) related\n    0x10: 'NIT*', #transmission path info (modulation scheme, etc.)\n    0x11: 'SDT/BAT*', #SDT: channel names and the kinds of EIT being sent\n    0x12: 'EIT*', #programme information, used for the EPG\n    0x26: 'EIT*',\n    0x27: 'EIT*',\n    0x13: 'RST',\n    0x14: 'TDT/TOT*', #current time\n    0x17: 'DCT',\n    0x1e: 'DIT',\n    0x1f: 'SIT',\n    0x20: 'LIT',\n    0x21: 'ERT',\n    0x22: 'PCAT',\n    0x23: 'SDTT*', #software download\n    0x28: 'SDTT*',\n    0x24: 'BIT*', #SI transmission info (EPG related?)\n    0x25: 'NBIT/LDT',\n    0x29: 'CDT',\n    0x2F: 'Multiplex frame header info',\n    0x1fff: 'Null*', #bitrate adjustment (stuffing)\n    0x1fc8: 'PMT(1seg)*', #list of video/audio PIDs (only for 1seg is the PMT PID fixed)\n}\n\n\nclass TS:\n    def __init__(self, packet):\n        self.packet = packet\n\n    def unpack(self):\n        if packet_length == 188:\n            #188-5(header+pointer)-4(CRC)=179, 179*8=1432bits\n            self.sync, self.error, self.pid, self.scramble, self.counter, self.pointer, self.payload, self.crc = \\\n                self.packet.unpack('uint:8, bool, pad:2, uint:13, bits:2, pad:2, uint:4, uint:8, bits:1432, bytes:4')\n        else:\n            #204 (the extra 16 bytes are Reed-Solomon parity)\n            self.sync, self.error, self.pid, self.scramble, self.counter, self.pointer, self.payload, self.crc, self.reedsolomon = \\\n                self.packet.unpack('uint:8, bool, pad:2, uint:13, bits:2, pad:2, uint:4, uint:8, bits:1432, bytes:4, bytes:16')\n\n        #find sync\n        if self.sync != syncbyte:\n            found = self.packet.find('0x47', bytealigned=True) #find expects the pattern as a string...\n            return True\n\n        return False\n\n\nwhile True:\n    try:\n        packet = f.read(packet_length*8) #bit\n    except bitstring.ReadError: #end of stream\n        break\n\n    ts = TS(packet)\n    if ts.unpack():\n        #sync was lost, so seek back\n        if ts.packet.bytepos != 0: #if a sync byte was found inside this packet\n            f.bytepos = f.bytepos - packet_length + ts.packet.bytepos\n        failcount += 1\n\n    else:\n        #continuity counter check\n        #if not first:\n        #    print('PID:{} {}'.format(ts.pid, ts.counter))\n\n        if not ts.error:\n            try:\n                summary[ts.pid] += 1\n            except KeyError:\n                summary[ts.pid] = 1\n\n    first = False\n    count += 1\n\nfor pid, num in sorted(summary.items(), key=lambda x:x[1], reverse=True):\n    try:\n        pidName = pidNames[pid]\n        print('PID {}({}): {}'.format(hex(pid), pidName, num))\n    except KeyError:\n        print('PID {}: {}'.format(hex(pid), num))\n\nprint('-------------------------------')\nprint('PID count:',len(summary))\nprint('Fail:', failcount)\nprint('Total:', count)","repo_name":"mooyax/epgdump_py","sub_path":"pidcount.py","file_name":"pidcount.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20733016856","text":"from flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n# In-memory data storage\ntasks = []\n\n# Helper function to find a task by its ID\ndef find_task(task_id):\n    for task in tasks:\n        if task['id'] == task_id:\n            return task\n    
return None\n\n# Helper function to generate a unique ID for a task\ndef generate_task_id():\n if len(tasks) == 0:\n return 1\n else:\n return tasks[-1]['id'] + 1\n\n# Create a new task\n@app.route('/tasks', methods=['POST'])\ndef create_task():\n data = request.get_json()\n title = data.get('title')\n description = data.get('description')\n due_date = data.get('due_date')\n status = data.get('status', 'Incomplete')\n\n if not title:\n return jsonify({'error': 'Title is required'}), 400\n\n task = {\n 'id': generate_task_id(),\n 'title': title,\n 'description': description,\n 'due_date': due_date,\n 'status': status\n }\n\n tasks.append(task)\n return jsonify(task), 201\n\n# Retrieve a single task by its ID\n@app.route('/tasks/', methods=['GET'])\ndef get_task(task_id):\n task = find_task(task_id)\n if task:\n return jsonify(task)\n else:\n return jsonify({'error': 'Task not found'}), 404\n\n# Update an existing task\n@app.route('/tasks/', methods=['PUT'])\ndef update_task(task_id):\n task = find_task(task_id)\n if task:\n data = request.get_json()\n title = data.get('title')\n description = data.get('description')\n due_date = data.get('due_date')\n status = data.get('status')\n\n if title:\n task['title'] = title\n if description:\n task['description'] = description\n if due_date:\n task['due_date'] = due_date\n if status:\n task['status'] = status\n\n return jsonify(task)\n else:\n return jsonify({'error': 'Task not found'}), 404\n\n# Delete a task\n@app.route('/tasks/', methods=['DELETE'])\ndef delete_task(task_id):\n task = find_task(task_id)\n if task:\n tasks.remove(task)\n return jsonify({'message': 'Task deleted'})\n else:\n return jsonify({'error': 'Task not found'}), 404\n\n# List all tasks with pagination\n@app.route('/tasks', methods=['GET'])\ndef list_tasks():\n page = request.args.get('page', default=1, type=int)\n per_page = request.args.get('per_page', default=10, type=int)\n start = (page - 1) * per_page\n end = start + per_page\n\n paginated_tasks = tasks[start:end]\n return jsonify(paginated_tasks)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"rhitambanerjee/backend-intern-assignment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27950200857","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 30 18:48:54 2022\r\n\r\n@author: harik\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom flask import Flask,request,render_template\r\nimport pickle\r\nimport bz2\r\n\r\n#function to decompress and unpickle the data model\r\ndef decompress_pickle(file):\r\n data = bz2.BZ2File(file, 'rb')\r\n data = pickle.load(data)\r\n return data\r\n\r\n#creating flask app\r\nflask_app = Flask(__name__)\r\n\r\n#model=pickle.load(open('model.pkl','rb'))\r\nmodel = decompress_pickle('model.pbz2')\r\nle = pickle.load(open('label_enc.pkl','rb'))\r\n\r\n\r\n#print(le.inverse_transform([18]))\r\n\r\n\r\n@flask_app.route('/')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@flask_app.route('/prediction',methods=['POST'])\r\ndef predict():\r\n PM25= float(request.values['PM2.5'])\r\n PM25=np.reshape(PM25,(-1,1))\r\n \r\n PM10= float(request.values['PM10'])\r\n PM10=np.reshape(PM10,(-1,1))\r\n \r\n NO= float(request.values['NO'])\r\n NO=np.reshape(NO,(-1,1))\r\n \r\n NO2= float(request.values['NO2'])\r\n NO2=np.reshape(NO2,(-1,1))\r\n \r\n NH3= float(request.values['NH3'])\r\n NH3=np.reshape(NH3,(-1,1))\r\n \r\n CO= 
float(request.values['CO'])\r\n CO=np.reshape(CO,(-1,1))\r\n \r\n SO2= float(request.values['SO2'])\r\n SO2=np.reshape(SO2,(-1,1))\r\n \r\n O3= float(request.values['O3'])\r\n O3=np.reshape(O3,(-1,1))\r\n \r\n Benz= float(request.values['Benzene'])\r\n Benz=np.reshape(Benz,(-1,1))\r\n \r\n Tol= float(request.values['Toluene'])\r\n Tol=np.reshape(Tol,(-1,1))\r\n \r\n Xyl= float(request.values['Xylene'])\r\n Xyl=np.reshape(Xyl,(-1,1))\r\n \r\n City = request.form.get('City')\r\n City = le.transform([City]) #inverse_transform\r\n City = City.item()\r\n print(City)\r\n\r\n features = [City,PM25,PM10,NO,NO2,NH3,CO,SO2,O3,Benz,Tol,Xyl]\r\n #features = [15.05,43.47,3.30,14.59,19.91,0.95,10.12,22.85,0.10,0.04,1.270,2]\r\n data_out=np.array(features).reshape(1,-1)\r\n prediction=model.predict(data_out)\r\n prediction= prediction.astype('int')\r\n prediction = prediction.item()\r\n \r\n\r\n if (prediction >=0) & (prediction <=50):\r\n bucket = \"Good\"\r\n elif (prediction >=51) & (prediction <=100):\r\n bucket = \"Satisfactory\"\r\n elif (prediction >=101) & (prediction <=200):\r\n bucket = \"Moderate\"\r\n elif (prediction >=201) & (prediction <=300):\r\n bucket = \"Poor\"\r\n elif (prediction >=301) & (prediction <=400):\r\n bucket = \"Very Poor\"\r\n else: \r\n bucket = \"Severe\"\r\n\r\n \r\n \r\n return render_template('home.html',prediction_text=\"Air quality index (AQI) is : {}\".format(prediction)\r\n ,bucket_text=\"Air quality bucket (AQI) is : {}\".format(bucket))\r\n\r\nif __name__=='__main__':\r\n flask_app.run(port=8000)\r\n\r\n\r\n","repo_name":"harikrish242/ICT_Main_Project","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28990778","text":"import unittest\n\nfrom simulation import *\n\n\nclass TestPlateWasteCalculator(unittest.TestCase):\n\n def setUp(self):\n self.food_item_1 = FoodItem(\"Bread\", FoodType.PERISHABLE, 5, 7, 1.0) # 1 kg of Bread\n self.food_item_2 = FoodItem(\"Apple\", FoodType.PERISHABLE, 3, 4, 0.5) # 0.5 kg of Apple\n\n def test_plate_waste_percentage(self):\n calculator = FixedPercentageWasteCalculator(0.1) # 10% of the food is wasted\n consumed_items = [(self.food_item_1, 0.5), (self.food_item_2, 0.3)]\n\n waste = calculator.compute_plate_waste(consumed_items)\n\n self.assertEqual(waste, [(self.food_item_1, 0.05), (self.food_item_2, 0.03)])\n\n def test_total_waste(self):\n calculator = FixedPercentageWasteCalculator(0.2) # 20% of the food is wasted\n consumed_items = [(self.food_item_1, 1.0), (self.food_item_2, 0.5)]\n\n waste = calculator.compute_plate_waste(consumed_items)\n\n self.assertAlmostEqual(sum([item[1] for item in waste]), 0.3,places=4)\n\n\nclass TestPerishableLeftoversCalculator(unittest.TestCase):\n\n def setUp(self):\n self.food_item_1 = FoodItem(\"Bread\", FoodType.PERISHABLE, 5, 7, 1.0)\n self.food_item_2 = FoodItem(\"Apple\", FoodType.PERISHABLE, 3, 4, 0.5)\n\n def test_leftovers_after_waste(self):\n calculator = FixedPercentageLeftoverGenerator(0.2) # 20% of uneaten food is leftover\n\n consumed_items = [(self.food_item_1, 0.7), (self.food_item_2, 0.4)]\n plate_waste = [(self.food_item_1, 0.1), (self.food_item_2, 0.05)]\n\n leftovers = calculator.compute_leftovers(consumed_items, plate_waste)\n\n self.assertAlmostEqual(leftovers, 0.22) # No non-perishable leftovers\n\n\n\nif __name__ == '__main__':\n 
unittest.main()","repo_name":"CarrKnight/pantryManager","sub_path":"tests/test_leftovers.py","file_name":"test_leftovers.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25608728017","text":"#!/usr/bin/env python3.5\nimport sys\nimport re\n\nload_time = 0\nruntime = 0\n\nwith open(sys.argv[1], 'r') as f:\n for line in f:\n line = line.strip().split()\n benchmark = line[0]\n raw_time = line[1]\n\n m = re.match(r\"(.*)(m)(.*)(s)\", raw_time)\n if m:\n minutes = int(m.group(1))\n #print(minutes)\n seconds = float(m.group(3))\n #print(m.group(2))\n runtime = minutes * 60 + seconds\n\n print(\"%s %f\" % (benchmark, runtime))\n","repo_name":"portersrc/blankit","sub_path":"audit/mispredict-timeline-results/parse_runtimes_dot_out.py","file_name":"parse_runtimes_dot_out.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"2840414773","text":"\"\"\"\"\nThis version uses the standard ProcessPoolExecutor for parallelizing the concurrent calls to the GROBID services.\nGiven the limits of ThreadPoolExecutor (input stored in memory, blocking Executor.map until the whole input\nis acquired), it works with batches of PDF of a size indicated in the config.json file (default is 1000 entries).\nWe are moving from first batch to the second one only when the first is entirely processed - which means it is\nslightly sub-optimal, but should scale better. However acquiring a list of million of files in directories would\nrequire something scalable too, which is not implemented for the moment.\n\"\"\"\n\nimport argparse\nimport concurrent.futures\nimport io\nimport json\nimport os\nimport time\n\nimport requests\n\nimport ntpath\nfrom grobid.client import ApiClient\n\n\nclass grobid_client(ApiClient):\n\n def __init__(self, config_path='./config.json'):\n self.config = None\n self._load_config(config_path)\n\n def _load_config(self, path='./config.json'):\n \"\"\"\n Load the json configuration\n \"\"\"\n config_json = open(path).read()\n self.config = json.loads(config_json)\n\n # test if the server is up and running...\n the_url = 'http://' + self.config['grobid_server']\n if len(self.config['grobid_port']) > 0:\n the_url += \":\" + self.config['grobid_port']\n the_url += \"/api/isalive\"\n r = requests.get(the_url)\n status = r.status_code\n\n if status != 200:\n print('GROBID server does not appear up and running ' + str(status))\n else:\n print(\"GROBID server is up and running\")\n\n def process(\n self,\n input,\n output,\n n,\n service,\n generateIDs,\n consolidate_header,\n consolidate_citations,\n force,\n teiCoordinates):\n batch_size_pdf = self.config['batch_size']\n pdf_files = []\n\n for (dirpath, dirnames, filenames) in os.walk(input):\n for filename in filenames:\n if filename.endswith('.pdf') or filename.endswith('.PDF'):\n pdf_files.append(os.sep.join([dirpath, filename]))\n\n if len(pdf_files) == batch_size_pdf:\n self.process_batch(\n pdf_files,\n output,\n n,\n service,\n generateIDs,\n consolidate_header,\n consolidate_citations,\n force,\n teiCoordinates)\n pdf_files = []\n\n # last batch\n if len(pdf_files) > 0:\n self.process_batch(\n pdf_files,\n output,\n n,\n service,\n generateIDs,\n consolidate_header,\n consolidate_citations,\n force,\n teiCoordinates)\n\n def process_batch(\n self,\n pdf_files,\n output,\n n,\n service,\n generateIDs,\n consolidate_header,\n 
consolidate_citations,\n force,\n teiCoordinates):\n print(len(pdf_files), \"PDF files to process\")\n results = []\n with concurrent.futures.ProcessPoolExecutor(max_workers=n) as executor:\n for pdf_file in pdf_files:\n executor.submit(\n self.process_pdf,\n pdf_file,\n output,\n service,\n generateIDs,\n consolidate_header,\n consolidate_citations,\n force,\n teiCoordinates)\n\n def process_pdf(\n self,\n pdf_file,\n output,\n service,\n generateIDs,\n consolidate_header,\n consolidate_citations,\n force,\n teiCoordinates):\n # check if TEI file is already produced\n # we use ntpath here to be sure it will work on Windows too\n pdf_file_name = ntpath.basename(pdf_file)\n if output is not None:\n filename = os.path.join(output, os.path.splitext(\n pdf_file_name)[0] + '.tei.xml')\n else:\n filename = os.path.join(ntpath.dirname(\n pdf_file), os.path.splitext(pdf_file_name)[0] + '.tei.xml')\n\n if not force and os.path.isfile(filename):\n print(\n filename,\n \"already exist, skipping... (use --force to reprocess pdf input files)\")\n return\n\n print(pdf_file)\n files = {\n 'input': (\n pdf_file,\n open(pdf_file, 'rb'),\n 'application/pdf',\n {'Expires': '0'}\n )\n }\n\n the_url = 'http://' + self.config['grobid_server']\n if len(self.config['grobid_port']) > 0:\n the_url += \":\" + self.config['grobid_port']\n the_url += \"/api/\" + service\n\n # set the GROBID parameters\n the_data = {}\n if generateIDs:\n the_data['generateIDs'] = '1'\n if consolidate_header:\n the_data['consolidateHeader'] = '1'\n if consolidate_citations:\n the_data['consolidateCitations'] = '1'\n if teiCoordinates:\n the_data['teiCoordinates'] = self.config['coordinates']\n\n res, status = self.post(\n url=the_url,\n files=files,\n data=the_data,\n headers={'Accept': 'text/plain'}\n )\n\n if status == 503:\n time.sleep(self.config['sleep_time'])\n return self.process_pdf(pdf_file, output)\n elif status != 200:\n print('Processing failed with error ' + str(status))\n else:\n # writing TEI file\n try:\n with io.open(filename, 'w', encoding='utf8') as tei_file:\n tei_file.write(res.text)\n except OSError:\n print(\"Writing resulting TEI XML file %s failed\" % filename)\n pass\n\n\ndef run(args_dict):\n '''\n {'service': 'processFulltextDocument',\n 'input': '/Users/mitchell_bregman/projects/aske-multivac/',\n 'output': 'out_dump',\n 'config': './config.json',\n 'n': '4',\n 'generateIDs': False,\n 'consolidate_header': True,\n 'consolidate_citations': True,\n 'force': False,\n 'teiCoordinates': False}\n '''\n\n input_path = args_dict['input']\n config_path = args_dict['config']\n output_path = args_dict['output']\n\n n = 10\n if args_dict['n'] is not None:\n try:\n n = int(args_dict['n'])\n except ValueError:\n print(\"Invalid concurrency parameter n:\",\n n, \"n = 10 will be used by default\")\n pass\n\n # if output path does not exist, we create it\n if output_path is not None and not os.path.isdir(output_path):\n try:\n print(\n \"output directory does not exist but will be created:\",\n output_path)\n os.makedirs(output_path)\n except OSError:\n print(\"Creation of the directory %s failed\" % output_path)\n else:\n print(\"Successfully created the directory %s\" % output_path)\n\n service = args_dict['service']\n generateIDs = args_dict['generateIDs']\n consolidate_header = args_dict['consolidate_header']\n consolidate_citations = args_dict['consolidate_citations']\n force = args_dict['force']\n teiCoordinates = args_dict['teiCoordinates']\n\n client = grobid_client(config_path=config_path)\n\n start_time = 
time.time()\n\n client.process(\n input_path,\n output_path,\n n,\n service,\n generateIDs,\n consolidate_header,\n consolidate_citations,\n force,\n teiCoordinates)\n\n runtime = round(time.time() - start_time, 3)\n print(\"runtime: %s seconds \" % (runtime))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Client for GROBID services\")\n parser.add_argument(\n \"service\",\n help=\"one of [processFulltextDocument, processHeaderDocument, processReferences]\")\n parser.add_argument(\n \"--input\",\n default=None,\n help=\"path to the directory containing PDF to process\")\n parser.add_argument(\n \"--output\",\n default=None,\n help=\"path to the directory where to put the results (optional)\")\n parser.add_argument(\n \"--config\",\n default=\"./config.json\",\n help=\"path to the config file, default is ./config.json\")\n parser.add_argument(\n \"--n\",\n default=10,\n help=\"concurrency for service usage\")\n parser.add_argument(\n \"--generateIDs\",\n action='store_true',\n help=\"generate random xml:id to textual XML elements of the result files\")\n parser.add_argument(\n \"--consolidate_header\",\n action='store_true',\n help=\"call GROBID with consolidation of the metadata extracted from the header\")\n parser.add_argument(\n \"--consolidate_citations\",\n action='store_true',\n help=\"call GROBID with consolidation of the extracted bibliographical references\")\n parser.add_argument(\n \"--force\",\n action='store_true',\n help=\"force re-processing pdf input files when tei output files already exist\")\n parser.add_argument(\n \"--teiCoordinates\",\n action='store_true',\n help=\"add the original PDF coordinates (bounding boxes) to the extracted elements\")\n\n args_dict = vars(parser.parse_args())\n\n run(args_dict)\n","repo_name":"GallupGovt/multivac","sub_path":"src/data/extract_text.py","file_name":"extract_text.py","file_ext":"py","file_size_in_byte":9780,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"} +{"seq_id":"70767655121","text":"\"\"\"Snake, classic arcade game.\n\nExercises\n\n1. How do you make the snake faster or slower? [DONE]\n2. How can you make the snake go around the edges? [DONE]\n3. How would you move the food? [ALREADY SATISFIED]\n4. Change the snake to respond to arrow keys. [ALREADY SATISFIED]\n*5. Make snake and fruit be different colors for every new game. 
[DONE]\n\"\"\"\n\nfrom turtle import *\nfrom random import randrange, choice\nfrom freegames import square, vector\n\nfood = vector(0, 0)\nsnake = [vector(10, 0)]\naim = vector(0, -10)\n\ndef set_initial_colors():\n colorOptions = [\n 'black',\n 'green',\n 'blue',\n 'orange',\n 'purple',\n 'yellow',\n 'pink',\n ]\n snakeColor = choice(colorOptions)\n fruitColor = choice(colorOptions)\n if fruitColor == snakeColor:\n while fruitColor == snakeColor:\n fruitColor = choice(colorOptions)\n return snakeColor,fruitColor\n\ndef change(x, y):\n \"Change snake direction.\"\n aim.x = x\n aim.y = y\n\ndef inside(head):\n \"Return True if head inside boundaries.\"\n return -200 < head.x < 190 and -200 < head.y < 190\n\ndef move():\n colors = clrs\n \"Move snake forward one segment.\"\n head = snake[-1].copy()\n head.move(aim)\n\n if head in snake:\n square(head.x, head.y, 9, 'red')\n update()\n return\n\n if head.x == -200:\n head.x = 190\n update()\n elif head.x == 190:\n head.x = -200\n update()\n if head.y == -200:\n head.y = 190\n update()\n elif head.y == 190:\n head.y = -200\n update()\n\n snake.append(head)\n\n if head == food:\n print('Snake:', len(snake))\n food.x = randrange(-15, 15) * 10\n food.y = randrange(-15, 15) * 10\n else:\n snake.pop(0)\n\n clear()\n\n for body in snake:\n square(body.x, body.y, 9, colors[0])\n\n square(food.x, food.y, 9, colors[1])\n update()\n \n #We change the speed of the snake making this value higher\n #Higher value = Slower\n #Lower value = Faster \n ontimer(move, 50) \n\nsetup(420, 420, 370, 0)\nhideturtle()\ntracer(False)\nglobal clrs\nclrs = set_initial_colors()\nlisten()\nonkey(lambda: change(10, 0), 'Right')\nonkey(lambda: change(-10, 0), 'Left')\nonkey(lambda: change(0, 10), 'Up')\nonkey(lambda: change(0, -10), 'Down')\nonkey(lambda: change(10, 0), 'd')\nonkey(lambda: change(-10, 0), 'a')\nonkey(lambda: change(0, 10), 'w')\nonkey(lambda: change(0, -10), 's')\nonkey(lambda: change(10, 0), 'D')\nonkey(lambda: change(-10, 0), 'A')\nonkey(lambda: change(0, 10), 'W')\nonkey(lambda: change(0, -10), 'S')\nmove()\ndone()\n","repo_name":"DavidF2714/Website-202113-100","sub_path":"Python/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19401345812","text":"import os\nimport numpy as np\nfrom imageio import imread\nimport cv2\n\nimport utils\nimport align_dataset_test as align_dataset\nfrom config import CONFIG\n\nfrom train import AlignNet\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\nimport logging\nlogger = logging.getLogger('matplotlib')\nlogger.setLevel(logging.INFO)\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nplt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'\n\nimport random\nimport itertools\nimport argparse\n\nfrom numpy import array, zeros, full, argmin, inf, ndim\nfrom scipy.spatial.distance import cdist\nfrom math import isinf\n\nfrom dtw import dtw\n\ndef dist_fn(x, y):\n dist = np.sum((x-y)**2)\n return dist\n\n\ndef get_nn(embs, query_emb):\n dist = np.linalg.norm(embs - query_emb, axis=1)\n assert len(dist) == len(embs)\n return np.argmin(dist), np.min(dist)\n\ndef show_mat_align(D, nns):\n plt.imshow(D.T)\n plt.plot(nns, 'r', linewidth=1)\n plt.colorbar()\n plt.show()\n\ndef save_mat_align(D, nns, path):\n plt.imshow(D.T)\n plt.plot(nns, 'r', linewidth=1)\n plt.colorbar()\n plt.savefig(path)\n plt.close()\n\ndef align(query_feats, candidate_feats, use_dtw):\n \"\"\"Align 
videos based on nearest neighbor or dynamic time warping.\"\"\"\n if use_dtw:\n _, D, _, path = dtw(query_feats, candidate_feats, dist=dist_fn)\n _, uix = np.unique(path[0], return_index=True)\n nns = path[1][uix]\n\n else:\n nns = []\n _, D, _, _ = dtw(query_feats, candidate_feats, dist=dist_fn)\n for i in range(len(query_feats)):\n nn_frame_id, _ = get_nn(candidate_feats, query_feats[i])\n nns.append(nn_frame_id)\n return nns, D\n\ndef align_and_video(args, a_emb, b_emb, a_name, b_name, a_frames, b_frames):\n nns_a, dist_mat_a = align(a_emb, a_emb, use_dtw=args.use_dtw)\n save_mat_align(dist_mat_a, nns_a, args.dest+'Self-{}-align-{}-stride-{}-dtw-{}-bs-{}.png'.format(a_name, args.mode,\n args.stride, args.use_dtw, args.batch_size).replace('/', '_'))\n\n nns_b, dist_mat_b = align(b_emb, b_emb, use_dtw=args.use_dtw)\n save_mat_align(dist_mat_b, nns_b, args.dest+'Self-{}-align-{}-stride-{}-dtw-{}-bs-{}.png'.format(b_name, args.mode,\n args.stride, args.use_dtw, args.batch_size).replace('/', '_'))\n\n nns, dist_mat = align(a_emb[::args.stride], b_emb[::args.stride], use_dtw=args.use_dtw)\n\n print(dist_mat.shape)\n\n save_mat_align(dist_mat, nns, args.dest+'{}-{}-align-{}-stride-{}-dtw-{}-bs-{}.png'.format(a_name, b_name, args.mode, \n args.stride, args.use_dtw, args.batch_size).replace('/', '_'))\n\n aligned_imgs = []\n a_frames = a_frames[::args.stride]\n b_frames = b_frames[::args.stride]\n\n max_len = max(len(a_frames), len(b_frames))\n\n for i in range(max_len):\n \n aimg = imread(a_frames[min(i, len(a_frames)-1)])\n aimg = cv2.resize(aimg, (224, 224))\n bimg_nn = imread(b_frames[nns[min(i, len(nns)-1)]])\n bimg_nn = cv2.resize(bimg_nn, (224, 224))\n\n bimg_i = imread(b_frames[min(i, len(b_frames)-1)])\n bimg_i = cv2.resize(bimg_i, (224, 224))\n\n print('Aligned {} - {}'.format(min(i, len(a_frames)-1), nns[min(i, len(a_frames)-1)]))\n\n ab_img_nn = np.concatenate((aimg, bimg_nn), axis=1)\n ab_img_i = np.concatenate((aimg, bimg_i), axis=1)\n\n ab_img = np.concatenate((ab_img_nn, ab_img_i), axis=0)\n aligned_imgs.append(ab_img)\n \n def make_video(img):\n\n frames = [] # for storing the generated images\n fig = plt.figure()\n\n print('LEN: ', len(img))\n\n for i in range(len(img)):\n frames.append([plt.imshow(img[i],animated=True)])\n\n ani = animation.ArtistAnimation(fig, frames, interval=50, blit=True,\n repeat_delay=1000)\n ani.save(args.dest+'{}-{}-align-{}-stride-{}-dtw-{}-bs-{}.mp4'.format(a_name, b_name, args.mode, \n args.stride, args.use_dtw, args.batch_size).replace('/', '_'))\n plt.close(fig)\n\n make_video(aligned_imgs)\n\ndef main(args):\n\n model = AlignNet.load_from_checkpoint(args.model_path, map_location=args.device)\n model.to(args.device)\n\n if args.mode == 'train':\n model.train()\n else:\n model.eval()\n\n eval_transforms = utils.get_transforms(augment=False)\n\n random.seed(args.seed)\n data = align_dataset.AlignData(args.data_path, args.batch_size, CONFIG.DATA, transform=eval_transforms, flatten=False)\n\n for i in range(data.n_classes):\n # get 2 videos of 0th action\n data.set_action_seq(action=i, num_seqs=args.num_seqs)\n\n embeddings = []\n frame_paths = []\n names = []\n\n for act_iter in iter(data):\n for seq_iter in act_iter:\n\n seq_embs = []\n seq_fpaths = []\n for _, batch in enumerate(seq_iter):\n \n a_X, a_name, a_frames = batch\n \n print(a_X.shape)\n print(a_name)\n \n a_emb = model(a_X.to(args.device).unsqueeze(0))\n print(a_emb.shape)\n\n seq_embs.append(a_emb.squeeze(0).detach().cpu().numpy())\n seq_fpaths.extend(a_frames)\n \n seq_embs = 
np.concatenate(seq_embs, axis=0)\n embeddings.append(seq_embs)\n frame_paths.append(seq_fpaths)\n names.append(a_name)\n\n print(len(embeddings))\n print(len(frame_paths))\n\n print(embeddings[0].shape)\n print(embeddings[1].shape)\n print(frame_paths[0][-1])\n print(frame_paths[1][-1])\n print(names)\n \n for i, j in itertools.combinations(range(len(embeddings)), 2):\n align_and_video(args, embeddings[i], embeddings[j], names[i], names[j], frame_paths[i], frame_paths[j])\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, required=True)\n parser.add_argument('--model_path', type=str, required=True)\n parser.add_argument('--batch_size', type=int, default=40)\n parser.add_argument('--mode', type=str, default='eval')\n parser.add_argument('--dest', type=str, default='./')\n parser.add_argument('--stride', type=int, default=1)\n parser.add_argument('--use_dtw', dest='use_dtw', action='store_true')\n\n parser.add_argument('--num_seqs', type=int, default=2)\n\n parser.add_argument('--device', type=str, default='cuda')\n parser.add_argument('--seed', type=int, default=0)\n\n args = parser.parse_args()\n\n main(args)\n","repo_name":"trquhuytin/LAV-CVPR21","sub_path":"visualize_alignment.py","file_name":"visualize_alignment.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"70433493523","text":"\"\"\"Test feed-forward time.\n\nThis file is used to test feed-forward\ntime of trained model for CNN head.\ncomment line 641,642 of models_all,\nuncomment line 644-646 of models_all.\n\"\"\"\nimport time\nimport torch\nimport numpy as np\nfrom torchvision import transforms\nfrom PIL import Image\nfrom wiring import NCPWiring\nfrom nets.ltc_cell import LTCCell\nfrom nets.cnn_head import ConvolutionHead_Nvidia, ConvolutionHead_AlexNet, \\\n ConvolutionHead_ResNet\nfrom nets.models_all import NCP_Model\nfrom utils import __crop\n\nNCP_N_100 = \"/home/ubuntu/repos/results_haotian/01_CARLA_Data/\" \\\n \"06_CNN_head_comparison/05_Trained_model/\" \\\n \"01_NvidiaCNN+NCP/seed_100/\"\nNCP_A_100 = \"/home/ubuntu/repos/results_haotian/01_CARLA_Data/\" \\\n \"06_CNN_head_comparison/05_Trained_model/\" \\\n \"02_AlexNet+NCP/seed_100/\"\nNCP_R_100 = \"/home/ubuntu/repos/results_haotian/01_CARLA_Data/\" \\\n \"06_CNN_head_comparison/05_Trained_model/\" \\\n \"03_ResNet+NCP/seed_100/\"\nIMAGE_PATH = \"/home/ubuntu/repos/results_haotian/01_CARLA_Data/\" \\\n \"06_CNN_head_comparison/04_Comparison/05_016_0009_image.png\"\n\n\nS_DIM = (3, 66, 200)\nA_DIM = 3\nSEQ_LENGTH = 1 # takes each image sequentially.\ntransform = None if isinstance(S_DIM, int) else \\\n transforms.Compose(\n [transforms.ToPILImage(),\n transforms.Lambda(lambda img:__crop(img, (10, 80), (500, 176))),\n transforms.Resize((66, 200)),\n transforms.ToTensor()]\n )\n\ncnn_head_n = ConvolutionHead_Nvidia(\n S_DIM,\n SEQ_LENGTH,\n num_filters=32,\n features_per_filter=4\n)\n# declare AlexNet CNN head\ncnn_head_a = ConvolutionHead_AlexNet(\n S_DIM,\n SEQ_LENGTH,\n num_filters=32,\n features_per_filter=4\n)\n# declare ResNet CNN head\ncnn_head_r = ConvolutionHead_ResNet(\n S_DIM,\n SEQ_LENGTH,\n num_filters=32,\n features_per_filter=4\n)\n\ninput_shape = (1, 32 * 4) # the same for every cnn head.\n# cnn_head_a.num_filters * cnn_head.features_per_filter\n# for nvidia+NCP\nwiring_n = NCPWiring(inter_neurons=64, command_neurons=32, motor_neurons=3,\n sensory_fanout=48, inter_fanout=24,\n 
recurrent_command=24, motor_fanin=16)\n# for AlexNet+NCP\nwiring_a = NCPWiring(inter_neurons=64, command_neurons=32, motor_neurons=3,\n sensory_fanout=48, inter_fanout=24,\n recurrent_command=24, motor_fanin=16)\n# for ResNet+NCP\nwiring_r = NCPWiring(inter_neurons=64, command_neurons=32, motor_neurons=3,\n sensory_fanout=48, inter_fanout=24,\n recurrent_command=24, motor_fanin=16)\n\nwiring_n.build(input_shape)\nwiring_a.build(input_shape)\nwiring_r.build(input_shape)\n\n# time interval between 2 consecutive pics is 0.04s.\nltc_cell_n = LTCCell(wiring=wiring_n, time_interval=0.04)\nltc_cell_a = LTCCell(wiring=wiring_a, time_interval=0.04)\nltc_cell_r = LTCCell(wiring=wiring_r, time_interval=0.04)\n\npolicy_n = NCP_Model(ltc_cell=ltc_cell_n, conv_head=cnn_head_n)\npolicy_a = NCP_Model(ltc_cell=ltc_cell_a, conv_head=cnn_head_a)\npolicy_r = NCP_Model(ltc_cell=ltc_cell_r, conv_head=cnn_head_r)\n\npolicy_n.load(NCP_N_100)\npolicy_n.eval() # in evaluation mode\n\npolicy_a.load(NCP_A_100)\npolicy_a.eval() # in evaluation mode\n\npolicy_r.load(NCP_R_100)\npolicy_r.eval() # in evaluation mode\n\n# image\nstates = np.asarray(Image.open(IMAGE_PATH), dtype=np.uint8)\nstates_CNN = transform(states) # this is for CNN\n# input of CNN_head, should be B,T,C,H,W, needs to broadcast for use.\nstates_NCP = torch.unsqueeze(states_CNN, 0)\nstates_NCP = torch.unsqueeze(states_NCP, 0)\n\nhidden_state_n, hidden_state_a, hidden_state_r = None, None, None\nt_n, t_a, t_r = [], [], [] # save time,\n# t_n: time_nvidia, t_a: time_alexNet, t_r: time_resnet\n\n# start calculating time for feedforward process\nfor i in range(200):\n\n t_1 = time.time()\n act_pairs_r, hidden_state_r = policy_r.evaluate_on_single_sequence(\n states_NCP,\n hidden_state=hidden_state_r\n )\n t_r.append(time.time() - t_1)\n\n t_2 = time.time()\n act_pairs_a, hidden_state_a = policy_a.evaluate_on_single_sequence(\n states_NCP,\n hidden_state=hidden_state_a\n )\n t_a.append(time.time() - t_2)\n\n t_3 = time.time()\n act_pairs_n, hidden_state_n = policy_n.evaluate_on_single_sequence(\n states_NCP,\n hidden_state=hidden_state_n\n )\n t_n.append(time.time() - t_3)\n\nprint(t_r)\nprint('---')\nprint(t_a)\nprint('---')\nprint(t_n)\n\nTIME_1 = f\"Average Feedforward time for ResNet+NCP {sum(t_r)/200}; \"\nTIME_2 = f\"Average Feedforward time for AlexNet+NCP {sum(t_a)/200}; \"\nTIME_3 = f\"Average Feedforward time for NvidiaCNN+NCP {sum(t_n)/200}; \"\n\nF = \"/home/ubuntu/repos/results_haotian/01_CARLA_Data/\" \\\n \"06_CNN_head_comparison/latency.txt\"\nwith open(F, 'w', encoding='utf-8', errors='surrogateescape') as fil:\n fil.write(TIME_1)\n fil.write(TIME_2)\n fil.write(TIME_3)\n","repo_name":"xhtsansiro/End-to-End-learning-for-Autonomous-Driving","sub_path":"latency_cnn_head.py","file_name":"latency_cnn_head.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21746813375","text":"#!/usr/bin/python3\nimport os\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\ninp = os.path.join(dir_path, 'ops')\n\nops_file = open(inp, 'r')\nops_lines = ops_file.readlines()\n\nops_lines = list(map(lambda x: x.strip(), ops_lines))\n\ni = 0\nwhile i < len(ops_lines):\n # reserved opcodes are skipped (default case of the switch)\n if \"(reserved)\" in ops_lines[i]:\n i += 2\n continue\n\n name = str(ops_lines[i])\n opcode = str(ops_lines[i+1])\n\n # else and end commands have no type so we are done with them\n if(not name == \"else\" and not name == 
\"end\"):\n # now we have to parse the type signatures that appear on line i+2\n # so let's define a function that take the line and gives back two strings\n # one with the args and one with the ret\n\n def parse_args(line):\n if line == \"\":\n return \"\",\"\"\n args, ret = line.split('→');\n args = args.strip(\"[]\")\n ret = ret.strip(\"[]\")\n return args, ret\n\n args, ret = parse_args(ops_lines[i+2])\n\n # Now that we have args and ret as strings with no brackets etc we need to map\n # them to push_back statements. Let's write a function that take a type string\n # and returns a list of itypes\n\n def get_itypes(args):\n types = args.split(\" \")\n # Now we need a functions that reads a type and return the push_back statement\n def statement_for_type(type):\n if type == \"\":\n return \"\"\n if type[0] == 't':\n # we have a polymorphic type\n if len(type) > 1:\n if type[1] == '∗':\n if len(type) == 3:\n return \"ptype({}, true, false)\".format(type[2])\n return \"ptype(0, true, false)\"\n if type[1] == '?':\n if len(type) == 3:\n return \"ptype({}, false, true)\".format(type[2])\n return \"ptype(0, false, true)\"\n return \"ptype({}, false, false)\".format(type[1])\n return \"ptype(0, false, false)\"\n return \"type::Value::{}\".format(type)\n return list(map(statement_for_type, types))\n # for type in types:\n # print(statement_for_type(type))\n i+=4\n else:\n args = \"\"\n ret = \"\"\n i+=3\n\n args = get_itypes(args)\n ret = get_itypes(ret)\n\n args = \"{\" + \", \".join(args) + \"}\"\n ret = \"{\" + \", \".join(ret) + \"}\"\n\n print(\"\"\"\\\n new InstrFactory<{}, Numeric>(\"{}\",\n {}, {}\n ),\"\"\".format(opcode, name, args, ret))\n","repo_name":"ioanalex/cwasm","sub_path":"instrs.py","file_name":"instrs.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18266908246","text":"import random\r\nfrom random import randint\r\n\r\ndef lojaSapato(escolha, lista_Num_Sap):\r\n\r\n preco_sapato = (escolha + 50) // 2\r\n\r\n if escolha in lista_Num_Sap:\r\n print('Purchased size', escolha, 'shoe for $', preco_sapato)\r\n lista_Num_Sap.remove(escolha)\r\n return\r\n else:\r\n print('size ', escolha, ' no longer available, so no purchase.')\r\n \r\n return preco_sapato\r\n\r\ndef cliente():\r\n nome = input('Digite seu nome! 
\\n')\r\n    return nome\r\n\r\nnumSap = int(input('Number of shoes in the store: '))\r\nif numSap > 0 and numSap < 10**3:\r\n    lista_Num_Sap = [random.randint(2, 20) for x in range(numSap)]\r\n    print('Available sizes: \\n', lista_Num_Sap)\r\n\r\nlistaSoma = []\r\n\r\nlaco = int(input('\\nWould you like to buy?\\n1 - To buy \\n2 - To exit\\n'))\r\nif laco == 1:\r\n    escolha = int(input('What shoe size would you like?\\n'))\r\n    Cliente = cliente()\r\n    listaSoma[len(listaSoma):] = [lojaSapato(escolha, lista_Num_Sap)]\r\n    print(listaSoma)\r\nelse:\r\n    print('End of program!')\r\n\r\n","repo_name":"EvertonJosebc/Desafios-Python","sub_path":"exePY.py","file_name":"exePY.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15726969977","text":"# IMPORTATION STANDARD\nimport os\nfrom collections import namedtuple\n\n# IMPORTATION THIRDPARTY\nimport pandas as pd\nimport pytest\n\n# IMPORTATION INTERNAL\nfrom openbb_terminal.stocks.options import payoff_controller\n\n# pylint: disable=E1101\n# pylint: disable=W0603\n# pylint: disable=E1111\n\nEXPIRY_DATES = [\n    \"2022-01-07\",\n    \"2022-01-14\",\n    \"2022-01-21\",\n    \"2022-01-28\",\n    \"2022-02-04\",\n    \"2022-02-18\",\n    \"2022-03-18\",\n    \"2022-04-14\",\n    \"2022-05-20\",\n    \"2022-06-17\",\n    \"2022-07-15\",\n    \"2022-09-16\",\n    \"2023-01-20\",\n    \"2023-03-17\",\n    \"2023-06-16\",\n    \"2023-09-15\",\n    \"2024-01-19\",\n]\n\nCALLS = pd.DataFrame(\n    data={\n        \"contractSymbol\": [\"TSLA211231C00200000\", \"TSLA211231C00250000\"],\n        \"lastTradeDate\": [\n            pd.Timestamp(\"2021-12-29 15:01:33\"),\n            pd.Timestamp(\"2021-12-10 15:09:36\"),\n        ],\n        \"strike\": [200.0, 250.0],\n        \"lastPrice\": [878.02, 744.2],\n        \"bid\": [884.5, 834.5],\n        \"ask\": [887.0, 837.0],\n        \"change\": [-11.849976, 0.0],\n        \"percentChange\": [-1.3316524, 0.0],\n        \"volume\": [30.0, 11.0],\n        \"openInterest\": [36, 12],\n        \"impliedVolatility\": [9.46875408203125, 8.238286101074216],\n        \"inTheMoney\": [True, True],\n        \"contractSize\": [\"REGULAR\", \"REGULAR\"],\n        \"currency\": [\"USD\", \"USD\"],\n    }\n)\n\nPUTS = pd.DataFrame(\n    {\n        \"contractSymbol\": [\"TSLA211231P00200000\", \"TSLA211231P00250000\"],\n        \"lastTradeDate\": [\n            pd.Timestamp(\"2021-12-29 20:42:48\"),\n            pd.Timestamp(\"2021-12-29 17:42:53\"),\n        ],\n        \"strike\": [200.0, 250.0],\n        \"lastPrice\": [0.01, 0.01],\n        \"bid\": [0.0, 0.0],\n        \"ask\": [0.01, 0.01],\n        \"change\": [0.0, 0.0],\n        \"percentChange\": [0.0, 0.0],\n        \"volume\": [22.0, 1.0],\n        \"openInterest\": [1892, 513],\n        \"impliedVolatility\": [6.125002343749999, 5.375003281249999],\n        \"inTheMoney\": [False, False],\n        \"contractSize\": [\"REGULAR\", \"REGULAR\"],\n        \"currency\": [\"USD\", \"USD\"],\n    }\n)\n\nOptions = namedtuple(\"Options\", [\"calls\", \"puts\"])\nCHAIN = Options(calls=CALLS, puts=PUTS)\n\n\n@pytest.fixture(scope=\"module\")\ndef vcr_config():\n    return {\n        \"filter_headers\": [(\"User-Agent\", None)],\n        \"filter_query_parameters\": [\n            (\"period1\", \"MOCK_PERIOD_1\"),\n            (\"period2\", \"MOCK_PERIOD_2\"),\n            (\"date\", \"MOCK_DATE\"),\n        ],\n    }\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n    \"queue, expected\",\n    [\n        ([\"load\", \"help\"], []),\n        ([\"quit\", \"help\"], [\"help\"]),\n    ],\n)\ndef test_menu_with_queue(expected, mocker, queue):\n    path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n    # MOCK GET_CHAIN + GET_PRICE\n    mocker.patch(\n        
target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n # MOCK SWITCH\n mocker.patch(\n target=f\"{path_controller}.PayoffController.switch\",\n return_value=[\"quit\"],\n )\n result_menu = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=queue,\n ).menu()\n\n assert result_menu == expected\n\n\n@pytest.mark.vcr(record_mode=\"none\")\ndef test_menu_without_queue_completion(mocker):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n # ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU\n mocker.patch(\n target=\"openbb_terminal.feature_flags.USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=\"openbb_terminal.parent_classes.session\",\n )\n mocker.patch(\n target=\"openbb_terminal.parent_classes.session.prompt\",\n return_value=\"quit\",\n )\n\n # ENABLE AUTO-COMPLETION : CONTROLLER.COMPLETER\n mocker.patch.object(\n target=payoff_controller.obbff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n )\n mocker.patch(\n target=f\"{path_controller}.session.prompt\",\n return_value=\"quit\",\n )\n\n controller = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n )\n mocker.patch(\n target=f\"{path_controller}.PayoffController\",\n return_value=controller,\n )\n result_menu = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n ).menu()\n\n assert result_menu == []\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"mock_input\",\n [\"help\", \"homee help\", \"home help\", \"mock\"],\n)\ndef test_menu_without_queue_sys_exit(mock_input, mocker):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n # DISABLE AUTO-COMPLETION\n mocker.patch.object(\n target=payoff_controller.obbff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=False,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n return_value=None,\n )\n\n # MOCK USER INPUT\n mocker.patch(\"builtins.input\", return_value=mock_input)\n\n # MOCK SWITCH\n class SystemExitSideEffect:\n def __init__(self):\n self.first_call = True\n\n def __call__(self, *args, **kwargs):\n if self.first_call:\n self.first_call = False\n raise SystemExit()\n return [\"quit\"]\n\n mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())\n mocker.patch(\n target=f\"{path_controller}.PayoffController.switch\",\n new=mock_switch,\n )\n\n result_menu = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n ).menu()\n\n assert result_menu == []\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"underlying\",\n [\"long\", \"short\", \"none\"],\n)\ndef test_print_help(mocker, underlying):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n 
target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n controller = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n )\n controller.call_pick([f\"--type={underlying}\"])\n controller.print_help()\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"home\", \"help\"]),\n (\"help/help\", [\"help\", \"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"quit\",\n \"reset\",\n \"stocks\",\n \"load MOCK_TICKER\",\n \"options\",\n \"exp -d 2022-01-07\",\n \"payoff\",\n ],\n ),\n ],\n)\ndef test_switch(an_input, expected_queue, mocker):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n controller = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n )\n queue = controller.switch(an_input=an_input)\n\n assert queue == expected_queue\n\n\n@pytest.mark.vcr(record_mode=\"none\")\ndef test_call_cls(mocker):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK SYSTEM\n mocker.patch(\"os.system\")\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n controller = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n )\n controller.call_cls([])\n\n assert controller.queue == []\n os.system.assert_called_once_with(\"cls||clear\")\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"func, queue, expected_queue\",\n [\n (\n \"call_exit\",\n [],\n [\"quit\", \"quit\", \"quit\", \"quit\"],\n ),\n (\"call_exit\", [\"help\"], [\"quit\", \"quit\", \"quit\", \"quit\", \"help\"]),\n (\"call_home\", [], [\"quit\", \"quit\", \"quit\"]),\n (\"call_help\", [], []),\n (\"call_quit\", [], [\"quit\"]),\n (\"call_quit\", [\"help\"], [\"quit\", \"help\"]),\n (\n \"call_reset\",\n [],\n [\n \"quit\",\n \"quit\",\n \"quit\",\n \"reset\",\n \"stocks\",\n \"load MOCK_TICKER\",\n \"options\",\n \"exp -d 2022-01-07\",\n \"payoff\",\n ],\n ),\n (\n \"call_reset\",\n [\"help\"],\n [\n \"quit\",\n \"quit\",\n \"quit\",\n \"reset\",\n \"stocks\",\n \"load MOCK_TICKER\",\n \"options\",\n \"exp -d 2022-01-07\",\n \"payoff\",\n \"help\",\n ],\n ),\n ],\n)\ndef test_call_func_expect_queue(expected_queue, func, mocker, queue):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n controller = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=queue,\n )\n result = getattr(controller, func)([])\n\n assert result is None\n assert controller.queue == expected_queue\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"tested_func, other_args, mocked_func, called_args, called_kwargs\",\n [\n (\n \"call_list\",\n 
[],\n \"\",\n [],\n dict(),\n ),\n # (\n # \"call_add\",\n # [\n # \"0\",\n # \"--put\",\n # \"--short\",\n # ],\n # \"\",\n # [],\n # dict(),\n # ),\n # (\n # \"call_rmv\",\n # [\n # \"0\",\n # \"--all\",\n # ],\n # \"\",\n # [],\n # dict(),\n # ),\n # (\n # \"call_rmv\",\n # [\n # \"0\",\n # ],\n # \"\",\n # [],\n # dict(),\n # ),\n (\n \"call_pick\",\n [\"--type=long\"],\n \"\",\n [],\n dict(),\n ),\n (\n \"call_pick\",\n [\"--type=none\"],\n \"\",\n [],\n dict(),\n ),\n (\n \"call_pick\",\n [\"--type=short\"],\n \"\",\n [],\n dict(),\n ),\n (\n \"call_plot\",\n [],\n \"plot_payoff\",\n [],\n dict(),\n ),\n (\n \"call_sop\",\n [],\n \"\",\n [],\n dict(),\n ),\n ],\n)\ndef test_call_func_test(\n tested_func, mocked_func, other_args, called_args, called_kwargs, mocker\n):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n if mocked_func:\n mock = mocker.Mock()\n mocker.patch(\n target=f\"{path_controller}.{mocked_func}\",\n new=mock,\n )\n\n controller = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n )\n controller.call_add([\"0\", \"--put\", \"--short\"])\n getattr(controller, tested_func)(other_args)\n\n if called_args or called_kwargs:\n mock.assert_called_once_with(*called_args, **called_kwargs)\n else:\n mock.assert_called_once()\n else:\n controller = payoff_controller.PayoffController(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=None,\n )\n controller.call_add([\"0\", \"--put\", \"--short\"])\n getattr(controller, tested_func)(other_args)\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"ticker, expected\",\n [\n (None, []),\n (\n \"MOCK_TICKER\",\n [\"stocks\", \"load MOCK_TICKER\", \"options\", \"exp -d 2022-01-07\", \"payoff\"],\n ),\n ],\n)\ndef test_custom_reset(expected, mocker, ticker):\n path_controller = \"openbb_terminal.stocks.options.payoff_controller\"\n\n # MOCK GET_CHAIN + GET_PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n controller = payoff_controller.PayoffController(\n ticker=\"\",\n expiration=\"2022-01-07\",\n queue=None,\n )\n controller.ticker = ticker\n\n result = controller.custom_reset()\n\n assert result == expected\n","repo_name":"rohankumardubey/OpenBBTerminal","sub_path":"tests/openbb_terminal/stocks/options/test_payoff_controller.py","file_name":"test_payoff_controller.py","file_ext":"py","file_size_in_byte":14298,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"74420667921","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app01', '0010_simplemodel'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Host_new',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('hostname', models.CharField(max_length=64)),\n ('ip', models.GenericIPAddressField()),\n ('user_group', models.ForeignKey(to='app01.UserGroup')),\n ],\n ),\n migrations.CreateModel(\n name='UserGroup_new',\n fields=[\n ('id', models.AutoField(verbose_name='ID', 
serialize=False, auto_created=True, primary_key=True)),\n ('caption', models.CharField(max_length=64)),\n ('user_info', models.ManyToManyField(to='app01.UserInfo')),\n ],\n ),\n ]\n","repo_name":"xuefenga616/mygit","sub_path":"python_stu/s11_19/app01/migrations/0011_host_new_usergroup_new.py","file_name":"0011_host_new_usergroup_new.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39118462152","text":"from math import ceil\nimport random\nimport unittest\n\ndef _merge_sort(a, start, end, b):\n if end <= start:\n return\n mid = start + int(ceil((end-start)/2.0))\n _merge_sort(a, start, mid-1, b)\n _merge_sort(a, mid, end, b)\n index1=start\n index2=mid\n indexb=start\n while index1 <= (mid-1) and index2 <= end:\n if a[index1] < a[index2]:\n b[indexb] = a[index1]\n index1 += 1\n else:\n b[indexb] = a[index2]\n index2 += 1\n indexb += 1\n for i in range(index1, mid):\n b[indexb] = a[i]\n indexb += 1\n for i in range(index2, end+1):\n b[indexb] = a[i]\n indexb += 1\n for i in range(start, end+1):\n a[i] = b[i]\n return\n\ndef merge_sort(a):\n b = [0 for x in a]\n _merge_sort(a,0,len(a)-1,b)\n\ndef _qsort(a, start, end):\n if end <= start:\n return\n pivot = end\n for i in range(start, end):\n if a[i] < a[end] and pivot != end:\n a[i], a[pivot] = a[pivot], a[i]\n pivot += 1\n elif a[i] >= a[end] and pivot == end:\n pivot = i\n a[pivot], a[end] = a[end], a[pivot]\n _qsort(a, start, pivot-1)\n _qsort(a, pivot+1, end)\n return\n\ndef qsort(a):\n _qsort(a, 0, len(a)-1)\n\nclass Tests(unittest.TestCase):\n def sort_helper(self, func):\n for i in range(100):\n len=random.randint(0,100)\n a = [random.randint(0,100) for i in range(len)]\n b = a[:]\n b.sort()\n func(a)\n self.assertEqual(b,a)\n def test_mergesort(self):\n self.sort_helper(merge_sort)\n def test_qsort(self):\n self.sort_helper(qsort)\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"kasunbg/CrackingTheCodingInterview","sub_path":"Misc/sorters.py","file_name":"sorters.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23948166894","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport sys,os,json\nimport urllib, urllib2\n\nclass EsScanner():\n def __init__(self, host, port):\n self.host = host\n self.port = port\n\n def get_scrollid(self, index, size=200):\n url = \"http://%s:%s/%s/_search?search_type=scan&scroll=1m\" % (self.host, self.port, index)\n param = {\"size\": size}\n res = urllib.urlopen(url, data=json.dumps(param)).read()\n # print res\n jres = json.loads(res)\n total = jres[\"hits\"][\"total\"]\n return (jres[\"_scroll_id\"], total)\n\n def get_scan_datas(self, scrollid):\n url = \"http://%s:%s/_search/scroll?scroll=1m&scroll_id=%s\" % (self.host, self.port, scrollid)\n res = urllib.urlopen(url).read()\n jres = json.loads(res)\n return jres[\"hits\"][\"hits\"]\n\n def delete_datas(self, ids, index, estype):\n if ids:\n url = \"http://%s:%s/_bulk\" % (self.host, self.port)\n param = \"\"\n for id in ids:\n bulk_action = {\"delete\":{\"_index\":index, \"_type\":estype, \"_id\":id}}\n param += json.dumps(bulk_action)+\"\\n\"\n res = urllib.urlopen(url, data=param).read()\n else:\n #print \"no data to es\"\n 
pass\n","repo_name":"ohliming/algorithm","sub_path":"common/es_scanner.py","file_name":"es_scanner.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14391567892","text":"import numpy as np\nimport sys\nfrom sys import argv\n\ndir_parent = argv[1]\ndir_child = argv[2]\n\n\n# re-arrange the vocabulary, such that we transfer the the equivalent tokens properly\nd_child = []\nfor l in open(dir_child + \"/vocab.yml\", \"r\"):\n w = l.strip().split()[0][:-1]\n d_child.append(w)\n\nvocab_size = len(d_child)\n\nchild_new_vocab = open(dir_child + \"/vocab.yml\", \"w\")\nnew_vocab = []\nfor l in open(dir_parent + \"/vocab.yml\", \"r\"):\n w = l.strip().split()[0][:-1]\n if (w in d_child):\n new_vocab.append(w)\n d_child.remove(w)\n else:\n new_vocab.append(None)\n if (len(new_vocab) == vocab_size):\n break\n\nfor cnt in range(vocab_size):\n if (len(new_vocab) > cnt) and (new_vocab[cnt] is not None):\n w = new_vocab[cnt]\n else:\n w = d_child[0]\n d_child.remove(w)\n child_new_vocab.write(w + \": \"+str(cnt)+\"\\n\")\n \n\n\nprint(\"RESIZING TO \", vocab_size)\n\n#if parent has less vocab size, double it first\nold_model = np.load(dir_parent + \"/model.npz.best-translation.npz\")\nnew_model = dict(old_model)\n\nold_size = len(old_model[\"Wemb\"])\nnew_size = vocab_size\n\nwhile (new_size > len(new_model[\"Wemb\"])):\n new_model[\"decoder_ff_logit_out_b\"] = np.concatenate((new_model[\"decoder_ff_logit_out_b\"], new_model[\"decoder_ff_logit_out_b\"]), axis=1)\n new_model[\"Wemb\"] = np.concatenate((new_model[\"Wemb\"], new_model[\"Wemb\"]))\n\n# resize the parent's embedding size to match the child's vocab size\nprint(\"Before: \", new_model[\"decoder_ff_logit_out_b\"].shape, new_model[\"Wemb\"].shape)\nnew_model[\"decoder_ff_logit_out_b\"] = new_model[\"decoder_ff_logit_out_b\"][:,:new_size]\nnew_model[\"Wemb\"] = new_model[\"Wemb\"][:new_size]\n\nprint(\"After: \", new_model[\"decoder_ff_logit_out_b\"].shape, new_model[\"Wemb\"].shape)\n\n# replace the vocab size in yml comfiguration\nprint(\"Old yml: \", new_model[\"special:model.yml\"].tostring())\n\ntmp = new_model[\"special:model.yml\"].tostring().decode(\"utf-8\") \ntmp = tmp.replace(str(old_size), str(new_size))\nnew_model[\"special:model.yml\"] = np.array(bytearray(tmp, 'utf-8'))\n\nprint(\"New yml: \", new_model[\"special:model.yml\"].tostring())\n\nnp.savez(dir_child + \"/model.npz\", **new_model)\n\n","repo_name":"afaji/Marian-transfer","sub_path":"transfer_model.py","file_name":"transfer_model.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29058712197","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport collections\nimport functools\nimport itertools\nimport uuid\n\nfrom rep.utils import balance_utils as ut\nfrom rep.core.config import AbstractConfig\n\n__all__ = ['TestConfig', 'create_issue']\n\n\nclass TestConfig(AbstractConfig):\n def __init__(self, items=None):\n self._items = items or dict()\n\n def __getitem__(self, item):\n return self._items[item.upper()]\n\n def __setitem__(self, key, value):\n self._items[key.upper()] = value\n\n def __getattr__(self, item):\n try:\n return self._items[item.upper()]\n except KeyError as e:\n raise AttributeError(*e.args)\n\n def get(self, key, default=None):\n return self._items.get(key.upper(), default)\n\n\ndef user(login):\n return ut.Struct(id=login, 
login=login)\n\n\ndef status(key):\n return ut.Struct(key=key)\n\n\ndef type_(key):\n return ut.Struct(key=key)\n\n\ndef priority(id_):\n return ut.Struct(id=str(id_))\n\n\ndef component(id_):\n return ut.Struct(id=int(id_))\n\n\ndef components(comp):\n if isinstance(comp, basestring):\n return [component(comp)]\n elif isinstance(comp, collections.Iterable):\n return [component(c) for c in comp]\n else:\n return [component(comp)]\n\n\ndef queue(key):\n return ut.Struct(key=key)\n\n\ndef changelog_item(obj_info):\n login, dt_, params = obj_info\n\n res = ut.Struct(\n updatedAt=dt_,\n updatedBy=user(login),\n id=uuid.uuid4().hex\n )\n fields = list()\n res['fields'] = fields\n\n fields_creators = {\n 'type': type_,\n 'status': status,\n 'queue': queue,\n 'assignee': user,\n 'createdBy': user,\n 'priority': priority,\n 'components': components,\n }\n\n for field, (from_, to) in params.iteritems():\n creator = fields_creators.get(field)\n if creator:\n if from_ is not None:\n from_ = creator(from_)\n\n if to is not None:\n to = creator(to)\n\n fields.append({\n 'field': ut.Struct(id=field),\n 'from': from_,\n 'to': to\n })\n\n return res\n\n\ndef iterable(objects, wrap=None):\n if wrap:\n return itertools.imap(wrap, objects)\n else:\n return iter(objects)\n\n\nclass Collection(object):\n def __init__(self, objects, wrap=None):\n if wrap:\n self._objects = map(wrap, objects)\n else:\n self._objects = list(objects)\n\n def __iter__(self):\n return iter(self._objects)\n\n def get_all(self, field=None):\n if field is None:\n return iter(self._objects)\n else:\n fields = field.split(',')\n return itertools.ifilter(lambda cl_el: any(f['field'].id in fields for f in cl_el.fields), self._objects)\n\n\ndef create_issue(params):\n attrs_creators = {\n 'assignee': user,\n 'createdBy': user,\n 'status': status,\n 'type': type_,\n 'priority': priority,\n 'queue': queue,\n 'components': functools.partial(iterable, wrap=component),\n 'tags': iterable,\n 'changelog': functools.partial(Collection, wrap=changelog_item)\n }\n res = ut.Struct()\n for param, value in params:\n creator = attrs_creators.get(param)\n if creator and value is not None:\n value = creator(value)\n\n res[param] = value\n\n return res\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/kpi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39148547200","text":"import os\nimport glob\n#import shutil\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n\nIMAGE_FILE = \"cat.jpg\"\n\ndef draw_image(data_argumentation, numpy_array, \\\n result_images, temporary_directory, number):\n\n # generate 9 images by ImageDataGenerator\n generate_data = data_argumentation.flow(numpy_array, batch_size=1,\\\n save_to_dir = temporary_directory,\\\n save_prefix='image', save_format='jpg')\n\n for i in range(9):\n batch = generate_data.next()\n\nif __name__ == '__main__':\n\n # preprocessing image\n image = load_img(IMAGE_FILE)\n numpy_array = img_to_array(image)\n numpy_array = np.expand_dims(numpy_array, axis=0)\n\n temporary_directory = \"Data-Argument\"\n os.mkdir(temporary_directory)\n\n # load ImageDataGenerator & processing images\n for number in range(90):\n data_argumentation = ImageDataGenerator(rotation_range=number)\n draw_image(data_argumentation, 
numpy_array, \\\n \"result_rotation90.jpg\", temporary_directory, number)","repo_name":"r2en/data-argumentation","sub_path":"template-data-argumentation.py","file_name":"template-data-argumentation.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14268107659","text":"from asyncio import subprocess\n\nimport subprocess\nimport logging\nimport regex as re\n\nfrom usbipy.device import usbipyDevice\nlog = logging.getLogger(\"usbipy\")\nhdl = logging.StreamHandler()\nhdl.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\"))\nlog.addHandler(hdl)\nlog.setLevel(logging.DEBUG)\n\n\nclass usbipClient():\n def __init__(self, debug=False, flags=[]):\n log.info(\"Creating client\")\n self.usbip = [\"usbip\"] + flags\n self.debug = debug\n if self.debug:\n self.usbip += [\"-d\"]\n\n def list(self, remote=None) -> list:\n devices = []\n if remote is None:\n try:\n out = subprocess.getoutput(\n \" \".join(self.usbip + [\"list\", \"-l\"])).split(\"\\n\")\n for bus, device_name in zip(out[::3], out[1::3]):\n match = re.search(\"[a-z0-9]{4}:[0-9a-z]{4}\", device_name)\n _id = match.group() if match is not None else None\n _name = device_name.split(\":\")[0][:-1]\n _info = device_name.split(\":\")[1].split(\"(\")[0]\n dev = usbipyDevice(\n name=_name, busid=bus, info=_info, id=_id)\n devices.append(dev)\n except Exception as e:\n log.log(logging.ERROR, e)\n else:\n try:\n out = subprocess.getoutput(\n \" \".join(self.usbip + [\"list\", \"-r\", remote]))\n if \"no exportable devices\" in out:\n raise Exception(\"no exportable devices\")\n else:\n out = out.split(\"\\n\")[3:]\n for iline, line in enumerate(out):\n if re.findall(\"[0-9]{1,}-[0-9.]{1,}\", line.split(\":\")[0]):\n device_name = out[iline]\n match = re.search(\"[a-z0-9]{4}:[0-9a-z]{4}\", device_name)\n _id = match.group() if match is not None else None\n match = re.search(\"[0-9]{1,}-[0-9.]{1,}\", device_name)\n _busid = match.group()\n _name = device_name.split(\":\")[1][:-1]\n _info = device_name.split(\":\")[2].split(\"(\")[0]\n dev = usbipyDevice(\n name=_name,\n busid=_busid,\n info=_info,\n id=_id,\n host=remote)\n devices.append(dev)\n except Exception as e:\n log.log(logging.INFO, out)\n log.log(logging.ERROR, e)\n return devices\n\n def list_ports(self):\n log.info(\"Active ports:\")\n try:\n out = subprocess.getoutput(\n \" \".join(\n self.usbip +\n [\"port\"])).split(\"\\n\")[\n 2:]\n for port, dev, busid in zip(out[::4], out[1::4], out[2::4]):\n port = port.split(\":\")[0]\n match = re.search(\"[0-9]{1,}\", port)\n if match:\n port = match.group()\n name = dev.replace(\" \", \"\")\n match = re.search(\"[0-9]{1,}-[0-9.]{1,}\", busid)\n if match:\n _busid = match.group()\n else:\n _busid = None\n print(f\"Port {port} {name} {_busid}\")\n except Exception as e:\n log.log(logging.ERROR, e)\n\n def get_port(self, device: usbipyDevice):\n try:\n out = subprocess.getoutput(\n \" \".join(\n self.usbip +\n [\"port\"])).split(\"\\n\")[\n 2:]\n for port, busid in zip(out[::4], out[2::4]):\n port = port.split(\":\")[0]\n match = re.search(\"[0-9]{1,}\", port)\n if match:\n port = match.group()\n match = re.search(\"[0-9]{1,}-[0-9.]{1,}\", busid)\n if match:\n _busid = match.group()\n else:\n _busid = None\n if device.busid == _busid:\n device.port = port\n return True\n except Exception as e:\n log.log(logging.ERROR, e)\n return False\n\n def bind(self, device: usbipyDevice):\n try:\n out = subprocess.getoutput(\n \" 
\".join(self.usbip + [\"bind\", \"-b\", device.busid])).split(\"\\n\")\n log.log(logging.INFO, out[0])\n except Exception as e:\n log.log(logging.ERROR, e)\n\n def unbind(self, device: usbipyDevice):\n try:\n out = subprocess.getoutput(\n \" \".join(self.usbip + [\"unbind\", \"-b\", device.busid])).split(\"\\n\")\n log.log(logging.INFO, out[0])\n except Exception as e:\n log.log(logging.ERROR, e)\n\n def attach(self, device: usbipyDevice):\n try:\n out = subprocess.getoutput(\" \".join(\n self.usbip + [\"attach\", \"-r\", device.host, \"-b\", device.busid])).split(\"\\n\")\n log.log(logging.INFO, out[0])\n except Exception as e:\n log.log(logging.ERROR, e)\n\n def detach(self, device: usbipyDevice):\n try:\n if device.port is None:\n if not self.get_port(device):\n raise Exception(\"Cannot find device port\")\n out = subprocess.getoutput(\" \".join(self.usbip + [\"detach\", \"-p\", device.port])).split(\"\\n\")\n except Exception as e:\n log.log(logging.ERROR, e)\n","repo_name":"Elemento-Modular-Cloud/usbipy","sub_path":"usbipy/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18358193567","text":"# Дана строка, состоящая из русских слов, разделенных пробелами\n# (одним или несколькими). Найти длину самого короткого слова.\n\n\nS = 'Мне часто снился этот сон и снился сердцу милый двор '\ni = 0\nsize = 100\n\n\nfor total in S:\n if total == ' ':\n i = 0\n else:\n i += 1 # шаг\n b = i\n if i == 0 and b < size:\n size = b\nprint('Длина самого короткого слова: ', size)","repo_name":"ullllosta/Yakushova_sem1","sub_path":"PZ_7/PZ7_2.py","file_name":"PZ7_2.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17448050807","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom apis.controllers.post import PostController\nfrom apis.serializers import PostSerializer\n\n\n@api_view(['GET', 'POST'])\n@permission_classes((IsAuthenticated,))\ndef get_list_or_create(request):\n \"\"\"查询帖子列表 or 新建一个帖子\n \"\"\"\n if request.method == 'GET':\n details = request.GET.dict()\n posts = PostController().get_list(**details)\n serializer = PostSerializer(posts, many=True)\n return Response(serializer.data)\n\n if request.method == 'POST':\n details = dict(request.data)\n post = PostController().create(**details)\n if post:\n serializer = PostSerializer(post)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(None, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['PUT', 'PATCH', 'DELETE', 'GET'])\n@permission_classes((IsAuthenticated,))\ndef single_entity(request, id):\n \"\"\"通过 id 访问某个帖子,进行查询、修改、删除等操作\n \"\"\"\n if request.method == 'GET':\n post = PostController().get_single(id=id)\n if post:\n serializer = PostSerializer(post)\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(None, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'DELETE':\n res = PostController().delete(id=id)\n if res:\n return Response(None, status=status.HTTP_200_OK)\n return Response(None, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'PUT':\n details = dict(request.data)\n res = PostController().update(**details)\n if res:\n post = 
PostController().get_single(id=id)\n serializer = PostSerializer(post)\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(None, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'PATCH':\n details = dict(request.data)\n res = PostController().update(**details)\n if res:\n post = PostController().get_single(id=id)\n serializer = PostSerializer(post)\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(None, status=status.HTTP_404_NOT_FOUND)\n","repo_name":"OIdiotLin/projexor-backend","sub_path":"apis/views/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5059288012","text":"from itertools import groupby\nfrom heapq import *\nimport os\n\ndict_rc = {\n \"A\":\"T\",\"G\":\"C\",\"T\":\"A\",\"C\":\"G\"\n}\n\nDNA_dict = {\n \"0\":{\"A\":\"T\",\"T\":\"G\",\"G\":\"C\",\"C\":\"A\"},\n \"1\":{\"A\":\"G\",\"T\":\"C\",\"G\":\"A\",\"C\":\"T\"}\n}\n\nIX = []\nP = []\nIX_dna = []\nData_fragments = []\nfragments = []\nfragment = []\ntemp_i = \"\"\nfragments_i = []\nID = bin(176)[2:len(bin(176))]\n\ndef huffman(input):\n\n codes = {}\n class Node(object):\n left = None\n right = None\n item = None\n weight = 0\n\n def __init__(self, i, w):\n self.item = i\n self.weight = w\n\n def setChildren(self, ln, rn):\n self.left = ln\n self.right = rn\n\n def __repr__(self):\n return \"%s - %s — %s - %s\" % (self.item, self.weight, self.left, self.right)\n\n def __lt__(self, a):\n return (self.weight, a.weight)\n def codeIt(s, node):\n if node.item:\n if not s:\n codes[node.item] = \"0\"\n else:\n codes[node.item] = s\n else:\n codeIt(s+\"0\", node.left)\n codeIt(s+\"1\", node.right)\n\n\n itemqueue = [Node(a,len(list(b))) for a,b in groupby(sorted(input))]\n \n heapify(itemqueue)\n while len(itemqueue) > 1:\n l = heappop(itemqueue)\n r = heappop(itemqueue)\n n = Node(None, r.weight+l.weight)\n n.setChildren(l,r)\n heappush(itemqueue, n)\n\n codeIt(\"\",itemqueue[0])\n return codes, \"\".join([codes[ai] for ai in input])\n\t\n\n\ndef apend_zeroes(temp_i, length):\n temp_i = bin(temp_i)\n temp_i = temp_i[2:len(temp_i)]\n while(len(temp_i) < length):\n temp_i = \"0\" + temp_i\n return temp_i\n\ndef final_appenders(fragment_val):\n if fragment_val.startswith('T'):\n fragment_val = 'A' + fragment_val\n elif fragment_val.startswith('A'):\n fragment_val = 'T' + fragment_val\n else:\n fragment_val = 'T' + fragment_val\n \n if fragment_val.endswith('C'):\n fragment_val =fragment_val + 'G'\n elif fragment_val.endswith('G'):\n fragment_val =fragment_val + 'C'\n else:\n fragment_val =fragment_val + 'C'\n\n return fragment_val\n\ndef DNA_Encoder(valuetopass,Start_DNA = \"T\"):\n temp_dna = Start_DNA\n for j in valuetopass:\n temp_dna = temp_dna + DNA_dict[j][Start_DNA]\n Start_DNA = DNA_dict[j][Start_DNA]\n return temp_dna[0:(len(temp_dna)-1)]\n\n\n\n\ndef fragmenting(var):\n for i in range(0, len(var), 5):\n fragments.append(var[i:i+10])\n\n for i in range(0, len(fragments)-1):\n if (i==0 or (i%2 ==0)):\n print('Fragment' + str(i) + ': ' + fragments[i])\n fragment.append(fragments[i])\n else:\n temp_s = \"\"\n for j in fragments[i]:\n temp_s = temp_s + dict_rc[j]\n print('Fragment' + str(i) + ': ' + temp_s)\n fragment.append(temp_s)\n #print(apend_zeroes(i, 12))\n fragments_i.append(apend_zeroes(i, 6))\n\n for i in range (0, len(fragments_i)):\n sumP = \"0\"\n for j in fragments_i[i]:\n if not (i==0 or (i%2 == 0 )):\n sumP = 
bin(int(sumP,2) + int(j,2))\n sumP = bin(int(sumP,2) + int(ID,2))\n P.append(sumP[2:len(sumP)])\n \n for i in range(0, len(fragments_i)):\n IX.append(str(ID) + str(fragments_i[i]) + str(P[i]))\n\n for i in range(0, len(IX)):\n IX_dna.append(DNA_Encoder(IX[i]))\n for i in range(0, len(fragment)):\n Data_fragments.append(final_appenders(fragment[i] + IX_dna[i]))\n\nfile = open('DNA_input.txt',encoding='utf-8')\ninput = file.read()\nprint(input)\nvalue_input = huffman(input)\nn = len(value_input[len(value_input)-1])\nS2 = apend_zeroes(n, 10)\nS3 = \"0\"\nS4 = value_input[len(value_input)-1] + S3 + S2\nwhile(len(S4)%5 != 0):\n S3 = S3 + \"0\"\n S4 = value_input[len(value_input)-1] + S3 + S2\n\nfragmenting(DNA_Encoder(S4))\n\ndst=\"DNA_output.txt\"\nif(os.path.isfile(dst)):\n os.remove(dst)\n \nwith open(dst, \"a\") as myDNAdata:\n for i in Data_fragments:\n myDNAdata.write(i)\n \n \nprint(\"Data encoding into DNA complete.\")\n \n","repo_name":"mahandas/DNA-based-Cryptography","sub_path":"Dna_encrypter.py","file_name":"Dna_encrypter.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"949048785","text":"import csv\r\nimport re\r\n\r\nr=open('Nslang.txt','r')\r\nwr=csv.writer(open('sl.txt','w'))\r\nh={}\r\nfor i in r:\r\n c=i.split('-')\r\n h[c[0].strip()]=c[1].strip()\r\nl=sorted(h.keys())\r\nfor k in l:\r\n x=re.sub(r'[^a-zA-Z0-9]',' ',h[k])\r\n wr.writerow([k.lower(),x.lower()])\r\n\r\n\r\n\r\n\r\n","repo_name":"upadhysh/FAQ-Retrieval-System","sub_path":"making_slang_list.py","file_name":"making_slang_list.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12331563209","text":"'''\n@author ldc\n\n'''\nimport pymysql\nimport redis as redis\n\n'''\nMySQL增删改查操作类\n'''\nclass MySQLUtil:\n\tdef __init__(self,dbName,tableName):\n\t\tself.dbName = dbName\n\t\tself.tableName = tableName\n\n\t# 连接数据库,并生成全局可用的连接对象和查询游标\n\tdef connect(self):\n\t\tself.conn = pymysql.connect(\n\t\t\thost='localhost', user='root', password=\"123456\",\n\t\t\tdatabase=self.dbName, port=3306,\n\t\t)\n\t\tself.cursor = self.conn.cursor()\n\n\t# 关闭全局游标,断开全局连接\n\tdef disconnect(self):\n\t\tself.cursor.close()\n\t\tself.conn.close()\n\n\t# 查询用户名是否存在\n\tdef exists(self,dataDict):\n\t\tcaluse = ''\n\t\tfor key,value in dataDict.items():\n\t\t\tcaluse += key + '=\"'+ value + '\"'\n\t\t# print(caluse)\n\t\tsql = \"\"\"\n\t\t\t\tselect * from %s where %s ;\n\t\t\t \"\"\" % (self.tableName, caluse)\n\t\treturn self.execute(sql)\n\t# 验证用户名和密码是否正确\n\tdef query(self, dataDict):\n\t\t# 查询子条件拼接\n\t\tcaluse = ''\n\t\tfor key, value in dataDict.items():\n\t\t\tcaluse += key + '=\"' + value + '\" and '\n\t\tcaluse = caluse[:-4]\n\t\t# print(caluse)\n\t\tsql = \"\"\"\n\t\t\t\tselect * from %s where %s;\n\t\t \"\"\"% (self.tableName, caluse)\n\t\treturn self.execute(sql)\n\n\t# 添加新用户\n\tdef insert(self, dataDict):\n\t\t# sql语句拼接\n\t\tcolumns = ''\n\t\tvalues = ''\n\t\tfor key, value in dataDict.items():\n\t\t\tcolumns += key + ','\n\t\t\tvalues += '\"' + value + '\",'\n\t\tcolumns = columns[:-1]\n\t\tvalues = values[:-1]\n\t\tsql = \"\"\"\n\t\t\t\tinsert into %s (%s) VALUES (%s);\n\t\t\t \"\"\" % (self.tableName, columns,values)\n\t\t# print(sql)\n\t\treturn self.execute(sql)\n\n\t# 更新\n\tdef update(self, dataDict):\n\t\t# sql语句拼接\n\t\tchangeCol = dataDict['changeCol'] #要改变值的列名\n\t\tcaluse = dataDict['caluse'] #要改变值的子条件\n\t\tsql = 'update %s 
set %s where %s' %(self.tableName, changeCol, caluse)\n\t\treturn self.execute(sql)\n\n\t# 删除\n\tdef delete(self, dataDict):\n\t\t# sql语句拼接\n\t\tcaluse = ''\n\t\tfor key,value in dataDict.items():\n\t\t\tcaluse += key + '=\"' + value + '\"'\n\n\t\tsql = \"\"\"\n\t\t\t\tdelete from %s where %s;\n\t\t\t \"\"\" % (self.tableName,caluse)\n\t\t# print(sql)\n\t\treturn self.execute(sql)\n\t# print(sql)\n\n\t# 执行sql语句\n\tdef execute(self, sql):\n\t\tself.connect()\n\t\taffected = 0\n\t\ttry:\n\t\t\taffected = self.cursor.execute(sql)\n\t\texcept BaseException as e:\n\t\t\tprint(e)\n\t\t\taffected = 0\n\t\tfinally:\n\t\t\tself.conn.commit()\n\t\t\tself.disconnect()\n\t\t\treturn affected\n\n'''\nredis增删改查操作类\n'''\nclass RedisUtil:\n\t# redis连接\n\t@classmethod\n\tdef connect(cls):\n\t\tcls.client = redis.Redis(\n\t\t\thost='localhost', port=6379,\n\t\t\tdb=1, password='123456',\n\t\t)\n\n\t# 判断键是否存在\n\t@classmethod\n\tdef exists(cls,key):\n\t\treturn cls.client.exists(key)\n\n\t# 存储键值,\n\t@classmethod\n\tdef set(cls,key,value):\n\t\t# 键值存储在缓存中,保留时间为30秒\n\t\tcls.client.setex(key,value,30)\n\n\t# 获取键值\n\t@classmethod\n\tdef get(cls,key):\n\t\tres = cls.client.get(key).decode(\"utf-8\")\n\t\treturn res\n\t# 删除键值\n\tdef delete(cls, key):\n\t\tcls.client.delete(key)\n","repo_name":"liangdongchang/pyCheckLoginSys","sub_path":"utils/dbUtil.py","file_name":"dbUtil.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"29580378439","text":"import functools\nimport math\n\nimport torch\n\n\"\"\"\nPE(pos, 2i) = sin(pos / 10000 ^ {2i / d})\nPE(pos, 2i+1) = cos(pos / 10000 ^ {2i / d})\n\n10000 ^ {- 2i / d} = e^log_e{10000 ^ {-2i/d}} = e^{ -2i/d * log_e{10000} }\n\"\"\"\n\n\nclass PositionalEmbedding(torch.nn.Module):\n def __init__(self, channels):\n super().__init__()\n self.channels = channels\n\n i_pos = torch.arange(0, channels, step=2)\n loge_10000 = torch.log(torch.tensor(10000, dtype=torch.float))\n div = torch.exp(-i_pos / channels * loge_10000)\n\n # self.div = div\n # self.pe_default = self.calc_pe_(1024)\n self.register_buffer(\"div\", div, persistent=False)\n self.register_buffer(\"pe_default\", self.calc_pe_(1024), persistent=False)\n\n @torch.no_grad()\n def calc_pe_(self, length: int) -> torch.Tensor:\n pe = torch.zeros((length, self.channels), dtype=torch.float, device=self.div.device)\n position = torch.arange(0, length).unsqueeze(1).to(self.div.device)\n pe[:, 0::2] = torch.sin(position * self.div.unsqueeze(0))\n pe[:, 1::2] = torch.cos(position * self.div.unsqueeze(0))\n return pe\n\n @torch.no_grad()\n def get_pe(self, length: int) -> torch.Tensor:\n if length <= 1024:\n return self.pe_default[:length, :].clone()\n return self.calc_pe_(length)\n\n @torch.no_grad()\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n N = x.shape[0]\n F = x.shape[-1]\n assert F == self.channels\n pe = self.get_pe(N)\n if x.dim() == 3:\n pe = pe.unsqueeze(1)\n return x + pe / math.sqrt(self.channels)\n\n\nclass TreePositionEmbedding(PositionalEmbedding):\n def __init__(self, channels) -> None:\n super().__init__(channels)\n\n @torch.inference_mode()\n def forward(self, parents: torch.Tensor) -> torch.Tensor:\n N = parents.shape[0]\n tree_pe = self.get_pe(N)\n for idx, parent in enumerate(parents[1:]):\n child = idx + 1\n tree_pe[child, :] = tree_pe[child, :] + tree_pe[parent, :] / 2\n return tree_pe / (2 * 
math.sqrt(self.channels))\n","repo_name":"ya-hong/AST-clone-detection","sub_path":"detecter/position_embedding.py","file_name":"position_embedding.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36464255381","text":"from __future__ import annotations\n\nimport logging\nimport pathlib\nimport sys\nimport tempfile\nimport urllib.parse\nfrom dataclasses import dataclass, field\n\nfrom docker_export import Platform\nfrom offspot_config.utils.misc import is_http, parse_size\n\nfrom image_creator import __version__ as vers\nfrom image_creator.logger import Logger\n\n# version of the python interpreter\npyvers = \".\".join([str(p) for p in sys.version_info[:3]])\nbanner: str = rf\"\"\"\n _ _\n (_)_ __ ___ __ _ __ _ ___ ___ _ __ ___ __ _| |_ ___ _ __\n | | '_ ` _ \\ / _` |/ _` |/ _ \\_____ / __| '__/ _ \\/ _` | __/ _ \\| '__|\n | | | | | | | (_| | (_| | __/_____| (__| | | __/ (_| | || (_) | |\n |_|_| |_| |_|\\__,_|\\__, |\\___| \\___|_| \\___|\\__,_|\\__\\___/|_|\n |___/ v{vers}|py{pyvers}\n\n\"\"\"\n\n\n@dataclass(kw_only=True)\nclass Options:\n \"\"\"Command-line options\"\"\"\n\n CONFIG_SRC: str\n OUTPUT: str\n BUILD_DIR: str\n CACHE_DIR: str\n\n show_cache: bool\n check_only: bool\n debug: bool\n\n config_path: pathlib.Path | None = None\n output_path: pathlib.Path | None = None\n build_dir: pathlib.Path | None = None\n cache_dir: pathlib.Path | None = None\n\n keep_failed: bool\n overwrite: bool\n concurrency: int\n max_size: int | None = None\n\n config_url: urllib.parse.ParseResult | None = None\n logger: Logger = field(init=False)\n\n def __post_init__(self):\n self.logger = self.get_logger()\n if is_http(self.CONFIG_SRC):\n self.config_url = urllib.parse.urlparse(self.CONFIG_SRC)\n else:\n self.config_path = pathlib.Path(self.CONFIG_SRC).expanduser().resolve()\n\n if self.debug:\n self.logger.setLevel(logging.DEBUG)\n\n self.output_path = pathlib.Path(self.OUTPUT).expanduser().resolve()\n\n if not self.BUILD_DIR:\n # holds reference to tempdir until Options is released\n # and will thus automatically remove actual folder\n self.__build_dir = tempfile.TemporaryDirectory(\n prefix=\"image-creator_build-dir_\", ignore_cleanup_errors=True\n )\n self.build_dir = (\n pathlib.Path(self.BUILD_DIR or self.__build_dir.name).expanduser().resolve()\n )\n\n if self.CACHE_DIR:\n self.cache_dir = pathlib.Path(self.CACHE_DIR).expanduser().resolve()\n\n if isinstance(self.max_size, str):\n self.max_size = parse_size(self.max_size)\n\n @property\n def version(self):\n return vers\n\n @property\n def config_src(self) -> pathlib.Path | urllib.parse.ParseResult:\n if self.config_url is not None:\n return self.config_url\n if self.config_path is not None:\n return self.config_path\n raise OSError(\"Neither config_url nor config_path\")\n\n @classmethod\n def get_logger(cls) -> Logger:\n return Logger()\n\n\nclass _Global:\n _ready: bool = False\n _debug: bool = False\n options: Options\n platform = Platform.parse(\"linux/arm64/v8\") # our only target arch\n default_eviction: str = \"lru\"\n\n @property\n def debug(self):\n return Global.options.debug if Global._ready else self._debug\n\n @property\n def logger(self):\n return Global.options.logger if Global._ready else Options.get_logger()\n\n\nGlobal = _Global()\nlogger = 
Global.logger\n","repo_name":"offspot/image-creator","sub_path":"src/image_creator/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"23777954454","text":"from nltk.classify.scikitlearn import SklearnClassifier\r\nfrom nltk.corpus import sentence_polarity\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom sklearn import svm\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom Polarity_NLTK.dbhandler2 import DatabaseHandler\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\n\r\n###LinearSVM without additional preprocessing, with tfidf vectorizer and without tfidf vectorizer\r\n#create list for training and list of target values\r\ntrain=[]\r\ntarget = []\r\n#add positive sentences with polarity to training set\r\ntrain = sent_tokenize(sentence_polarity.raw(categories = 'pos'))\r\nl = len(train)\r\n#add positive target to target file\r\nfor i in range(len(train)):\r\n target.append('pos')\r\n#add negative sentences with polarity to training set\r\ntrain.extend(sent_tokenize(sentence_polarity.raw(categories = 'neg')))\r\n#add negative target to target file\r\nfor i in range(len(train)- l):\r\n target.append('neg')\r\n\r\n#get logarithmic values for alpha to be tested during parameter tuning\r\nalpha = []\r\n\r\nfor exponent in range(-15, 1):\r\n alpha.append(0.00001)\r\n alpha.append(0.000001)\r\n alpha.append(2 ** exponent)\r\n\r\n#get parameters tuned for classifier only (approach referred to in the report)\r\n# params = {'clf__alpha': (alpha),\r\n# 'clf__penalty': ('l2', 'elasticnet'),\r\n# 'clf__loss': ('hinge', 'squared_hinge'),\r\n# 'clf__n_iter': (10, 50, 80),\r\n# 'clf__fit_intercept': (True, False),\r\n# 'clf__l1_ratio': (0.05, 0.1, 0.15, 0.2, 0.25),\r\n# 'clf__power_t': (0.1, 0.25, 0.5, 0.75, 1)\r\n# }\r\n\r\n#pipeline to be processed by GridSearchCV with classifer and countvectorizer only\r\n# pipeline = Pipeline([\r\n# ('vect', CountVectorizer()),\r\n# ('clf', SGDClassifier()),\r\n# ])\r\n\r\n\r\n#get parameters tuned for classifier, countvectorizer and tfidfvectorizer\r\nparams = {'vect__max_df': (0.5, 0.75, 1.0),\r\n 'vect__max_features': (None, 5000, 10000, 50000),\r\n 'vect__ngram_range': ((1, 1), (1, 2)),\r\n 'tfidf__use_idf': (True, False),\r\n 'tfidf__norm': ('l1', 'l2'),\r\n 'clf__alpha': (alpha),\r\n 'clf__penalty': ('l2', 'elasticnet'),\r\n 'clf__loss': ('hinge', 'squared_hinge'),\r\n 'clf__n_iter': (10, 50, 80),\r\n 'clf__fit_intercept': (True, False),\r\n 'clf__l1_ratio': (0.05, 0.1, 0.15, 0.2, 0.25),\r\n 'clf__power_t': (0.1, 0.25, 0.5, 0.75, 1)\r\n }\r\n\r\n#pipeline to be processed by GridSearchCV with classifer, tfidf vectorizer and countvectorizer only\r\npipeline = Pipeline([\r\n ('vect', CountVectorizer()),\r\n ('tfidf', TfidfTransformer()),\r\n ('clf', SGDClassifier()),\r\n])\r\n\r\n#parameter tuning using GridSearchCV\r\ngridsearch = GridSearchCV(pipeline, params)\r\ngridsearch.fit(train, target)\r\nprint(gridsearch.best_estimator_.get_params())\r\nprint(gridsearch.score(train,target))\r\n\r\n#get delta newsarticles from database, which did not get a sentiment yet, in\r\nhandler = DatabaseHandler()\r\nresult = handler.execute(\r\n \"\"\"Select n.source_uri as 'source_uri', n.bow as 'bow' from NewsArticlesBOW n WHERE n.source_uri NOT IN (Select 
s.source_uri FROM NewsArticlesLinearSVM_B s);\r\n \"\"\")\r\n\r\n#add articles and predicted sentiments to database table and persist a dict\r\nfor row in result:\r\n i_text = (row['bow'])\r\n i_text = str(i_text)\r\n print(i_text)\r\n sent = gridsearch.predict([i_text])\r\n uri = row[\"source_uri\"]\r\n uri = str(uri)\r\n if(sent == 'pos'):\r\n sent = 1\r\n elif(sent == 'neg'):\r\n sent = 0\r\n print(sent, uri)\r\n processed ={}\r\n processed['source_uri'] = uri\r\n processed['sentiment'] = sent\r\n handler.persistDict('NewsArticlesLinearSVM_B', [processed])\r\n","repo_name":"WebMiningTeamProject/Sentiment-Analysis-NLTK","sub_path":"main_linearsvm.py","file_name":"main_linearsvm.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34250299211","text":"import os\nimport time\n\n\n\nfrom flask import Flask, request, render_template, url_for, redirect, send_from_directory, session\n#from flask_mail import Mail, Message\nfrom hockeyFinal import runAnalysis, expectedValue, getProbs\nfrom hockeyModule import hockey_model\n\n\n\n\napp = Flask(__name__)\n#app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\napp.secret_key = 'super secret key'\napp.config['SESSION_TYPE'] = 'filesystem'\n\n\n@app.route('/', methods=[\"GET\"])\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/getProbs', methods=[\"POST\"])\ndef getProb():\n homeScore = str(request.form[\"homeScore\"])\n awayScore= str(request.form[\"awayScore\"])\n \n isTie = 0\n homeWin = 0\n homeScore = int(homeScore)\n awayScore = int(awayScore)\n diff = abs(homeScore - awayScore)\n totalGoals = homeScore + awayScore\n homeWin = 0\n \n if homeScore > awayScore:\n homeWin = 1\n \n if homeScore == awayScore:\n isTie = 1\n\n \n model = hockey_model('hockeyModel', 'hockeyScaler')\n \n predictions = model.predict_one(homeScore, awayScore, diff, totalGoals, homeWin, isTie)\n \n labelPreds = model.label_predict(predictions)\n \n topPreds = model.label_top(labelPreds, 5)\n \n score1 = list(topPreds)[0]\n probs1 = list(topPreds.values())[0]\n session['probs1'] = float(probs1)\n probs1 = \"{:.2%}\".format(probs1)\n \n score2 = list(topPreds)[1]\n probs2 = list(topPreds.values())[1]\n session['probs2'] = float(probs2)\n probs2 = \"{:.2%}\".format(probs2)\n \n score3 = list(topPreds)[2]\n probs3 = list(topPreds.values())[2]\n session['probs3'] = float(probs3)\n probs3 = \"{:.2%}\".format(probs3)\n \n score4 = list(topPreds)[3]\n probs4 = list(topPreds.values())[3]\n session['probs4'] = float(probs4)\n probs4 = \"{:.2%}\".format(probs4)\n \n score5 = list(topPreds)[4]\n probs5 = list(topPreds.values())[4]\n session['probs5'] = float(probs5)\n probs5 = \"{:.2%}\".format(probs5)\n\n\n displayScen = \"According to the neural network, the odds of 3rd period scores for a \" + str(homeScore) + \" to \" + str(awayScore) + \" game are below.\" \n\n \n session['score1'] = score1\n session['score2'] = score2\n session['score3'] = score3\n session['score4'] = score4\n session['score5'] = score5\n \n session['homeScore'] = homeScore\n session['awayScore'] = awayScore\n session['diff'] = diff\n session['totalGoals'] = totalGoals\n session['homeWin'] = homeWin\n session['isTie'] = isTie\n \n session['displayScen'] = displayScen\n \n \n return render_template(\"index.html\", displayScen=displayScen, score1=score1, probs1=probs1, \n score2=score2, probs2=probs2, score3=score3, probs3=probs3, score4=score4, \n probs4=probs4, score5=score5, probs5=probs5, 
homeScore = homeScore, awayScore = awayScore)\n\n@app.route('/getAnalysis', methods=[\"POST\"])\ndef getResults():\n probs1num = float(session.get('probs1'))\n probs1 = \"{:.2%}\".format(probs1num)\n \n probs2num = float(session.get('probs2'))\n probs2 = \"{:.2%}\".format(probs2num)\n \n probs3num = float(session.get('probs3'))\n probs3 = \"{:.2%}\".format(probs3num)\n \n probs4num = float(session.get('probs4'))\n probs4 = \"{:.2%}\".format(probs4num)\n \n probs5num = float(session.get('probs5'))\n probs5 = \"{:.2%}\".format(probs5num)\n \n \n score1 = session.get('score1')\n score2 = session.get('score2')\n score3 = session.get('score3')\n score4 = session.get('score4')\n score5 = session.get('score5')\n \n displayScen = session.get('displayScen')\n\n homeScore = session.get('homeScore')\n awayScore = session.get('awayScore')\n diff = session.get('diff')\n totalGoals = session.get('totalGoals')\n homeWin = session.get('homeWin')\n isTie = session.get('isTie') \n\n\n model = hockey_model('hockeyModel', 'hockeyScaler')\n predictions = model.predict_one(homeScore, awayScore, diff, totalGoals, homeWin, isTie)\n labelPreds = model.label_predict(predictions)\n model.label_top(labelPreds, 5)\n\n\n \n odds1 = float(request.form[\"odds1\"])\n odds2 = float(request.form[\"odds2\"])\n odds3 = float(request.form[\"odds3\"])\n odds4 = float(request.form[\"odds4\"])\n odds5 = float(request.form[\"odds5\"])\n bet = int(request.form[\"bet\"])\n\n\n evResult = model.expected_value(odds1, odds2, odds3, odds4, odds5, bet)\n \n ev1 = float(evResult[0])\n ev2 = float(evResult[1])\n ev3 = float(evResult[2])\n ev4 = float(evResult[3])\n ev5 = float(evResult[4])\n \n totalScore1 = probs1num\n totalScore1 = \"{:.2%}\".format(totalScore1) \n \n totalScore2 = probs1num + probs2num\n totalScore2 = \"{:.2%}\".format(totalScore2)\n\n totalScore3 = probs1num + probs2num + probs3num\n totalScore3 = \"{:.2%}\".format(totalScore3)\n \n totalScore4 = probs1num + probs2num + probs3num + probs4num\n totalScore4 = \"{:.2%}\".format(totalScore4)\n\n totalScore5 = probs1num + probs2num + probs3num + probs4num + probs5num\n totalScore5 = \"{:.2%}\".format(totalScore5)\n\n\n return render_template(\"index.html\", probs1=probs1, probs2=probs2, probs3=probs3, probs4=probs4, probs5=probs5,\n score1=score1, score2=score2, score3=score3, score4=score4, score5=score5,\n displayScen = displayScen,\n ev1=ev1, ev2=ev2, ev3=ev3, ev4=ev4, ev5=ev5,\n odds1=odds1, odds2=odds2, odds3=odds3, odds4=odds4, odds5=odds5,\n totalScore1=totalScore1, totalScore2=totalScore2, totalScore3=totalScore3, totalScore4=totalScore4,\n totalScore5=totalScore5, homeScore = homeScore, awayScore = awayScore)\n\n\n#app.run(host='0.0.0.0', port=80)\n\n\n# No caching at all for API endpoints.\n@app.after_request\ndef add_header(response):\n # response.cache_control.no_store = True\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response\n\n\n#netstat -ano | findstr :80\n#taskkill /PID /F","repo_name":"edeuber/eric-hockey-appv2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14870759200","text":"#!/usr/bin/python3\n'''\n PDC LAB 4\n IMPLEMENT MAXIMAL SUM SUB-SEGMENT IN THE ARRAY\n'''\n\ninput_array = [31, -41, 59, 26, -53, 58, 97, -93, -23, 24]\ninput_length = 
len(input_array)\n\nsubset_sum = []\n\nfor i in range(0, input_length):\n sum = 0\n for j in range(i, input_length):\n sum = sum + input_array[j]\n subset_sum.append((i, j, sum))\n\n# max_tuple = (start_index, end_index, sum)\nmax_tuple = (0, 0, 0)\n\nfor tupl in subset_sum:\n if tupl[2] > max_tuple[2]:\n max_tuple = tupl\n\nprint(\"Index starts from 0.\\n\")\nprint(\"Input: \" + str(input_array))\nprint(\"\\nMax sum is \" + str(max_tuple[2]) + \" when\")\nprint(\"Start index is \" + str(max_tuple[0]) + \" and\")\nprint(\"End index is \" + str(max_tuple[1]) + \".\")\n","repo_name":"Brihat9/PDC","sub_path":"PDC_LAB_4/maximal_subset_sum.py","file_name":"maximal_subset_sum.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33216230931","text":"\"\"\"\nCreated 12th July 2022\n\n@author : minh.ngo\n\"\"\"\n\nimport numpy as np\nfrom numpy.polynomial.hermite import hermfit, hermval\n\ndef fitted_gauss_hermite(wavelength, intensity, deg):\n # Restore data of wavelength and intensity, after that we sort array by order of wavelength\n dtype=[('wavelength', float), ('intensity', float)]\n values = [(wavelength[i], intensity[i]) for i in range(len(intensity))]\n \n # Note : Use array_np.hstack()\n list_of_tuples = np.array(values, dtype=dtype)\n list_of_tuples = np.sort(list_of_tuples, order='wavelength')\n\n # Recast wavelength and intensity into numpy arrays so we can use their handy features\n wavelength_appro = np.asarray(list_of_tuples[:]['wavelength'])\n intensity_appro = np.asarray(list_of_tuples[:]['intensity'])\n\n # Execute hermfit on data\n parameters = hermfit(wavelength_appro, intensity_appro, deg)\n intensity_appro = hermval(wavelength_appro, parameters)\n key = []\n for i in range(len(parameters)):\n key.append(f\"Par{i}\")\n return dict(zip(key, parameters))","repo_name":"minhngo248/hlsf-cral","sub_path":"lib/fitted_gauss_hermite.py","file_name":"fitted_gauss_hermite.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73749824082","text":"from py_stealth import *\nfrom Scripts.types import Types\nfrom datetime import datetime as dt\nimport re\nimport os\n\n# Changeable\nFORGE_COORDS = (186, 621)\nPROMETHEUS = 1\nKEEP_TOOLS = 1\nKILL_ELEMENTALS = 0\nKEEP_BANDAGES = 100\nFILE_NAME=\"mining\"\nMINE_COORDS = [\n (192, 592),\n (191, 579),\n (179, 579),\n (176, 566),\n (188, 560),\n (176, 556),\n (188, 554)\n]\nCHEST = 0x4018C655\nTILE_SEARCH_RANGE = 8\n# \nLOOT = [0x19B7, 0x19B8, 0x19BA, Types.GOLD_COIN,\n Types.ORE, 0x0F8E, 0x0E35, 0x0FEF]\nROCK = 0x1779\nSKIP_TILE_MESSAGES = [\n \"There is nothing\",\n \"You have no line\",\n \"You decide not to mine\",\n \"Try mining\",\n \"You cannot mine\"\n]\n\nNEXT_TRY_MESSAGES = [\n \"Ваш инструмент\",\n \"Ничего полезного\",\n \"You loosen\",\n \"You decide not\", # Alt-tab protection\n]\n\nMINEABLE_TILES = [1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352,\n 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1361, 1362, 1363, 1386, 581, 582]\n\n\n\ndef cancel_targets():\n CancelWaitTarget()\n if TargetPresent():\n CancelTarget()\n\ndef full_disconnect():\n print(\"Disconnected\")\n SetARStatus(False)\n Disconnect()\n\ndef find_tiles(radius) -> list: \n _tiles_coordinates = [] \n for _tile in MINEABLE_TILES:\n _tiles_coordinates += GetLandTilesArray(GetX(Self()) - radius, GetY(Self()) - radius, GetX(Self()) + radius,\n 
GetY(Self()) + radius, WorldNum(), _tile)\n \n _tiles_coordinates += GetStaticTilesArray(GetX(Self()) - radius, GetY(Self()) - radius, GetX(Self()) + radius,\n GetY(Self()) + radius, WorldNum(), _tile)\n \n print(\"[FindTiles] Found \"+str(len(_tiles_coordinates))+\" tiles\")\n return _tiles_coordinates\n\n\ndef tool_available() -> bool: \n if FindType(Types.PICKAXE, Backpack()):\n return True\n return False\n\ndef bandages_available() -> bool:\n if FindType(Types.BANDAGES, Backpack()):\n if FindQuantity() > 20:\n return True\n return False\n\ndef equip_tool() -> bool:\n if tool_available():\n _right_hand = ObjAtLayer(RhandLayer())\n if _right_hand > 0:\n if GetType(_right_hand) != Types.PICKAXE: \n UnEquip(RhandLayer())\n Wait(500)\n UseType(Types.PICKAXE, 0xFFFF)\n Wait(500)\n else:\n UseType(Types.PICKAXE, 0xFFFF)\n Wait(500)\n else:\n smelt()\n unload()\n _x, _y = MINE_COORDS[0]\n move_to(_x, _y)\n\n\ndef move_to(x: int, y: int) -> bool:\n _try = 0 \n while GetX(Self()) != x or GetY(Self()) != y:\n newMoveXY(x, y, True, 0, True)\n _try += 1\n if _try > 10:\n print(f\"[move_to] Can't reach X: {x} Y: {y}\")\n return False \n return True\n\n\ndef elemental_around() -> int:\n if FindType(0x003A, Ground()):\n return FindItem()\n return 0\n\ndef loot():\n if FindType(0x2006, Ground()):\n corpse = FindItem()\n newMoveXY(GetX(corpse), GetY(corpse), True, 0, True)\n UseObject(corpse)\n Wait(100)\n if FindTypesArrayEx(LOOT, [0xFFFF], [FindItem()], False):\n for _item in GetFindedList():\n Grab(_item, 0)\n Wait(1000)\n\n\ndef kill_elemental(elemental: int):\n print(\"Killing isonite elemental...\")\n while GetHP(elemental) > 0:\n newMoveXY(GetX(elemental), GetY(elemental), True, 1, True)\n Attack(elemental)\n Wait(500) \n\ndef restock():\n if Count(Types.PICKAXE) < KEEP_TOOLS:\n if FindType(Types.PICKAXE, CHEST):\n if FindCount() > KEEP_TOOLS:\n _got = 0\n for _item in GetFindedList():\n print(\"Getting new pickaxe\")\n _got += 1 \n Grab(_item, 1)\n Wait(1000)\n if _got >= KEEP_TOOLS:\n return True\n return False\n return False\n return True\n\n\ndef get_bandages():\n if FindType(Types.BANDAGES, LastContainer()):\n if FindQuantity() > KEEP_BANDAGES:\n Grab(FindItem(), KEEP_BANDAGES)\n Wait(500) \n\ndef unload():\n _unload_x, _unload_y = FORGE_COORDS\n if move_to(_unload_x, _unload_y): \n UseObject(CHEST) \n Wait(500)\n\n\n if not restock():\n print(\"No more tools left in chest!\")\n full_disconnect()\n\n for _type in [Types.INGOT, Types.GOLD_INGOT, Types.SILVER_INGOT]:\n if FindType(_type, Backpack()):\n for _item in GetFoundList():\n MoveItem(_item, 0, LastContainer(), 0, 0, 0)\n Wait(500)\n\n if PROMETHEUS == 1:\n to_prometheus()\n\n if KILL_ELEMENTALS == 1:\n get_bandages()\n # Unload loot from elementals\n if FindTypesArrayEx(LOOT, [0xFFFF], [Backpack()], False):\n for _loot_item in GetFoundList():\n MoveItem(_loot_item, 0, LastContainer(), 0, 0, 0)\n Wait(500)\n\ndef smelt():\n _forge_x, _forge_y = FORGE_COORDS\n if move_to(_forge_x, _forge_y):\n while FindType(Types.ORE, Backpack()):\n for _ in range(FindCount()):\n _started = dt.now()\n UseType(Types.ORE, 0xFFFF)\n WaitJournalLine(_started, \"You smelt\", 10000)\n\n\ndef get_item_name(item_serial, message):\n _started = dt.now()\n ClickOnObject(item_serial)\n Wait(500)\n _journal_line = InJournalBetweenTimes(message, _started, dt.now())\n if _journal_line > 0: \n _match = re.search(r\"(\\d+)\\s(\\S+)\", Journal(_journal_line))\n if _match:\n return (_match.group(2), _match.group(1))\n\n return ('error', 1)\n\ndef to_prometheus():\n _data 
= []\n # Collect data to reduce spaces in metrics\n # Yeah, gold ingot has different type =\\\n for _type in [Types.INGOT, Types.GOLD_INGOT, Types.SILVER_INGOT]:\n if FindType(_type, LastContainer()):\n for _ingot in GetFoundList():\n _ingot_name, _ingot_qty = get_item_name(_ingot, \"ingot\")\n if _ingot_name != \"error\":\n _data.append((_ingot_name, _ingot_qty))\n\n # Workaround for bricks \n if FindTypeEx(Types.INGOT, 0x04E8, LastContainer()):\n _brick, _qty = get_item_name(FindItem(), \"Brick\")\n if _brick != \"error\":\n _data.append((_brick, _qty))\n\n # Pickaxes info\n if FindType(Types.PICKAXE, LastContainer()):\n _data.append((\"pickaxe\", FindCount()))\n\n # To empty file lulz \n open(FILE_NAME, 'w').close()\n # Now we can append some data...\n with open(FILE_NAME, \"a\") as _to_exporter: \n for _set in _data:\n _ingot, _qty = _set\n _to_exporter.write(f\"{_ingot}={_qty}\\n\")\n _to_exporter.close()\n\ndef crash_rocks():\n if FindType(ROCK, Backpack()):\n for _rock in GetFindedList():\n cancel_targets()\n UseObject(ObjAtLayer(RhandLayer()))\n WaitForTarget(2000)\n if TargetPresent():\n WaitTargetObject(FindItem())\n Wait(1000) \n\ndef mine():\n for _tile_data in find_tiles(TILE_SEARCH_RANGE):\n _tile, _x, _y, _z = _tile_data\n while not Dead():\n # Overload? \n if Weight() >= MaxWeight() - 20:\n #if Weight() >= 200:\n smelt() \n unload() \n move_to(_x, _y)\n\n # You can't mine so close to yourself \n if newMoveXY(_x, _y, True, 1, True):\n if GetX(Self()) == _x and GetY(Self()) == _y:\n newMoveXY(_x + 1, _y, True, 0, True)\n\n # Prepare to fight ^W dig\n equip_tool()\n crash_rocks()\n # Kill some elementals\n if KILL_ELEMENTALS == 1:\n elemental = elemental_around()\n if elemental > 0 and bandages_available():\n kill_elemental(elemental)\n loot()\n SetWarMode(False)\n #\n cancel_targets()\n \n _started = dt.now()\n UseObject(ObjAtLayer(RhandLayer()))\n WaitForTarget(2000)\n if TargetPresent():\n WaitTargetTile(_tile, _x, _y, _z)\n WaitJournalLine(_started, \"|\".join(\n SKIP_TILE_MESSAGES + NEXT_TRY_MESSAGES), 50000)\n\n if InJournalBetweenTimes(\"|\".join(SKIP_TILE_MESSAGES), _started, dt.now()) > 0:\n break\n else:\n print(f\"Can't reach X: {_x} Y: {_y}\")\n break\n\n Wait(500)\n\n\n# Initialization\n\nSetARStatus(True)\nSetPauseScriptOnDisconnectStatus(True)\nSetWarMode(False)\nSetMoveThroughNPC(20)\nSetFindDistance(20)\n\nsmelt()\nunload()\n#exit()\n\n\nwhile not Dead():\n for point in MINE_COORDS:\n point_x, point_y = point\n move_to(point_x, point_y) \n mine()\n\n\n","repo_name":"it-sova/olmer-stealth-public","sub_path":"mining_home.py","file_name":"mining_home.py","file_ext":"py","file_size_in_byte":9332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36815814008","text":"# -*- coding: utf-8 -*-\n# vim: set sw=4 ts=4 expandtab :\n\nimport base64\n\n\ndef parse_event(d):\n ev = {}\n ev['type'] = d['type']\n ev['attr'] = {}\n for att in d['attributes']:\n k = base64.b64decode(att['key']).decode('latin1')\n v = base64.b64decode(att['value']).decode('latin1')\n # exception handling for hex encoded address\n if k == 'address' and len(v) != 42:\n v = base64.b64decode(v).hex() # bizarre but necessary\n ev['attr'][k] = v\n return ev\n\n\ndef from_dict(obj, d):\n for k, v in d.items():\n obj.__dict__[k] = 
v\n","repo_name":"MECTrace/edge-data-chain-explorer","sub_path":"server/crawler/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"3"} +{"seq_id":"44062063453","text":"import numpy as np \nimport cv2\nfrom math import pi\n\n\nfrom scipy.spatial.transform import Rotation as R\n\ndef gen(ld_data, depth_data, point, cl_point):\n blank_image = np.zeros((300, 300, 3), np.uint32)\n for idx, point in enumerate(ld_data):\n blank_image[point[0]][point[1]][1] = np.clip(depth_data[idx] + blank_image[point[0]][point[1]][1], 0, cl_point)\n return blank_image\n\nproject_matrix_x = np.asarray([[0, 0, 0], [0, 1, 0], [0, 0, 1]])\nproject_matrix_y = np.asarray([[1, 0, 0], [0, 0, 0], [0, 0, 1]])\nproject_matrix_z = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 0]])\n\n\ndef f2d_z(x):\n return project_matrix_z @ x\n\n\ndef f2d_y(x):\n return project_matrix_y @ x\n\ndef f2d_x(x):\n return project_matrix_x @ x\n\n\n\nlidar_data = np.load('lidar_array_100.npy')\n#lidar_data = lidar_data.reshape(-1, 4)\n\n#\n\nroll = 2.6335\npitch = 0.4506\nyaw = 1.1684\n\nhud_dim = [1000, 1000]\n\nimport pudb; pudb.set_trace()\n\npoints = np.frombuffer(lidar_data, dtype=np.dtype('f4'))\npoints = np.reshape(points, (int(points.shape[0] / 3), 3))\n#points[:, [2]] *= -1\n\nlidar_data = np.array(points[:, :2])\nlidar_data *= min(hud_dim) / 100.0\nlidar_data += (0.5 * hud_dim[0], 0.5 * hud_dim[1])\nlidar_data = np.fabs(lidar_data) # pylint: disable=E1111\nlidar_data = lidar_data.astype(np.int32)\n#lidar_data[2] *= -1\nlidar_data = np.reshape(lidar_data, (-1, 2))\nlidar_img_size = (hud_dim[0], hud_dim[1], 3)\nlidar_img = np.zeros((lidar_img_size), dtype = int)\n\n#for idx, point in enumerate(lidar_data):\n #import pudb; pudb.set_trace()\n# lidar_img[point[0]][point[1]][1] += points[idx][3]*10\n #lidar_img[point.T] = (0, points[idx][3] + lidar_img[point.T][1], 0)\n\n\nlidar_img[tuple(lidar_data.T)] = (255, 255, 255)\n\n#import pudb; pudb.set_trace()\n\ncv2.imshow('Lidar', lidar_img.astype(np.uint8))\ncv2.waitKey(0)\n\n\n\n\n\"\"\"\nrot = [0, 3*pi/2, 0]\n\n\n\n#c = [130, 125, 250]\n\n#c = [130, 200, 250]\n\nc = [0.5, 0.5 , 0.5]\n\n\ns = [300, 300]\n\nr = [30, 30, 1]\n\n#e = [3, 3, 3]\nblank_image = np.zeros((300, 300, 3), np.uint32)\ncl_point = 10000000\n\neps = 1e-6\n\n#import pudb; pudb.set_trace()\n\nfor idx, point in enumerate(lidar_data):\n \n #point_no_depth = lidar_data[:, [0, 1, 2]]\n #point_depth = lidar_data[:, 3]\n #if(idx == 4826):\n # import pudb; pudb.set_trace()\n\n a = point[:3] / 255\n point_depth = point[3]\n\n m_1 = np.asarray([[1, 0, 0], [0, np.cos(rot[0]), np.sin(rot[0])], [0, -np.sin(rot[0]), np.cos(rot[0])]])\n m_2 = np.asarray([[np.cos(rot[1]), 0, -np.sin(rot[1])], [0, 1, 0], [np.sin(rot[1]), 0, np.cos(rot[1])]])\n m_3 = np.asarray([[np.cos(rot[2]), np.sin(rot[2]), 0], [-np.sin(rot[2]), np.cos(rot[2]), 0], [0, 0, 1]])\n vec = a - c\n\n d_vec = m_1 @ m_2 @ m_3 @ vec\n\n bx = int((d_vec[0] * s[0]) / (d_vec[2]*r[0] + eps) * r[2])\n by = int((d_vec[1] * s[1]) / (d_vec[2]*r[1] + eps) * r[2])\n\n if(bx > 0 and bx < 300 and by > 0 and by < 300):\n #blank_image[bx, by][1] = np.clip(point_depth + blank_image[bx, by][1], 0, cl_point)\n #blank_image[bx, by][1] = max(point_depth, blank_image[bx, by][1])\n blank_image[bx, by][1] += point_depth\n\n\n\ncv2.imwrite('./depth_test_.png', blank_image / (blank_image.max()/255.0))\n\ncv2.imshow('Proj_x_test', blank_image / 
(blank_image.max()/255.0))\ncv2.waitKey(0)\ncv2.waitKey(1)\n\n\"\"\"\n\n\n\n\"\"\"\n\n# Resolution and Field of View of LIDAR sensor\nh_res = 0.35 # horizontal resolution, assuming rate of 20Hz is used \nv_res = 0.4 # vertical res\nv_fov = (-24.9, 2.0) # Field of view (-ve, +ve) along vertical axis\nv_fov_total = -v_fov[0] + v_fov[1] \n\n# Convert to Radians\nv_res_rad = v_res * (np.pi/180)\nh_res_rad = h_res * (np.pi/180)\n\n# Project into image coordinates\nx_img = np.arctan2(-y_lidar, x_lidar)/ h_res_rad\ny_img = np.arctan2(z_lidar, d_lidar)/ v_res_rad\n\n\n# SHIFT COORDINATES TO MAKE 0,0 THE MINIMUM\nx_min = -360.0/h_res/2 # Theoretical min x value based on specs of sensor\nx_img = x_img - x_min # Shift\nx_max = 360.0/h_res # Theoretical max x value after shifting\n\ny_min = v_fov[0]/v_res # theoretical min y value based on specs of sensor\ny_img = y_img - y_min # Shift\ny_max = v_fov_total/v_res # Theoretical max x value after shifting\ny_max = y_max + 5 # UGLY: Fudge factor because the calculations based on\n # spec sheet do not seem to match the range of angles\n # collected by sensor in the data.\n\n\n\nlidar_data_nodepth = lidar_data[:, [0, 1, 2]]\nlidar_just_depth = lidar_data[:, 3]\n\nlidar_in_2d_z = np.asarray(list(map(f2d_z, lidar_data_nodepth)))\nlidar_in_2d_z = lidar_in_2d_z[:, [0, 1]]\n\nlidar_in_2d_x = np.asarray(list(map(f2d_x, lidar_data_nodepth)))\nlidar_in_2d_x = lidar_in_2d_x[:, [1, 2]]\n\nlidar_in_2d_y = np.asarray(list(map(f2d_y, lidar_data_nodepth)))\nlidar_in_2d_y = lidar_in_2d_y[:, [0, 2]]\n\n#2558556\n#2722\nimg_x_test = gen(lidar_in_2d_x, lidar_just_depth, 1000, 2558556/3)\ncv2.imshow('Proj_x_test', img_x_test / (img_x_test.max()/255.0))\n\nimg_y_test = gen(lidar_in_2d_y, lidar_just_depth, 1000, 2558556/3)\ncv2.imshow('Proj_y_test', img_y_test / (img_y_test.max()/255.0))\n\nimg_z_test = gen(lidar_in_2d_z, lidar_just_depth, 1000, 2558556/3)\ncv2.imshow('Proj_z_test', img_z_test / (img_z_test.max()/255.0))\n\ncv2.waitKey(0)\ncv2.waitKey(1)\n\"\"\"","repo_name":"GonVas/CarlaFinal","sub_path":"test_lidar_data.py","file_name":"test_lidar_data.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"20160700604","text":"\"\"\"\n gRPC Server\n ___________________\n this is where we register servicer class and start gRPC Server\n\"\"\"\nimport time\nimport grpc\n\nfrom rpc.lib.core.async_server import AsyncioExecutor\n\nfrom autogen import virtual_account_pb2_grpc\n\nfrom rpc.handler import VirtualAccount\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\ndef start(host, port):\n \"\"\" start Async gRPC Server\"\"\"\n server = grpc.server(AsyncioExecutor())\n # register GRPC Servicer here\n virtual_account_pb2_grpc.add_VirtualAccountServicer_to_server(\n VirtualAccount(), server\n )\n # start\n server.add_insecure_port(\"{}:{}\".format(host, port))\n server.start()\n print(\"Listening gRPC server at {}:{}\".format(host, port))\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n","repo_name":"vousmeevoyez/finance-microservices-using-python","sub_path":"bni-va-grpc/rpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19666569216","text":"# Q) why can't use heap?\n# Because heap gives kth smallest/largest that come in sequence assuming sorted array, not the kth distinct smallest/largest.\n\n# method 
1: sort and check from last and count the distinct number you have seen till now.\n# time: O(n*logn)\n\nclass Solution:\n def thirdMax(self, nums: List[int]) -> int:\n n = len(nums)\n if n <=2:\n return max(nums)\n nums.sort()\n count= 1\n for i in range(n-1, 0, -1):\n if nums[i] != nums[i-1]:\n count += 1\n if count == 3:\n return nums[i-1]\n return max(nums)\n\n\n# method 2: Just extension of '2nd largest number' logic.\n# code of '2nd largest' you can see at the bottom.\nclass Solution:\n def thirdMax(self, nums: List[int]) -> int:\n firstMax, secondMax , thirdMax= float('-inf'), float('-inf'), float('-inf')\n for num in nums:\n if num > firstMax:\n # here we need to update all three\n thirdMax = secondMax\n secondMax = firstMax\n firstMax = num\n elif num > secondMax and num != firstMax:\n # here we need to update two i.e thirdMax and secondMax\n thirdMax = secondMax\n secondMax = num\n elif num > thirdMax and num != firstMax and num != secondMax: \n # not writing the condition 'num != firstMax' will give error in case like: [1,2,2,5,3,5], [1,1,1,1,1] etc.\n # # here we need to update only thirdMax\n thirdMax = num\n return thirdMax if thirdMax != float('-inf') else max(nums)\n\n# method 3:\n# Just the shorter version of above logic.\n# Updating the variable at same condition only but we don't need to check that much extra cases \n\n# Note: We need to handle that many cases under if-else to handle duplicate numbers in above method.\n# In this we are only updating if we are getting distinct number.\n# Logic of if-else condition will be same after seeing distinct no.\n\n# keep track of first_max, 2nd_max, 3rd_max after you each ele you see any distinct number.\n\n# Note: we can apply this logic to find '2nd largest number also' in similar way.\n\n# time: O(3* n), space: O(3)\n\nclass Solution(object):\n def thirdMax(self, nums):\n v = [float('-inf'), float('-inf'), float('-inf')] # [first_max, second_max, third_max]\n for num in nums:\n if num not in v: # wil check only for distinct number\n if num > v[0]: v = [num, v[0], v[1]] # make first= num, second= pre_first, third= pre_2nd\n elif num > v[1]: v = [v[0], num, v[1]] # keep first same, make second= num & third= pre_2nd\n elif num > v[2]: v = [v[0], v[1], num] # keep first & second same, & make third= num\n # return max(nums) if float('-inf') in v else v[2]\n return v[2] if v[2] != float('-inf') else v[0]\n\n\n# Note: 2nd distinct maximum\n\n# Code for 2nd maximum (submitted on gfg)\nclass Solution: \n\tdef print2largest(self,arr, n):\n\t\tfirstMax, secondMax = -1, -1 \n\t\tfor num in arr:\n\t\t if num > firstMax:\n\t\t # 'num' is greatest number till now\n\t\t # so in this we will have to update both 'firstMax' and 'secondMax'\n\t\t # Update 'secondMax' to 'firstMax' and then 'firstMax' to cur 'num'.\n\t\t secondMax = firstMax\n\t\t firstMax = num\n\t\t elif num > secondMax and num != firstMax:\n\t\t # in this case we only need to update 'secondMax' to 'num'.\n\t\t secondMax = num\n\t\treturn secondMax # if '-1' then all elements are equal and there is no 2nd maximum.","repo_name":"Ravi-0412/DSA-Program-And-Notes","sub_path":"Array/414. Third Maximum Number.py","file_name":"414. 
Third Maximum Number.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"26832505852","text":"from random import choice, randint, shuffle\r\nfrom mylib import add_underscores_to_num\r\n\r\n\r\ndef flip_coins(nr_of_throws, heads='heads', tails='tails'):\r\n nr_of_heads = 0\r\n nr_of_tails = 0\r\n\r\n for _ in range(nr_of_throws):\r\n coin = choice([heads, tails])\r\n if coin == heads:\r\n nr_of_heads += 1\r\n else:\r\n nr_of_tails += 1\r\n\r\n print(f'Tossing a coin {add_underscores_to_num(nr_of_throws,\".\")} times ...')\r\n print(f'- Nr of {heads}: {add_underscores_to_num(nr_of_heads,\".\")}'\r\n f' ({nr_of_heads / nr_of_throws:.2%})')\r\n print(f'- Nr of {tails}: {add_underscores_to_num(nr_of_tails,\".\")}'\r\n f' ({nr_of_tails / nr_of_throws:.2%})')\r\n\r\n return 0\r\n\r\n\r\ndef main():\r\n # Random nr\r\n low = 1\r\n high = 100\r\n number = randint(low, high)\r\n print(f'Random number between number {low} and {high}: {number}')\r\n\r\n # Coin toss\r\n print()\r\n flip_coins(randint(10, 10_000))\r\n\r\n # Add underscores to number (from mylib.py)\r\n my_big_nr = 2990192\r\n print(f'\\n{my_big_nr} becomes {add_underscores_to_num(my_big_nr,\".\")}')\r\n\r\n # Shuffle selection\r\n selection = [1, 2, 3, 4, 5, 6]\r\n print(f'\\nSelection: {selection}')\r\n new_selection = selection[:]\r\n shuffle(new_selection)\r\n print(f'Shuffled: {new_selection}')\r\n\r\n print()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"DJdeGoede/MyRepo","sub_path":"python/CS50P - Programming with Python/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"3571061722","text":"# cp2GDB.py\n# Author: \n# Created: \n# Updated:\n\n\n''' \nHelpful notes about the purpose, expected input, and expected output of this\nscript. 
Also a great place to store references to sources of inspiration and\nalgorithms used.\n'''\n\n#---------------------------------------------------------\n\n# Name: CreateFileGDB_Example2.py\n# Description: Create a file GDB\n# Author: ESRI\n\n# Import system modules\nimport arcpy\nimport os\n\n# Set local variables\nout_folder_path = \"C:/pyWork/day2\" \nout_gdb_name = \"fGDB.gdb\"\narcpy.env.workspace = \"C:/pyWork/day2/countyChaos\"\narcpy.env.overwriteOutput = True\n\n# Execute CreateFileGDB\n# from resources.arcgis.com/en/help/main/10.1/index.html#//0017000000pw000000\narcpy.CreateFileGDB_management(out_folder_path, out_gdb_name)\n\n# Set local variables\nout_dataset_path = out_folder_path + \"/\" + out_gdb_name \nout_fd_name = \"mesonet\"\n# Creating a spatial reference object\nsr = arcpy.SpatialReference(\"C:/pyWork/day2/countyChaos/mesonet.prj\")\n\n# Execute CreateFeaturedataset\n# from resources.arcgis.com/en/help/main/10.1/index.html#//0017000000pv000000\narcpy.CreateFeatureDataset_management(out_dataset_path, out_fd_name, sr)\n\n# Use ListFeatureClasses to generate a list of shapefiles in the\n# workspace shown above.\nfcList = arcpy.ListFeatureClasses()\n \n# Execute CopyFeatures for each input shapefile\nfor shapefile in fcList:\n # Determine the new output feature class path and name\n out_dataset_path = out_folder_path + \"/\" + out_gdb_name + \\\n \"/\" + out_fd_name\n outFeatureClass = out_dataset_path + \"/\" + shapefile.split(\".\")[0]\n # resources.arcgis.com/en/help/main/10.1/index.html#//001700000035000000\n arcpy.CopyFeatures_management(shapefile, outFeatureClass)\n\n\n\n\n","repo_name":"mcStargazer/workshop","sub_path":"day2/cp2GDB.py","file_name":"cp2GDB.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71453494802","text":"import sys\n\nn, k = map(int, sys.stdin.readline().split())\n\ncup = 0\nwhile (bin(n).count(\"1\"))>k:\n add = 2**(bin(n)[::-1].index(\"1\"))\n cup += add\n n += add\nprint(cup)","repo_name":"dowoonlee/TIL","sub_path":"baekjun/by_level/silver/1052.py","file_name":"1052.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69793662481","text":"# coding: utf8\nimport re\nfrom flask import request, abort\nfrom farbox_bucket.utils import smart_unicode, get_value_from_data, string_types, auto_type\nfrom farbox_bucket.utils.path import get_just_name\nfrom farbox_bucket.bucket.utils import get_bucket_posts_info, get_bucket_in_request_context\nfrom farbox_bucket.bucket.record.utils import get_path_from_record, get_type_from_record\nfrom farbox_bucket.bucket.record.get.path_related import get_record_by_path, get_json_content_by_path, get_records_by_paths\nfrom farbox_bucket.server.template_system.api_template_render import render_api_template\nfrom farbox_bucket.server.template_system.namespace.data import Data\nfrom farbox_bucket.server.utils.request_path import get_request_path, get_offset_path\nfrom farbox_bucket.server.utils.response import force_response\nfrom farbox_bucket.utils.md_related.filter_posts_info import filter_and_get_posts_link_points_info\nfrom farbox_bucket.bucket.record.get.tag_related import get_records_by_tag\n\nfrom farbox_bucket.server.template_system.namespace.data import data as get_data_namespace\nfrom farbox_bucket.server.template_system.model.category import Category\nfrom farbox_bucket.utils.path import 
get_relative_path\nfrom farbox_bucket.server.utils.record_and_paginator.paginator import auto_pg\nfrom farbox_bucket.server.utils.request_context_vars import set_data_root_in_request\n\n\ndef get_wiki_url_for_doc(wiki_root, doc):\n if not isinstance(wiki_root, string_types) or not isinstance(doc, dict):\n return \"\"\n wiki_root = wiki_root.strip(\"/\")\n doc_type = get_type_from_record(doc)\n doc_path = get_path_from_record(doc)\n relative_path = get_relative_path(doc_path.lower().strip(\"/\"), wiki_root, return_name_if_fail=False)\n if not relative_path:\n return \"\"\n if doc_type == \"post\":\n return \"/wiki/post/%s\" % relative_path\n else:\n return \"/wiki/category/%s\" % relative_path\n\n\n\ndef show_wiki_nodes_as_sub_site():\n bucket = get_bucket_in_request_context()\n if not bucket:\n return\n request_path = get_request_path().strip(\"/\")\n if not re.match(\"wiki_nodes(/|$)\", request_path):\n return\n wiki_configs = get_json_content_by_path(bucket, \"__wiki.json\", force_dict=True)\n enable_wiki_nodes = auto_type(wiki_configs.get(\"enable_wiki_nodes\", True))\n if not enable_wiki_nodes:\n return\n wiki_root = smart_unicode(wiki_configs.get(\"wiki_root\", \"\"))\n if not wiki_root:\n return\n wiki_root = wiki_root.strip(\"/\")\n wiki_title = wiki_configs.get(\"wiki_title\") or get_just_name(wiki_root, for_folder=True)\n path = request.values.get(\"path\", \"\").strip(\"/\")\n if request.values.get(\"type\") == \"data\":\n # return json data\n wiki_root = wiki_root.lower()\n under = \"%s/%s\" % (wiki_root, path)\n posts_info = get_bucket_posts_info(bucket)\n data = filter_and_get_posts_link_points_info(posts_info, under=under)\n nodes = data.get(\"nodes\")\n if nodes:\n for node in nodes:\n node_id = node.get(\"id\")\n if node_id and isinstance(node_id, string_types):\n if node_id.startswith(\"#\"):\n tag = node_id.lstrip(\"#\")\n url = \"/wiki/tag/%s\" % tag\n node[\"url\"] = url\n else:\n relative_path = get_relative_path(node_id.strip(\"/\"), wiki_root, return_name_if_fail=False)\n if relative_path:\n url = \"/wiki/post/%s\" % relative_path\n node[\"url\"] = url\n return force_response(data)\n else:\n return render_api_template(\"builtin_theme_wiki_nodes.jade\", wiki_title=wiki_title)\n\n\n\ndef show_wiki_as_sub_site():\n bucket = get_bucket_in_request_context()\n if not bucket:\n return\n request_path = get_request_path().strip(\"/\")\n if not re.match(\"wiki(/|$)\", request_path):\n return\n wiki_configs = get_json_content_by_path(bucket, \"__wiki.json\", force_dict=True)\n wiki_root = smart_unicode(wiki_configs.get(\"wiki_root\", \"\"))\n if not wiki_root:\n return\n set_data_root_in_request(wiki_root) # set data_root to request\n wiki_root = wiki_root.strip(\"/\")\n wiki_title = wiki_configs.get(\"wiki_title\") or get_just_name(wiki_root, for_folder=True)\n wiki_root = wiki_root.lower()\n\n kwargs = dict(wiki_root=wiki_root, wiki_title=wiki_title, wiki_configs=wiki_configs)\n\n if re.match(\"wiki/?$\", request_path):\n # index\n docs = []\n user_categories = wiki_configs.get(\"categories\")\n if not isinstance(user_categories, (list, tuple)):\n user_categories = []\n for user_category in user_categories:\n if not isinstance(user_category, dict): continue\n category_path = user_category.get(\"path\")\n summary = smart_unicode(user_category.get(\"summary\") or \"\")\n icon = smart_unicode(user_category.get(\"icon\") or \"\")\n doc = get_record_by_path(bucket=bucket, path=category_path)\n if not doc:\n category_path = \"%s/%s\" % (wiki_root, category_path.strip(\"/\"))\n doc 
= get_record_by_path(bucket=bucket, path=category_path)\n if not doc:\n continue\n doc_type = get_type_from_record(doc)\n if doc_type not in [\"post\", \"folder\"]:\n continue\n doc[\"icon\"] = icon or get_value_from_data(doc, \"metadata.icon\")\n doc[\"summary\"] = summary or get_value_from_data(doc, \"metadata.summary\")\n docs.append(doc)\n if not docs: # by default\n docs = Data.get_data(type='folder', level=1, limit=50, with_page=False, path=wiki_root)\n\n # 处理 url, 取 relative\n index_docs = []\n for doc in docs:\n wiki_url = get_wiki_url_for_doc(wiki_root, doc)\n if not wiki_url:\n continue\n doc[\"wiki_url\"] = wiki_url\n index_docs.append(doc)\n\n return render_api_template(\"builtin_theme_knowbase_index.jade\", docs=index_docs, **kwargs)\n\n elif re.match(\"wiki/tag/\", request_path):\n current_tag = get_offset_path(request_path, 2)\n if not current_tag:\n abort(404, \"no tag?\")\n docs = get_records_by_tag(bucket, current_tag, sort_by=\"-date\")\n for doc in docs:\n doc[\"wiki_url\"] = get_wiki_url_for_doc(wiki_root, doc)\n return render_api_template(\"builtin_theme_knowbase_tag.jade\", current_tag=current_tag, docs=docs, **kwargs)\n\n elif re.search(\"wiki/search(/|$)\", request_path):\n keywords = request.values.get(\"s\")\n data_namespace = get_data_namespace()\n docs = data_namespace.get_data(bucket=bucket, keywords=keywords, pager_name=\"wiki\", path=wiki_root,\n sort_by='-date', min_limit=8)\n for doc in docs:\n doc[\"wiki_url\"] = get_wiki_url_for_doc(wiki_root, doc)\n return render_api_template(\"builtin_theme_knowbase_search.jade\", docs=docs, **kwargs)\n\n elif re.match(\"wiki/category/\", request_path):\n # category\n category_path = get_offset_path(request_path, 2).lower()\n wiki_nodes_url = \"/wiki_nodes?path=%s\" % category_path\n category_path = \"%s/%s\" % (wiki_root, category_path)\n folder_doc = get_record_by_path(bucket, category_path)\n enable_wiki_nodes = auto_type(wiki_configs.get(\"enable_wiki_nodes\", True))\n if not enable_wiki_nodes:\n wiki_nodes_url = \"\"\n if not folder_doc or get_type_from_record(folder_doc) != \"folder\":\n abort(404, \"no category found\")\n else:\n category = Category(folder_doc)\n docs = auto_pg(bucket=bucket, data_type=\"post\", pager_name=\"wiki\", path=category.path,\n ignore_marked_id=True, prefix_to_ignore='_', sort_by='-date', min_limit=8)\n for doc in docs:\n doc[\"wiki_url\"] = get_wiki_url_for_doc(wiki_root, doc)\n return render_api_template(\"builtin_theme_knowbase_category.jade\", category=category, docs=docs,\n wiki_nodes_url=wiki_nodes_url, **kwargs)\n\n elif re.match(\"wiki/post/\", request_path):\n # detail\n doc_path = get_offset_path(request_path, 2)\n doc_path = \"%s/%s\" % (wiki_root, doc_path)\n doc = get_record_by_path(bucket, doc_path)\n if not doc:\n abort(404, \"no doc found\")\n else:\n return render_api_template(\"builtin_theme_knowbase_post.jade\", doc=doc, **kwargs)\n\n\n\n\n\n\n\n\n","repo_name":"hepochen/FarBox","sub_path":"farbox_bucket/server/bucket_render/builtin_theme/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"3"} +{"seq_id":"34756078484","text":"import json\nimport os\nfrom xmlrpc.client import boolean\n\nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\nmy_file = os.path.join(THIS_FOLDER, 'workouts.json')\n\n\ndef update_json(data: dict = None, on_creation: bool = False) -> dict or bool:\n if data:\n if (os.path.exists(my_file) and on_creation):\n 
print(\"\"\"Oops! Path already exists!\"\"\")\n return False # like status code, true good false bad\n\n with open(my_file, 'w') as file:\n json.dump(obj=data, fp=file)\n return True # like status code, true good false bad\n else:\n with open(my_file, 'r') as file:\n return json.load(file)\n","repo_name":"sai-nallani/workout-tracker-new","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71819818963","text":"import pandas as pd\r\nfrom sentence_transformers import SentenceTransformer\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nimport numpy as np\r\nimport plotly.express as px\r\nfrom scipy.spatial import distance\r\nfrom collections import Counter\r\nimport itertools\r\n\r\nclass DataHandler:\r\n \"\"\"\r\n Handles data operations including reading and saving data.\r\n \"\"\"\r\n def __init__(self):\r\n self.data = None\r\n\r\n def read_excel(self, file_path, usecols=None):\r\n \"\"\"\r\n Reads data from an Excel file into a pandas DataFrame.\r\n \"\"\"\r\n try:\r\n self.data = pd.read_excel(file_path, usecols=usecols)\r\n except Exception as e:\r\n print(f\"An error occurred while reading the Excel file: {e}\")\r\n\r\n def save_parquet(self, file_name):\r\n \"\"\"\r\n Saves the pandas DataFrame to a Parquet file.\r\n \"\"\"\r\n try:\r\n self.data.to_parquet(file_name)\r\n except Exception as e:\r\n print(f\"An error occurred while saving to Parquet file: {e}\")\r\n\r\nclass TextEmbedder:\r\n \"\"\"\r\n Handles the embedding of text data.\r\n \"\"\"\r\n def __init__(self, model_name):\r\n self.model = SentenceTransformer(model_name)\r\n\r\n def embed_text(self, text_series):\r\n \"\"\"\r\n Generates embeddings for the given text series using the Sentence Transformer model.\r\n \"\"\"\r\n return self.model.encode(text_series.tolist())\r\n\r\nclass Clusterer:\r\n \"\"\"\r\n Handles clustering operations.\r\n \"\"\"\r\n def __init__(self, n_clusters=20):\r\n self.kmeans = KMeans(n_clusters=n_clusters, random_state=42)\r\n self.centroids = None\r\n\r\n def fit(self, embeddings):\r\n \"\"\"\r\n Fits the KMeans clustering model to the embeddings and finds cluster centroids.\r\n \"\"\"\r\n self.centroids = self.kmeans.fit_predict(embeddings)\r\n return self.centroids\r\n\r\nclass DimensionReducer:\r\n \"\"\"\r\n Handles dimensionality reduction.\r\n \"\"\"\r\n def __init__(self, n_components=2):\r\n self.lda = LinearDiscriminantAnalysis(n_components=n_components)\r\n\r\n def reduce_dimensions(self, embeddings, labels):\r\n \"\"\"\r\n Reduces the dimensionality of the embeddings to the number of specified components.\r\n \"\"\"\r\n return self.lda.fit_transform(embeddings, labels)\r\n\r\nclass Plotter:\r\n \"\"\"\r\n Handles the creation of plots.\r\n \"\"\"\r\n def scatter_plot(self, df, x_col, y_col, color_col, title=\"Scatter Plot\"):\r\n \"\"\"\r\n Creates a scatter plot with the given DataFrame and columns.\r\n \"\"\"\r\n fig = px.scatter(df, x=x_col, y=y_col, color=color_col, title=title)\r\n fig.show()\r\n\r\n def treemap(self, df, path_col, size_col, title=\"Treemap\"):\r\n \"\"\"\r\n Creates a treemap with the given DataFrame and columns.\r\n \"\"\"\r\n fig = px.treemap(df, path=[path_col], values=size_col, title=title)\r\n fig.show()\r\n\r\n\r\nclass ClusterNamer:\r\n \"\"\"\r\n Generates names for clusters based on the most common words in the text data.\r\n \"\"\"\r\n def 
__init__(self):\r\n pass\r\n\r\n def name_clusters(self, data, cluster_column, text_column):\r\n \"\"\"\r\n Generates and assigns names to the clusters based on the task names.\r\n \"\"\"\r\n cluster_names = {}\r\n for cluster in sorted(data[cluster_column].unique()):\r\n task_names = data[data[cluster_column] == cluster][text_column]\r\n words = list(itertools.chain(*[name.split() for name in task_names]))\r\n most_common_words = [word for word, word_count in Counter(words).most_common(3)]\r\n cluster_names[cluster] = ' '.join(most_common_words)\r\n return cluster_names\r\n\r\nclass DataFrameMerger:\r\n \"\"\"\r\n Handles the merging of dataframes.\r\n \"\"\"\r\n @staticmethod\r\n def merge_with_counts(data, cluster_column):\r\n \"\"\"\r\n Merges the DataFrame with a count of occurrences per cluster.\r\n \"\"\"\r\n count_series = data[cluster_column].value_counts().rename('Count')\r\n return data.merge(count_series, left_on=cluster_column, right_index=True)\r\n\r\n# Now, let's instantiate and use these classes.\r\ndata_handler = DataHandler()\r\ndata_handler.read_excel('/home/oliver/ChatDev/tools_data.xlsx', usecols=[0, 1, 2])\r\ndata_handler.save_parquet('/mnt/data/Data_pd.parquet')\r\n\r\ntext_embedder = TextEmbedder('BAAI/bge-small-en-v1.5')\r\nembeddings = text_embedder.embed_text(data_handler.data['Task Name'])\r\n\r\nclusterer = Clusterer(n_clusters=20)\r\ncluster_labels = clusterer.fit(embeddings)\r\ndata_handler.data['Cluster'] = cluster_labels\r\n\r\ndimension_reducer = DimensionReducer(n_components=2)\r\nreduced_embeddings = dimension_reducer.reduce_dimensions(embeddings, cluster_labels)\r\ndata_handler.data['Reduced Embedding'] = list(reduced_embeddings)\r\ndata_handler.save_parquet('/mnt/data/Data_pd.parquet')\r\n\r\nplotter = Plotter()\r\nscatter_data = pd.DataFrame(reduced_embeddings, columns=['x', 'y'])\r\nscatter_data['Cluster'] = cluster_labels\r\nplotter.scatter_plot(scatter_data, 'x', 'y', 'Cluster', 'Scatter plot of Reduced Embeddings')\r\n\r\ncluster_namer = ClusterNamer()\r\ncluster_names = cluster_namer.name_clusters(data_handler.data, 'Cluster', 'Task Name')\r\nfor cluster, name in cluster_names.items():\r\n data_handler.data.loc[data_handler.data['Cluster'] == cluster, 'Cluster Name'] = name\r\ndata_handler.save_parquet('/mnt/data/Data_pd.parquet')\r\n\r\n# Finally, merge the DataFrame with the counts and display the table.\r\ndata_with_counts = DataFrameMerger.merge_with_counts(data_handler.data, 'Cluster')\r\ndata_with_counts.to_parquet('/mnt/data/Data_pd.parquet')\r\n\r\n# Displaying the table with cluster names and counts.\r\nprint(data_with_counts[['Cluster', 'Cluster Name', 'Count']])\r\n\r\n# Usage of the Plotter class for a treemap\r\nplotter = Plotter()\r\n\r\n# Assuming 'data_with_counts' is a DataFrame with a 'Cluster Name' and 'Count' columns\r\n# which was created in the previous steps.\r\nplotter.treemap(data_with_counts, path_col='Cluster Name', size_col='Count', title=\"Treemap of Clusters\")\r\n","repo_name":"olimoz/AI_Teams","sub_path":"Simple_Clustering_via_Chat_GPT4.py","file_name":"Simple_Clustering_via_Chat_GPT4.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7460247514","text":"import json\nimport boto3\nfrom boto3.dynamodb.conditions import Key\n\n\n# import requests\n\ndef lambda_handler(event, context):\n print(event['pathParameters']['userId'])\n username = event['pathParameters']['userId']\n print(username)\n # username = 
username['userId']\n # username = 'rahul'\n # print(username)\n users = get_user(username)\n # print(username)\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \"Content-Type\",\n \"Access-Control-Allow-Methods\": \"OPTIONS,GET\"\n },\n \"body\": json.dumps({\n \"message\": ['a', 'b', 'c', 'd'],\n \"users\": users\n }),\n }\n\n\ndef get_user(username):\n dynamodb = boto3.resource('dynamodb')\n\n users_table = dynamodb.Table('users_table1')\n response = users_table.query(\n KeyConditionExpression=Key('userId').eq(username)\n )\n\n print(response['Items'])\n return response['Items'][0]\n","repo_name":"rushikeshkoli/mw-sls","sub_path":"get_user/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18425959884","text":"import pickle\nimport random\nfrom transformers import BertTokenizer, BertModel\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset\nfrom torch.nn.functional import binary_cross_entropy\nimport torch.optim as optim\nfrom tqdm import tqdm,trange\nimport qa.system.QA_system.utils\nimport numpy as np\nimport csv\nimport argparse\nimport jieba\nimport codecs as cs\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef load_corpus(state):\n '''\n 加载并划分数据集\n :param state: 'train',''valid','test' path:''str\n :return: question[],entity[]\n '''\n\n if state == 'train':\n corpus=pickle.load(open('../data/train_corpus.pkl','rb'))\n train_questions = [corpus[i]['question'] for i in corpus.keys()]\n train_entity = [corpus[i]['gold_entitys'] for i in corpus.keys()]\n return train_questions, train_entity\n elif state == 'valid':\n corpus = pickle.load(open('../data/valid_corpus.pkl', 'rb'))\n valid_questions = [corpus[i]['question'] for i in corpus.keys()]\n valid_entity = [corpus[i]['gold_entitys'] for i in corpus.keys()]\n return valid_questions, valid_entity\n # elif state == 'test':\n # test_questions = [corpus[i]['question'] for i in test_index]\n # test_entity = [corpus[i]['gold_entitys'] for i in test_index]\n # return test_questions, test_entity\n\n\ndef find_lcsubstr(s1, s2):\n m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)] # 生成0矩阵,为方便后续计算,比字符串长度多了一列\n mmax = 0 # 最长匹配的长度\n p = 0 # 最长匹配对应在s1中的最后一位\n for i in range(len(s1)):\n for j in range(len(s2)):\n if s1[i] == s2[j]:\n m[i + 1][j + 1] = m[i][j] + 1\n if m[i + 1][j + 1] > mmax:\n mmax = m[i + 1][j + 1]\n p = i + 1\n return s1[p - mmax:p]\n\n\nclass Bert_NER(nn.Module):\n def __init__(self):\n super(Bert_NER, self).__init__()\n # pretrainmodel\n self.bert = BertModel.from_pretrained(pretrained_model_name_or_path='qa/system/pretrainmodels/rbt3')\n # 使参数可更新\n for param in self.bert.parameters():\n param.requires_grad = True\n self.ds_encoder = nn.LSTM(768, 256, bidirectional=True)\n self.fc = nn.Linear(512, 1)\n self.sigmoid=nn.Sigmoid()\n\n\n def forward(self, x):\n outputs = self.bert(x,output_hidden_states=True)\n features=outputs.last_hidden_state\n state = self.ds_encoder(features)\n preds = self.sigmoid((self.fc(state[0])))\n\n return preds\n\n\nclass FeatureDataset(Dataset):\n \"\"\"Pytorch Dataset for InputFeatures\n \"\"\"\n\n def 
__init__(self, features):\n self.features = features\n\n def __len__(self) -> int:\n return len(self.features)\n\n def __getitem__(self, index):\n return self.features[index]\n\n\nclass NERDataLoader(object):\n def __init__(self):\n self.train_batch_size = 16\n self.val_batch_size = 16\n self.test_batch_size = 16\n self.data_cache = True\n self.max_seq_len = 25\n self.tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path='../pretrainmodels/rbt3')\n\n @staticmethod\n def collate_fn(features):\n \"\"\"将InputFeatures转换为Tensor\n Args:\n features (List[InputFeatures])\n Returns:\n tensors (List[Tensors])\n \"\"\"\n input_ids = torch.tensor([f[0] for f in features], dtype=torch.long)\n labels = torch.tensor([f[1]for f in features], dtype=torch.long)\n tensors = [input_ids, labels]\n return tensors\n\n def convert_examples_to_features(self, questions, entitys):\n #除了用问句和对应实体,需要用提及词典来反向标注问句\n entity2mention_dic = pickle.load(open('../data/entity2mention_dic_cm3.pkl', 'rb'))\n features = []\n for i in range(len(questions)):\n q = questions[i]\n x = self.tokenizer.encode(text=q, max_length=self.max_seq_len)\n y = [[0] for j in range(self.max_seq_len)]\n # padding\n if len(x) != len(y):\n x.extend([0] * (len(y) - len(x)))\n assert len(x) == len(y)\n for e in entitys[i]:\n # 得到实体名和问题的最长连续公共子串\n e1 = find_lcsubstr(e, q)\n if e1 in q and e1!='':\n begin = q.index(e1) + 1\n end = begin + len(e1)\n if end < self.max_seq_len - 1:\n for pos in range(begin, end):\n y[pos] = [1]\n else:\n for enty,mentions in entity2mention_dic.items():\n if e in enty:\n for mention in mentions:\n e2=find_lcsubstr(mention, q)\n if e2 in q and e2!='':\n begin = q.index(e2) + 1\n end = begin + len(e2)\n if end < self.max_seq_len - 1:\n for pos in range(begin, end):\n y[pos] = [1]\n print(q)\n print(q[begin-1:end-1])\n # print(x)\n # print(y)\n if not [1] in y:\n print(q,e)\n features.append([x, y])\n return features\n def get_dataloaderbypath(self,path):\n corpus=pickle.load(open(path,'rb'))\n questions =[corpus[i]['question'] for i in corpus.keys()]\n entities = [corpus[i]['gold_entitys'] for i in corpus.keys()]\n features = self.convert_examples_to_features(questions, entities)\n dataset = FeatureDataset(features)\n print(f\"{len(features)} {path} data loaded!\")\n print(\"=*=\" * 10)\n datasampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.train_batch_size,\n collate_fn=self.collate_fn,shuffle=False)\n return dataloader\n def get_dataloader(self, data_sign):\n questions, entity = load_corpus(state=data_sign)\n features = self.convert_examples_to_features(questions, entity)\n dataset = FeatureDataset(features)\n print(f\"{len(features)} {data_sign} data loaded!\")\n print(\"=*=\" * 10)\n\n if data_sign == \"train\":\n datasampler = RandomSampler(dataset)\n dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.train_batch_size,\n collate_fn=self.collate_fn)\n elif data_sign == \"valid\":\n datasampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.val_batch_size,\n collate_fn=self.collate_fn)\n elif data_sign in (\"test\", \"pseudo\"):\n datasampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=datasampler, batch_size=self.test_batch_size,\n collate_fn=self.collate_fn)\n else:\n raise ValueError(\"please notice that the data can only be train/val/test !!\")\n return dataloader\n\n\ndef train(epoches,train_loader,val_loader):\n # dataloader = 
NERDataLoader()\n # train_loader= dataloader.get_dataloader(data_sign='train')\n # print(len(train_loader))\n # val_loader= dataloader.get_dataloader(data_sign='valid')\n model = Bert_NER()\n model.to(device)\n optimizer=optim.Adam(model.parameters(),lr=1e-5)\n maxf=0\n for epoch in range(1, epoches + 1):\n optimizer.zero_grad()\n print(\"Epoch {}/{}\".format(epoch, epoches))\n model.train()\n loss_avg=utils.RunningAverage()\n t = trange(len(train_loader), ascii=True)\n for step, _ in enumerate(t):\n # fetch the next training batch\n batch = next(iter(train_loader))\n batch = tuple(t.to(device) for t in batch)\n input_ids,labels=batch\n\n predicts=model(input_ids)\n\n loss=binary_cross_entropy(predicts,labels.float())\n\n loss.backward()\n loss_avg.update(loss.item())\n t.set_postfix(loss='{:05.3f}'.format(loss.item()),avg_loss='{:05.3f}'.format(loss_avg()))\n optimizer.step()\n\n #evaluate\n _,p,r,f=evaluate(model,val_loader)\n print('{}epoch p is{:.3f} R is {:.3f} f-score is {:.3f}'.format(epoch, p, r, f))\n #模型保存\n if f > maxf:\n torch.save({\n 'Bert_NER_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n },'../models/ner_model-best_1.pkl')\n with open('../models/ner_model-best_1.csv', 'a+') as file:\n writer = csv.writer(file)\n writer.writerow(['epoch', 'loss_avg', 'precision', 'recall', 'f1'])\n writer.writerow([epoch,loss_avg(),p,r,f])\n maxf = f\n\ndef evaluate(model,data_loader):\n model.eval()\n predicts=[]\n labels_all=[]\n for batch in tqdm(data_loader, unit='Batch', ascii=True):\n batch = tuple(t.to(device) for t in batch)\n input_ids,labels=batch\n batch_size, max_len,_ = labels.size()\n with torch.no_grad():\n predict = model(input_ids)\n predicts.extend(predict)\n labels=labels.to('cpu'). numpy()\n labels=labels.squeeze().tolist()\n labels_all.extend(labels)\n pred_labels = [[1 if each[0] > 0.5 else 0 for each in line] for line in predicts ]\n val_ques,val_entity=load_corpus('valid')\n pred_entity=restore_entity_from_labels_on_corpus(pred_labels, val_ques)\n true_entity=restore_entity_from_labels_on_corpus(labels_all,val_ques)\n p, r, f = computeF( true_entity,pred_entity)\n return pred_entity,p,r,f\ndef restore_entity_from_labels_on_corpus(predicty,questions):\n def restore_entity_from_labels(labels,question):\n entitys = []\n str = ''\n labels = labels[1:-1]\n for i in range(min(len(labels),len(question))):\n if labels[i]==1:\n str += question[i]\n else:\n if len(str):\n entitys.append(str)\n str = ''\n if len(str):\n entitys.append(str)\n return entitys\n all_entitys = []\n for i in range(len(predicty)):\n all_entitys.append(restore_entity_from_labels(predicty[i],questions[i]))\n return all_entitys\n\ndef computeF(gold_entity,pre_entity):\n '''\n 根据标注的实体位置和预测的实体位置,计算prf,完全匹配\n 输入: Python-list 3D,值为每个实体的起始位置列表[begin,end]\n 输出: float\n '''\n truenum = 0\n prenum = 0\n goldnum = 0\n for i in range(len(gold_entity)):\n goldnum += len(gold_entity[i])\n prenum += len(pre_entity[i])\n truenum += len(set(gold_entity[i]).intersection(set(pre_entity[i])))\n try:\n precise = float(truenum) / float(prenum)\n recall = float(truenum) / float(goldnum)\n f = float(2 * precise * recall / (precise + recall))\n except:\n precise = recall = f = 0.0\n return precise, recall, f\ndef mention_extrate(corpus,model,data_loader):\n model.eval()\n predicts = []\n labels_all = []\n for batch in tqdm(data_loader, unit='Batch', ascii=True):\n batch = tuple(t.to(device) for t in batch)\n input_ids, labels = batch\n batch_size, max_len, _ = labels.size()\n with 
torch.no_grad():\n predict = model(input_ids)\n predicts.extend(predict)\n labels = labels.to('cpu').numpy()\n labels = labels.squeeze().tolist()\n labels_all.extend(labels)\n\n ques=[corpus[i]['question'] for i in range(len(corpus))]\n pred_labels = [[1 if each[0] > 0.5 else 0 for each in line] for line in predicts]\n pred_entity = restore_entity_from_labels_on_corpus(pred_labels, ques)\n\n return pred_entity\ndef getmentionfordevlop(corpus,path):\n #利用bert模型进行mention抽取\n model = Bert_NER()\n model.to(device)\n checkpoint = torch.load('../models/ner_model-best_1.pkl')\n model.load_state_dict(checkpoint['Bert_NER_state_dict'])\n dataloader = NERDataLoader()\n data_loader = dataloader.get_dataloaderbypath(path=path)\n pred_entity=mention_extrate(corpus,model,data_loader)\n # 分词词典分词提取mention\n with cs.open('../data/segment_dic.txt', 'r', 'utf-8') as fp:\n segment_dic = {}\n for line in fp:\n if line.strip():\n segment_dic[line.strip()] = 0\n jieba.load_userdict('../data/segment_dic.txt')\n\n for i in range(len(corpus)):\n dic = corpus[i]\n question = dic['question']\n entity_mention={}\n mentions = []\n tokens = jieba.lcut(question)\n for t in tokens:\n if t in segment_dic:\n mentions.append(t)\n me= mentions + pred_entity[i]\n for token in me:\n entity_mention[token] = token\n dic['entity_mention'] = entity_mention\n corpus[i] = dic\n print(question)\n print(dic['entity_mention'])\n print(\"问句数量:\",len(corpus))\n\n return corpus\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='train or use the NER model')\n parser.add_argument('--trainortest', type=str,default='train')\n args=parser.parse_args()\n setup_seed(20)\n #加载数据集\n if args.trainortest=='train':\n dataloader = NERDataLoader()\n train_loader = dataloader.get_dataloader(data_sign='train')\n val_loader = dataloader.get_dataloader(data_sign='valid')\n train(epoches=30,train_loader=train_loader,val_loader=val_loader)\n elif args.trainortest=='test':\n inputpaths=['../data/train_corpus.pkl', '../data/valid_corpus.pkl']\n outputpaths=['../data/entity_mentions_train.pkl','../data/entity_mentions_valid.pkl']\n for i in range(len(inputpaths)):\n inputpath=inputpaths[i]\n outputpath=outputpaths[i]\n corpus=pickle.load(open(inputpath,'rb'))\n corpus=getmentionfordevlop(corpus=corpus,path=inputpath)\n pickle.dump(corpus, open(outputpath, 'wb'))\n print('得到实体mention')\n\n\n\n","repo_name":"lwb69/TCM-KBQA","sub_path":"train_ner.py","file_name":"train_ner.py","file_ext":"py","file_size_in_byte":14701,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"25521539735","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n'''\nCreated on 2015-06-20\n\n@author: mizhon\n'''\nimport os\nimport re\nimport time\nimport shlex\nimport argparse\nimport textwrap\nimport subprocess\nimport ConfigParser\nfrom subprocess import PIPE\n\nfrom Utility import util\n\nfrom Logs import logger\n\nlog = logger.Log()\n\n\nclass CommonActions(object):\n \n tool = None\n provider = None\n \n 'database info fields'\n host = None\n db = None\n port = None\n user = None\n password = None\n instance_type = None # indicate whether the instance is RDS or database on VM host\n db_version = None # database version, e.g.: mysql5.5, mysql5.6 ...\n db_setup = None # including three types: 1.default; 2.high-safety (slave-master); 3.high-performance;\n long_stand = None # indicate whether the test is long-stand testing, default is False \n \n 'sysbench params fields'\n table_engine = None\n 
engine_trx = None\n test_mode = None\n tables_count = None\n table_size = None\n max_time = None\n max_requests = None\n threads = None\n interval = None\n percentile = None\n lua = None\n \n 'tpcc-mysql params fields'\n warehouse = None\n connection = None\n rampuptime = None\n measuretime = None\n intervaltime = None\n \n @classmethod\n def coma_receive_console_args(cls):\n parser = argparse.ArgumentParser(prog=\"AutoDBPerfTool\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=textwrap.dedent('''\n =============================================================\n AutoDBPerfTool used to automate database performance testing.\n =============================================================\n \n command example:\n python auto_run.py -a -c \n ''')\n )\n \n parser.add_argument(\"-v\", \"--version\", action=\"version\", version=\"%(prog)s 1.0.0\")\n parser.add_argument(\"-a\", \"--action\", help=\"specify execution type.\")\n parser.add_argument(\"-c\", \"--config\", help=\"specify config path.\")\n \n args = parser.parse_args()\n return args\n \n @classmethod\n def coma_parse_console_args(cls, console_args):\n try:\n dict_args = vars(console_args)\n \n action = None\n config_file_list = []\n \n args_list = dict_args.items()\n for arg in args_list:\n \n if arg[0] =='action':\n if arg[1] is not None:\n if arg[1] == 'prepare' or arg[1] == 'run' or arg[1] =='cleanup':\n action = arg[1]\n else:\n log.error(\"Invalid value detected for action: '%s', please choose action in 'prepare/run/cleanup'.\" % arg[1])\n else:\n content_error = arg[0]\n log.error(\"No value detected for param: '%s', please input -h/--help for usage.\" % content_error)\n elif arg[0] == 'config':\n if arg[1] is not None:\n config = arg[1]\n if os.path.exists(config):\n config_file_list = CommonActions.ca_get_config_list(config)\n else:\n content_error = arg[1]\n log.error(\"Invalid config file: '%s', please check the config file is exist.\" % arg[1])\n else:\n content_error = arg[0]\n log.error(\"No value detected for param: '%s', please input -h/--help for usage.\" % content_error)\n \n return (action, config_file_list)\n except Exception as e:\n log.error(e)\n \n @classmethod\n def coma_get_config_list(cls, config_path):\n try:\n config_list = []\n if os.path.isfile(config_path):\n if os.path.splitext(config_path)[1] == '.cfg':\n config_list.append(config_path)\n elif os.path.isdir(config_path):\n for config_file in os.listdir(config_path):\n file_path = os.path.join(config_path, config_file)\n if os.path.splitext(file_path)[1] == '.cfg':\n config_list.append(file_path)\n return config_list\n except Exception as e:\n log.error(e)\n \n @classmethod\n def coma_parse_config_params(cls, cfg_file):\n try:\n config = ConfigParser.RawConfigParser()\n config.read(cfg_file)\n \n CommonActions.tool = config.get(\"ToolInfo\", \"tool\")\n CommonActions.provider = config.get(\"ProviderInfo\", \"provider\")\n \n CommonActions.db_setup = config.get(\"DBInfo\", \"db_setup\")\n CommonActions.instance_type = config.get(\"DBInfo\", \"instance_type\")\n \n CommonActions.host = config.get(\"DBInfo\", \"host\")\n CommonActions.db = config.get(\"DBInfo\", \"db\")\n CommonActions.port = config.get(\"DBInfo\", \"port\")\n CommonActions.user = config.get(\"DBInfo\", \"user\")\n CommonActions.password = config.get(\"DBInfo\", \"password\")\n \n if CommonActions.tool == util.SYSBENCH: \n CommonActions.table_engine = config.get(\"RunInfo\", \"mysql-table-engine\")\n CommonActions.engine_trx = config.get(\"RunInfo\", 
\"mysql-engine-trx\")\n CommonActions.test_mode = config.get(\"RunInfo\", \"oltp-test-mode\")\n CommonActions.tables_count = config.get(\"RunInfo\", \"oltp-tables-count\")\n CommonActions.table_size = config.get(\"RunInfo\", \"oltp-table-size\")\n CommonActions.max_time = config.get(\"RunInfo\", \"max-time\")\n CommonActions.max_requests = config.get(\"RunInfo\", \"max-requests\")\n CommonActions.threads = config.get(\"RunInfo\", \"num-threads\")\n CommonActions.interval = config.get(\"RunInfo\", \"report-interval\")\n CommonActions.percentile = config.get(\"RunInfo\", \"percentile\")\n CommonActions.lua = config.get(\"RunInfo\", \"lua-script\")\n \n elif CommonActions.tool == util.TPCCMYSQL: \n CommonActions.warehouse = config.get(\"RunInfo\", \"warehouse\")\n CommonActions.connection = config.get(\"RunInfo\", \"connection\")\n CommonActions.rampuptime = config.get(\"RunInfo\", \"ramuptime\")\n CommonActions.measuretime = config.get(\"RunInfo\", \"meaasuretime\")\n CommonActions.intervaltime = config.get(\"RunInfo\", \"intervaltime\")\n \n except Exception as e:\n log.error(e)\n \n @classmethod\n def coma_get_cmds_list(cls, cmd_action, cfg_file):\n try:\n cmd_list = []\n CommonActions.ca_parse_config_params(cfg_file) # get basic settings from config file\n CommonActions.db_version = CommonActions.ca_get_mysql_version() # get mysql database relase version\n \n if CommonActions.tool == util.SYSBENCH:\n #cmd_list.append(SysbenchActions.sa_get_cmds(cmd_action))\n pass\n elif CommonActions.tool == util.TPCCMYSQL:\n #cmd_list.append(TpccmysqlActions.ta_get_cmds(cmd_action))\n pass\n \n return cmd_list\n \n except Exception as e:\n log.error(e)\n \n @classmethod\n def coma_exec_cmds(cls, cmd_str):\n cmd = shlex.split(cmd_str)\n p = subprocess.Popen(cmd, bufsize=-1, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n p.wait()\n \n time.sleep(util.SLEEP_TIME) # sleep 60 seconds between each execution\n \n res = p.communicate()\n cmd_result = []\n for cmd_line in res:\n cmd_result.append(cmd_line)\n \n return ''.join(cmd_result)\n \n @classmethod\n def coma_create_file_with_timestamp(cls, file_extension):\n try:\n file_name = []\n time_tag = time.strftime(\"%Y-%m-%d_%H%M\", time.localtime())\n \n file_name.append(CommonActions.provider + '@' + CommonActions.host)\n file_name.apoend(CommonActions.tool)\n file_name.append(CommonActions.threads + 'threads')\n file_name.append(CommonActions.max_time + 's')\n file_name.append(CommonActions.tables_count + 't')\n file_name.append(CommonActions.percentile + '%')\n file_name.append(time_tag + '.' 
+ file_extension)\n return '_'.join(file_name)\n except Exception as e:\n log.error(e)\n \n @classmethod\n def coma_get_extra_info(cls):\n try:\n extra_dict = {\n \"provider\": None,\n \"test\": None,\n \"run_info\": None\n }\n \n extra_dict[\"provider\"] = CommonActions.provider\n extra_dict[\"test\"] = CommonActions.tool\n \n run_info_dict = {}\n exec_time_dict = {}\n \n run_info_dict.setdefault(\"host_name\", CommonActions.host)\n run_info_dict.setdefault(\"db_name\", CommonActions.db)\n run_info_dict.setdefault(\"port\", CommonActions.port)\n run_info_dict.setdefault(\"owner\", CommonActions.user)\n run_info_dict.setdefault(\"instance_type\", CommonActions.instance_type)\n run_info_dict.setdefault(\"db_version\", CommonActions.db_version)\n run_info_dict.setdefault(\"db_setup\", CommonActions.db_setup)\n \n if CommonActions.tool == util.SYSBENCH:\n pass\n \n elif CommonActions.tool == util.TPCCMYSQL:\n pass\n \n extra_dict[\"run_info\"] = run_info_dict\n \n return extra_dict\n \n except Exception as e:\n log.error(e)\n \n @classmethod\n def cmoa_combine_cmds(cls, *params):\n try:\n flag = \" \"\n param_list = []\n \n for param in params:\n param_list.append(param)\n \n return flag.join(param_list)\n except Exception as e:\n log.error(e)\n \n @classmethod\n def cmoa_get_mysql_version(cls):\n str_chk_mysql_ver = CommonActions.ca_combine_cmds(util.DB_MYSQL,\n \"\".join([\"-h\", CommonActions.tool]),\n \"\".join([\"-u\", CommonActions.user]),\n \"\".join([\"-p\", CommonActions.password]),\n \"-e\",\n \"\\\"select version()\\\"\")\n mysql_ver = shlex.split(str_chk_mysql_ver)\n \n p = subprocess.Popen(mysql_ver, bufsize=-1, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n p.wait()\n \n mysql_ver_info = p.communicate()\n mysql_version = ''.join(re.findall(r\"\\d\\.\\d\\.\\d+-\\w+\", mysql_ver_info[0], re.M))\n \n return mysql_version\n \n \n \n \n \n \n \n ","repo_name":"mizhon/auto-db-tools","sub_path":"autodbperftool/ADT/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":11946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14678494144","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home', '0008_metadata_name'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='filepath',\n name='pooky',\n field=models.CharField(default=datetime.date(2015, 9, 11), max_length=80, serialize=False, primary_key=True),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='filepath',\n name='name',\n field=models.CharField(max_length=80, primary_key=True),\n ),\n ]\n","repo_name":"EMSL-MSC/pacifica-uploader","sub_path":"home/migrations/0009_auto_20150911_1649.py","file_name":"0009_auto_20150911_1649.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"23912814916","text":"import datetime\r\n\r\ndef digitos(info):\r\n if info == 'codigo':\r\n while True:\r\n try:\r\n codigo = input('\\nDigite o código do investimento: ').upper()\r\n if not codigo[:3].isalpha() or not codigo[4:].isnumeric() and len(codigo) < 5 or len(codigo) > 5:\r\n raise Exception\r\n return codigo\r\n except:\r\n print('\\nCódigo inválido. 
Tente novamente!')\r\n\r\n elif info == 'data':\r\n while True:\r\n try:\r\n data = input('\\nDigite a data do investimento: ')\r\n data_convertida = datetime.datetime.strptime(data, \"%d/%m/%Y\").strftime(\"%Y-%m-%d\")\r\n return data_convertida\r\n except:\r\n print('\\nData inválida. Tente novamente!')\r\n\r\n elif info == 'quantidade':\r\n while True:\r\n try:\r\n quantidade = int(input('\\nDigite a quantidade do investimento: '))\r\n if quantidade < 1:\r\n raise Exception\r\n return quantidade\r\n except:\r\n print('\\nQuantidade inválida. Tente novamente!')\r\n\r\n elif info == 'valor_unidade':\r\n while True:\r\n try:\r\n valor_unidade = float(input('\\nDigite o valor da unidade do investimento: '))\r\n if valor_unidade <= 0:\r\n raise Exception\r\n return valor_unidade\r\n except:\r\n print('\\nValor inválido. Tente novamente!')\r\n\r\n elif info == 'tipo':\r\n while True:\r\n try:\r\n tipo = input('\\nDigite o tipo do investimento: ').upper().strip()\r\n if tipo != 'COMPRA' and tipo != 'VENDA':\r\n raise Exception\r\n return tipo\r\n except:\r\n print('\\nTipo inválido. Tente novamente!')\r\n\r\n elif info == 'taxa_de_corretagem':\r\n while True:\r\n try:\r\n taxa_de_corretagem = float(input('\\nDigite a taxa de corretagem do investimento: '))\r\n if taxa_de_corretagem <= 0:\r\n raise Exception\r\n return taxa_de_corretagem\r\n except:\r\n print('\\nValor da taxa inválido. Tente novamente!')\r\n\r\n elif info == 'escolha':\r\n while True:\r\n try:\r\n escolha = int(input('\\nDigite sua escolha: '))\r\n if escolha < 0 or escolha > 6:\r\n raise Exception\r\n return escolha\r\n except:\r\n print('\\nEscolha inválida. Tente novamente!')\r\n\r\ndef menu():\r\n return '\\n[1]_ Criar investimento. [2]_ Modificar investimento. [3]_ Listar investimentos. [4]_ Deletar investimento. [5]_ Detalhar ativo. [6]_Lucro/prejuízo da carteira. [0]_ Finalizar.'\r\n\r\ndef legenda_investimento(ativo):\r\n if ativo == None:\r\n return '____________________________________________________________________________________________________________________________\\nID| CÓDIGO | DATA | QUANTIDADE | VALOR DA UNIDADE | TIPO | TAXA DE CORRETAGEM | VALOR DA OPERAÇÃO | VALOR TOTAL |'\r\n else:\r\n return '_________________________________________________________________________________________________________________________________________\\nID| CÓDIGO | DATA | QUANTIDADE | VALOR DA UNIDADE | TIPO | TAXA DE CORRETAGEM | VALOR DA OPERAÇÃO | VALOR TOTAL | PREÇO MÉDIO |'\r\n\r\ndef linhas_horizontais(ativo):\r\n if ativo == None:\r\n return '--+---------+------------+------------+------------------+--------+----------------------+-------------------+-------------+'\r\n else:\r\n return '--+---------+------------+------------+------------------+--------+----------------------+-------------------+-------------+-------------+'\r\n \r\ndef opcoes_de_mudanca():\r\n return '\\n[1]_ Código. [2]_ Data [3]_ Quantidade. [4]_ Valor Unidade. [5]_ Tipo. 
[6]_ Taxa de Corretagem.'\r\n","repo_name":"cauergds16/trabalho-metodos-ageis-266-ACV","sub_path":"codigo/funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43101781799","text":"# Import PaLM API Key from cred.py\nfrom bs4 import BeautifulSoup\n\nimport requests\nfrom cred import palm_api_key, last_fm_api_key\nfrom youtube_search import YoutubeSearch\nimport google.generativeai as palm\n\npalm.configure(api_key = palm_api_key)\n\n# Define a song class\nclass Song:\n def __init__(self, song, artist, song_link, album_art):\n self.song = song\n self.artist = artist\n self.song_link = song_link\n self.album_art = album_art\n\ndef search_youtube(query):\n results = YoutubeSearch(query + 'VEVO Audio', max_results=1).to_dict()\n # print(results)\n if results:\n # Get the id of the first result with a duration under 13 minutes\n video_id = None\n for result in results:\n # Under 13 minutes\n if result['duration'].count(':') == 1 and int(result['duration'].split(':')[0]) < 13:\n video_id = result['id']\n\n # print('Song located...')\n return video_id\n else:\n return None\n\ndef get_links(songs):\n\n songs_with_links = []\n\n for song_object in songs:\n song = song_object.song\n artist = song_object.artist\n\n # Check if artist is unknown or anonymous ignore case\n if (artist.lower() == 'unknown' or artist.lower() == 'anonymous'):\n video_id = search_youtube(song + ' VEVO Audio')\n else:\n video_id = search_youtube(song + ' by ' + artist + ' VEVO Audio')\n \n if (video_id):\n song_object.song_link = 'https://www.youtube.com/watch?v=' + video_id\n\n # Add the song to the list\n songs_with_links.append(song_object)\n # No link found \n else:\n # Remove the song\n songs.remove(song_object)\n \n return songs_with_links\n\ndef fetch_album_art(track_name, artist_name):\n # Assign API Key\n api_key = last_fm_api_key\n\n # Get only the first artist to increase reliability, delimited by commas\n if ',' in artist_name:\n artist_name = artist_name.split(',')[0]\n\n\n # Search for the track by title and artist\n search_url = f'http://ws.audioscrobbler.com/2.0/?method=track.search&track={track_name}&artist={artist_name}&api_key={api_key}&format=json'\n response = requests.get(search_url)\n data = response.json()\n\n # Check if the search returned any results\n if 'results' in data and 'trackmatches' in data['results'] and 'track' in data['results']['trackmatches']:\n tracks = data['results']['trackmatches']['track']\n \n # Check if any tracks were returned\n if tracks:\n # Get the first track result\n track = tracks[0]\n\n # Get the track's information\n track_name = track['name']\n artist_name = track['artist']\n\n # Get track info to retrieve an image\n track_info_url = f'http://ws.audioscrobbler.com/2.0/?method=track.getInfo&track={track_name}&artist={artist_name}&api_key={api_key}&format=json'\n response = requests.get(track_info_url)\n track_info = response.json()\n\n # Check if track info contains image data\n if 'track' in track_info and 'album' in track_info['track'] and 'image' in track_info['track']['album']:\n images = track_info['track']['album']['image']\n # Get the largest available image (usually the last one)\n image_url = images[-1]['#text']\n return image_url\n else:\n return None\n else:\n return None\n\ndef get_art(songs):\n # Loop through each song in the list and iterate through the proxies by poping them off\n for song in songs:\n # Fetch the album art\n 
try:\n song.album_art = fetch_album_art(song.song, song.artist)\n # Assign the album art to the song object\n song.album_art = song.album_art\n except Exception as e:\n print('Error fetching album art.')\n\ndef generateText(prompt):\n system = \"Act as a playlist generator. Respond to requests with a list of songs in the following format: ' * Song Title | Artist Names'. List the songs in a bulleted lists. Strict adherence to this format is required the song title must always come first followed by the artists seperated by a pipe symbol rather than the word 'by'. Multiple artists should be separated by a comma.\"\n\n completion = palm.generate_text(\n model='models/text-bison-001',\n prompt= system + ' ' + prompt,\n temperature=0.7,\n # The maximum length of the response\n max_output_tokens=800\n )\n\n return completion.result\n\n\ndef prompt(prompt):\n\n print('Generating playlist...')\n reply = generateText('I want to hear: ' + prompt + 'Please generate a playlist in the predefined format (\"Title of the Song\" | \"Artist\").')\n\n # print(reply)\n\n # Playlist\n songs = []\n\n count = 0\n\n # Parse the songs and artists\n try:\n # Get each line of the reply split by a new line\n for line in reply.split('\\n'):\n # Check if the line starts with a bullet point\n if (line.strip().startswith('*')):\n count += 1\n if count > 20:\n break\n # Remove the asterisks\n line = line.replace('*', '')\n # Remove any quotation marks and trim\n line = line.replace('\\\"', '').strip()\n # Get the song\n song = line.split('|')[0].strip()\n # Check if the song is prefaced with 'Title:'\n if(song.startswith('Title:')):\n # Remove the 'Title:'\n song = song.replace('Title:', '').strip()\n # Get the artist\n artist = line.split('|')[1].strip()\n # Ensure song is not header/example\n print(song.lower())\n if(song.lower() != 'title' and song.lower() != 'song title' and song.lower() != 'title of the song'):\n # Define a song object\n new_song = Song(song, artist, None, None)\n # Add the song to the list\n songs.append(new_song)\n # print(song + ' - ' + artist)\n except Exception as e:\n print(e)\n return None\n\n # Print the number of songs\n print('Generated ' + str(len(songs)) + ' songs.')\n \n print('Sourcing links...')\n songs = get_links(songs)\n\n # Print the number of songs\n print('Found ' + str(len(songs)) + ' links.')\n\n print('Sourcing cover art...')\n song = get_art(songs)\n print('Done.')\n\n\n\n # Create a link to a preview of the playlist\n playlist_link = 'https://www.youtube.com/watch_videos?video_ids='\n\n for song in songs:\n link = song.song_link\n # Remove preface to isolate the video id\n link = link.replace('https://www.youtube.com/watch?v=', '')\n # Ad video to playlist preview link\n playlist_link += link + ','\n \n # Remove the last comma\n playlist_link = playlist_link[:-1]\n\n print('\\nPlaylist preview: ' + playlist_link + '\\n')\n\n # Convert songs to JSON format using __dict__\n songs = [song.__dict__ for song in songs]\n\n # TODO: Create a YouTube Playlist using this format: https://www.youtube.com/watch_videos?video_ids=fOe8aEqoN_M,qfZ2P2sWLZw,cew0MuXESGw,1bLlmKzIx0A,CZzcVs8tNfE,O4nHFQMeGo8\n\n return songs","repo_name":"MitchellZ/Playlist-AI-API","sub_path":"palm_service.py","file_name":"palm_service.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11148487841","text":"from obspy.clients.fdsn import Client\nfrom obspy.clients.fdsn.mass_downloader import RectangularDomain, 
\\\n Restrictions, MassDownloader\nfrom obspy import UTCDateTime\nfrom obspy import Inventory\nfrom typing import Union, List\nimport os\nimport logging\nfrom typing import Union, Tuple\nfrom obspy import UTCDateTime, Stream, Inventory\nfrom obspy.clients.fdsn import Client\n\n\ndef download_stream(origintime: UTCDateTime, duration: float = 7200,\n network: Union[str, None] = \"IU,II\",\n station: Union[str, None] = None,\n location: Union[str, None] = \"00\",\n channel: Union[str, None] = \"BH*\",\n starttimeoffset: float = 0.0,\n endtimeoffset: float = 0.0, dtype='both',\n client_id: str = \"IRIS\",\n ) -> Tuple[Stream, Inventory]:\n \"\"\"Function to download data for a seismic section. Note that this will not\n store the data. It will only download the data into a Stream object, and\n optionally into a corresponding inventory.\n\n Parameters\n ----------\n origintime : UTCDateTime\n origintime of an earthquake\n duration : float, optional\n length of download in seconds, by default 7200\n network : str or None, optional\n Network restrictions, by default \"IU,II\"\n station : str or None, optional\n station restrictions, by default None\n location : str or None, optional\n location restrictions, by default \"00\"\n channel : str or None, optional\n channel restrictions, by default \"BH*\"\n starttimeoffset : float, optional\n set startime to later or earlier, by default 0.0\n endtimeoffset : float, optional\n set endtime to earlier or later, by default 0.0\n\n Returns\n -------\n Tuple[Stream, Inventory]\n tuple with a stream and an inventory\n\n Raises\n ------\n\n ValueError\n If wrong download type is provided.\n\n Notes\n -----\n\n :Author:\n Lucas Sawade (lsawade@princeton.edu)\n\n :Last Modified:\n 2021.01.13 11.00\n\n \"\"\"\n\n if dtype not in ['data', 'stations', 'both']:\n raise ValueError(\n \"download type must be 'data', 'stations', or 'both'.\")\n\n # Get times\n starttime = origintime + starttimeoffset\n endtime = origintime + duration + endtimeoffset\n\n # main program\n client = Client(client_id)\n\n # Download the data\n if (dtype == 'both') or (dtype == \"data\"):\n st = client.get_waveforms(network, station, location, channel,\n starttime, endtime)\n if (dtype == 'both') or (dtype == \"stations\"):\n inv = client.get_stations(network=network, station=station,\n location=location, channel=channel,\n starttime=starttime, endtime=endtime,\n level=\"response\")\n if dtype == 'both':\n return st, inv\n elif dtype == 'stations':\n return inv\n elif dtype == 'data':\n return st\n\n\ndef download_to_storage(\n datastorage: str,\n starttime: UTCDateTime,\n endtime: UTCDateTime,\n minimum_length: float = 0.9,\n reject_channels_with_gaps: bool = True,\n network: Union[str, None] = \"IU,II,G\",\n station: Union[str, None] = None,\n channel: Union[str, None] = None,\n location: Union[str, None] = None,\n providers: Union[List[str], None] = [\"IRIS\"],\n minlatitude: float = -90.0,\n maxlatitude: float = 90.0,\n minlongitude: float = -180.0,\n maxlongitude: float = 180.0,\n location_priorities=None,\n channel_priorities=None,\n limit_stations_to_inventory: Union[Inventory, None] = None,\n waveform_storage: str = None,\n station_storage: str = None,\n logfile: str = None,\n client: Client | List[Client] | None = None,\n **kwargs):\n\n domain = RectangularDomain(minlatitude=minlatitude,\n maxlatitude=maxlatitude,\n minlongitude=minlongitude,\n maxlongitude=maxlongitude)\n\n # Create Dictionary with the settings\n rdict = dict(\n starttime=starttime,\n endtime=endtime,\n 
reject_channels_with_gaps=reject_channels_with_gaps,\n # Trace needs to be almost full length\n minimum_length=minimum_length,\n network=network,\n station=station,\n location=location,\n channel=channel,\n location_priorities=location_priorities,\n channel_priorities=channel_priorities,\n limit_stations_to_inventory=limit_stations_to_inventory,\n sanitize=True\n )\n\n # Remove unset settings\n if not location_priorities:\n rdict.pop('location_priorities')\n if not channel_priorities:\n rdict.pop('channel_priorities')\n\n restrictions = Restrictions(**rdict)\n\n # Datastorage:\n if waveform_storage is None:\n waveform_storage = os.path.join(datastorage, 'waveforms')\n if station_storage is None:\n station_storage = os.path.join(datastorage, 'stations')\n\n # Get the logger from the obspy package\n logger = logging.getLogger(\"obspy.clients.fdsn.mass_downloader\")\n\n # Setup the logger to print to file instead of stdout/-err\n if logfile is not None:\n # Remove Stream handler (prints stuff to stdout)\n logger.handlers = []\n\n # Add File handler (prints stuff to file)\n fh = logging.FileHandler(logfile, mode='w')\n fh.setLevel(logging.DEBUG)\n\n # Add file handler\n logger.addHandler(fh)\n\n if client is not None:\n providers = client\n\n # Create massdownloader\n mdl = MassDownloader(providers=providers)\n logger.debug(f\"\\n\")\n logger.debug(f\"{' Downloading data to: ':*^72}\")\n logger.debug(f\"MSEEDs: {waveform_storage}\")\n logger.debug(f\"XMLs: {station_storage}\")\n\n mdl.download(domain, restrictions, mseed_storage=waveform_storage,\n stationxml_storage=station_storage, **kwargs)\n\n logger.debug(\"\\n\")\n logger.debug(72 * \"*\")\n logger.debug(\"\\n\")\n","repo_name":"lsawade/GF3D","sub_path":"src/gf3d/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":6152,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"5049404947","text":"import Content, Config\nimport httplib\nfrom BeautifulSoup import BeautifulSoup\n\n\nreq = '{BODY}'\nhost = 'ivsmedia.iptv-distribution.net'\nproxy = 'ivsmedia.iptv-distribution.net'#'ivsmedia.iptv-distribution.net'\nport = 80\n\n\ndef SetSett(sess):\n try:\n # new XBMC 10.05 addons:\n import xbmcaddon\n except ImportError:\n # old XBMC - create fake xbmcaddon module with same interface as new XBMC 10.05\n class xbmcaddon:\n \"\"\" fake xbmcaddon module \"\"\"\n __version__ = \"(old XBMC)\"\n class Addon:\n \"\"\" fake xbmcaddon.Addon class \"\"\"\n def __init__(self, id):\n self.id = id\n \n def getSetting(self, key):\n return xbmcplugin.getSetting(key)\n \n def openSettings(self):\n xbmc.openSettings()\n def setSetting(self, key, value):\n return xbmcplugin.setSetting(key, value)\n \n addon = xbmcaddon.Addon(\"plugin.video.brt\")\n quality = ['HQ', 'SQ']\n q = int(addon.getSetting('quality'))\n region = ['EU_RST', 'NA_PST', 'NA_EST','AU_EST']\n r = int(addon.getSetting('region'))\n server = ['1','7']\n s = int(addon.getSetting('server'))\n req = '' \\\n '' + sess + '' \\\n '' \\\n 'falserus' \\\n '' \\\n '' + server[s] + '' \\\n '' + quality[q] + '' + region[r] + '' \\\n '' + region[r] + ''\n Request(req, 'SetSettings')\n\ndef Login(Username = '',Password = ''):\n x = Content.Application().ClientAppSettings\n x.clientCredential.UserLogin = Username\n x.clientCredential.UserPassword = Password\n x.appSettings.appName ='XBMC'\n temp = req.replace('{BODY}', '' + x.get() + '')\n soup = BeautifulSoup(Request(temp, 'Login'))\n try:\n x.appSettings.sessionID = 
soup(\"b:sessionid\")[0].text\n except:\n x.appSettings.sessionID = \"\"\n return x\n \n\ndef Request(str, action):\n conn = httplib.HTTPConnection(proxy, port)\n conn.request('POST', Config.ClientService, str, {\n 'Host': host,\n 'SOAPAction': 'http://' + host + '/ClientService/' + action,\n 'Content-Type': 'text/xml; charset=utf-8'\n \n }) \n response = conn.getresponse()\n data = response.read()\n return data","repo_name":"tatigo/XBMC-BestRussianTVPlugin","sub_path":"ClientService.py","file_name":"ClientService.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1211684242","text":"# put your python code here\nmegan_time_hours = int(input())\nmegan_time_minutes = int(input())\nmegan_time_seconds = int(input())\nrain_time_hours = int(input())\nrain_time_minutes = int(input())\nrain_time_seconds = int(input())\nseconds_per_hour = 3600\nseconds_per_minute = 60\nprint(abs(((megan_time_hours * seconds_per_hour)\n + (megan_time_minutes * seconds_per_minute)\n + megan_time_seconds)\n - ((rain_time_hours * seconds_per_hour)\n + (rain_time_minutes * seconds_per_minute)\n + rain_time_seconds)))\n","repo_name":"advantager/Zookeeper","sub_path":"Topics/Program with numbers/Difference of times/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7484756264","text":"\"\"\"\nNetworks Lab 3: UDP Socket Programming\n\nServer code.\n\nBy:\nClemence Goh (1002075)\nCheryl Goh (1002421)\n\"\"\"\n\nimport socket as S\nimport json\n\n\ndef getMissingNumbers(_all_data, _last_number):\n to_return = []\n for i in range(int(_last_number)):\n if i not in _all_data:\n to_return.append(i)\n return to_return\n\n\ndef serverSide():\n sock = S.socket(S.AF_INET, S.SOCK_DGRAM)\n sock.bind(('localhost', 5555))\n\n # sever side counting of objects\n serverCounter = -1\n all_data = {}\n\n while True:\n serverCounter += 1\n data, addrs = sock.recvfrom(4096)\n data = json.loads(data.decode())\n\n if data[\"Payload\"] == \"End\":\n break\n\n all_data[data[\"ID\"]] = 1\n\n # debugger\n # print(\"Payload: {} \\nID: {} \\nAddresses: {} \\n\\n\".\n # format(data[\"Payload\"], data[\"ID\"], addrs))\n\n missed_data = getMissingNumbers(all_data, data[\"ID\"])\n\n print(\"--------- Report ---------\")\n print(\"Total expected:\", data[\"ID\"])\n if missed_data:\n print(\"! WARNING !\")\n print(\"Data missed:\", str(missed_data))\n print(\"! 
WARNING !\")\n print(\"------- End Report -------\")\n\n\n\n\nif __name__==\"__main__\":\n serverSide()\n","repo_name":"clemencegoh/SUTD_Networks_50.012","sub_path":"lab/week3/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5167770495","text":"#!/usr/bin/python3\n\nimport http.client\nimport json\nimport os\nimport urllib\nimport socket\nimport subprocess\nimport pwd\nimport sys\nfrom stat import *\n\nQ_HOST = \"\"\nQ_PORT = \"8000\"\nQ_USER = \"\"\nQ_PASS = \"\"\n\ndef _send_req(method, uri, data, hdrs):\n c = http.client.HTTPSConnection(Q_HOST, Q_PORT)\n \n try:\n if data != None:\n json_data = json.dumps(data)\n else:\n json_data = None\n \n c.request(method, uri, json_data, hdrs)\n rsp = c.getresponse()\n \n if (rsp.status != 200):\n raise Exception(\"%d: %s\" % (rsp.status, rsp.reason))\n\n j = rsp.read().decode()\n \n return(json.loads(j))\n\n except Exception as e:\n message = os.strerror(e.errno) if hasattr(e, 'errno') else str(e)\n print(message)\n sys.exit(-1)\n\ndef auth():\n\n r = _send_req(\"POST\", \"/v1/session/login\", { \"username\": Q_USER, \"password\": Q_PASS }, { 'Content-Type': \"application/json\" })\n\n return(r['bearer_token'])\n\ndef getHPCquotas(token):\n q = {}\n done = False\n\n uri = \"/v1/files/quotas/status/\"\n\n while not done:\n r = _send_req(\"GET\", uri, None, { \"Authorization\": \"Bearer \" + token }) \n uri = r['paging']['next']\n\n if (uri == \"\"):\n done = True\n \n for e in r['quotas']:\n q[e['path']] = { 'limit' : e['limit'] }\n \n return(q)\n\napi_token = auth()\nquotas = getHPCquotas(api_token)\n\nfor q in quotas:\n print(\"%s:%s\" % (q, quotas[q]['limit']))\n","repo_name":"redsox38/qumulo_quota_tools","sub_path":"get_quotas.py","file_name":"get_quotas.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40959180732","text":"# Import required modules\nfrom tkinter import Tk, Label, Entry, StringVar, OptionMenu, Button, filedialog, END\nfrom gtts import gTTS\nfrom pypdf import PdfReader\n\n\nclass PDFToAudioConverter():\n def __init__(self, root):\n \"\"\"Initiate the GUI\"\"\"\n self.root = root\n self.root.title(\"Markanov Text to Audio Converter\")\n self.root.config(bg=\"light yellow\")\n self.root.resizable(True, True)\n self.root.columnconfigure(0, weight=1)\n self.root.rowconfigure(0, weight=1)\n\n # Make it appear on the center of the screen\n window_width = 370\n window_height = 250\n screen_width = self.root.winfo_screenwidth()\n screen_height = self.root.winfo_screenheight()\n x = (screen_width - window_width) // 2\n y = (screen_height - window_height) // 2\n self.root.geometry(\"{}x{}+{}+{}\".format(window_width, window_height, x, y))\n\n # Add some labels\n self.manual_lbl = Label(root, bg=\"light yellow\",\n text=\"This app converts text to speech by getting text from a PDF file\\nand converts it into an MP3 file.\\nIt can only read text from proper PDF files,\\ntext within images (OCR-ing) is not yet supported.\",\n anchor=\"w\", justify=\"left\")\n self.manual_lbl.grid(column=0, row=0, columnspan=2, padx=5, pady=5, sticky=\"w\")\n\n self.prompt_lbl = Label(root, bg=\"light yellow\",text=\"Please select a PDF file:\")\n self.prompt_lbl.grid(column=0, row=1, sticky=\"w\")\n\n self.language_lbl = Label(root, bg=\"light yellow\", text=\"Select Language:\")\n self.language_lbl.grid(column=0, row=4, 
sticky=\"w\")\n\n self.status_lbl = Label(root, bg=\"light yellow\", text=\"\", anchor=\"w\", justify=\"left\")\n self.status_lbl.grid(column=0, row=5, columnspan=2, pady=5)\n\n # Entry that displays the selected file\n self.selected_file = Entry(root, state=\"readonly\", width=300)\n self.selected_file.grid(column=0, row=3, pady=5)\n self.selected_file.insert(0, \"No file selected\")\n\n # Option to choose between available languages\n self.selected_language = StringVar()\n language_options = [\"en\", \"es\", \"fr\", \"bs\", \"de\", \"hr\", \"iw\"]\n self.language_menu = OptionMenu(root, self.selected_language, *language_options)\n self.language_menu.grid(column=1, row=4)\n self.selected_language.set(\"en\")\n\n # Add some buttons\n self.select_btn = Button(root, text=\"Select File\", command=self.select_pdf_file)\n self.select_btn.grid(column=0, row=2, sticky=\"w\")\n\n self.convert_btn = Button(root, text=\"Convert to MP3\",\n command=self.convert_to_mp3, state=\"disabled\")\n self.convert_btn.grid(column=1, row=2)\n\n self.pdf_file_path = \"\"\n\n\n def select_pdf_file(self):\n \"\"\"Selects the PDF file.\"\"\"\n self.pdf_file_path = filedialog.askopenfilename(filetypes=[(\"PDF Files\", \"*.pdf\")])\n if self.pdf_file_path:\n self.selected_file.config(state=\"normal\")\n self.selected_file.delete(0, END)\n self.selected_file.insert(0, self.pdf_file_path)\n self.selected_file.config(state=\"readonly\")\n self.convert_btn.config(state=\"normal\", bg=\"light green\")\n else:\n self.selected_file.config(state=\"readonly\")\n self.selected_file.delete(0, END)\n self.selected_file.insert(0, \"No file selected\")\n self.convert_btn.config(state=\"disabled\")\n\n def get_text_from_pdf(self):\n \"\"\"Get the text from chosen PDF file. Only proper PDF files are supported,\n no OCR-ing from images.\"\"\"\n if not self.pdf_file_path:\n return\n\n text = \"\"\n\n with open(self.pdf_file_path, \"rb\") as pdf_file:\n pdf_reader = PdfReader(pdf_file)\n for page in pdf_reader.pages:\n content = page.extract_text()\n text += content\n \n return text\n\n def get_audio_from_text(self, text, language):\n \"\"\"Get the audio from text using Google Text to Speech technology.\"\"\"\n text_to_speech = gTTS(text=text, lang=language, slow=False)\n\n mp3_file_path = filedialog.asksaveasfilename(defaultextension=\"mp3\",\n filetypes=[(\"MP3 Files\", \"*.mp3\")])\n if mp3_file_path:\n text_to_speech.save(mp3_file_path)\n\n def convert_to_mp3(self):\n \"\"\"Convert the text to mp3 audio file.\"\"\"\n text = self.get_text_from_pdf()\n if text is not None:\n selected_language = self.selected_language.get()\n self.status_lbl.config(text=\"Converting PDF to MP3 file...\", bg=\"light yellow\")\n self.root.update()\n\n try:\n self.get_audio_from_text(text, selected_language)\n self.status_lbl.config(text=\"Text converted to audio file successfully!\",\n bg=\"light green\")\n except Exception as e:\n self.status_lbl.config(text=f\"Error: {e}\", bg=\"red\")\n \n else:\n self.status_lbl.config(text=\"Error: Could not read text from PDF file.\",\n bg=\"red\")\n\n\nif __name__ == \"__main__\":\n root = Tk()\n app = PDFToAudioConverter(root)\n root.mainloop()","repo_name":"KooMar22/Text_to_Speech","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17014148350","text":"from copy import deepcopy\n\nfrom GameLogic import AssetFundNetwork\nfrom GameLogic.GameState import GameState\nfrom 
GameLogic.Players import Attacker, Defender\n\n\nclass SimultaneousGame:\n def __init__(self, init_state: GameState, network: AssetFundNetwork, attacker: Attacker, defender: Defender):\n self.state = init_state\n self.network = network\n self.attacker = attacker\n self.defender = defender\n\n def game_reward(self):\n return self.defender.game_reward(self.network.funds)\n\n def game_ended(self):\n return self.attacker.is_goal_achieved(self.network.funds) or self.attacker.resources_exhusted()\n\n def get_valid_actions(self):\n return self.state.get_valid_actions()\n\n def apply_actions(self, attacker_orders, defender_orders, fund_orders):\n self.market.submit_sell_orders(attacker_orders)\n self.market.submit_buy_orders(defender_orders)\n self.market.submit_sell_orders(fund_orders)\n self.market.apply_actions()\n self.network.apply_actions()\n\n def play_single_game(self):\n state = GameState.TwoPlayersSimultaneousGameState(self.network, self.attacker, self.defender)\n if self.config.verbose:\n self.print_portfolios(state.network, self.attacker)\n moves_counter = 0\n #first move by attacker only\n attacker_orders = self.attacker.get_action(state)\n self.apply_actions(attacker_orders, [], [])\n while not state.game_ended():\n attacker_orders = self.attacker.get_action()\n defender_orders = self.defender.get_action()\n liquidation_orders = self.network.get_liquidation_orders()\n self.apply_actions(attacker_orders, defender_orders, liquidation_orders)\n\n if state.game_ended():\n self.stats.update_stats(state.get_winner(), moves_counter)\n","repo_name":"anathash/flash-crash-game","sub_path":"src/GameLogic/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39880560678","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Executable file which contains solver and visualizer\"\"\"\n\nimport argparse\nfrom schroed_solver import solver\n\nPARSER = argparse.ArgumentParser(description='Executes solver for the\\\n Schroedinger equation')\n\nMSG = 'Set directory of the figures\\\n (default value: ./inputdata/potential_well)'\nPARSER.add_argument('-d', '--directory', default='./inputdata/potential_well',\n help=MSG)\n\nARGS = PARSER.parse_args()\nprint(\"Directory: {}\".format(ARGS.directory))\n\nsolver(ARGS.directory)\n","repo_name":"willmajo/schroedinger_project","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73997380881","text":"\"\"\"\n# Definition for Employee.\nclass Employee:\n def __init__(self, id: int, importance: int, subordinates: List[int]):\n self.id = id\n self.importance = importance\n self.subordinates = subordinates\n\"\"\"\n\nclass Solution:\n def getImportance(self, employees: List['Employee'], id: int) -> int:\n \n emp_map = {}\n \n for each_emp in employees:\n emp_map[each_emp.id] = [each_emp.importance, each_emp.subordinates]\n \n emp_eval = [id]\n total_imp = 0\n \n while emp_eval:\n \n cur_emp = emp_eval.pop(0)\n total_imp += emp_map[cur_emp][0]\n emp_eval += emp_map[cur_emp][1]\n \n return total_imp\n \n","repo_name":"KajalGada/leetcode-python","sub_path":"leetcode 690 Employee Importance/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"4569802321","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\nfrom PIL import Image, ImageDraw\nfrom sys import argv\nfrom tempfile import mkdtemp\n\ncompetitors = {\n \"getBotMove\": [\"./getBotMove\", \"--depth=4\"],\n \"getPgnMove\": [\"./getPgnMove\", \"scoreCacheSorted.txt\"],\n}\n\ndef interpretBoardString(boardString):\n result = []\n for i in boardString:\n if i.isalpha():\n result += [i]\n elif i.isdigit():\n result += [\" \"] * int(i)\n elif i != \"/\":\n print(\"Unknown Symbol: \" + i)\n if len(result) < 64:\n result += [\"\"] * (64 - len(result))\n return result\n\ndef apply(board, move):\n result = board\n startPos = ord(move[1].lower()) - ord(\"a\") + (ord(\"8\") - ord(move[2])) * 8\n endPos = ord(move[3].lower()) - ord(\"a\") + (ord(\"8\") - ord(move[4])) * 8\n for i in [1, 2, 3, 4, 5, 6, 7, 8]:\n result = result.replace(str(i), \" \" * i)\n result = result.replace(\"/\", \"\")\n if result[startPos] == move[0]:\n if move[5] != move[0] and move[5] != \" \" and move[5] != \"\\n\":\n result = result[:endPos] + move[5] + result[endPos+1:]\n else:\n result = result[:endPos] + result[startPos] + result[endPos+1:]\n else:\n print(\"Error: \\\"\" + result[startPos] + \"\\\" != \\\"\" + move[0] + \"\\\"\")\n exit(0)\n result = result[:startPos] + \" \" + result[startPos+1:]\n result = '/'.join(result[i:i+8] for i in range(0, len(result), 8))\n for i in [8, 7, 6, 5, 4, 3, 2, 1]:\n result = result.replace(\" \" * i, str(i))\n if move.startswith(\"ke8g8\"):\n return apply(result, \"rh8f8\\n\")\n elif move.startswith(\"ke8c8\"):\n return apply(result, \"ra8d8\\n\")\n elif move.startswith(\"Ke1g1\"):\n return apply(result, \"Rh1f1\\n\")\n elif move.startswith(\"Ke1c1\"):\n return apply(result, \"Ra1d1\\n\")\n else:\n return result\n\ndef display(boardString, filename):\n width = 1366\n height = 768\n command = \"/usr/lib/w3m/w3mimgdisplay\"\n\n board = interpretBoardString(boardString)\n pieces = {}\n img = Image.open(\"board.png\")\n for i in range(8):\n for j in range(8):\n char = board[i * 8 + j]\n if char != \"\" and char != \" \":\n if not char in pieces:\n pieces[char] = Image.open(\"pieces/\" + char + \".png\").resize((64, 64), resample=Image.BOX)\n img.paste(pieces[char], (j * 64 + 32, i * 64 + 32), pieces[char].convert(\"RGBA\"))\n img.save(filename)\n width = min(width, img.size[0])\n height = min(height, img.size[1])\n inputstr = (\"0;1;0;40;\" + str(width) + \";\" + str(height) + \";;;;;\" + filename + \"\\n4;\\n3;\").encode(\"utf-8\")\n subprocess.run([command], input=inputstr, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n\ndef main():\n for i in competitors:\n for j in competitors:\n knownBoards = []\n currentBoard = \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR\"\n filename = \"board-\" + i + \"-\" + j + \".png\"\n if os.path.isfile(filename):\n continue\n for counter in range(1000):\n knownBoards += [currentBoard]\n currentMove = subprocess.run(competitors[i] + [\"--play-white\", currentBoard], stdout=subprocess.PIPE).stdout.decode(\"utf-8\")\n print(currentMove[:-1])\n currentBoard = apply(currentBoard, currentMove)\n display(currentBoard, filename)\n if not \"K\" in currentBoard or not \"k\" in currentBoard or knownBoards.count(currentBoard) > 10:\n break\n knownBoards += [currentBoard]\n currentMove = subprocess.run(competitors[j] + [\"--play-black\", currentBoard], stdout=subprocess.PIPE).stdout.decode(\"utf-8\")\n print(currentMove[:-1])\n currentBoard = apply(currentBoard, currentMove)\n 
display(currentBoard, filename)\n if not \"K\" in currentBoard or not \"k\" in currentBoard or knownBoards.count(currentBoard) > 10:\n break\n if not \"K\" in currentBoard:\n print(\"Tournament: \" + i + \" vs \" + j + \", winner: \" + j)\n elif not \"k\" in currentBoard:\n print(\"Tournament: \" + i + \" vs \" + j + \", winner: \" + i)\n else:\n print(\"Tournament: \" + i + \" vs \" + j + \", draw.\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"MaxMatti/Schachbot","sub_path":"mcp.py","file_name":"mcp.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13031121840","text":"import json\nimport xml.etree.ElementTree as ET\nfrom collections import OrderedDict\n\nfrom flask import Blueprint, render_template, make_response, current_app\nfrom appcomposer.translator.utils import get_cached_session, indent\nfrom appcomposer.utils import report_error\n\ngraasp_i18n_blueprint = Blueprint('graasp_i18n', __name__)\n\nSPACE_URL = 'http://graasp.eu/spaces/560410b2f0e1b09f6c8116da'\n\ndef get_languages():\n return ['en']\n\nclass TimeoutError(Exception):\n pass\n\ndef get_contents(lang, trials=2):\n if lang == 'en':\n resource_id = '560410f1f0e1b09f6c8117ec'\n requests = get_cached_session()\n request_url = \"http://graasp.eu/resources/{0}/raw\".format(resource_id)\n try:\n r = requests.get(request_url, timeout=(10,10))\n r.raise_for_status()\n except Exception:\n raise TimeoutError(\"Timeout\")\n\n try:\n return json.JSONDecoder(object_pairs_hook=OrderedDict).decode(r.text)\n except ValueError as ve:\n if len(r.text) == 0:\n if trials == 0:\n raise ValueError(\"{}: {} returned empty result!\".format(ve, request_url))\n return get_contents(lang, trials-1)\n \n if len(r.text) >= 20:\n response = '{!r}...'.format(r.text[:20])\n else:\n response = r.text\n raise ValueError(\"{}: {}: {!r}\".format(ve, request_url, response))\n else:\n return None\n\n@graasp_i18n_blueprint.route('/')\n@graasp_i18n_blueprint.route('/app.xml')\n@report_error(\"Error on graasp i18n at the App Composer\", additional_recipients = [])\ndef index():\n languages = get_languages()\n response = make_response(render_template('graasp_i18n.xml', languages = languages, title = \"Graasp\"))\n response.content_type = 'application/xml'\n return response\n\ndef _parse_contents(contents, dictionary, parent_key = ''):\n for key, value in contents.items():\n if parent_key:\n cur_key = '%s::%s' % (parent_key, key)\n else:\n cur_key = key\n\n if isinstance(value, dict):\n _parse_contents(value, dictionary, cur_key)\n else:\n dictionary[cur_key] = value\n\ndef messages_to_xml(messages):\n xml_bundle = ET.Element(\"messagebundle\")\n xml_bundle.attrib.update({\n 'mails' : 'pablo.orduna@deusto.es,{}'.format(','.join(current_app.config.get('GRAASP_ADMINS', []))),\n 'automatic' : 'false'\n })\n for key in messages.keys():\n value = messages[key]\n xml_msg = ET.SubElement(xml_bundle, 'msg')\n xml_msg.attrib['name'] = key\n xml_msg.text = value\n indent(xml_bundle)\n xml_string = ET.tostring(xml_bundle, encoding = 'UTF-8')\n return xml_string\n\n\n@graasp_i18n_blueprint.route('/locales/graasp__ALL.xml')\n@report_error(\"Error on graasp i18n\", additional_recipients = [])\ndef locale(language):\n try:\n contents = get_contents(language)\n except TimeoutError:\n return \"Error retrieving external resource\", 502\n\n if contents is None:\n return \"Language not found\", 404\n i18n_contents = OrderedDict()\n _parse_contents(contents, 
i18n_contents)\n xml_response = messages_to_xml(i18n_contents)\n response = make_response(xml_response)\n response.content_type = 'application/xml'\n return response\n\n","repo_name":"porduna/appcomposer","sub_path":"appcomposer/graasp_i18n.py","file_name":"graasp_i18n.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"72538542161","text":"class KeyGen(object):\n def __init__(self, encrypt_key: int = 0x787):\n self.encode_table = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\"\n self.decode_table = {t: i for i, t in enumerate(self.encode_table)}\n self.encrypt_key = encrypt_key\n\n def encode(self, bs: bytes) -> str:\n result = \"\"\n blocks_count, left_bytes = divmod(len(bs), 3)\n\n for i in range(blocks_count):\n coding_int = int.from_bytes(bs[3 * i:3 * i + 3], \"little\")\n block = self.encode_table[coding_int & 0x3f]\n block += self.encode_table[(coding_int >> 6) & 0x3f]\n block += self.encode_table[(coding_int >> 12) & 0x3f]\n block += self.encode_table[(coding_int >> 18) & 0x3f]\n result += block\n\n if left_bytes == 0:\n return result\n elif left_bytes == 1:\n coding_int = int.from_bytes(bs[3 * blocks_count:], \"little\")\n block = self.encode_table[coding_int & 0x3f]\n block += self.encode_table[(coding_int >> 6) & 0x3f]\n result += block\n return result\n else:\n coding_int = int.from_bytes(bs[3 * blocks_count:], \"little\")\n block = self.encode_table[coding_int & 0x3f]\n block += self.encode_table[(coding_int >> 6) & 0x3f]\n block += self.encode_table[(coding_int >> 12) & 0x3f]\n result += block\n return result\n\n def decode(self, s: str) -> bytes:\n result = b''\n blocks_count, left_bytes = divmod(len(s), 4)\n\n for i in range(blocks_count):\n block = self.decode_table[s[4 * i]]\n block += self.decode_table[s[4 * i + 1]] << 6\n block += self.decode_table[s[4 * i + 2]] << 12\n block += self.decode_table[s[4 * i + 3]] << 18\n result += block.to_bytes(3, 'little')\n\n if left_bytes == 0:\n return result\n elif left_bytes == 2:\n block = self.decode_table[s[4 * blocks_count]]\n block += self.decode_table[s[4 * blocks_count + 1]] << 6\n result += block.to_bytes(1, 'little')\n return result\n elif left_bytes == 3:\n block = self.decode_table[s[4 * blocks_count]]\n block += self.decode_table[s[4 * blocks_count + 1]] << 6\n block += self.decode_table[s[4 * blocks_count + 2]] << 12\n result += block.to_bytes(2, 'little')\n return result\n else:\n raise ValueError('Invalid encoding.')\n\n def encrypt(self, bs: bytes) -> bytes:\n result = bytearray()\n key = self.encrypt_key\n for i in range(len(bs)):\n result.append(bs[i] ^ ((key >> 8) & 0xff))\n key = result[-1] & key | 0x482D\n return bytes(result)\n\n def decrypt(self, bs: bytes) -> bytes:\n result = bytearray()\n key = self.encrypt_key\n for i in range(len(bs)):\n result.append(bs[i] ^ ((key >> 8) & 0xff))\n key = bs[i] & key | 0x482D\n return bytes(result)\n\n def gen_license(self, username: str, major_version: int, minor_version: int) -> str:\n license_type = 1 # Professional: 1, Educational = 3, Personal = 4\n count = 1\n license_string = f\"{license_type}#{username}|{major_version}{minor_version}#{count}#{major_version}3\" \\\n f\"{minor_version}6{minor_version}#0#0#0#\"\n encoded_license_string = self.encode(self.decrypt(license_string.encode()))\n print(f\"username: {username}\\nversion: {major_version}.{minor_version}\\n\"\n f\"encoded_license_string: {encoded_license_string}\")\n return 
encoded_license_string\n\n\ndef main():\n key_gen = KeyGen()\n license_string = key_gen.gen_license(username=\"test\", major_version=21, minor_version=0)\n import zipfile\n with zipfile.ZipFile(\"Custom.mxtpro\", \"w\") as f:\n f.writestr(\"Pro.key\", data=license_string)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KEYGEN-FIRST/keygen","sub_path":"software/mobaxterm/keygen.py","file_name":"keygen.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17692760193","text":"from DriveMotor import DriveMotor\nimport InternalBehaviour\nimport time\nimport random\n\n\"\"\"\n\nFile/class that's supposed to be used to the apparent behaviour of the robot.\nThis includes things such as doing a victory dance, the specifics of which are dependant on the parameters\nfound in InternalBehaviour.\n\n\"\"\"\n\nclass ExternalBehaviour:\n\n\n def __init__(self, motorLeft, motorRight):\n self._motorLeft = motorLeft\n self._motorRight = motorRight\n self._degreesTurned = 0 # The amount of degrees the robot is turned, from 0 to 360.\n # It should be ensured this is relative to the gyroscope input\n # e.g. 'north' should be equal to 0 degrees.\n #_motorA.setDirection(1)\n InternalBehaviour._funFrust = 5 #test\n\n \"\"\"\n Function that takes the supposed directions and speeds for both motors to change.\n -1: backward motion, 0 no motion, 1 forward motion\n \"\"\"\n def MotorBehaviour(self, directionLeft, directionRight, speedLeft, speedRight):\n DriveMotor.setDirection(self._motorLeft, directionLeft)\n DriveMotor.setDirection(self._motorRight, directionRight)\n DriveMotor.setSpeed(self._motorLeft, speedLeft)\n DriveMotor.setSpeed(self._motorRight, speedRight)\n\n \"\"\"\n Performs either move forward for a random amount of seconds between 1 and 3, backward, or turns to a\n random alignment, all with a chance of 1 in 3.\n \"\"\"\n def RandomMove(self):\n randNum = random.randint(0, 2)\n curTime = time.time()\n duration = random.randrange(1, 3, 0.25)\n stopTime = curTime + duration\n if randNum == 0:\n self.TimeDriveForward(stopTime)\n elif randNum == 1:\n self.TimeDriveBackward(stopTime)\n elif randNum == 2:\n degreesToTurnTo = random.randrange(0, 359, 1)\n self.TurnDegrees(degreesToTurnTo)\n\n \"\"\"\n Drives both motors forward for a given time\n This is done by checking the time at which this function is supposed to stop\n (given by stoptime, which is supposed to be the time at which the function was initiated, to which the amount of\n seconds is added for how long the robot should drive forward) against the current time.\n\n Returns false if this time has not yet passed, and true otherwise.\n \"\"\"\n def TimeDriveForward(self, stoptime):\n #stoptime = timeStarted + datetime.timedelta(seconds=duration)\n #if stoptime < datetime.datetime.now():\n if stoptime < time.time():\n self.MotorBehaviour(0, 0, 0, 0)\n #elif stoptime > datetime.datetime.now():\n elif stoptime > time.time():\n self.MotorBehaviour(1, 1, 1000, 1000)\n #while stoptime > datetime.datetime.now():\n while stoptime > time.time():\n #if stoptime < datetime.datetime.now():\n if stoptime < time.time():\n self.MotorBehaviour(0, 0, 0, 0)\n\n \"\"\"\n Drives both motors backward for a given time\n Same as above, but in opposite direction\n \"\"\"\n def TimeDriveBackward(self, stoptime):\n # stoptime = timeStarted + datetime.timedelta(seconds=duration)\n #if stoptime < datetime.datetime.now():\n if stoptime < time.time():\n 
self.MotorBehaviour(0, 0, 0, 0)\n #elif stoptime > datetime.datetime.now():\n elif stoptime > time.time():\n self.MotorBehaviour(-1, -1, 1000, 1000)\n # while stoptime > datetime.datetime.now():\n while stoptime > time.time():\n # if stoptime < datetime.datetime.now():\n if stoptime < time.time():\n self.MotorBehaviour(0, 0, 0, 0)\n\n \"\"\"\n Function that aligns the robot to a given amount of angles.\n This is accomplished by rotating it in the corresponding direction until the robot's alignment is within\n a margin of 6 degrees: 3 below and 3 above the supposed alignment.\n \"\"\"\n def TurnDegrees(self, futureDegreesTurned):\n if futureDegreesTurned-3 <= self._degreesTurned <= futureDegreesTurned+3:\n self.MotorBehaviour(0, 0, 0, 0)\n elif futureDegreesTurned < self._degreesTurned:\n self.MotorBehaviour(-1, 1, 100, 100)\n while futureDegreesTurned < self._degreesTurned:\n if futureDegreesTurned-3 <= self._degreesTurned <= futureDegreesTurned+3:\n self.MotorBehaviour(0, 0, 0, 0)\n elif futureDegreesTurned > self._degreesTurned:\n self.MotorBehaviour(1, -1, 100, 100)\n while futureDegreesTurned > self._degreesTurned:\n if futureDegreesTurned-3 <= self._degreesTurned <= futureDegreesTurned+3:\n self.MotorBehaviour(0, 0, 0, 0)\n","repo_name":"SaschaDeWaal/machina","sub_path":"robot/venv/Scripts/Objects/ExternalBehaviour.py","file_name":"ExternalBehaviour.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"40960377711","text":"import math\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport pytinydiffsim \n\n\nclass CartpolePyTinyDiffSim(gym.Env):\n metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50}\n\n def __init__(self):\n self.tds_sim = pytinydiffsim.CartpoleSimulation()\n self.tds_env = pytinydiffsim.CartpoleEnv(self.tds_sim)\n\n self._render_width = 320\n self._render_height = 200\n \n self.theta_threshold_radians = 12 * 2 * math.pi / 360\n self.x_threshold = 0.4 #2.4\n high = np.array([\n self.x_threshold * 2,\n np.finfo(np.float32).max, \n self.theta_threshold_radians * 2,\n np.finfo(np.float32).max\n ]\n ,dtype=np.float32\n )\n\n self.force_mag = 10\n\n action_dim = 1\n action_high = np.array([self.force_mag] * action_dim, dtype=np.float32)\n self.action_space = spaces.Box(-action_high, action_high, dtype=np.float32)\n \n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n\n self.seed()\n # self.reset()\n self.viewer = None\n self._configure()\n\n def _configure(self, display=None):\n self.display = display\n\n \n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n #print(\"seed=\",seed)\n s = int(seed) & 0xffffffff\n #print(\"s=\",s)\n self.tds_env.seed(s)\n return [seed]\n\n def step(self, action):\n \n force = action[0]\n \n result = self.tds_env.step(force)\n \n self.state = result.obs\n theta, theta_dot, x, x_dot = self.state\n\n done = x < -self.x_threshold \\\n or x > self.x_threshold \\\n or theta < -self.theta_threshold_radians \\\n or theta > self.theta_threshold_radians\n done = bool(done)\n #print(\"done=\",done)\n reward = 1.0\n #print(\"state=\",self.state)\n return np.array(self.state), reward, done, {}\n\n def reset(self):\n self.state = self.tds_env.reset()\n #print(\"self.state=\", self.state)\n return np.array(self.state)\n\n def render(self, mode='human', close=False):\n if mode == \"human\":\n self._renders = True\n if mode != \"rgb_array\":\n 
return np.array([])\n px = np.array([[[255,255,255,255]]*self._render_width]*self._render_height, dtype=np.uint8)\n rgb_array = np.array(px, dtype=np.uint8)\n rgb_array = np.reshape(np.array(px), (self._render_height, self._render_width, -1))\n rgb_array = rgb_array[:, :, :3]\n return rgb_array\n\n def configure(self, args):\n pass\n \n def close(self):\n del self.tds_env\n del self.tds_sim\n","repo_name":"StomatoGod/tiny-differentiable-simulator","sub_path":"python/tds_environments/cartpole_tds_env.py","file_name":"cartpole_tds_env.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"18477016159","text":"''' Representation of an element\n@param element_type: type of the element\n@param length: length of the element\n'''\nfrom rml.exceptions import PvException\n\n\nclass Element(object):\n\n def __init__(self, identity, _type, physics):\n '''\n Possible arguments for kwargs:\n\n :str elem_identity: identifier used to match an element to a pv\n :set elem_family: a set used to store families\n :param cs: type of control system to be used\n '''\n self._identity = identity\n self._type = _type\n self._physics = physics\n self.families = set()\n self._uc = dict()\n self.devices = dict()\n\n def get_length(self):\n return self._physics.length\n\n def __repr__(self):\n return str(self.families)\n\n def add_device(self, field, device, uc):\n self.devices[field] = device\n self._uc[field] = uc\n\n def add_to_family(self, family):\n self.families.add(family)\n\n def get_pv_value(self, field, handle, unit='machine', sim=False):\n if not sim:\n if field in self.devices:\n value = self.devices[field].get_value(handle)\n if unit == 'physics':\n value = self._uc[field].machine_to_physics(value)\n return value\n else:\n raise PvException(\"No device associated with field {0}\")\n else:\n value = self._physics.get_value(field, handle, unit)\n if unit == 'machine':\n value = self._uc[field].machine_to_physics(value)\n return value\n\n def put_pv_value(self, field, value, unit='machine', sim=False):\n if not sim:\n if field in self.devices:\n if unit == 'physics':\n value = self._uc[field].physics_to_machine(value)\n self.devices[field].put_value(value)\n else:\n raise PvException('''There is no device associated with\n field {0}'''.format(field))\n else:\n if unit == 'machine':\n value = self._uc[field].machine_to_physics(value)\n self._physics.put_value(field, value)\n\n def get_pv_name(self, field, handle='*', sim=False):\n if not sim:\n if field in self.devices:\n return self.devices[field].get_pv_name(handle)\n else:\n return self._physics.get_pv_name(field, handle)\n raise PvException(\"There is no device associated with field {0}\"\n .format(field))\n","repo_name":"razvanvasile/RML","sub_path":"rml/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5442613089","text":"from typing import List, Optional\n\nfrom bqq import const, output\nfrom bqq.bq_client import BqClient\nfrom bqq.config import Config\nfrom bqq.data.infos import Infos\nfrom bqq.service.result_service import ResultService\nfrom bqq.types import JobInfo, SearchLine\nfrom bqq.util import bash_util\nfrom google.api_core.exceptions import BadRequest\nfrom google.cloud.bigquery.job.query import QueryJob, QueryJobConfig\nfrom rich.console import Console\nfrom rich.prompt import Confirm\nfrom rich.text import 
Text\n\n\nclass InfoService:\n def __init__(\n self,\n console: Console,\n config: Config,\n bq_client: BqClient,\n result_service: ResultService,\n infos: Infos,\n ) -> None:\n self.console = console\n self.config = config\n self.infos = infos\n self.bq_client = bq_client\n self.result_service = result_service\n\n def search(self) -> List[JobInfo]:\n rows = self.infos.search(self.config.project)\n choices = []\n for row in rows:\n search_line = SearchLine.from_job_info(row)\n choices.append(search_line.to_line(self.console))\n lines = bash_util.fzf(choices, multi=True, key=SearchLine.sort_key)\n infos = []\n for line in lines:\n search_line = SearchLine.from_line(line)\n if search_line:\n job_info = next((row for row in rows if row.job_id == search_line.job_id), None)\n infos.append(job_info)\n return infos\n\n def search_one(self) -> JobInfo:\n rows = self.infos.search(self.config.project)\n choices = []\n for row in rows:\n search_line = SearchLine.from_job_info(row)\n choices.append(search_line.to_line(self.console))\n lines = bash_util.fzf(choices, key=SearchLine.sort_key)\n search_line = next((SearchLine.from_line(line) for line in lines), None)\n job_info = None\n if search_line:\n job_info = next((row for row in rows if row.job_id == search_line.job_id), None)\n return job_info\n\n def sync_infos(self):\n jobs = self.bq_client.list_query_jobs()\n for job in jobs:\n job_info = JobInfo.from_query_job(job)\n self.infos.upsert(job_info)\n text = Text(\"Jobs synchronized\", style=const.info_style).append(f\" = {len(jobs)}\", style=\"default\")\n self.console.print(text)\n\n def dry_run(self, query: str) -> Optional[QueryJob]:\n job_config = QueryJobConfig()\n job_config.dry_run = True\n job = None\n try:\n job = self.bq_client.client.query(query, job_config=job_config)\n except BadRequest as e:\n self.console.print(e.message, style=const.error_style)\n return job\n\n def get_info(self, skip: bool, query: str) -> JobInfo:\n job_info = None\n confirmed = skip\n if not skip:\n job = self.dry_run(query)\n if job:\n headers = output.get_dry_info_header(job)\n self.console.print(headers)\n confirmed = Confirm.ask(\n Text(\"\", style=const.darker_style).append(\"Do you want to continue?\", style=const.request_style),\n default=True,\n console=self.console,\n )\n if confirmed:\n query_job = self.bq_client.client.query(query)\n self.result_service.write_result(query_job) # extract result before job info\n job_info = JobInfo.from_query_job(query_job)\n self.infos.insert(job_info)\n return job_info\n\n def delete_infos(self, jobs: List[JobInfo]):\n for job_info in jobs:\n self.bq_client.client.delete_job_metadata(\n job_id=job_info.job_id, project=job_info.project, location=job_info.location\n )\n self.infos.remove(job_info)\n self.console.print(\n Text(\"Job deleted\", style=const.info_style).append(f\": {job_info.job_id}\", style=const.darker_style)\n )\n","repo_name":"martintupy/bqq","sub_path":"bqq/service/info_service.py","file_name":"info_service.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19816985939","text":"# !/usr/bin/env python\n# coding=utf-8\nimport csv # 导入csv模块\nhead = ['code', 'price', 'Date']\nstock1 = ['600001', 26, '20181212']\nstock2 = ['600002', 32, '20181212']\nstock3 = ['600003', 32, '20181212']\n# 以'a'追加写模式打开文件\nfile = open('D:\\\\EvanPro\\\\PyStu\\\\S04\\\\1\\\\stock.csv', 'a', newline='')\n# 设置写入的对象\nwrite = csv.writer(file)\n# 
写入具体的内容\nwrite.writerow(head)\nwrite.writerow(stock1)\nwrite.writerow(stock2)\nwrite.writerow(stock3)\nprint(\"Finishe Writing CSV File.\")\n","repo_name":"cnnbevan/PyStu","sub_path":"S04/WriteCsv.py","file_name":"WriteCsv.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37256887929","text":"from datetime import datetime\nfrom random import Random\n\nclass Message:\n \n def __init__(self, text, author_id, author_name, created_at, updated_at, sections=None):\n self.text = text\n self.author_id = author_id\n self.author_name = author_name\n self.edited = updated_at - created_at >= 10**6 # If difference more than 1 second\n self.timestamp = datetime.fromtimestamp(max(updated_at, created_at)/(10**6))\n self.sections = sections or []\n\n\n def __str__(self):\n return \"[%s] %s: %s%s\" % (self.timestamp.strftime('%d.%m %H:%M'), self.author_name,\n self.text, \" (edited)\" if self.edited else \"\")\n\n\nclass TreeNode:\n \n def __init__(self, name, thread_type, thread_id, children=None):\n self.name = name\n self.thread_type = thread_type\n self.thread_id = thread_id\n self.children = children\n\n\nclass User:\n\n def __init__(self, user_id, name, thread=None):\n self.avatar = Random(user_id).choice([' 😎 ', ' 🥺 ', ' 😃 ', ' 🐻 ', ' 🙊 '])\n self.id = user_id\n self.name = name\n self.thread = thread\n\n def __str__(self):\n return \"
{0} {2}
\".format(\n self.avatar,\n self.thread,\n self.name\n )\n","repo_name":"SPBSTU-OrangeTeam/QuipEditor","sub_path":"src/entities/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32349001922","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, prev, next, child):\n self.val = val\n self.prev = prev\n self.next = next\n self.child = child\n\"\"\"\n\nclass Solution:\n from collections import deque\n def flatten(self, head: 'Optional[Node]') -> 'Optional[Node]':\n curr=head\n if curr==None: #base condition since using recursion\n return head\n while curr!=None: #traverse till end\n if curr.child!=None: #if child does not exist\n nhead=self.flatten(curr.child) #magically the below list is flattened\n t1=nhead #keep its head safe\n while nhead.next!=None: #find the last node of the flattened list\n nhead=nhead.next\n nhead.next=curr.next # do the linking of last node of flattened to orignal\n if curr.next: \n curr.next.prev=nhead #linking of last node of flattend to orignal\n curr.next=t1 #linking of first node of flattend to orignal\n t1.prev=curr #linking of first node of flattend to orignal\n curr.child=None #no need of child\n curr=curr.next #aage badho\n return head #return orignal head\n \n \n \n \n ","repo_name":"mayank9200/My_codes","sub_path":"0430-flatten-a-multilevel-doubly-linked-list/0430-flatten-a-multilevel-doubly-linked-list.py","file_name":"0430-flatten-a-multilevel-doubly-linked-list.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"13590477915","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\ndf = pd.read_excel(\"web.xlsx\")\n\ndf['address'] = ''\n\n\nfor index, row in df.iterrows():\n website = row[\" WEBSITE\"]\n print(\"Website:\", website) \n try:\n response = requests.get(website, timeout=5, verify=False)\n\n soup = BeautifulSoup(response.content, 'html.parser')\n \n address_tag = soup.find('address')\n \n if address_tag:\n address = address_tag.get_text().strip()\n \n df.loc[index, 'Address'] = address\n else:\n print(f\"No address found for {row['COLLEGE NAME']}\")\n except Exception as e:\n print(f\"Error getting address for {row['COLLEGE NAME']}: {e}\")\n\ndf.to_excel('colleges_with_address.xlsx', index=False)\n\n\n\n","repo_name":"Dharmareddy8520/web-scraping","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21981053197","text":"from random import choice\nfrom primes import primes_list\nfrom text_to_numbers import text_to_int\nfrom text_to_numbers import int_to_text\n\n\ndef mcd_extended_euclidian(a, b):\n x_2 = 1\n x_1 = 0\n y_2 = 0\n y_1 = 1\n r_2 = a\n r_1 = b\n r = r_2 % r_1\n q = r_2 // r_1\n\n while r != 0:\n x = x_2 - (q * x_1)\n y = y_2 - (q * y_1)\n x_2 = x_1\n x_1 = x\n y_2 = y_1\n y_1 = y\n r_2 = r_1\n r_1 = r\n r = r_2 % r_1\n q = r_2 // r_1\n\n return x_1, y_1, r_1\n\n\ndef generate_e(phi):\n primes = primes_list(phi)\n\n for i in range(len(primes)-1, -1, -1):\n if phi % primes[i] != 0: # necessary to avoid using a factor of phi\n yield primes[i]\n\n return None\n\n\ndef RSA(x, n, e):\n return x**e % n\n\n\n\"\"\"\np e q são primos secretos\nn é o resultado da multiplicação entre p e q\nn pode ser público, pois 
fatorar é difícil para p e q grandes \n e com grande diferença entre si\nphi é secreto, pois precisa de p e q para calcular, e é usado para calcular d\ne é público e é usado para codificar a mensagem juntamente com n\nd é privado e é usado para decodificar a mensagem juntamente com n;\n\"\"\"\n\nlist_of_primes = primes_list(100)\n\np = choice(list_of_primes[10:])\nq = choice(list_of_primes[10:])\n\nwhile p == q:\n q = choice(list_of_primes[10:])\n\nn = p * q\n\nphi = (p - 1) * (q - 1)\n\nfor e in generate_e(phi):\n d = mcd_extended_euclidian(e, phi)[0]\n\n if d < 0:\n d += phi\n\n if e != d and phi % d != 0 and d > 2 and e > 2:\n break\n\n# msg = [25, 102, 7, 102, 93, 49, 91, 49, 92, 118, 23, 13, 10]\nmsg = input()\nmsg_numbers = text_to_int(msg)\n\nmsg_encoded = [RSA(msg_numbers[i], n, e) for i in range(len(msg_numbers))]\n# print(msg_encoded)\n\nmsg_decoded = [RSA(msg_encoded[i], n, d) for i in range(len(msg_encoded))]\n# print(msg_decoded)\n\nmsg_text = int_to_text(msg_decoded)\nprint(msg_text)\n\n\"\"\"\nTeste com valores do livro:\n\np = 11\nq = 13\n\nn = p * q\n\nphi = (p - 1) * (q - 1)\n\ne = 7\nd = mcd_extended_euclidian(e, phi)[0]\n\nif d < 0:\n d += phi\n\nprint(n)\nprint(phi)\nprint(e)\nprint(d)\n\nmsg = [25, 102, 7, 102, 93, 49, 91, 49, 92, 118, 23, 13, 10]\n\nmsg_encoded = [RSA(msg[i], n, e) for i in range(len(msg))]\nprint(msg_encoded)\n\nmsg_decoded = [RSA(msg_encoded[i], n, d) for i in range(len(msg_encoded))]\nprint(msg_decoded)\n\n\"\"\"\n","repo_name":"gabrielbergoc/RSA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30387583043","text":"import numpy as np\nfrom scipy.interpolate import splprep, splev\nimport helper_functions as hf\n\n\nclass Simulator:\n def __init__(self):\n self.state = None\n self.dt = None\n self.L = None\n self.sim_solution = None\n self.sim_time_vector = None\n self.look_ahead_point = None\n self.x_path = None\n self.y_path = None\n self.kappa_path = None\n self.theta_path = None\n\n def init(self, path_spline, num_points=100, L=3.0, dt=0.1):\n self.L = L\n self.dt = dt\n\n self.x_path, self.y_path = splev(np.linspace(0, 1, num_points), path_spline)\n xp, yp = splev(np.linspace(0, 1, num_points), path_spline, der=1)\n xpp, ypp = splev(np.linspace(0, 1, num_points), path_spline, der=2)\n\n self.kappa_path = []\n for xp_,xpp_,yp_,ypp_ in zip(xp,xpp,yp,ypp):\n self.kappa_path.append((xp_*ypp_ - xpp_*yp_)/(xp_**2+yp_**2)**1.5) # curvature at each point\n\n self.theta_path = []\n for i in range(len(self.x_path)-1):\n dx = self.x_path[i + 1] - self.x_path[i]\n dy = self.y_path[i + 1] - self.y_path[i]\n self.theta_path.append(np.arctan(dy/dx))\n self.theta_path.append(self.theta_path[-1])\n\n\n def reset(self, initial_state):\n self.state = initial_state\n self.sim_solution = [initial_state]\n self.sim_time_vector = [0.0]\n self.look_ahead_point = []\n\n def unroll_state(self):\n return [s for s in self.state]\n\n def move(self, delta, a=0):\n x, y, theta, v = self.unroll_state()\n\n x += v * np.cos(theta) * self.dt\n y += v * np.sin(theta) * self.dt\n theta += v/self.L * np.tan(delta) * self.dt # tan(..) 
is left out in udacity\n v += a * self.dt\n\n self.state = [x, y, theta, v]\n\n self.sim_solution.append(self.state)\n self.sim_time_vector.append(self.sim_time_vector[-1] + self.dt)\n\n def finalize(self):\n self.sim_solution = np.array(self.sim_solution)\n self.sim_time_vector = np.array(self.sim_time_vector)\n self.look_ahead_point = np.array(self.look_ahead_point)\n\n def look_ahead(self, look_ahead_dist):\n #find closest point on target path\n min_dist = 9999999\n u = None\n ind = None\n for i in range(len(self.x_path)-2): #-2 because of curvature\n dist_, u_ = hf.calc_dist(self.x_path[i], self.y_path[i],\n self.x_path[i+1], self.y_path[i+1],\n self.state[0], self.state[1])\n if dist_ < min_dist:\n min_dist = dist_\n ind = i\n u = u_\n\n # walk along the path to look ahead\n dist_path_points = hf.dist_points(self.x_path[ind], self.y_path[ind],\n self.x_path[ind + 1], self.y_path[ind + 1])\n walk_dist = (1.0 - u) * dist_path_points\n\n while walk_dist < look_ahead_dist and ind < len(self.x_path)-3:\n ind += 1\n dist_path_points = hf.dist_points(self.x_path[ind], self.y_path[ind],\n self.x_path[ind + 1], self.y_path[ind + 1])\n walk_dist += dist_path_points\n\n # interpolate\n u = 1.0 - (walk_dist - look_ahead_dist)/dist_path_points\n\n # lookahead point (interpolated)\n x_look = self.x_path[ind] + u * (self.x_path[ind + 1] - self.x_path[ind])\n y_look = self.y_path[ind] + u * (self.y_path[ind + 1] - self.y_path[ind])\n\n # theta (interpolated)\n theta = self.theta_path[ind] + u * (self.theta_path[ind+1] - self.theta_path[ind])\n\n # curvature (interpolated)\n kappa = self.kappa_path[ind] + u * (self.kappa_path[ind+1] - self.kappa_path[ind])\n\n self.look_ahead_point.append([x_look, y_look, theta, kappa])\n\n return (x_look, y_look, theta, kappa)\n\n\n\n\n\n\n\n #return (x_near, y_near, theta, kappa)\n\n\n\n\n\n","repo_name":"Corni33/path_following","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30043112404","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nimport time\n\nt = int(input())\ndef eigen(n):\n lst = [[0 for i in range(n)] for j in range(n)]\n for i in range(n):\n lst[i][(i-1)%n] = t\n lst[(i-1)%n][i] = t\n for i in lst:\n print(*i)\n eig, _ = np.linalg.eig(lst)\n eig_k = []\n for i in range(n):\n k = (2*np.pi*i)/n\n eig_k.append(-2*t*np.cos(k))\n eig.sort()\n eig_k.sort()\n # print(eig)\n # print(eig_k)\n tmp = [i for i in range(n)]\n plt.plot(tmp, eig, label=\"real space\")\n plt.plot(tmp, eig_k, label=\"k space\")\n plt.legend()\n plt.show()\n return\n\ndef eigen_2d(n):\n lst = [[0 for i in range(n**2)] for j in range(n**2)]\n p = defaultdict(int)\n k = 0\n for i in range(n):\n for j in range(n):\n p[(i, j)] = k\n k += 1\n for i in range(n):\n for j in range(n):\n lst[p[(i, j)]][p[((i-1)%n, j)]] = t\n lst[p[(i, j)]][p[(i+1)%n, j]] = t\n lst[p[(i, j)]][p[(i, (j-1)%n)]] = t\n lst[p[(i, j)]][p[(i, (j+1)%n)]] = t\n # for i in lst:\n # print(*i)\n eig, _ = np.linalg.eig(lst)\n eig_k = []\n for i in range(n):\n kx = (2*np.pi*i)/(n)\n for j in range(n):\n ky = (2*np.pi*j)/(n)\n eig_k.append(2*t*(np.cos(kx)+np.cos(ky)))\n eig.sort()\n eig_k.sort()\n eig=[round(np.real(i),2) for i in eig]\n print(eig)\n eig_k=[round(i,2) for i in eig_k]\n print(eig_k)\n tmp = [i for i in range(n)]\n def hermiticity(arg,n):\n for i in range(n):\n for j in range(n):\n if arg[i][j]!=arg[j][i]:\n 
return('no')\n return('yes')\n tmp = [i for i in range(n**2)]\n plt.plot(tmp, eig, label = \"real space\")\n plt.plot(tmp, eig_k, label = \"k space\")\n plt.legend()\n plt.show()\n print(hermiticity(lst,n**2))\n return\neigen(10)","repo_name":"rajatbamhore20/bound-states-in-superconductor","sub_path":"real_space.py","file_name":"real_space.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34900318808","text":"import cv2\r\nimport numpy as np\r\n'''Accepts BGR image as Numpy array\r\n Returns: (x,y) coordinates of centroid if found\r\n (-1,-1) if no centroid was found\r\n None if user hit ESC\r\n'''\r\ndef color_detect(image):\r\n # Blur the image to reduce noise\r\n blur=cv2.medianBlur(image,5)\r\n\r\n # Convert BGR to HSV\r\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\r\n\r\n # Threshold the HSV image for only green colors\r\n lower_green = np.array([40, 70, 70])\r\n upper_green = np.array([80, 200, 200])\r\n\r\n # Threshold the HSV image to get only green colors\r\n mask = cv2.inRange(hsv, lower_green, upper_green)\r\n\r\n # Blur the mask\r\n bmask = cv2.GaussianBlur(mask, (5, 5), 0)\r\n\r\n # Take the moments to get the centroid\r\n moments = cv2.moments(bmask)\r\n\r\n m00 = moments['m00']\r\n centroid_x, centroid_y = None, None\r\n if m00 != 0:\r\n centroid_x = int(moments['m10'] / m00)\r\n centroid_y = int(moments['m01'] / m00)\r\n\r\n # Assume no centroid\r\n ctr = (-1, -1)\r\n\r\n # Use centroid if it exists\r\n if centroid_x != None and centroid_y != None:\r\n ctr = (centroid_x, centroid_y)\r\n else:\r\n ctr = (0,0)\r\n return ctr\r\ndef color_detect2(Img):\r\n HSV = cv2.cvtColor(Img, cv2.COLOR_BGR2HSV)#把BGR图像转换为HSV格式\r\n \"\"\"\r\n HSV模型中颜色的参数分别是:色调(H),饱和度(S),明度(V) \r\n 下面两个值是要识别的颜色范围 \r\n \"\"\"\r\n kernel_2 = np.ones((2,2),np.uint8)#2x2的卷积核\r\n kernel_3 = np.ones((3,3),np.uint8)#3x3的卷积核\r\n kernel_4 = np.ones((4,4),np.uint8)#4x4的卷积核\r\n\r\n Lower = np.array([40, 70, 70])#要识别颜色的下限\r\n Upper = np.array([80, 200, 200])#要识别的颜色的上限\r\n\r\n #mask是把HSV图片中在颜色范围内的区域变成白色,其他区域变成黑色\r\n mask = cv2.inRange(HSV, Lower, Upper)\r\n #下面四行是用卷积进行滤波\r\n erosion = cv2.erode(mask,kernel_4,iterations = 1)\r\n erosion = cv2.erode(erosion,kernel_4,iterations = 1)\r\n dilation = cv2.dilate(erosion,kernel_4,iterations = 1)\r\n dilation = cv2.dilate(dilation,kernel_4,iterations = 1)\r\n #target是把原图中的非目标颜色区域去掉剩下的图像\r\n target = cv2.bitwise_and(Img, Img, mask=dilation)\r\n #将滤波后的图像变成二值图像放在binary中\r\n ret, binary = cv2.threshold(dilation,127,255,cv2.THRESH_BINARY)\r\n\r\n #在binary中发现轮廓,轮廓按照面积从小到大排列\r\n _, contours, hierarchy = cv2.findContours(binary,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n p=0\r\n obj_list=[]\r\n for i in contours:#遍历所有的轮廓\r\n x,y,w,h = cv2.boundingRect(i)#将轮廓分解为识别对象的左上角坐标和宽、高\r\n #在图像上画上矩形(图片、左上角坐标、右下角坐标、颜色、线条宽度)\r\n ctr = (x+w/2, y+h/2)\r\n rad = (w+h)/4\r\n obj = (ctr, rad)\r\n obj_list.append(obj)\r\n p +=1\r\n return [obj_list, len(contours)]","repo_name":"jsBrique/BrickCV","sub_path":"support/color_detect.py","file_name":"color_detect.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34154597170","text":"from django.db import models\n\n# Create your models here.\n\n#category model\nclass Category(models.Model):\n title=models.CharField(max_length=100)\n description=models.TextField()\n\n def __str__(self):\n return self.title\n\n#image model\nclass Image(models.Model):\n 
title=models.CharField(max_length=200)\n location=models.CharField(max_length=200)\n image=models.ImageField(upload_to='images')\n added_date=models.DateTimeField() \n cat=models.ForeignKey(Category,on_delete=models.CASCADE) \n\n def __str__(self):\n return self.title\n\n def save(self,*args,**kwargs): #inbuilt method:overriding for customization of image size - KK\n super().save(*args,**kwargs)\n img=Image.open(self.image.path)\n if img.height >400 or img.width >800:\n output_size=(400,800)\n img.thumbnail(output_size)\n img.save(self.image.path) \n\n","repo_name":"KushalkumarUmesh/Django","sub_path":"imageCollection_project/ks_venv/dj_project/dj_app/imageK/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8572793929","text":"import os\nimport shutil\n\nfrom common import util\nfrom commmon.util import LOG\nfrom modules.openstack import OpenStackService\nfrom modules.conf import CONF\nfrom common.singleton import Singleton\n\n\n@Singleton\nclass Swift(OpenStackService):\n _name = \"swift\"\n _bundle = \"openstack-object-storage\"\n _services = [\"swift-proxy\", \"memcached\"]\n _type = \"object-store\"\n _description = \"OpenStack Object Storage\"\n _public_url = (\"http://{0}:8080/v1/AUTH_%\\(tenant_id\\)s\"\n .format(CONF['CONFIG_CONTROLLER_HOST']))\n _admin_url = \"http://{0}:8080/v1\".format(CONF['CONFIG_CONTROLLER_HOST'])\n\n _devices = []\n\n def ceilometer_enable(self, configfile):\n pass\n\n def config_auth(self, configfile):\n confdir = os.path.dirname(configfile)\n if not os.path.isdir(confdir):\n os.makedirs(confdir)\n shutil.copy('/usr/share/defaults/swift/proxy-server.conf', confdir)\n OpenStackService.config_auth(self, configfile,\n section='filter:authtoken')\n config = (\"[pipeline:main]\\n\"\n \"pipeline = catch_errors gatekeeper healthcheck \"\n \"proxy-logging cache container_sync bulk ratelimit \"\n \"authtoken keystoneauth container-quotas account-quotas \"\n \"slo dlo versioned_writes proxy-logging proxy-server\\n\"\n \"[filter:authtoken]\\n\"\n \"paste.filter_factory = \"\n \"keystonemiddleware.auth_token:filter_factory\\n\"\n \"delay_auth_decision = True\\n\"\n \"[app:proxy-server]\\n\"\n \"use = egg:swift#proxy\\n\"\n \"account_autocreate = True\\n\"\n \"[filter:keystoneauth]\\n\"\n \"use = egg:swift#keystoneauth\\n\"\n \"operator_roles = admin,user\\n\")\n util.write_config(configfile, config)\n\n def config_memcache(self, configfile):\n config = (\"[filter:cache]\\n\"\n \"use = egg:swift#memcache\\n\"\n \"memcache_servers = 127.0.0.1:11211\")\n util.write_config(configfile, config)\n\n def config_hash(self, configfile):\n suffix = CONF['CONFIG_SWIFT_HASH']\n config = (\"[swift-hash]\\n\"\n \"swift_hash_path_suffix = {0}\").format(suffix)\n util.write_config(configfile, config)\n\n def parse_devices(self):\n\n def create_loopback(name, size):\n # Remove the file if exists\n if os.path.isfile(name):\n os.remove(name)\n # Create an empty file\n with open(name, 'wb') as f:\n f.seek(1024 * 1024 * 1024 * size - 1)\n f.write(b\"\\0\")\n LOG.debug(\"formatting '{0}' as XFS\".format(name))\n util.run_command(\"mkfs.xfs %s\" % name)\n\n devs = CONF['CONFIG_SWIFT_STORAGES']\n if devs:\n devs = devs.split(',')\n devs = [x.strip() for x in devs]\n device_number = 0\n num_zones = int(CONF[\"CONFIG_SWIFT_STORAGE_ZONES\"])\n for dev in devs:\n device_number += 1\n zone = (device_number % num_zones) + 1\n self._devices.append({'device': dev, 
'zone': zone,\n 'name': 'device%s' % device_number})\n else:\n # Setup loopdevice\n filename = '/srv/loopback-device'\n filesize = int(CONF['CONFIG_SWIFT_STORAGE_SIZE'])\n create_loopback(filename, filesize)\n self._devices.append({'device': filename, 'zone': 1,\n 'name': 'loopback'})\n\n def prepare_devices(self):\n # Avoid adding duplicate entries in fstab\n mounted_devices = []\n if os.path.isfile('/etc/fstab'):\n with open('/etc/fstab', 'r') as f:\n for l in f:\n mounted_devices.append(l.split()[0].strip())\n\n # Add each device to fstab if not already added\n fstab = \"\"\n for device in self._devices:\n # Format the device with xfs filesystem\n util.ensure_directory('/srv/node/%s' % device['name'])\n\n # Ensure the device is mounted\n if device['device'] not in mounted_devices:\n fstab += (\"{0} /srv/node/{1} xfs noatime,nodiratime,nobarrier,\"\n \"logbufs=8 0 2\\n\").format(device['device'],\n device['name'])\n with open('/etc/fstab', 'a') as f:\n f.write(fstab)\n\n util.run_command('mount -a')\n # Change ownership of /srv to swift\n util.run_command(\"chown -R swift:swift /srv\")\n\n def create_rings(self):\n replicas = CONF['CONFIG_SWIFT_STORAGE_REPLICAS']\n ip = util.get_ip()\n for ringtype, port in [('object', '6000'),\n ('container', '6001'),\n ('account', 6002)]:\n LOG.debug(\"creating '{0}' ring with {1} \"\n \"replicas\".format(ringtype, replicas))\n cmd = (\"swift-ring-builder {0}.builder create 10 {1} 1\"\n .format(ringtype, replicas))\n util.run_command(cmd)\n for device in self._devices:\n LOG.debug(\"adding '{0}' storage node on ring \"\n \"'{1}'\".format(device['name'], ringtype))\n cmd = (\"swift-ring-builder %s.builder add --region 1 \"\n \"--zone %s --ip %s --port %s --device %s --weight 100\"\n % (ringtype, device['zone'], ip, port, device['name']))\n util.run_command(cmd)\n LOG.debug(\"rebalancing ring '{0}'\".format(ringtype))\n cmd = \"swift-ring-builder {0}.builder rebalance\".format(ringtype)\n util.run_command(cmd)\n if os.path.isfile(\"/etc/swift/%s.ring.gz\" % ringtype):\n os.remove(\"/etc/swift/%s.ring.gz\" % ringtype)\n shutil.move(\"%s.ring.gz\" % ringtype, \"/etc/swift\")\n\n def config_storage_services(self):\n confdir = '/etc/swift/'\n shutil.copy('/usr/share/defaults/swift/account-server.conf', confdir)\n shutil.copy('/usr/share/defaults/swift/container-server.conf', confdir)\n shutil.copy('/usr/share/defaults/swift/object-server.conf', confdir)\n shutil.copy('/usr/share/defaults/swift/container-reconciler.conf',\n confdir)\n shutil.copy('/usr/share/defaults/swift/object-expirer.conf', confdir)\n for type in ['account', 'container', 'object']:\n conf = (\"[DEFAULT]\\n\"\n \"bind_ip = 0.0.0.0\\n\"\n \"devices = /srv/node\\n\"\n \"[pipeline:main]\\n\"\n \"pipeline = healthcheck recon {0}-server\\n\"\n \"[filter:recon]\\n\"\n \"recon_cache_path = /var/cache/swift\\n\").format(type)\n util.write_config('/etc/swift/%s-server.conf' % type, conf)\n\n def config_rsync(self):\n config = \"\"\"\nuid = swift\ngid = swift\nlog file = /var/log/rsyncd.log\npid file = /var/run/rsyncd.pid\naddress = {0}\n\n[account]\nmax connections = 2\npath = /srv/node/\nread only = false\nlock file = /var/lock/account.lock\n\n[container]\nmax connections = 2\npath = /srv/node/\nread only = false\nlock file = /var/lock/container.lock\n\n[object]\nmax connections = 2\npath = /srv/node/\nread only = false\nlock file = /var/lock/object.lock\n\"\"\".format(util.get_ip())\n with open('/etc/rsyncd.conf', 'w') as f:\n 
f.write(config)\n","repo_name":"clearlinux/clearstack","sub_path":"clearstack/modules/swift.py","file_name":"swift.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"6885599028","text":"from django.test import SimpleTestCase\nfrom customers.forms import *\n\n\nclass TestForms(SimpleTestCase):\n def test_feedback_form(self):\n form=FeedbackForm(data={\n 'user':'John',\n 'service ':'Cleaning',\n 'rating':'3',\n 'subject':'Good',\n 'service_feedback':'Good Service',\n })\n self.assertTrue(form.is_valid())\n\n def test_feedback_form(self):\n form=FeedbackForm(data={})\n \n self.assertFalse(form.is_valid())\n self.assertEquals(len(form.errors),3) \n\n\n \n\n\n","repo_name":"Pooja9009/gharelu","sub_path":"customers/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7436712777","text":"from itertools import compress\n\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\nfrom Prototype.kruskal_clustering import FullGraph\n\n\n\ndef main():\n\n p1 = np.random.normal(0, 100, (30, 3))\n p2 = np.random.normal(300, 100, (30, 3))\n p3 = np.random.normal(600, 100, (30, 3))\n\n points = np.append(p1, p2, axis=0)\n points = np.append(points, p3, axis=0)\n\n g = FullGraph(points)\n edges = list(compress(g.E, g.EChoose))\n\n def draw_line(n, points, lines):\n line = lines[n]\n i, j, _ = edges[n]\n coords = points[[i, j]].transpose()\n line.set_data_3d(coords)\n return lines\n\n fig = plt.figure(figsize=(8.5, 18), dpi=100, constrained_layout=True)\n plt.suptitle(\"MST Clustering\")\n spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[5, 1])\n\n ax = fig.add_subplot(spec[0, 0], projection=\"3d\")\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\")\n ax.set_zlabel(\"Z\")\n ax.scatter(*zip(*p1), color=\"tab:red\")\n ax.scatter(*zip(*p2), color=\"tab:blue\")\n ax.scatter(*zip(*p3), color=\"tab:green\")\n\n lines = [ax.plot([], [], [])[0] for _ in range(len(edges))]\n\n anim = animation.FuncAnimation(\n fig, draw_line, len(edges), fargs=(points, lines), interval=50, blit=True\n )\n\n ax = fig.add_subplot(spec[1, 0])\n ax.set_xlabel(\"Edge\")\n ax.set_ylabel(\"Max Partition Size\")\n ax.plot(range(1, len(edges) + 1), g.MaxPartitionEvolve)\n ax.set_xlim(1, len(edges))\n\n bump_idx = np.argmax(np.diff(g.MaxPartitionEvolve)) + 2\n ax.axvline(bump_idx, color=\"tab:orange\")\n ax.set_xticks(list(ax.get_xticks()) + [bump_idx])\n\n\n # Show the plot, save the animation, or save the figure\n # anim.save(\"kruskal-clustering.mp4\")\n plt.show()\n\n #plt.savefig(\"kruskal-clustering.png\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"iluvjava/Silly_Python_Stuff","sub_path":"Prototype/plot-kruskal.py","file_name":"plot-kruskal.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29264023757","text":"import logging\nimport threading\nimport time\n\nfrom .local_items import EnableConditions, ItemLocalization, ItemOptions\n\nlogger = logging.getLogger(__name__)\n\nAPPLICATION_ALIASES_KEYS = (u'ios_bundle', u'android_package_name', u'wp_app_id')\nRELOAD_TIMEOUT_SECONDS = 600 # 10 minutes\nMIN_UPDATE_TIMEOUT = 0.1\nCOLLECTION_NAME_FORMAT = 
u'localization.{cache_name}'\nAPPLICATION_CACHE_NAME = u'_Yandex_apps'\nON_ERROR_UPDATE_THREAD_RESTART_SECONDS = 0.5\n\n\nclass LocalizationError(RuntimeError):\n pass\n\n\nclass ItemNotFound(LocalizationError):\n pass\n\n\nclass BaseCache(object):\n def __init__(self, name, reload_timeout_seconds=RELOAD_TIMEOUT_SECONDS, items=None):\n self.reload_timeout_seconds = reload_timeout_seconds\n self.expire_time = None\n self.items = items\n self.collection_name = COLLECTION_NAME_FORMAT.format(cache_name=name)\n\n def is_expired(self):\n return self.expire_time is None or self.expire_time < time.time()\n\n def update_cache(self, *args, **kwargs):\n self._update_cache(*args, **kwargs)\n self.expire_time = time.time() + self.reload_timeout_seconds\n\n def _update_cache(self, *args, **kwargs):\n raise NotImplementedError\n\n\nclass ItemsCache(BaseCache):\n def is_ready(self):\n return self.items is not None\n\n def retrieve_items_if_ready(self):\n \"\"\"\n Returns self.items. Other methods should use it to avoid accessing two different versions of self.items\n \"\"\"\n if not self.is_ready():\n raise LocalizationError(u'Localizations not ready')\n return self.items\n\n def is_item_enabled(self, item_name, user_info, items=None):\n items = items or self.retrieve_items_if_ready()\n if item_name not in items:\n return False\n options, localizations = items[item_name]\n for localization in localizations:\n if user_info.matches_conditions(localization.conditions, options):\n return True\n return False\n\n def get_item_value(self, item_name, user_info, items=None):\n items = items or self.retrieve_items_if_ready()\n if item_name not in items:\n raise LocalizationError(u'No such item!')\n options, localizations = items[item_name]\n for localization in localizations:\n if user_info.matches_conditions(localization.conditions, options):\n return localization.value\n raise LocalizationError(u'Item is not enabled')\n\n def maybe_get_item_value(self, item_name, user_info, items=None):\n items = items or self.retrieve_items_if_ready()\n if item_name in items:\n options, localizations = items[item_name]\n for localization in localizations:\n if user_info.matches_conditions(localization.conditions, options):\n return localization.value\n\n def get_all_items(self):\n items = self.retrieve_items_if_ready()\n return list(items)\n\n def get_all_enabled_items(self, user_info):\n result = []\n items = self.retrieve_items_if_ready()\n for name in items:\n if self.is_item_enabled(name, user_info, items=items):\n result.append(name)\n return result\n\n def get_all_enabled_items_with_values(self, user_info):\n result = []\n items = self.retrieve_items_if_ready()\n for name in items:\n value = self.maybe_get_item_value(name, user_info, items=items)\n if value is not None:\n result.append((name, value))\n return result\n\n def _update_cache(self, collection):\n items = {}\n for entry in collection.find():\n localizations = []\n for loc in entry.get(u'values', []):\n conditions = EnableConditions(**loc.get(u'conditions'))\n localizations.append(ItemLocalization(value=loc.get(u'value'), conditions=conditions))\n options = entry.get(u'options', {})\n items[entry[u'_id']] = ItemOptions(audience_salt=options.get(u'audience_salt')), localizations\n self.items = items\n\n\nclass ApplicationsCache(BaseCache):\n def _update_cache(self, collection):\n if self.items is None:\n self.items = {}\n for entry in collection.find():\n entry_id = entry['_id']\n entry_val = set()\n for key in APPLICATION_ALIASES_KEYS:\n if key in entry:\n 
entry_val.add(entry[key].lower())\n self.items[entry_id] = entry_val\n\n\n_update_thread = None\n_applications_cache = ApplicationsCache(name=APPLICATION_CACHE_NAME)\n_cache_instances = dict()\n\n\ndef load_cache(cache, mongo_db):\n collection = mongo_db.get_collection(cache.collection_name)\n cache.update_cache(collection=collection)\n return cache.expire_time\n\n\ndef init_caches(projects, mongo_db):\n load_cache(_applications_cache, mongo_db)\n for project in projects:\n cache = ItemsCache(name=project)\n load_cache(cache, mongo_db)\n _cache_instances[project] = cache\n\n\ndef _cache_updating_worker(mongo_db):\n while True:\n try:\n if _applications_cache.is_expired():\n load_cache(_applications_cache, mongo_db)\n sleep_until = _applications_cache.expire_time\n for cache in _cache_instances.values():\n if cache.is_expired():\n load_cache(cache, mongo_db)\n sleep_until = min(sleep_until, cache.expire_time)\n time.sleep(max(sleep_until - time.time(), MIN_UPDATE_TIMEOUT))\n except Exception as e:\n logger.error(u'Error %s in cache updating worker. Waiting for %s seconds. Error: %s', e.__class__.__name__, ON_ERROR_UPDATE_THREAD_RESTART_SECONDS, e)\n time.sleep(ON_ERROR_UPDATE_THREAD_RESTART_SECONDS)\n\n\ndef start_update_thread(mongo_db):\n global _update_thread\n if _update_thread is None or not _update_thread.is_alive():\n _update_thread = threading.Thread(target=_cache_updating_worker, args=(mongo_db,))\n _update_thread.daemon = True\n _update_thread.start()\n\n\ndef get_cache(project):\n try:\n return _cache_instances[project]\n except KeyError:\n logger.error(u'Unknown project %s', project)\n raise LocalizationError(u'Trying to get unknown project {}'.format(project))\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"mobile/localization/items_cache.py","file_name":"items_cache.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43111368959","text":"import base64\nimport functools\nimport http.client\nimport json\nimport os.path\nimport time\nimport urllib.parse\nimport urllib.request\nimport urllib.response\n\nfrom abc import abstractmethod, ABC\nfrom jsonpath_ng.ext import parse\nfrom typing import *\n\n\ndef sort_dict_by_key_as_int(src: dict):\n return dict(sorted(src.items(), key=lambda item: int(item[0])))\n\n\nclass ApiResponse:\n def __init__(self, response: http.client.HTTPResponse):\n self.response: http.client.HTTPResponse = response\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if not self.response.isclosed():\n self.response.close()\n \n def get_status(self):\n return self.response.status\n\n def get_body(self):\n return self.response.read()\n\n\nclass ApiRequest:\n def __init__(self, url: str, username: str, password: str) -> None:\n self.url: str = url\n self.credential: str = base64.b64encode((username + ':' + password).encode()).decode()\n\n def invoke(self, command: str, data: str = None) -> ApiResponse:\n method = \"POST\"\n headers = {'Content-Type': 'application/json', 'Authorization': self.credential, 'X-Command': command}\n request = urllib.request.Request(\n self.url,\n method = method,\n headers = headers,\n data = data.encode() if data is not None else None\n )\n\n return ApiResponse(\n urllib.request.urlopen(request)\n )\n\n\nclass ApiContext:\n def __init__(self) -> None:\n self.protocol: str = os.getenv('EXASTRO_PROTOCOL', 'http')\n self.host: str = os.environ.get('EXASTRO_HOST', 'localhost')\n 
self.port: str = os.environ.get('EXASTRO_PORT', '8080')\n self.username: str = os.environ.get('EXASTRO_USERNAME', 'administrator')\n self.password: str = os.environ.get('EXASTRO_PASSWORD', 'password')\n self.current_dir: str = os.getcwd()\n self.workspace: str = os.path.join(self.current_dir, os.environ.get('WORKSPACE_DIR', 'tmp'))\n self.debug: bool = bool(os.environ.get('DEBUG'))\n\n\n def create_api_request(self, menu_id: str) -> ApiRequest:\n return ApiRequest(\n urllib.parse.urlunparse((\n self.protocol,\n self.host + ':' + self.port,\n '/default/menu/07_rest_api_ver1.php',\n '',\n urllib.parse.urlencode({'no': menu_id}),\n ''\n )),\n self.username,\n self.password\n )\n\n\n def get_path_under_current_dir(self, relative_path: str) -> str:\n return os.path.join(self.current_dir, relative_path)\n\n\n def get_path_under_workspace(self, relative_path: str) -> str:\n return os.path.join(self.workspace, relative_path)\n\n def create_temporary_dir(self, prefix: str) -> str:\n tmp_dir = self.get_path_under_workspace('{}-{}'.format(prefix, time.time()))\n os.makedirs(tmp_dir, exist_ok=True)\n return tmp_dir\n\n\nclass ItemValue(ABC):\n @abstractmethod\n def to_value(self, api_context: ApiContext) -> str:\n pass\n\n\nclass UploadFile(ItemValue):\n def __init__(self, file_path: str) -> None:\n self.file_name: str = os.path.basename(file_path)\n self.file_path: str = file_path\n\n\n def to_value(self, api_context: ApiContext) -> str:\n return self.file_name\n\n\n def get_base64(self) -> str:\n with open(self.file_path, \"rb\") as file:\n return base64.b64encode(file.read()).decode(\"ascii\")\n\n\nclass ColumnMetadata:\n def __init__(self, menu_id: str, column_names: List[str], file_upload_column_names: List[str]) -> None:\n self.menu_id: str = menu_id\n self.column_names: List[str] = column_names\n self.file_upload_column_names: List[str] = file_upload_column_names\n\n\nclass ColumnMetadataFinder(ABC):\n @abstractmethod\n def find(self, menu_id: str) -> ColumnMetadata:\n pass\n\n\nclass ApiBuilderError(Exception):\n pass\n\n\nclass ApiBuilder:\n column_metadata_finder: ClassVar[ColumnMetadataFinder]\n\n def __init__(self, menu_id: str, command: str) -> None:\n self.menu_id: str = menu_id\n self.command: str = command\n self.column_metadata: ColumnMetadata = ApiBuilder.column_metadata_finder.find(menu_id)\n\n\n @abstractmethod\n def create_entries(self, params: Dict[str, str]) -> Optional[List[Dict[str, str]]]:\n pass\n\n\n def __validate_entries(self, entries: List[Dict[str, str]]) -> None:\n for entry in entries:\n for column_name in entry.keys():\n if not column_name in self.column_metadata.column_names:\n raise ApiBuilderError('Invalid column name \"{}\"'.format(column_name))\n\n mandatory_column_names = ['実行処理種別']\n for column_name in mandatory_column_names:\n if not column_name in entry.keys():\n raise ApiBuilderError('Mandatory item \"{}\" not contained'.format(column_name))\n\n verb = entry['実行処理種別']\n if not verb in ['登録', '更新', '廃止', '復活']:\n raise ApiBuilderError('Invalid verb {}'.format(verb))\n\n\n def __convert_to_item_key(self, column_name: str) -> str:\n return str(self.column_metadata.column_names.index(column_name))\n\n\n def __convert_to_item_value(self, api_context: ApiContext, value: Any) -> str:\n if isinstance(value, ItemValue):\n result = value.to_value(api_context)\n elif isinstance(value, bool):\n result = \"true\" if value else \"false\"\n else:\n result = str(value)\n\n return result\n\n\n def __convert_to_entry_key(self, index: int) -> str:\n return str(index)\n\n\n 
def __convert_to_entry_value(self, api_context: ApiContext, entry: Dict[str, Any]) -> Dict[str, str]:\n result = {}\n\n for column_name, value in entry.items():\n element_key = self.__convert_to_item_key(column_name)\n element_value = self.__convert_to_item_value(api_context, value)\n result[element_key] = element_value\n\n return sort_dict_by_key_as_int(result)\n\n\n def __convert_to_upload_file(self, entries: List[Dict[str, str]]) -> List[Dict[str, str]]:\n upload_file_json_object = []\n\n for entry in entries:\n upload_file_entry_json_object = {}\n\n for file_upload_column_name in self.column_metadata.file_upload_column_names:\n if file_upload_column_name in entry.keys():\n key = int(self.column_metadata.column_names.index(file_upload_column_name))\n\n x = entry[file_upload_column_name]\n if isinstance(x, UploadFile):\n value = x.get_base64()\n else:\n raise ApiBuilderError('The value type \"{}\" is invalid for upload file column'.format(type(x).__name__))\n\n upload_file_entry_json_object[key] = value\n\n upload_file_json_object.append(sort_dict_by_key_as_int(upload_file_entry_json_object))\n \n return upload_file_json_object\n\n\n def create_json_object(self, api_context: ApiContext, params: Dict[str, str]) -> Optional[Dict[str, Any]]:\n json_object: Optional[Dict[str, Any]] = None\n\n entries = self.create_entries(params)\n if entries is not None:\n self.__validate_entries(entries)\n\n json_object = {}\n for entry_index, entry in enumerate(entries):\n entry_key = self.__convert_to_entry_key(entry_index)\n entry_value = self.__convert_to_entry_value(api_context, entry)\n json_object[entry_key] = entry_value\n\n if self.column_metadata.file_upload_column_names:\n json_object['UPLOAD_FILE'] = self.__convert_to_upload_file(entries)\n\n return json_object\n\n\nclass StrText(ItemValue):\n def __init__(self, text: str) -> None:\n self.text: str = text\n\n\n def to_value(self, api_context: ApiContext) -> str:\n return self.text\n\n\nclass Join(ItemValue):\n def __init__(self, item_values: List[ItemValue], separator: str = ':') -> None:\n self.separator: str = separator\n self.item_values: List[ItemValue] = item_values\n\n\n def to_value(self, api_context: ApiContext) -> str:\n return functools.reduce(lambda x, y: x + self.separator + y, map(lambda x: x.to_value(api_context), self.item_values))\n\n\nclass Query(ItemValue):\n def __init__(self, menu_id: str, query_string: str, separator: str = ':') -> None:\n self.menu_id: str = menu_id\n self.query_string: str = query_string\n self.separator = separator\n\n\n def to_value(self, api_context: ApiContext) -> str:\n class ApiBuilderFilter(ApiBuilder):\n def __init__(self, menu_id: str) -> None:\n super().__init__(menu_id, 'FILTER')\n \n def create_entries(self, params: Dict[str, str]) -> Optional[List[Dict[str, str]]]:\n return None\n\n api_invoker = ApiInvoker(api_context)\n response_json = api_invoker.invoke(dict(), ApiBuilderFilter(self.menu_id))\n\n data_object = json.loads(response_json)\n\n # convert API response to JSONPath friendly form\n #\n # ***** BEFORE *****\n # {\n # \"status\": \"SUCCEED\",\n # \"resultdata\": {\n # \"CONTENTS\": {\n # \"RECORD_LENGTH\": 1,\n # \"BODY\": [\n # [\n # \"key0\",\n # \"key1\",\n # \"key2\",\n # ...\n # ],\n # [\n # \"value0\",\n # \"value1\",\n # \"value2\",\n # ...\n # ],\n # ...\n # \n # ***** AFTER *****\n # \n # [\n # {\n # key0: value0\n # key1: value1\n # key2: value2\n # ...\n # },\n # {\n # key0: value0\n # key1: value1\n # key2: value2\n # ...\n # }, \n # ...\n converted = []\n keys = 
data_object['resultdata']['CONTENTS']['BODY'][0]\n for entry in data_object['resultdata']['CONTENTS']['BODY'][1:]:\n converted_entry = {}\n for index in range(len(keys)):\n converted_entry[keys[index]] = entry[index]\n\n converted.append(converted_entry)\n\n jsonpath_expr = parse(self.query_string)\n matches = jsonpath_expr.find(converted)\n # TODO no results handling\n\n values = [match.value for match in matches]\n\n return functools.reduce(lambda x, y: x + self.separator + y, values)\n\n\nclass ApiInvokerError(Exception):\n pass\n\n\nclass ApiInvoker:\n def __init__(self, api_context: ApiContext) -> None:\n self.api_context: ApiContext = api_context\n self.debug_file_dir = 'debug-{}'.format(time.time())\n\n\n def invoke(self, params: Dict[str, str], builder: ApiBuilder, delay: int = 3) -> str:\n print(builder.__class__.__name__)\n\n if delay > 0:\n time.sleep(delay)\n\n request = self.api_context.create_api_request(builder.menu_id)\n\n # DEBUG: save API URL\n if self.api_context.debug:\n self.__save_to_file(0, 'api-url', builder, 'txt', request.url)\n\n # create JSON\n request_body_json_object = builder.create_json_object(self.api_context, params)\n request_body = json.dumps(request_body_json_object) if request_body_json_object != None else None\n\n # DEBUG: save request JSON\n if self.api_context.debug:\n self.__save_to_file(1, 'request', builder, 'json', request_body)\n\n # invoke API\n with request.invoke(builder.command, request_body) as response:\n response_body = response.get_body().decode()\n\n # DEBUG: save response JSON\n if self.api_context.debug:\n self.__save_to_file(2, 'response', builder, 'json', response_body)\n \n\n return response_body\n\n\n def __save_to_file(self, index: int, kind: str, builder: ApiBuilder, file_ext: str, data: Optional[str]) -> None:\n if data is None:\n content = '(no contents)'\n elif file_ext == 'json':\n content = json.dumps(json.loads(str(data)), ensure_ascii=False, indent=4)\n else:\n content = str(data)\n\n dir_path = self.api_context.get_path_under_workspace(self.debug_file_dir)\n os.makedirs(dir_path, exist_ok=True)\n\n file_path = os.path.join(dir_path, \"{:03}-{}-{}.{}\".format(index, kind, type(builder).__name__, file_ext))\n with open(file_path, mode='w') as f:\n f.write(content)\n\n\nclass SimpleIndexCounter:\n def __init__(self, begin: int = 0, step: int = 1) -> None:\n self.value: int = begin\n self.step: int = step\n\n\n def __call__(self) -> int:\n value = self.value\n self.value += 1\n return value\n","repo_name":"mtakahashiif/shuushuu","sub_path":"python-packages/exastro_api/ita/v1/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":13169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14231937919","text":"import flask\n\nfrom apps.admin.register import quickstart_admin_model\nfrom .models import Page\n\nfrom app import app\n\nquickstart_admin_model(Page, 'pages', 'pages', 'Site', exclude=['rendered'],\n form_include=['location', 'title', 'meta_author', 'meta_keywords', 'meta_description', 'content'])\n\n\n@app.route('/', defaults={'path': ''})\n@app.route('/')\ndef catch_all(path):\n for page in Page.query(Page.location == '/%s' % path):\n return flask.render_template('page.html', page=page)\n if path.endswith('/'):\n for page in Page.query(Page.location == '/%s' % path[0:-1]):\n return flask.redirect(page.location)\n else:\n for page in Page.query(Page.location == '/%s/' % path):\n return flask.redirect(page.location)\n\n return 
flask.abort(404)\n","repo_name":"kkinder/GAEStarterKit","sub_path":"apps/simplecms/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"3"} +{"seq_id":"23039898992","text":"import matplotlib.pyplot as plt\nimport math\n\ndef mean(sampleset):\n total = 0\n for element in sampleset:\n total = total + element\n\n return total/len(sampleset)\n\n\ndef variance(sampleset):\n total = 0\n setmean = mean(sampleset)\n for element in sampleset:\n total = total + math.pow(element-setmean, 2)\n return total/len(sampleset)\n\n\ndef standardDeviation(sampleset):\n total = 0\n setvariance = variance(sampleset)\n return math.sqrt(setvariance)\n\n\nmyset1 = [2., 10., 3., 6., 4., 6., 10.]\nmyset2 = [1., -100., 15., -100., 21.]\n#mymean = mean(myset1)\n\n# plt.isinteractive(block=False)\n# plt.plot(myset)\n# plt.plot([mymean] * 7)\n# plt.show()\n\nprint(\"Variance of first set: {0} with standard deviation {1}\".format(variance(myset1), standardDeviation(myset1)))\nprint(\"Variance of second set: {0} with standard deviation {1}\".format(variance(myset2), standardDeviation(myset2)))\n","repo_name":"irmoralesb/MLForDevsBook","sub_path":"Source/Chapter1/01 theMean.py","file_name":"01 theMean.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11808452357","text":"import argparse\nfrom typing import (\n Dict,\n Optional,\n Iterator,\n)\nfrom .parser import Parser\nfrom .validate import Validator\nfrom .translator import Translator\nfrom .node import Node\n\nparser = argparse.ArgumentParser(description='Convert BNF grammar to CNF')\nparser.add_argument(\n 'file',\n nargs=1,\n type=str,\n help=(\n 'The file to read the grammar from.'\n ),\n)\nparser.add_argument(\n '-f',\n '--format',\n choices=['cyk', 'py'],\n default='py',\n nargs='?',\n type=str,\n help=(\n 'The output format. Can be either \"cyk\" or \"py\". \"cyk\" '\n 'outputs the file in CYK format, as a .cyk file. 
Py '\n 'generates a grammar which can be read by darglint.'\n ),\n)\nparser.add_argument(\n '-o',\n '--output',\n nargs=1,\n type=str,\n default=None,\n help=(\n 'The output file.'\n )\n)\n\n\nclass Driver(object):\n\n def __init__(self):\n self.data = None # type: Optional[str]\n self.parser = Parser()\n self.validator = Validator()\n self.translator = Translator()\n self.tree = None # type: Optional[Node]\n\n def read(self, filename: str) -> 'Driver':\n with open(filename, 'r') as fin:\n self.data = fin.read()\n return self\n\n def parse(self) -> 'Driver':\n self.tree = self.parser.parse(self.data)\n return self\n\n def translate(self) -> 'Driver':\n self.translator.translate(self.tree)\n return self\n\n def validate(self) -> 'Driver':\n self.validator.validate(self.tree)\n return self\n\n def write(self, _format: str) -> str:\n assert self.tree is not None\n if _format == 'cyk':\n return str(self.tree)\n elif _format == 'py':\n return self.tree.to_python()\n else:\n raise Exception(f'Unrecognized format type {_format}')\n\n def get_imports(self) -> Iterator[str]:\n assert self.tree is not None\n for _import in self.tree.filter(Node.is_import):\n assert _import.value is not None\n yield _import.value\n\n def merge(self, driver: 'Driver'):\n \"\"\"Merge in the grammar at the given filename with this grammar.\n\n Args:\n driver: Another driver to merge into this one.\n\n \"\"\"\n assert self.tree is not None\n assert driver.tree is not None\n self.tree.merge(driver.tree)\n\n\ndef load_script(filename: str, cache: Dict[str, Driver] = dict()):\n \"\"\"Recursively load a script, parsing it and adding dependencies.\n\n Args:\n filename: The name of the file to open.\n cache: A cache to avoid duplicate work.\n\n Returns:\n The fully parsed grammar.\n\n \"\"\"\n assert filename not in cache\n driver = Driver().read(filename).parse()\n cache[filename] = driver\n\n # We know that merging doesn't introduce new imports,\n # so it's safe to immediately merge subgrammars.\n for filename in driver.get_imports():\n if filename in cache:\n # We skip already imported scripts, to avoid\n # having multiple copies of the productions.\n continue\n else:\n subdriver = load_script(filename, cache)\n driver.merge(subdriver)\n\n return driver\n\n\ndef main():\n args = parser.parse_args()\n driver = load_script(args.file[0])\n translated = driver.translate().validate().write(args.format)\n\n if args.output:\n with open(args.output[0], 'w') as fout:\n fout.write(translated)\n else:\n print(translated)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"terrencepreilly/darglint","sub_path":"bin/bnf_to_cnf/bnf_to_cnf/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":481,"dataset":"github-code","pt":"3"} +{"seq_id":"7483876549","text":"# Week03 of GMIT CyberSecurity\n# IDR lecturer Seamus Dowling has set us the task of comparing malware on MitreAttackNavigator\n# At the same time programming lecturer Andrew Beatty has set us the task of parsing JSON output\n# So, I've decided to dowload the output of my IDR mitre navigator comparison as a JSON file\n# read it in to a data structure in python and see what I can print out\n# That's the theory\n# \n# This is very definitely a work in progress\n\nimport json\n\nwith open(\"ComparisonLayer.json\", \"r\") as read_file:\n mitreAttackNavigatorData = json.load(read_file)\n\nprint (\"\\n\" + mitreAttackNavigatorData[\"name\"] + \"\\n\") # Print the name of the Mitre Navigator 
Sheet\n\nfor techniqueID in mitreAttackNavigatorData[\"techniques\"] :\n\tprint (techniqueID[\"techniqueID\"],techniqueID[\"tactic\"])\n\n","repo_name":"aharring/GMITPyProg2021","sub_path":"Practice/readingMitreJsonOutput.py","file_name":"readingMitreJsonOutput.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39390908132","text":"import json\n# Load out data\nwith open('animal-questions.json', \"r\") as read_file:\n data = json.load(read_file)\n\n# Set my starting question id\nq_id = '16'\n\n# I start without an answer\nans = 'N'\n\n# A simple Y/N validation function\ndef get_answer (q_text):\n good_ans = False\n while good_ans == False:\n x = str(input(q_text + '? [Y/N]: '))\n x = x.upper()\n if x == 'Y':\n return True\n good_ans = True\n elif x == 'N':\n return False\n good_ans = False\n else:\n print('Enter Y or N')\n\n# Step into questions loop\nwhile ans == 'N':\n q_txt = data[q_id]['Q']\n if get_answer(q_txt):\n print('YES')\n # Get next record ID\n q_id = data[q_id]['Y']\n else:\n print('No')\n # Get next record ID\n q_id = data[q_id]['N']\n\n # Is this an answer?\n if data[q_id]['ans'] == 'Y':\n print ('Answer is: ' + data[q_id]['Q'])\n ans ='Y'\n\n","repo_name":"RobertMortimer/sjd","sub_path":"intro_python/python/wk02/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16227159258","text":"from django.core.exceptions import ValidationError\nfrom django.test import TestCase\n\nfrom .models import Livery, Operator, Vehicle\n\n\nclass VehicleModelTests(TestCase):\n def test_vehicle(self):\n vehicle = Vehicle(reg=\"3990ME\")\n self.assertEqual(str(vehicle), \"3990 ME\")\n self.assertIn(\n \"search/?text=3990ME%20or%20%223990%20ME%22&sort\", vehicle.get_flickr_url()\n )\n\n vehicle.reg = \"J122018\"\n self.assertEqual(str(vehicle), \"J122018\")\n\n vehicle.notes = \"Spare ticket machine\"\n self.assertEqual(\"\", vehicle.get_flickr_link())\n\n vehicle = Vehicle(code=\"RML2604\")\n self.assertIn(\"search/?text=RML2604&sort\", vehicle.get_flickr_url())\n\n vehicle.operator = Operator(name=\"Lynx\")\n self.assertIn(\"search/?text=Lynx%20RML2604&sort\", vehicle.get_flickr_url())\n\n vehicle.fleet_number = \"11111\"\n self.assertIn(\"search/?text=Lynx%2011111&sort\", vehicle.get_flickr_url())\n\n vehicle.reg = \"YN69GHA\"\n vehicle.operator.parent = \"Stagecoach\"\n vehicle.fleet_number = \"11111\"\n\n self.assertIn(\n \"search/?text=YN69GHA%20or%20%22YN69%20GHA%22%20or%20Stagecoach%2011111&sort\",\n vehicle.get_flickr_url(),\n )\n\n def test_vehicle_validation(self):\n vehicle = Vehicle(colours=\"ploop\")\n with self.assertRaises(ValidationError):\n vehicle.clean()\n\n vehicle.colours = \"\"\n vehicle.clean()\n\n def test_livery(self):\n livery = Livery(name=\"Go-Coach\", published=False)\n livery.text_colour = \"#c0c0c0\"\n livery.stroke_colour = \"#ffee99\"\n self.assertEqual(\"Go-Coach\", str(livery))\n self.assertIsNone(livery.preview())\n self.assertEqual(\n '
Go-Coach',\n livery.preview(name=True),\n )\n\n livery.colours = \"#7D287D #FDEE00 #FDEE00\"\n livery.horizontal = True\n livery.save()\n self.assertEqual(\n '
',\n livery.preview(),\n )\n self.assertEqual(\n livery.get_styles(),\n [\n f\"\"\".livery-{livery.id} {{\\n background: linear-gradient(to top,#7D287D 34%,#FDEE00 34%);\n color:#c0c0c0;fill:#c0c0c0;stroke:#ffee99\\n}}\\n\"\"\"\n ],\n )\n\n livery.horizontal = False\n livery.angle = 45\n livery.save()\n self.assertEqual(\n \"linear-gradient(45deg,#7D287D 34%,#FDEE00 34%)\", livery.left_css\n )\n self.assertEqual(\n \"linear-gradient(315deg,#7D287D 34%,#FDEE00 34%)\", livery.right_css\n )\n\n livery.angle = None\n livery.save()\n\n vehicle = Vehicle(livery=livery)\n self.assertEqual(\n \"linear-gradient(to left,#7D287D 34%,#FDEE00 34%)\", vehicle.get_livery(179)\n )\n self.assertIsNone(vehicle.get_text_colour())\n\n vehicle.livery.colours = \"#c0c0c0\"\n vehicle.livery.save()\n self.assertEqual(\"#c0c0c0\", vehicle.get_livery(200))\n\n livery.css = \"linear-gradient(45deg,#ED1B23 35%,#fff 35%,#fff 45%,#ED1B23 45%)\"\n livery.set_css()\n self.assertEqual(\n livery.left_css,\n \"linear-gradient(45deg,#ED1B23 35%,#fff 35%,#fff 45%,#ED1B23 45%)\",\n )\n self.assertEqual(\n livery.right_css,\n \"linear-gradient(315deg,#ED1B23 35%,#fff 35%,#fff 45%,#ED1B23 45%)\",\n )\n\n def test_livery_validation(self):\n livery = Livery()\n\n livery.clean() # should not raise an exception\n\n livery.text_colour = \"#c0c0c0\"\n livery.stroke_colour = \"#ff00a9\"\n livery.right_css = \"{\"\n with self.assertRaises(ValidationError) as cm:\n livery.clean()\n self.assertEqual(\n cm.exception.args, ({\"right_css\": \"Must not contain { or }\"}, None, None)\n )\n\n livery.right_css = \"\"\n livery.left_css = \"url((\"\n with self.assertRaises(ValidationError) as cm:\n livery.clean()\n self.assertEqual(\n cm.exception.args,\n ({\"left_css\": \"Must contain equal numbers of ( and )\"}, None, None),\n )\n\n livery.left_css = \"\"\n livery.stroke_colour = \"red\"\n with self.assertRaises(ValidationError) as cm:\n livery.clean()\n self.assertEqual(\n cm.exception.args,\n (\n {\n \"stroke_colour\": \"An HTML5 simple color must be a Unicode string \"\n \"seven characters long.\"\n },\n None,\n None,\n ),\n )\n","repo_name":"jclgoodwin/bustimes.org","sub_path":"vehicles/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"3"} +{"seq_id":"4050860808","text":"import torch\nimport argparse\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"model\")\nparser.add_argument(\"file\")\nparser.add_argument('--cuda', action='store_true', default=False)\n\n\nargs = parser.parse_args()\n\ndata = torch.load('polyp_data.pth')\n\nif args.cuda:\n print('cuda')\n model = torch.load(args.model).cuda()\nelse:\n model = torch.load(args.model).cpu()\n print('cpu')\n\npredictions = []\n\nfor i in range(182):\n print('Sample number:')\n print(i)\n model.eval()\n\n if args.cuda:\n output = model(Variable(data[4][i]).float()).cuda()\n else:\n output = model(Variable(data[4][i]).float()).cpu()\n\n predictions.append(np.argmax(F.softmax(output).data.numpy(), 1))\n\nnp.savez_compressed(args.file, a=predictions)\n","repo_name":"Wickstrom/Thesis","sub_path":"misc/polyp_predictions.py","file_name":"polyp_predictions.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"5997675758","text":"#Multiple Linear Regression\n\n#Importing 
Libraries\nimport numpy as np #used for mathematical calcs\nimport pandas as pd #used to get datasets\nimport matplotlib.pyplot as plt #used for plotting charts and graphs\n\n#import dataset\ndataset= pd.read_csv('50_Startups.csv')\nX= dataset.iloc[:,:-1].values #Extracting the features from the dataset\ny= dataset.iloc[:, 4].values #Extracting the Lables from the dataset\n\n \n#Encoding the categorical data into numbers\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder() #Encodes categorycal data into numbers\nX[:, 3] = labelencoder_X.fit_transform(X[:,3])\nonehotencoder = OneHotEncoder(categorical_features= [3])\nX = onehotencoder.fit_transform(X).toarray() \n\n#Avoiding Dummy variable trap\n#Python library automatiaclly takes care of this, but still done for reference\nX = X[:, 1:] #Removed the first column\n\n#Splitting the data into training and test data\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) \n \n\n\n#Fit model to training test\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n \n\n#Predicting using the testing data\ny_pred = regressor.predict(X_test)\n\n#Optimizing the model using Backward Elimination\nimport statsmodels.formula.api as sm\nX = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1) \n#used to append a column of ones in the begining of the features matrix. y=b0*x0+b1*x1....bn*xn. These ones make up x0\n\nX_opt = X[:, [0,1,2,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\nX_opt = X[:, [0,1,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = X[:, [0,3,4,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = X[:, [0,3,5]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = X[:, [0,3]]\nregressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()\nregressor_OLS.summary()\n\n\nX_opt = X[:, [3]]\n\n#So the best model is the one using only the data in the index=3 column\n\n#Modifying the dataset to include only the columns needed\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X_opt, y, test_size=0.2, random_state=0) \n\n\n#Fit model to training test\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n \n\n#Predicting using the testing data\ny_pred = regressor.predict(X_test)\n","repo_name":"rounakskm/Machine-Learning","sub_path":"2.Regression/Section 5 - Multiple Linear Regression/multiple_linear_regression_self.py","file_name":"multiple_linear_regression_self.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29049918587","text":"from decimal import Decimal\n\nfrom balance.balance_steps import (\n ActsSteps,\n CommonPartnerSteps,\n ContractSteps,\n ExportSteps,\n InvoiceSteps,\n)\nfrom balance.balance_steps.new_taxi_steps import TaxiData\nfrom btestlib.constants import NdsNew, Services, SpendablePaymentType\nfrom temp.allista.report import Report\n\nPAYMENT_AMOUNT = Decimal(\"100.1\")\nREFUND_AMOUNT = Decimal(\"93.13\")\nTOTAL_AMOUNT = PAYMENT_AMOUNT - REFUND_AMOUNT\n\n\ndef export_context_to_oebs(\n context,\n start_dt,\n end_dt,\n completions=tuple(),\n 
invoices_for=(\"YANDEX_SERVICE\",),\n spendable_contexts=None,\n):\n additional_params = {\"start_dt\": start_dt}\n (\n client_id,\n person_id,\n contract_id,\n contract_eid,\n ) = ContractSteps.create_partner_contract(\n context, is_postpay=0, is_offer=1, additional_params=additional_params\n )\n\n compls_data_3rd_month = TaxiData.generate_default_oebs_compls_data(\n start_dt, context.currency.iso_code, start_dt\n ) + [\n {\n \"service_id\": Services.TAXI.id,\n \"amount\": (PAYMENT_AMOUNT - REFUND_AMOUNT),\n \"product_id\": product,\n \"dt\": start_dt,\n \"transaction_dt\": start_dt,\n \"currency\": context.currency.iso_code,\n \"accounting_period\": start_dt,\n }\n for product in completions\n ]\n\n CommonPartnerSteps.create_partner_oebs_completions(\n contract_id, client_id, compls_data_3rd_month\n )\n\n CommonPartnerSteps.generate_partner_acts_fair_and_export(\n client_id, contract_id, end_dt\n )\n\n invoices = [\n InvoiceSteps.get_invoice_by_service_or_service_code(\n contract_id, service_code=service_code\n )\n for service_code in invoices_for\n ]\n act_data = ActsSteps.get_all_act_data(client_id, dt=end_dt)\n\n client_log = ExportSteps.get_oebs_api_response(\"Client\", client_id)\n person_log = ExportSteps.get_oebs_api_response(\"Person\", person_id)\n contract_log = ExportSteps.get_oebs_api_response(\"Contract\", contract_id)\n pa_log = {\n pa_id: ExportSteps.get_oebs_api_response(\"Invoice\", pa_id)\n for pa_id, pa_eid, service_code in invoices\n }\n act_log = {\n act[\"id\"]: ExportSteps.get_oebs_api_response(\"Act\", act[\"id\"])\n for act in act_data\n }\n\n with Report(context, u\"Projects/balance/reports\") as report:\n report.write_log(\n (\"Client\", client_id, client_log),\n (\"Person\", person_id, person_log),\n (\"Main Contract\", contract_id, contract_log),\n (\"Invoices\", pa_log),\n (\"Acts\", act_log),\n )\n # handle spendable contexts, if any\n if not spendable_contexts:\n return\n additional_params.update(\n {\n \"nds\": NdsNew.ZERO.nds_id,\n \"payment_type\": SpendablePaymentType.MONTHLY,\n \"link_contract_id\": contract_id,\n }\n )\n for ctx in spendable_contexts:\n (\n _,\n spendable_person_id,\n spendable_contract_id,\n spendable_contract_eid,\n ) = ContractSteps.create_partner_contract(\n ctx,\n client_id=client_id,\n unsigned=False,\n additional_params=additional_params,\n )\n spendable_person_log = ExportSteps.get_oebs_api_response(\n \"Person\", spendable_person_id\n )\n spendable_contract_log = ExportSteps.get_oebs_api_response(\n \"Contract\", spendable_contract_id\n )\n report.write_log(\n (u\"\\n\\n{ctx}\".format(ctx=ctx.name),),\n (\"Person\", spendable_person_id, spendable_person_log),\n (\"Contract\", spendable_contract_id, spendable_contract_log),\n )\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/balance_tests/temp/allista/oebs_export.py","file_name":"oebs_export.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8651099913","text":"from urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\nimport re\n\ndef separate(prereqs):\n prereqs = re.sub(r'[a-z=;]', '', prereqs)\n prereqs = ' '.join(prereqs.split())\n prereqs = prereqs.split('AND')\n for x in range(len(prereqs)):\n prereqs[x] = prereqs[x].split('OR')\n\n #in the future we may need to represent the AND / OR relationship as a tree\n #initially we can split them by the AND's to turn them into lists\n #within these lists are separate sections of 
required classes \n #e.g) ['I&C SCI 22 C OR CSE 22 C OR I&C SCI H22 C OR I&C SCI 33 C OR CSE 43 C OR AP COMP SCI AB 4 ', ' I&C SCI 45C C OR CSE 45C C ', ' MATH 6G C OR MATH 3A C OR I&C SCI 6N C']\n #after that we can make this list into a list of lists by .split('OR'), yielding:\n #[['I&C SCI 22 C ', ' CSE 22 C ', ' I&C SCI H22 C ', ' I&C SCI 33 C ', ' CSE 43 C ', ' AP COMP SCI AB 4 '], [' I&C SCI 45C C ', ' CSE 45C C '], [' MATH 6G C ', ' MATH 3A C ', ' I&C SCI 6N C']]\n #can easily represent this [[str]] through [str] -> str tree\n\n prereqs = str(prereqs)\n prereqs = re.sub(r\"[\\[*\\]*\\'*]\", '', prereqs)\n \n return prereqs\n\n\n'''\n for p in prereqs:\n p = re.sub(r'[a-z()=;]', '', p)\n p = ' '.join(p.split())\n p = p.split('AND')\n for x in range(len(p)):\n p[x] = p[x].split('OR')\n'''\n\n #take out all AND's, OR's, (p), =, ; \n #ics22, ics69, \n\nuClient = None\n\nmy_url = \"https://www.reg.uci.edu/cob/prrqcgi?term=201803&dept=COMPSCI&action=view_by_term#112\"\n\nfilename = \"courses.csv\"\nheaders = (\"Course, Title, Prereqs\\n\")\n\nf = open(filename, 'w')\nf.write(headers)\n\ntry:\n uClient = uReq(my_url)\n page_html = uClient.read()\n\nfinally:\n if uClient != None:\n uClient.close()\n\npage_soup = soup(page_html, \"html.parser\")\nclass_courses = page_soup.findAll(\"td\", {\"class\": \"course\"})\nclass_titles = page_soup.findAll(\"td\", {\"class\": \"title\"})\nclass_prereqs = page_soup.findAll(\"td\", {\"class\": \"prereq\"})\n\nprereqs = class_prereqs[0].text.strip()\nprint(separate(prereqs))\n\nfor x in range(len(class_courses)):\n course = class_courses[x].text.strip()\n title = class_titles[x].text.strip()\n prereqs = class_prereqs[x].text.strip()\n\n f.write(course + ',' + title + ',' + separate(prereqs) + \"\\n\")\n\nf.close()\n\n\n\n","repo_name":"hummuswins/ZotReq","sub_path":"scrape/scrape_courses.py","file_name":"scrape_courses.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"7211430581","text":"from functools import partial\nfrom multiprocessing.dummy import Pool\n\nimport allel\nimport sys\nimport argparse\nimport os\nimport subprocess\nimport csv\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n\ndef setUp(argv, targetFileName, refFileName, cpus, prefix, regionLength):\n print('\\nChecking input')\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-t', '--tarHaps', type=str, required=True, help='Target GWAS dataset in vcfn vcf bgziped, or bcf format as a singular file. This program will do the separation')\n parser.add_argument(\n '-r', '--refHaps', type=str, required=True, help='File list containing reference datasets (in order of chromosomes 1-22, one per line) in imp5 format with an idx index. Use imp5Converter from impute5 to create these files.')\n parser.add_argument(\n '-p', '--prefix', type=str, default='ouput', help='Prefix for output files.')\n parser.add_argument(\n '-c', '--cpus', type=str, default='1', help='Number of threads to use. Suggested around 12 cores with approx 100 Gb RAM.')\n parser.add_argument(\n '-l', '--region_length', type=str, default='10000000', help='length of region to impute at a time (in bp!). 
5-15 Mbp is typical and can be changed depending on available memory.')\n args = parser.parse_args()\n\n targetFileName = os.path.abspath(args.tarHaps)\n refFileName = os.path.abspath(args.refHaps)\n cpus = args.cpus\n if int(cpus) < 4:\n print('Warning: the number of cpus to use is less than 4. \\\n This will be very limiting to this pipeline.\\nProgram will continue \\\n to run unless explicitly stopped.')\n cpus = args.cpus\n prefix = args.prefix\n try:\n int(args.region_length)\n except:\n print('please supply a number to -l/--region_length')\n sys.exit(1)\n regionLength = args.region_length\n print('Successful\\n')\n\n return targetFileName, refFileName, cpus, prefix, regionLength\n\n\ndef runCommandLine(commands):\n try:\n subprocess.run(commands, shell=True, check=True)\n except subprocess.CalledProcessError as err:\n print('Error: ', err)\n sys.exit(1)\n\ndef checkPrograms():\n print('Checking required programs and directories')\n\n # create directories if not present\n if not os.path.isdir('output'):\n print('directory \"output\" not found. Creating temporary directory')\n os.mkdir('output')\n for i in range(1, 23):\n os.mkdir('output/chr' + str(i))\n\n if not os.path.isdir('beforePhasing'):\n print('directory \"beforePhasing\" not found. Creating temporary directory')\n os.mkdir('beforePhasing')\n\n if not os.path.isdir('afterPhasing'):\n print('directory \"afterPhasing\" not found. Creating temporary directory')\n os.mkdir('afterPhasing')\n\n if not os.path.isdir('map'):\n print('directory for map data not found, be sure to download from the git repository, unzip plink.GRCh23.map.zip and have the files reside in map/')\n sys.exit(1)\n\n # set all programs used to the env PATH variable\n if 'htslib' not in os.environ.get('PATH'):\n print('Cannot find htslib on PATH, please add it. Bgzip and tabix are used with this.')\n sys.exit(1)\n\n if 'bcftools' not in os.environ.get('PATH'):\n print('Cannot find bcftools on PATH, please add it. Used for sorting and removing duplicates')\n sys.exit(1)\n \n if 'impute_v5.1' not in os.environ.get('PATH'):\n print('Cannot find impute5 on PATH, please add it. Used for imputation')\n sys.exit(1)\n\n if 'shapeit4' not in os.environ.get('PATH'):\n print('Cannot find shapeit4 on PATH, please add it. Used for prephasing')\n sys.exit(1)\n\n print('Successful\\n')\n\n\ndef splitVCF(file, cpus):\n # as long as the target fiile is vcf, vcf.gz. or bcf, bcftools sort will run and convert to bcf\n if not os.path.isfile('beforePhasing/change_chromosomes_tmp.txt'):\n with open('beforePhasing/change_chromosomes_tmp.txt', 'w') as f:\n for i in range(1, 23):\n line = str(i) + \" chr\" + str(i) + \"\\n\"\n f.write(line)\n\n extractChr = ''\n for i in range(1, 23):\n extractChr = extractChr + \"chr\" + str(i) + \",\"\n extractChr = extractChr[:-1]\n \n runCommandLine([\"bcftools annotate \" + file + \" --rename-chrs beforePhasing/change_chromosomes_tmp.txt -Ou \\\n | bcftools view -Ou -t \" + extractChr + \" | bcftools sort -T beforePhasing/ -Ob -o beforePhasing/sortedTargetFile.tmp.bcf\"])\n runCommandLine(['tabix -f beforePhasing/sortedTargetFile.tmp.bcf'])\n\n print('separating into chromosomes 1-22 and remove indels')\n for i in range(1, 23):\n chrm = str(i)\n runCommandLine([\"bcftools view --threads \" + cpus + \" -Ob -o beforePhasing/chr\" + chrm + \".tmp.bcf \\\n -r chr\" + chrm + \" -V indels beforePhasing/sortedTargetFile.tmp.bcf\"])\n\ndef sortAndFilterShapeit4(chrm, cpus):\n # remove duplicate. 
File should be vcf.gz\n print('Checking QC: removing duplicates, aligning REF and ALT alleles')\n runCommandLine(['bcftools norm beforePhasing/chr' + chrm + '.tmp.bcf -d snps -Ob -c s -N \\\n -f /project/csbio/MathewF/imputation/ImputationAccuracy/reference_1000G/1000G_fasta/GRCh38_full_analysis_set_plus_decoy_hla.fa \\\n --threads ' + cpus + \" -o beforePhasing/chr\" + chrm + \".prephaseReady.tmp.bcf\"])\n runCommandLine([\"tabix beforePhasing/chr\" + chrm + \".prephaseReady.tmp.bcf\"])\n # fasta alignment file here. Added above to bcftools norm command\n # http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/GRCh38_reference_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa\n\ndef prePhaseShapeit4(chrm, cpu):\n print('beginning prephasing with Shapeit4')\n runCommandLine([\"shapeit4 --input beforePhasing/chr\" + chrm + \".prephaseReady.tmp.bcf \\\n --map map/chr\" + chrm + \".b38.gmap.gz \\\n --region chr\" + chrm + \" \\\n --thread \" + cpu + \" \\\n --output afterPhasing/prePhasedChr\" + chrm + \".tmp.bcf --log afterPhasing/prePhasedChr\" + chrm + \".tmp.log\"])\n\ndef runImpute5(chrm, refFile, cpus, regionLength):\n print('\\nBeginning Impute5 Imputation')\n region = int(regionLength) # 10 cM default\n\n # Find how long the chromosome is using the GRCh38 fasta file index\n commandHeader = \"grep -w chr\" + chrm + \\\n \" reference_1000G/1000G_fasta/GRCh38_full_analysis_set_plus_decoy_hla.fa.fai | cut -f 2\"\n try:\n process = subprocess.Popen(commandHeader, shell=True, stdout=subprocess.PIPE, text=True, universal_newlines=True)\n except subprocess.CalledProcessError as err:\n print('Error: ', err)\n sys.exit(1)\n length = process.communicate()\n length = int(length[0])\n div = abs(-(length) // region)\n\n # need index for target file\n runCommandLine([\"tabix -f afterPhasing/prePhasedChr\" + chrm + \".tmp.bcf\"])\n\n commands = []\n imputeList = []\n for i in range(0, div):\n start = 1 if i == 0 else (i * region) + 1\n end = length if i == (int(div) - 1) else (i + 1) * region\n step = str(i + 1)\n commandLine = [\"impute5 --g afterPhasing/prePhasedChr\" + chrm + \".tmp.bcf \\\n --h \" + refFile.strip() + \" --m map/chr\" + chrm + \".b38.gmap.gz \\\n --r chr\" + chrm + \":\" + str(start) + \"-\" + str(end) + \" --o output/chr\" + \n chrm + \"/impute5_\" + step + \".tmp.bcf \\\n --l output/chr\" + chrm + \"/impute5_\" + step + \".tmp.log --out-gp-field\"]\n commands.append(commandLine)\n imputeList.append(\"output/chr\" + chrm + \"/impute5_\" + step + \".tmp.bcf\")\n # the following code was found at https://stackoverflow.com/questions/14533458/python-threading-multiple-bash-subprocesses/14533902#14533902\n max_workers = int(cpus)\n pool = Pool(max_workers) # run the value of 'cpus' concurrent commands at a time\n for i, returncode in enumerate(pool.imap(partial(subprocess.call, shell=True, stdout=subprocess.DEVNULL), commands)):\n if returncode != 0:\n print(\"region %d failed: %d, likely there were no variants included and will still run to completeion.\\\n \\nView output/Chr%d/impute5_%d.tmp.log for more details\" % ((i+1), returncode, int(chrm), (i+1)))\n\n # concat vcf files together\n imputeString = ''\n for file in imputeList:\n if os.path.isfile(file):\n imputeString = imputeString + file + \" \"\n runCommandLine([\"bcftools concat --no-version -Ob --threads \" + cpus + \n \" -o output/chr\" + chrm + \"/impute5_all.tmp.bcf \" + imputeString])\n\n\ndef setUpCompareImpute5(cpus, chrm):\n # find which variants are genotyped 7:7min\n runCommandLine([\"bcftools query -f 
'%CHROM\\t%POS\\t1\\n' -o output/chr\" + chrm + \"/region_list_tmp.txt afterPhasing/prePhasedChr\" + chrm + \".tmp.bcf\"])\n runCommandLine([\"bgzip -f output/chr\" + chrm + \"/region_list_tmp.txt; tabix -s1 -b2 -e2 output/chr\" + chrm + \"/region_list_tmp.txt.gz\"])\n # Create annotation file for markers that are similar b/t before and after imputation, and filter for IMP flag\n runCommandLine([\"bcftools query -T output/chr\" + chrm + \"/region_list_tmp.txt.gz output/chr\" + chrm + \"/impute5_all.tmp.bcf -f '%CHROM\\t%POS\\t1\\n' -o output/chr\" + chrm + \"/annot_geno_list.tmp.txt\"])\n runCommandLine([\"bgzip -f output/chr\" + chrm + \"/annot_geno_list.tmp.txt; tabix -s1 -b2 -e2 output/chr\" + chrm + \"/annot_geno_list.tmp.txt.gz\"])\n\n # Change header lines\n runCommandLine([\"\"\"echo '##INFO=' > output/chr\"\"\" + chrm + \"/header_lines_tmp\"])\n # Annotate imputed file 7:52min\n runCommandLine([\"bcftools annotate --no-version -a output/chr\" + chrm + \"/annot_geno_list.tmp.txt.gz -h output/chr\" + chrm + \"/header_lines_tmp -c CHROM,POS,INFO/GENO output/chr\" + chrm + \"/impute5_all.tmp.bcf \\\n -Ob -o output/chr\" + chrm + \"/impute5_all_GENO.tmp.bcf --threads \" + cpus])\n \n # create separate files of MAFs < 0.05%, 0.05-5%, and > 5%, then make vcf.gz files of just variant information without GT\n infoFile =[]\n for i in ([\"'MAF <= 0.0005'\", \"0-0.05\"], [\"'MAF > 0.0005 && MAF <= 0.05'\", \"0.05-5\"], [\"'MAF > 0.05'\", \"5-50\"]):\n runCommandLine([\"bcftools view --no-version output/chr\" + chrm + \"/impute5_all_GENO.tmp.bcf -i \" + i[0] + \n \" --threads \" + cpus + \" -Ob -o output/chr\" + chrm + \"/impute5_all_GENO_\" + i[1] + \".tmp.bcf\"])\n\n runCommandLine([\"bcftools view --no-version -G output/chr\" + chrm + \"/impute5_all_GENO_\" + i[1] + \".tmp.bcf \\\n -Oz -o output/chr\" + chrm + \"/impute5_all_GENO_\" + i[1] + \".tmp.vcf.gz --threads \" + cpus])\n \n infoFile.append(\"output/chr\" + chrm + \"/impute5_all_GENO_\" + i[1] + \".tmp.vcf.gz\")\n \n return infoFile\n\ndef printInfoImpute(dfInput, typeVar, csvList, maf, chrm):\n csvList.append([chrm, typeVar , maf, round(dfInput['INFO'].mean(), 4)])\n\ndef createGraphs(df, prefix, outputDir, chrm):\n print('new plot')\n binNum = 30\n all_maf_0_1 = df.loc[(df['MAF'] <= 0.01)]\n all_maf_GT_1 = df.loc[(df['MAF'] > 0.01)]\n bin_means_info_0_1 = stats.binned_statistic(all_maf_0_1['MAF'], all_maf_0_1['INFO'], 'mean', bins=binNum)\n bin_means_info_GT_1 = stats.binned_statistic(all_maf_GT_1['MAF'], all_maf_GT_1['INFO'], 'mean', bins=binNum)\n xaxis_values_0_1 = [ x for x in np.linspace(0.0, 0.01, num=binNum, endpoint=True)]\n xaxis_values_GT_1 = [ x for x in np.linspace(0.01, 0.50, num=binNum, endpoint=True)]\n\n\n plt.subplot(121)\n plt.plot(xaxis_values_0_1, bin_means_info_0_1[0], 'r--', label='INFO score')\n plt.title('Average INFO, MAF 0-1%')\n plt.xlabel('log(MAF)')\n plt.ylabel('Average INFO')\n plt.grid(True)\n plt.legend()\n plt.xscale('log')\n plt.tight_layout(pad=1.0)\n plt.subplot(122)\n plt.plot(xaxis_values_GT_1, bin_means_info_GT_1[0], 'r--', label='INFO score')\n plt.title('Average INFO, MAF 1-50%')\n plt.xlabel('log(MAF)')\n plt.ylabel('Average INFO')\n plt.grid(True)\n plt.legend()\n plt.xscale('log')\n plt.tight_layout(pad=1.0)\n plt.savefig(outputDir + prefix + '_chr' + chrm + '_infoScoreByMaf.jpeg')\n\ndef compareImpute5(cpus, prefix, outputDir, chrm):\n print(\"Starting accuracy analysis\")\n csvList = []\n infoFile = setUpCompareImpute5(cpus, chrm)\n\n df1 = allel.vcf_to_dataframe(infoFile[0], 
fields=['variants/*'])\n df2 = allel.vcf_to_dataframe(infoFile[1], fields=['variants/*'])\n df3 = allel.vcf_to_dataframe(infoFile[2], fields=['variants/*'])\n\n mafLess_0_05_GENO = df1.loc[(df1['IMP'] == False) & (df1['GENO'] == True) ]\n maf_0_05_to_5_GENO = df2.loc[(df2['IMP'] == False) & (df2['GENO'] == True) ]\n maf_5_to_50_GENO = df3.loc[(df3['IMP'] == False) & (df3['GENO'] == True) ]\n printInfoImpute(mafLess_0_05_GENO, 'genotyped', csvList, '0-0.05%', chrm)\n printInfoImpute(maf_0_05_to_5_GENO, 'genotyped', csvList, '0.05-5%', chrm)\n printInfoImpute(maf_5_to_50_GENO, 'genotyped', csvList, '5-50%', chrm)\n\n mafLess_0_05_IMP = df1.loc[(df1['IMP'] == True) & (df1['GENO'] == False) ]\n maf_0_05_to_5_IMP = df2.loc[(df2['IMP'] == True) & (df2['GENO'] == False) ]\n maf_5_to_50_IMP = df3.loc[(df3['IMP'] == True) & (df3['GENO'] == False) ]\n printInfoImpute(mafLess_0_05_IMP, 'imputed', csvList, '0-0.05%', chrm)\n printInfoImpute(maf_0_05_to_5_IMP, 'imputed', csvList, '0.05-5%', chrm)\n printInfoImpute(maf_5_to_50_IMP, 'imputed', csvList, '5-50%', chrm)\n\n printInfoImpute(df1, 'all', csvList, '0-0.05%', chrm)\n printInfoImpute(df2, 'all', csvList, '0.05-5%', chrm)\n printInfoImpute(df3, 'all', csvList, '5-50%', chrm)\n\n return csvList\n\ndef writeCSV_Impute5(analysisList, chrm):\n with (open('output/chr' + chrm + '/Impute5_analysis_output_tmp.csv', 'w')) as f:\n writer = csv.writer(f)\n writer.writerows(analysisList)\n\ndef combineCSVsImpute5(prefix, outputDir):\n header = \"chr, SNP type, MAF, AvgINFO\"\n runCommandLine(['echo ' + header + ' > ' + outputDir + '/' + prefix + '_imputation_accuracy.csv'])\n runCommandLine(['cat output/chr*/Impute5_analysis_output_tmp.csv >> ' + outputDir + '/' + prefix + '_imputation_accuracy.csv'])\n\ndef cleanUp(prefix, cpus, outputDir):\n print(\"concatenating all chromosomes...\")\n chrList = ''\n for i in range(1, 23):\n if os.path.isfile(\"output/chr\" + str(i) + \"/impute5_all_GENO.tmp.bcf\"):\n chrList = chrList + \"output/chr\" + str(i) + \"/impute5_all_GENO.tmp.bcf \"\n runCommandLine([\"bcftools concat --no-version -Ob --threads \" + cpus + \" -o \" + outputDir + '/' + prefix + \".bcf \" + chrList])\n\n print(\"removing temporary files and directories...\")\n runCommandLine([\"rm beforePhasing/*tmp*\"])\n runCommandLine([\"rm afterPhasing/*tmp*\"])\n runCommandLine([\"rm output/chr*/*tmp*\"])\n\ndef main():\n # Set up argument inputs\n targetFileName = ''\n prefix = ''\n refFileName = ''\n cpus = ''\n regionLength =''\n \n targetFileName, refFileName, cpus, prefix, regionLength = setUp(sys.argv[1:], targetFileName, refFileName, cpus, prefix, regionLength)\n \n with open(refFileName) as f:\n refFileList = f.readlines()\n tempDir = os.path.dirname(sys.argv[0])\n outputDir = os.getcwd()\n os.chdir(tempDir)\n\n checkPrograms()\n splitVCF(targetFileName, cpus)\n\n # Begin working on each chromosome\n for i in range(1, 23):\n chromosome = str(i)\n print('Starting on chromosome ' + chromosome)\n\n sortAndFilterShapeit4(chromosome, cpus)\n prePhaseShapeit4(chromosome, cpus)\n\n refFile = os.path.dirname(refFileName) + \"/\" + refFileList[i-1]\n runImpute5(chromosome, refFile, cpus, regionLength)\n print(\"Successful chromosome \" + chromosome + \" imputation\\n\")\n\n analysisList = compareImpute5(cpus, prefix, outputDir, chromosome)\n writeCSV_Impute5(analysisList, chromosome)\n print('Successful accuracy analysis\\n')\n \n combineCSVsImpute5(prefix, outputDir)\n cleanUp(prefix, cpus, outputDir)\n os.chdir(outputDir)\n\nif __name__ == '__main__':\n 
main()\n\n\n# nohup ../../ImputationAccuracy/Impute5Pipeline.py -t PD_CIDR_phs000126.GRCh38.vcf.gz -r ../../ImputationAccuracy/reference_1000G/impute5Ref/refFileList -c 12 -p outputTesting > output.log &","repo_name":"FischyM/Imputation-Pipeline","sub_path":"Impute5Pipeline.py","file_name":"Impute5Pipeline.py","file_ext":"py","file_size_in_byte":16251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22067324184","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @author: hongyue.pei\n# @file: iris_tf.py\n# @time: 2019/4/28 上午10:24\n# @desc:\n\nimport pandas as pd\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\n\n\ndef norm_x(x):\n x = x - np.mean(x, axis=0)\n x = x / np.max(x, axis=0)\n return x\n\n\ndef iris_tf1(data):\n iris_x = data.values[:, :-1]\n # 归一化\n iris_x = norm_x(iris_x)\n\n c_name = set(data['name'].values)\n iris_y = np.zeros([len(data['name'].values), len(c_name)])\n\n # one hot Y\n len_of_data = []\n for idx, itr_name in enumerate(c_name):\n len_of_data.append(len([iris_y[data.name.values == itr_name]]))\n iris_y[data.name.values == itr_name, idx] = 1\n # print(len(len_of_data))\n # print(iris_y)\n\n x = tf.placeholder(tf.float32, [None, 4], name='input_x')\n label = tf.placeholder(tf.float32, [None, 3], name='input_y')\n\n net = slim.fully_connected(x, 4, activation_fn=tf.nn.relu, scope='full1', reuse=False)\n net = slim.fully_connected(net, 4, activation_fn=tf.nn.relu, scope='full2', reuse=False)\n y = slim.fully_connected(net, 3, activation_fn=tf.nn.sigmoid, scope='full3', reuse=False)\n\n loss = tf.reduce_mean(tf.square(y - label))\n\n correct_predication = tf.equal(tf.argmax(y, 1), tf.argmax(label, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_predication, tf.float32))\n\n train_step = tf.train.GradientDescentOptimizer(0.6).minimize(loss)\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n for itr in range(600):\n sess.run(train_step, feed_dict={x: iris_x[:100], label: iris_y[:100]})\n if itr % 30 == 0:\n acc = sess.run(accuracy, feed_dict={x: iris_x[:100], label: iris_y[:100]})\n print(\"step:{:6d} accuracy:{:.3f}\".format(itr, acc))\n\n\ndef iris_tf2(data):\n c_name = set(data.name.values)\n iris_data = data.values[:, :-1]\n iris_data = norm_x(iris_data)\n iris_label = np.zeros([len(data.name.values), len(c_name)])\n\n train_data = []\n train_data_label = []\n test_data = []\n test_data_label = []\n\n for idx, itr_name in enumerate(c_name):\n data_t = iris_data[data.name.values==itr_name, :]\n label_t = np.zeros([len(data_t), len(c_name)])\n label_t[:, idx] = 1\n train_data.append(data_t[:30])\n train_data_label.append(label_t[:30])\n test_data.append(data_t[30:])\n test_data_label.append(label_t[30:])\n\n train_data = np.concatenate(train_data)\n train_data_label = np.concatenate(train_data_label)\n test_data = np.concatenate(test_data)\n test_data_label = np.concatenate(test_data_label)\n\n x = tf.placeholder(tf.float32, [None, 4], name='input_x')\n label = tf.placeholder(tf.float32, [None, 3], name='input_y')\n\n net = slim.fully_connected(x, 4, activation_fn=tf.nn.relu)\n net = tf.contrib.layers.batch_norm(net)\n net = slim.fully_connected(net, 8, activation_fn=tf.nn.relu)\n net = tf.contrib.layers.batch_norm(net)\n net = slim.fully_connected(net, 8, activation_fn=tf.nn.relu)\n net = tf.contrib.layers.batch_norm(net)\n net = slim.fully_connected(net, 4, activation_fn=tf.nn.relu)\n net = tf.contrib.layers.batch_norm(net)\n y = 
slim.fully_connected(net, 3, activation_fn=tf.nn.softmax)\n\n loss = tf.reduce_sum(- label * tf.log(y), axis=1)\n loss = tf.reduce_mean(loss)\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(label, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n opt = tf.train.GradientDescentOptimizer(0.1)\n\n var_list_w = [var for var in tf.trainable_variables() if 'w' in var.name]\n var_list_b = [var for var in tf.trainable_variables() if 'b' in var.name]\n\n gradient_w = opt.compute_gradients(loss, var_list=var_list_w)\n gradient_b = opt.compute_gradients(loss, var_list=var_list_b)\n\n train_step = opt.apply_gradients(gradient_w + gradient_b)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n for itr in range(600):\n sess.run(train_step, feed_dict={x: train_data, label: train_data_label})\n if itr % 30 == 0:\n acc1 = sess.run(accuracy, feed_dict={x: train_data, label: train_data_label})\n acc2 = sess.run(accuracy, feed_dict={x: test_data, label: test_data_label})\n print(\"step:{:6d} train:{:.3f} test:{:.3f}\".format(itr, acc1, acc2))\n\nif __name__ == '__main__':\n data = pd.read_csv('./data/iris.data.csv')\n iris_tf2(data)\n\n\n\n\n \n\n","repo_name":"MiniBee/AIE16","sub_path":"homework/0420_feature_engineering&TF/iris_tf.py","file_name":"iris_tf.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26597948699","text":"import plottingFunctions as pf\n\"\"\"\nShows the dominant emotions as two datasets\n\"\"\"\n\n#Data directories\ncomData = r\"C:\\Users\\Willi\\OneDrive\\Desktop\\Manual_data_analysis\\Complete_data\"\n\n#Load data\ndata_C = pf.loadData(comData)\n\n#Create the data segments and their corrisponding values\ndata_C_segments = pf.getSegments(data_C)\ndata_C_values = pf.getSegmentValues(data_C)\n\n#With multiple datasets must be added to a list\nsegmentLists = []\nsegmnetValuesList = []\n\nsegmentLists.append(data_C_segments)\nsegmnetValuesList.append(data_C_values)\n\n#Name for each dataset\ndataNames = [\"Complete Data\"]\n\n#plot data\npf.graph1D(segmentLists, segmnetValuesList, dataNames)\n","repo_name":"gianttomatopile/FER-Video-Analysis","sub_path":"ManualExpression/plotMain.py","file_name":"plotMain.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24455473904","text":"'''\nAuthor: Fredy H Lopez (found in discussions\nCreated: nov 7, 2022\nStyle: Python 3\n\n---------------------------- Solution ----------------------------\nDebug the follwoing code.\nThe following is the answer which is removing un-needed ; at the end on lines\nAdding a += to the if and else statement\n'''\ndef strings_xor(s, t):\n res = \"\"\n for i in range(len(s)):\n if s[i] == t[i]:\n res += '0'\n else:\n res += '1'\n\n return res\n\ns = input()\nt = input()\nprint(strings_xor(s, t))\n\n\n\n","repo_name":"Fredylx/Hacker-rank-practice","sub_path":"XOR Strings 2.py","file_name":"XOR Strings 2.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"19162390493","text":"import os\nfrom pathlib import Path\n\nimport click\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom tqdm import tqdm\n\ntransform = transforms.Compose(\n [\n 
transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)), # mean and std of mnist\n ]\n)\n# store data in the configured cahe which should be a volume\ncache_path = Path(os.environ[\"CACHE_PATH\"])\n\n\n@click.command()\n@click.option(\"--epochs\", \"-e\", default=3, type=int, help=\"Number of epochs to run.\")\n@click.option(\"--batch-size\", \"-b\", default=2048, type=int, help=\"Training batch size.\")\n@click.option(\"--num-workers\", \"-w\", default=16, type=int, help=\"workers to load data.\")\n@click.option(\n \"--learning-rate\", \"-lr\", default=0.001, type=float, help=\"learning rate for SGD\"\n)\n@click.option(\"--momentum\", \"-m\", default=0.9, type=float, help=\"momentum for SGD\")\ndef train(epochs, batch_size, num_workers, learning_rate, momentum):\n train_set = torchvision.datasets.MNIST(\n root=cache_path / \"data\", train=True, download=True, transform=transform\n )\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers\n )\n\n test_set = torchvision.datasets.MNIST(\n root=cache_path / \"data\", train=False, download=True, transform=transform\n )\n test_loader = torch.utils.data.DataLoader(\n test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers\n )\n model = models.resnet18(pretrained=False, num_classes=10)\n # for mnist we hace a sinle input channel, the rest should be the same as resnet18\n # TODO: get the values from the model.conv1 before overwiting it\n model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)\n\n trainer = Trainer(\n epochs=epochs,\n train_loader=train_loader,\n test_loader=test_loader,\n model=model,\n optimizer=optimizer,\n criterion=criterion,\n device=torch.device(\"cuda\"),\n )\n trainer.run()\n\n print(\"Finished Training\")\n\n\nclass Trainer:\n def __init__(\n self,\n epochs,\n train_loader,\n test_loader,\n model,\n optimizer,\n criterion,\n device,\n ) -> None:\n self.epochs = epochs\n self.device = device\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.model = model\n self.model.to(self.device)\n self.optimizer = optimizer\n self.criterion = criterion\n\n def run(self):\n self.test()\n\n for epoch in range(self.epochs):\n self.train_epoch(epoch)\n self.test(epoch)\n\n def test(self, epoch=-1):\n self.model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for (inputs, labels) in self._dl_progres(self.test_loader, desc=\"test\"):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n output = self.model(inputs)\n test_loss += self.criterion(output, labels).item() # sum up batch loss\n pred = output.argmax(\n dim=1, keepdim=True\n ) # get the index of the max log-probability\n correct += pred.eq(labels.view_as(pred)).sum().item()\n\n test_loss /= len(self.test_loader.dataset)\n accuracy = correct / len(self.test_loader.dataset)\n\n print(\n f\"TEST after epoc {epoch}: Average loss: {test_loss:.4f}, Accuracy: {100.0 * accuracy:.0f}%)\\n\"\n )\n\n def _dl_progres(self, data_loader, desc):\n return tqdm(\n data_loader,\n total=len(data_loader),\n desc=desc,\n leave=True,\n )\n\n def train_epoch(self, epoch):\n self.model.train()\n\n for i, (inputs, labels) in enumerate(\n self._dl_progres(self.train_loader, desc=f\"epoc {epoch}\")\n ):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n\n # zero the parameter gradients\n 
self.optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = self.model(inputs)\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n\n\nif __name__ == \"__main__\":\n train()\n","repo_name":"javiber/k8s-demo","sub_path":"train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8138434244","text":"import numpy as np\nimport pandas as pd\nimport dash\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport ai4good.webapp.common_elements as common_elements\n\nthankyou_msg = 'Thank you for using AI for Good Simulator for COVID-19'\nlogout_msg = 'Are you sure to logout?'\nprogress_msg = 'You may log out and come back for the simulation results later'\n\nlayout = html.Div(\n [\n common_elements.nav_bar('landing'),\n html.Div([\n dbc.Container([\n dbc.Row(\n dbc.Col(\n dbc.Card([\n html.H4('Logout', className='card-title'),\n html.P(thankyou_msg, className='card-text'),\n html.P(logout_msg,className='card-text'),\n html.Form([\n dbc.CardFooter(\n html.Div([\n dbc.Nav([\n dbc.NavLink('Simulation in progress?', id='logout-sim-progress', href='#')\n ]), \n dbc.Tooltip(progress_msg, target='logout-sim-progress'),\n html.P(''), \n html.Button('Logout', id='logout-submit-button', type='submit', disabled=False, className='mr-1', style={'float':'right'}), \n ], style={'display':'grid', 'grid-template-columns':'50% 20% 30%'}), \n ), \n ], action='/logout/'), \n html.Div(id='logout-alert')\n ], body=True\n ), width=6\n ), justify='center'\n )\n ])\n ], style={'padding':'100px'})\n ]\n)\n","repo_name":"AIforGoodSimulator/model-server","sub_path":"ai4good/webapp/logout_page.py","file_name":"logout_page.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"7858779241","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('home/', views.allPosts, name=\"allposts\"),\n path('home/cat/', views.categoryPosts, name=\"categoryPosts\"),\n path('home/tag/', views.tagPosts, name=\"tagPosts\"),\n path('post/', views.showPost, name=\"post\"),\n path('editpost/', views.postEdit, name=\"editpost\"),\n path('deletepost/', views.postDelete, name=\"deletepost\"),\n path('like/', views.likePost, name='like-post'),\n path('dislike/', views.dislikePost, name='dislike-post'),\n path('add_cat/', views.add_cat, name=\"add_cat\"),\n path('add_tag/', views.add_tag, name=\"add_tag\"),\n]\n","repo_name":"moutazmuhammad/python_django_blog","sub_path":"cloudBlog/post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11011843746","text":"#!/usr/bin/env python3\nimport os\nimport sys\n\nfrom urllib.parse import parse_qs\nquery = parse_qs(os.environ.get('QUERY_STRING', ''), keep_blank_values=True)\n\nsys.stdout.write(\n 'Content-Type: text/html\\r\\n'\n 'status: {code}\\r\\n'\n 'Location: {url}\\r\\n'\n '\\r\\n'.format(\n code=query.get('code', [200])[0],\n url=query.get('url', [''])[0],\n))\n","repo_name":"WebKit/WebKit","sub_path":"LayoutTests/http/tests/xmlhttprequest/resources/redirect_methods.py","file_name":"redirect_methods.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"71467141840","text":"import json\nimport pathlib\nfrom pprint import pprint\n\nfile_path = pathlib.Path(__file__).parent / \"driver ratings.json\"\n\nwith open(file_path, \"r\") as file:\n data = json.load(file)\n\ntop_ratings = []\n\nfor driver, details in data.items():\n top_ratings.append(\n (round(max(details['rating history'].values()), 2), details[\"name\"])\n )\npprint(sorted(top_ratings)[-30:])\n","repo_name":"dfamonteiro/blog","sub_path":"f1-analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70074650962","text":"from behave import given, then, when\nfrom typing import Union\n\nfrom algosdk import account, encoding, logic\nfrom algosdk.future import transaction\n\nimport test.steps.other_v2_steps\n\n\ndef fund_account_address(\n context, account_address: str, amount: Union[int, str]\n):\n sp = context.app_acl.suggested_params()\n payment = transaction.PaymentTxn(\n context.accounts[0],\n sp,\n account_address,\n int(amount),\n )\n signed_payment = context.wallet.sign_transaction(payment)\n context.app_acl.send_transaction(signed_payment)\n transaction.wait_for_confirmation(context.app_acl, payment.get_txid(), 10)\n\n\n@when(\n 'we make an Account Information call against account \"{account}\" with exclude \"{exclude:MaybeString}\"'\n)\ndef acc_info(context, account, exclude):\n context.response = context.acl.account_info(account, exclude=exclude)\n\n\n@when('we make an Account Information call against account \"{account}\"')\ndef acc_info2(context, account):\n context.response = context.acl.account_info(account)\n\n\n@then(\n 'The account has {num} assets, the first is asset {index} has a frozen status of \"{frozen}\" and amount {units}.'\n)\ndef lookup_account_check(context, num, index, frozen, units):\n assert len(context.response[\"account\"][\"assets\"]) == int(num)\n assert context.response[\"account\"][\"assets\"][0][\"asset-id\"] 
== int(index)\n assert context.response[\"account\"][\"assets\"][0][\"is-frozen\"] == (\n frozen == \"true\"\n )\n assert context.response[\"account\"][\"assets\"][0][\"amount\"] == int(units)\n\n\n@then(\n 'The account created {num} assets, the first is asset {index} is named \"{name}\" with a total amount of {total} \"{unit}\"'\n)\ndef lookup_account_check_created(context, num, index, name, total, unit):\n assert len(context.response[\"account\"][\"created-assets\"]) == int(num)\n assert context.response[\"account\"][\"created-assets\"][0][\"index\"] == int(\n index\n )\n assert (\n context.response[\"account\"][\"created-assets\"][0][\"params\"][\"name\"]\n == name\n )\n assert (\n context.response[\"account\"][\"created-assets\"][0][\"params\"][\"unit-name\"]\n == unit\n )\n assert context.response[\"account\"][\"created-assets\"][0][\"params\"][\n \"total\"\n ] == int(total)\n\n\n@then(\n \"The account has {μalgos} μalgos and {num} assets, {assetid} has {assetamount}\"\n)\ndef lookup_account_check_holdings(context, μalgos, num, assetid, assetamount):\n assert context.response[\"account\"][\"amount\"] == int(μalgos)\n assert len(context.response[\"account\"].get(\"assets\", [])) == int(num)\n if int(num) > 0:\n assets = context.response[\"account\"][\"assets\"]\n for a in assets:\n if a[\"asset-id\"] == int(assetid):\n assert a[\"amount\"] == int(assetamount)\n\n\n@when('I use {indexer} to lookup account \"{account}\" at round {round}')\ndef icl_lookup_account_at_round(context, indexer, account, round):\n context.response = context.icls[indexer].account_info(account, int(round))\n\n\n@when(\n 'we make a Lookup Account by ID call against account \"{account}\" with round {block}'\n)\ndef lookup_account(context, account, block):\n context.response = context.icl.account_info(account, int(block))\n\n\n@when(\n 'we make a Lookup Account by ID call against account \"{account}\" with exclude \"{exclude:MaybeString}\"'\n)\ndef lookup_account(context, account, exclude):\n context.response = context.icl.account_info(account, exclude=exclude)\n\n\n@when(\"we make any LookupAccountByID call\")\ndef lookup_account_any(context):\n context.response = context.icl.account_info(\n \"PNWOET7LLOWMBMLE4KOCELCX6X3D3Q4H2Q4QJASYIEOF7YIPPQBG3YQ5YI\", 12\n )\n\n\n@then('the parsed LookupAccountByID response should have address \"{address}\"')\ndef parse_account(context, address):\n assert context.response[\"account\"][\"address\"] == address\n\n\n@when(\"we make any Account Information call\")\ndef acc_info_any(context):\n context.response = context.acl.account_info(\n \"PNWOET7LLOWMBMLE4KOCELCX6X3D3Q4H2Q4QJASYIEOF7YIPPQBG3YQ5YI\"\n )\n\n\n@then(\n 'the parsed Account Information response should have address \"{address}\"'\n)\ndef parse_acc_info(context, address):\n assert context.response[\"address\"] == address\n\n\n@when(\n 'we make an Account Asset Information call against account \"{account}\" assetID {assetID}'\n)\ndef acc_asset_info(context, account, assetID):\n context.response = context.acl.account_asset_info(account, assetID)\n\n\n@when(\n 'we make an Account Application Information call against account \"{account}\" applicationID {applicationID}'\n)\ndef acc_application_info(context, account, applicationID):\n context.response = context.acl.account_application_info(\n account, applicationID\n )\n\n\n@when(\n 'we make a LookupAccountAssets call with accountID \"{account}\" assetID {asset_id} includeAll \"{includeAll:MaybeBool}\" limit {limit} next \"{next:MaybeString}\"'\n)\ndef 
lookup_account_assets(context, account, asset_id, includeAll, limit, next):\n context.response = context.icl.lookup_account_assets(\n account,\n asset_id=int(asset_id),\n include_all=includeAll,\n limit=int(limit),\n next_page=next,\n )\n\n\n@when(\n 'we make a LookupAccountCreatedAssets call with accountID \"{account}\" assetID {asset_id} includeAll \"{includeAll:MaybeBool}\" limit {limit} next \"{next:MaybeString}\"'\n)\ndef lookup_account_created_assets(\n context, account, asset_id, includeAll, limit, next\n):\n context.response = context.icl.lookup_account_asset_by_creator(\n account,\n asset_id=int(asset_id),\n include_all=includeAll,\n limit=int(limit),\n next_page=next,\n )\n\n\n@when(\n 'we make a LookupAccountAppLocalStates call with accountID \"{account}\" applicationID {application_id} includeAll \"{includeAll:MaybeBool}\" limit {limit} next \"{next:MaybeString}\"'\n)\ndef lookup_account_applications(\n context, account, application_id, includeAll, limit, next\n):\n context.response = context.icl.lookup_account_application_local_state(\n account,\n application_id=int(application_id),\n include_all=includeAll,\n limit=int(limit),\n next_page=next,\n )\n\n\n@when(\n 'we make a LookupAccountCreatedApplications call with accountID \"{account}\" applicationID {application_id} includeAll \"{includeAll:MaybeBool}\" limit {limit} next \"{next:MaybeString}\"'\n)\ndef lookup_account_created_applications(\n context, account, application_id, includeAll, limit, next\n):\n context.response = context.icl.lookup_account_application_by_creator(\n account,\n application_id=int(application_id),\n include_all=includeAll,\n limit=int(limit),\n next_page=next,\n )\n\n\n@then(\n 'the parsed LookupAssetBalances response should be valid on round {roundNum}, and contain an array of len {length} and element number {idx} should have address \"{address}\" amount {amount} and frozen state \"{frozenState}\"'\n)\ndef parse_asset_balance(\n context, roundNum, length, idx, address, amount, frozenState\n):\n assert context.response[\"current-round\"] == int(roundNum)\n assert len(context.response[\"balances\"]) == int(length)\n assert context.response[\"balances\"][int(idx)][\"address\"] == address\n assert context.response[\"balances\"][int(idx)][\"amount\"] == int(amount)\n assert context.response[\"balances\"][int(idx)][\"is-frozen\"] == (\n frozenState == \"true\"\n )\n\n\n@when(\n \"we make a Search Accounts call with assetID {index} limit {limit} currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} and round {block}\"\n)\ndef search_accounts(\n context, index, limit, currencyGreaterThan, currencyLessThan, block\n):\n context.response = context.icl.accounts(\n asset_id=int(index),\n limit=int(limit),\n next_page=None,\n min_balance=int(currencyGreaterThan),\n max_balance=int(currencyLessThan),\n block=int(block),\n )\n\n\n@when(\n 'we make a Search Accounts call with assetID {index} limit {limit} currencyGreaterThan {currencyGreaterThan} currencyLessThan {currencyLessThan} round {block} and authenticating address \"{authAddr:MaybeString}\"'\n)\ndef search_accounts2(\n context,\n index,\n limit,\n currencyGreaterThan,\n currencyLessThan,\n block,\n authAddr,\n):\n if authAddr == \"none\":\n authAddr = None\n context.response = context.icl.accounts(\n asset_id=int(index),\n limit=int(limit),\n next_page=None,\n min_balance=int(currencyGreaterThan),\n max_balance=int(currencyLessThan),\n block=int(block),\n auth_addr=authAddr,\n )\n\n\n@when('we make a Search Accounts call with exclude 
\"{exclude:MaybeString}\"')\ndef search_accounts3(\n context,\n exclude,\n):\n context.response = context.icl.accounts(exclude=exclude)\n\n\n@when(\n 'I use {indexer} to search for an account with {assetid}, {limit}, {currencygt}, {currencylt}, \"{auth_addr:MaybeString}\", {application_id}, \"{include_all:MaybeBool}\" and token \"{token:MaybeString}\"'\n)\ndef icl_search_accounts_with_auth_addr_and_app_id_and_include_all(\n context,\n indexer,\n assetid,\n limit,\n currencygt,\n currencylt,\n auth_addr,\n application_id,\n include_all,\n token,\n):\n context.response = context.icls[indexer].accounts(\n asset_id=int(assetid),\n limit=int(limit),\n next_page=token,\n min_balance=int(currencygt),\n max_balance=int(currencylt),\n auth_addr=auth_addr,\n application_id=int(application_id),\n include_all=include_all,\n )\n\n\n@when(\n 'I use {indexer} to search for an account with {assetid}, {limit}, {currencygt}, {currencylt}, \"{auth_addr:MaybeString}\", {application_id} and token \"{token:MaybeString}\"'\n)\ndef icl_search_accounts_with_auth_addr_and_app_id(\n context,\n indexer,\n assetid,\n limit,\n currencygt,\n currencylt,\n auth_addr,\n application_id,\n token,\n):\n context.response = context.icls[indexer].accounts(\n asset_id=int(assetid),\n limit=int(limit),\n next_page=token,\n min_balance=int(currencygt),\n max_balance=int(currencylt),\n auth_addr=auth_addr,\n application_id=int(application_id),\n )\n\n\n@when(\n 'I use {indexer} to search for an account with {assetid}, {limit}, {currencygt}, {currencylt} and token \"{token:MaybeString}\"'\n)\ndef icl_search_accounts_legacy(\n context, indexer, assetid, limit, currencygt, currencylt, token\n):\n context.response = context.icls[indexer].accounts(\n asset_id=int(assetid),\n limit=int(limit),\n next_page=token,\n min_balance=int(currencygt),\n max_balance=int(currencylt),\n )\n\n\n@then(\n \"I get the next page using {indexer} to search for an account with {assetid}, {limit}, {currencygt} and {currencylt}\"\n)\ndef search_accounts_nex(\n context, indexer, assetid, limit, currencygt, currencylt\n):\n context.response = context.icls[indexer].accounts(\n asset_id=int(assetid),\n limit=int(limit),\n min_balance=int(currencygt),\n max_balance=int(currencylt),\n next_page=context.response[\"next-token\"],\n )\n\n\n@then(\n 'There are {num}, the first has {pendingrewards}, {rewardsbase}, {rewards}, {withoutrewards}, \"{address}\", {amount}, \"{status}\", \"{sigtype:MaybeString}\"'\n)\ndef check_search_accounts(\n context,\n num,\n pendingrewards,\n rewardsbase,\n rewards,\n withoutrewards,\n address,\n amount,\n status,\n sigtype,\n):\n assert len(context.response[\"accounts\"]) == int(num)\n assert context.response[\"accounts\"][0][\"pending-rewards\"] == int(\n pendingrewards\n )\n assert context.response[\"accounts\"][0].get(\"rewards-base\", 0) == int(\n rewardsbase\n )\n assert context.response[\"accounts\"][0][\"rewards\"] == int(rewards)\n assert context.response[\"accounts\"][0][\n \"amount-without-pending-rewards\"\n ] == int(withoutrewards)\n assert context.response[\"accounts\"][0][\"address\"] == address\n assert context.response[\"accounts\"][0][\"amount\"] == int(amount)\n assert context.response[\"accounts\"][0][\"status\"] == status\n assert context.response[\"accounts\"][0].get(\"sig-type\", \"\") == sigtype\n\n\n@then(\n 'The first account is online and has \"{address}\", {keydilution}, {firstvalid}, {lastvalid}, \"{votekey}\", \"{selectionkey}\"'\n)\ndef check_search_accounts_online(\n context, address, keydilution, 
firstvalid, lastvalid, votekey, selectionkey\n):\n assert context.response[\"accounts\"][0][\"status\"] == \"Online\"\n assert context.response[\"accounts\"][0][\"address\"] == address\n assert context.response[\"accounts\"][0][\"participation\"][\n \"vote-key-dilution\"\n ] == int(keydilution)\n assert context.response[\"accounts\"][0][\"participation\"][\n \"vote-first-valid\"\n ] == int(firstvalid)\n assert context.response[\"accounts\"][0][\"participation\"][\n \"vote-last-valid\"\n ] == int(lastvalid)\n assert (\n context.response[\"accounts\"][0][\"participation\"][\n \"vote-participation-key\"\n ]\n == votekey\n )\n assert (\n context.response[\"accounts\"][0][\"participation\"][\n \"selection-participation-key\"\n ]\n == selectionkey\n )\n\n\n@when(\"we make any SearchAccounts call\")\ndef search_accounts_any(context):\n context.response = context.icl.accounts(asset_id=2)\n\n\n@then(\n 'the parsed SearchAccounts response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have address \"{address}\"'\n)\ndef parse_accounts(context, roundNum, length, index, address):\n assert context.response[\"current-round\"] == int(roundNum)\n assert len(context.response[\"accounts\"]) == int(length)\n if int(length) > 0:\n assert context.response[\"accounts\"][int(index)][\"address\"] == address\n\n\n@when(\n 'the parsed SearchAccounts response should be valid on round {roundNum} and the array should be of len {length} and the element at index {index} should have authorizing address \"{authAddr:MaybeString}\"'\n)\ndef parse_accounts_auth(context, roundNum, length, index, authAddr):\n assert context.response[\"current-round\"] == int(roundNum)\n assert len(context.response[\"accounts\"]) == int(length)\n if int(length) > 0:\n assert (\n context.response[\"accounts\"][int(index)][\"auth-addr\"] == authAddr\n )\n\n\n@given('a signing account with address \"{address}\" and mnemonic \"{mnemonic}\"')\ndef signing_account(context, address, mnemonic):\n context.signing_mnemonic = mnemonic\n\n\n@given(\n \"I create a new transient account and fund it with {transient_fund_amount} microalgos.\"\n)\ndef create_transient_and_fund(context, transient_fund_amount):\n context.transient_sk, context.transient_pk = account.generate_account()\n sp = context.app_acl.suggested_params()\n payment = transaction.PaymentTxn(\n context.accounts[0],\n sp,\n context.transient_pk,\n int(transient_fund_amount),\n )\n signed_payment = context.wallet.sign_transaction(payment)\n context.app_acl.send_transaction(signed_payment)\n transaction.wait_for_confirmation(context.app_acl, payment.get_txid(), 10)\n\n\n@then(\n \"I get the account address for the current application and see that it matches the app id's hash\"\n)\ndef assert_app_account_is_the_hash(context):\n app_id = context.current_application_id\n expected = encoding.encode_address(\n encoding.checksum(b\"appID\" + app_id.to_bytes(8, \"big\"))\n )\n actual = logic.get_application_address(app_id)\n assert (\n expected == actual\n ), f\"account-address: expected [{expected}], but got [{actual}]\"\n\n\n@given(\n \"I fund the current application's address with {fund_amount} microalgos.\"\n)\ndef fund_app_account(context, fund_amount):\n fund_account_address(\n context,\n logic.get_application_address(context.current_application_id),\n fund_amount,\n 
)\n","repo_name":"AlgoBloom/DAO-Helper-Identity","sub_path":"identity/venv/lib/python3.10/site-packages/test/steps/account_v2_steps.py","file_name":"account_v2_steps.py","file_ext":"py","file_size_in_byte":15897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71359978960","text":"from urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\nimport ssl\r\n\r\nctx = ssl.create_default_context()\r\nctx.check_hostname = False\r\nctx.verify_mode = ssl.CERT_NONE\r\n\r\nurl = 'http://py4e-data.dr-chuck.net/known_by_Hadia.html'\r\npos = 18\r\ntmz = 7\r\n\r\nfor i in range(tmz):\r\n html = urlopen(url, context=ctx).read()\r\n soup = BeautifulSoup(html, \"html.parser\")\r\n\r\n span = soup('a')\r\n count = 0\r\n for tags in span:\r\n count += 1\r\n\r\n if count > pos:\r\n break\r\n\r\n url = tags.get('href', None)\r\nprint(url)\r\n\r\n","repo_name":"jsmugs/Python-Web-Data","sub_path":"webscrape.py","file_name":"webscrape.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30370162437","text":"\n\"\"\"\n @project LeManchot-Analysis : Core components\n @organization Laval University\n @lab MiViM Lab\n @supervisor Professor Xavier Maldague\n @industrial-partner TORNGATS\n\"\"\"\n\nfrom typing import Dict\n\nfrom gimp_labeling_converter import XCFDataset\n\nclass RepetitiveDatasetWrapper(XCFDataset):\n def __init__(\n self,\n root_dir: str,\n category: Dict[str, int],\n transform=None,\n target_transform=None,\n iteration : int = 1\n ) -> None:\n super().__init__(root_dir, category, transform, target_transform)\n self.iteration = iteration\n\n @property\n def wrapped_dataset(self):\n return self.dataset_\n\n def __len__(self):\n return super().__len__() * self.iteration\n\n def __getitem__(self, idx):\n super().__getitem__(idx % self.actual_size)","repo_name":"parham/thermal-segmentor","sub_path":"lemanchot/dataset/repetitive.py","file_name":"repetitive.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"42610628220","text":"import sys\n\n# Part 1\ndef twosum_mul(nums, target):\n \"\"\"\n Returns the product of the two numbers in `nums` that sum up to the target,\n returns None if they do not exist.\n \"\"\"\n\n seen = set()\n for n in nums:\n comp = target - n\n if comp in seen:\n return comp * n\n seen.add(n)\n\n\n# Part 2\ndef threesum_mul(nums, target):\n \"\"\"\n Returns the product of the three numbers in `nums` that sum up to the\n target, returns None if they do not exist.\n \"\"\"\n\n for n in nums:\n twos_target = target - n\n twos_product = twosum_mul(nums, twos_target)\n if twos_product:\n return n * twos_product\n\n\ndef main():\n assert len(sys.argv) > 1, 'Missing argument: path to input file'\n assert len(sys.argv) < 3, 'Too many arguments'\n input_file = sys.argv[1]\n\n with open(input_file, 'r') as f:\n nums = [int(n) for n in f.read().splitlines()]\n\n # Solve part 1\n part1 = twosum_mul(nums, 2020)\n print('\\nPart 1:', part1)\n\n # Solve part 2\n part2 = threesum_mul(nums, 2020)\n print('Part 2:', part2)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yuzhoumo/advent-of-code","sub_path":"2020/day01/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19231033912","text":"from 
__future__ import print_function\n\n# Stdlib imports\nimport glob\nimport inspect\nimport os\nimport re\nimport sys\n\n# Third-party imports\nfrom time import time\nfrom zipimport import zipimporter\n\n# Our own imports\nfrom IPython.core.completer import expand_user, compress_user\nfrom IPython.core.error import TryNext\nfrom IPython.utils import py3compat\nfrom IPython.utils._process_common import arg_split\n\n# FIXME: this should be pulled in with the right call via the component system\nfrom IPython.core.ipapi import get as get_ipython\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n# Time in seconds after which the rootmodules will be stored permanently in the\n# ipython ip.db database (kept in the user's .ipython dir).\nTIMEOUT_STORAGE = 2\n\n# Time in seconds after which we give up\nTIMEOUT_GIVEUP = 20\n\n# Regular expression for the python import statement\nimport_re = re.compile(r'.*(\\.so|\\.py[cod]?)$')\n\n# RE for the ipython %run command (python + ipython scripts)\nmagic_run_re = re.compile(r'.*(\\.ipy|\\.py[w]?)$')\n\n#-----------------------------------------------------------------------------\n# Local utilities\n#-----------------------------------------------------------------------------\n\ndef module_list(path):\n \"\"\"\n Return the list containing the names of the modules available in the given\n folder.\n \"\"\"\n # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'\n if path == '':\n path = '.'\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n\n if not folder_list:\n return []\n\n # A few local constants to be used in loops below\n isfile = os.path.isfile\n pjoin = os.path.join\n basename = os.path.basename\n\n def is_importable_file(path):\n \"\"\"Returns True if the provided path is a valid importable module\"\"\"\n name, extension = os.path.splitext( path )\n return import_re.match(path) and py3compat.isidentifier(name)\n\n # Now find actual path matches for packages or modules\n folder_list = [p for p in folder_list\n if isfile(pjoin(path, p,'__init__.py'))\n or is_importable_file(p) ]\n\n return [basename(p).split('.')[0] for p in folder_list]\n\ndef get_root_modules():\n \"\"\"\n Returns a list containing the names of all the modules available in the\n folders of the pythonpath.\n \"\"\"\n ip = get_ipython()\n\n if 'rootmodules' in ip.db:\n return ip.db['rootmodules']\n\n t = time()\n store = False\n modules = list(sys.builtin_module_names)\n for path in sys.path:\n modules += module_list(path)\n if time() - t >= TIMEOUT_STORAGE and not store:\n store = True\n print(\"\\nCaching the list of root modules, please wait!\")\n print(\"(This will only be done once - type '%rehashx' to \"\n \"reset cache!)\\n\")\n sys.stdout.flush()\n if time() - t > TIMEOUT_GIVEUP:\n print(\"This is taking too long, we give up.\\n\")\n ip.db['rootmodules'] = []\n return []\n\n modules = set(modules)\n if '__init__' in modules:\n modules.remove('__init__')\n modules = list(modules)\n if store:\n ip.db['rootmodules'] = modules\n return modules\n\n\ndef is_importable(module, attr, only_modules):\n if only_modules:\n return inspect.ismodule(getattr(module, attr))\n else:\n return not(attr[:2] == '__' and attr[-2:] == '__')\n\n\ndef try_import(mod, 
only_modules=False):\n try:\n m = __import__(mod)\n except:\n return []\n mods = mod.split('.')\n for module in mods[1:]:\n m = getattr(m, module)\n\n m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__\n\n completions = []\n if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:\n completions.extend( [attr for attr in dir(m) if\n is_importable(m, attr, only_modules)])\n\n completions.extend(getattr(m, '__all__', []))\n if m_is_init:\n completions.extend(module_list(os.path.dirname(m.__file__)))\n completions = set(completions)\n if '__init__' in completions:\n completions.remove('__init__')\n return list(completions)\n\n\n#-----------------------------------------------------------------------------\n# Completion-related functions.\n#-----------------------------------------------------------------------------\n\ndef quick_completer(cmd, completions):\n \"\"\" Easily create a trivial completer for a command.\n\n Takes either a list of completions, or all completions in string (that will\n be split on whitespace).\n\n Example::\n\n [d:\\ipython]|1> import ipy_completers\n [d:\\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])\n [d:\\ipython]|3> foo b\n bar baz\n [d:\\ipython]|3> foo ba\n \"\"\"\n\n if isinstance(completions, basestring):\n completions = completions.split()\n\n def do_complete(self, event):\n return completions\n\n get_ipython().set_hook('complete_command',do_complete, str_key = cmd)\n\ndef module_completion(line):\n \"\"\"\n Returns a list containing the completion possibilities for an import line.\n\n The line looks like this :\n 'import xml.d'\n 'from xml.dom import'\n \"\"\"\n\n words = line.split(' ')\n nwords = len(words)\n\n # from whatever -> 'import '\n if nwords == 3 and words[0] == 'from':\n return ['import ']\n\n # 'from xy' or 'import xy'\n if nwords < 3 and (words[0] in ['import','from']) :\n if nwords == 1:\n return get_root_modules()\n mod = words[1].split('.')\n if len(mod) < 2:\n return get_root_modules()\n completion_list = try_import('.'.join(mod[:-1]), True)\n return ['.'.join(mod[:-1] + [el]) for el in completion_list]\n\n # 'from xyz import abc'\n if nwords >= 3 and words[0] == 'from':\n mod = words[1]\n return try_import(mod)\n\n#-----------------------------------------------------------------------------\n# Completers\n#-----------------------------------------------------------------------------\n# These all have the func(self, event) signature to be used as custom\n# completers\n\ndef module_completer(self,event):\n \"\"\"Give completions after user has typed 'import ...' or 'from ...'\"\"\"\n\n # This works in all versions of python. While 2.5 has\n # pkgutil.walk_packages(), that particular routine is fairly dangerous,\n # since it imports *EVERYTHING* on sys.path. 
That is: a) very slow b) full\n # of possibly problematic side effects.\n # This search the folders in the sys.path for available modules.\n\n return module_completion(event.line)\n\n# FIXME: there's a lot of logic common to the run, cd and builtin file\n# completers, that is currently reimplemented in each.\n\ndef magic_run_completer(self, event):\n \"\"\"Complete files that end in .py or .ipy for the %run command.\n \"\"\"\n comps = arg_split(event.line, strict=False)\n relpath = (len(comps) > 1 and comps[-1] or '').strip(\"'\\\"\")\n\n #print(\"\\nev=\", event) # dbg\n #print(\"rp=\", relpath) # dbg\n #print('comps=', comps) # dbg\n\n lglob = glob.glob\n isdir = os.path.isdir\n relpath, tilde_expand, tilde_val = expand_user(relpath)\n\n dirs = [f.replace('\\\\','/') + \"/\" for f in lglob(relpath+'*') if isdir(f)]\n\n # Find if the user has already typed the first filename, after which we\n # should complete on all files, since after the first one other files may\n # be arguments to the input script.\n\n if filter(magic_run_re.match, comps):\n pys = [f.replace('\\\\','/') for f in lglob('*')]\n else:\n pys = [f.replace('\\\\','/')\n for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +\n lglob(relpath + '*.pyw')]\n #print('run comp:', dirs+pys) # dbg\n return [compress_user(p, tilde_expand, tilde_val) for p in dirs+pys]\n\n\ndef cd_completer(self, event):\n \"\"\"Completer function for cd, which only returns directories.\"\"\"\n ip = get_ipython()\n relpath = event.symbol\n\n #print(event) # dbg\n if event.line.endswith('-b') or ' -b ' in event.line:\n # return only bookmark completions\n bkms = self.db.get('bookmarks', None)\n if bkms:\n return bkms.keys()\n else:\n return []\n\n if event.symbol == '-':\n width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))\n # jump in directory history by number\n fmt = '-%0' + width_dh +'d [%s]'\n ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]\n if len(ents) > 1:\n return ents\n return []\n\n if event.symbol.startswith('--'):\n return [\"--\" + os.path.basename(d) for d in ip.user_ns['_dh']]\n\n # Expand ~ in path and normalize directory separators.\n relpath, tilde_expand, tilde_val = expand_user(relpath)\n relpath = relpath.replace('\\\\','/')\n\n found = []\n for d in [f.replace('\\\\','/') + '/' for f in glob.glob(relpath+'*')\n if os.path.isdir(f)]:\n if ' ' in d:\n # we don't want to deal with any of that, complex code\n # for this is elsewhere\n raise TryNext\n\n found.append(d)\n\n if not found:\n if os.path.isdir(relpath):\n return [compress_user(relpath, tilde_expand, tilde_val)]\n\n # if no completions so far, try bookmarks\n bks = self.db.get('bookmarks',{}).iterkeys()\n bkmatches = [s for s in bks if s.startswith(event.symbol)]\n if bkmatches:\n return bkmatches\n\n raise TryNext\n\n return [compress_user(p, tilde_expand, tilde_val) for p in found]\n\ndef reset_completer(self, event):\n \"A completer for %reset magic\"\n return '-f -s in out array dhist'.split()\n","repo_name":"miniBloq/v0.83","sub_path":"source/Bin/Minibloq/lang/PPythonWin/v2.7.5.1/App/Lib/site-packages/IPython/core/completerlib.py","file_name":"completerlib.py","file_ext":"py","file_size_in_byte":10030,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"3"} +{"seq_id":"12298949645","text":"from typing import Union\n\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nfrom .auxillary.linalg import (\n diff_matrix,\n get_rbf_weights,\n)\n\n\ndef ortho_reg(x: torch.Tensor, 
centered=False):\n \"\"\"\n Orthogonal features penalization.\n\n .. math::\n y = \\| E[x x^\\top] - I \\|^2\n\n Parameters\n ----------\n x: torch.Tensor\n Design matrix of size `number of samples * number of dimension`.\n centered: bool, optional\n Either to center `x` before computing covariance. Default is False.\n\n Returns\n -------\n loss: torch.Tensor\n Empirical orthogonal regularization loss.\n \"\"\"\n if centered:\n x = x - x.mean(dim=0)\n x = x.transpose(1, 0) @ x / x.size(0)\n x = x - torch.eye(x.size(0), device=x.device)\n loss = torch.sum(x ** 2)\n return loss\n\n\ndef ortho_reg_minibatch(x: torch.Tensor, centered=False):\n \"\"\"\n Orthogonal features penalization implemented to provide unbiased stochastic gradient.\n\n .. math::\n y = E[(x^\\top x')^2 - 2 E[(x^\\top x)] + p\n\n Parameters\n ----------\n x: torch.Tensor\n Design matrix of size `number of samples * number of dimension` (`2n * p`).\n centered: bool, optional\n Either to center `x` before computing covariance. Default is False.\n\n Returns\n -------\n loss: torch.Tensor\n Empirical orthogonal regularization loss.\n \"\"\"\n if centered:\n x = x - x.mean(dim=0)\n n = x.size(0) // 2\n if n < 1:\n raise ValueError(f'Batch size should be at least two, got {x.size(0)}.')\n # Quadratic part\n y = x[:n] @ x[n:].transpose(1, 0)\n loss = torch.mean(y ** 2)\n # Linear part\n loss = loss - torch.mean(x ** 2) * 2 * x.size(1)\n loss = loss + x.size(1)\n return loss\n\n\ndef Dirichlet(module: nn.Module, inputs: torch.Tensor, **kwargs):\n \"\"\"\n Compute Dirichlet energy of module on inputs.\n\n .. math::\n l = \\frac{1}{npd}\\sum_{i < n, j < p} \\| \\nabla f_j(x_i) \\|^2\n\n Parameters\n ----------\n net: torch.nn.Module\n Pytorch neural network module, producing an output of size `p`.\n inputs: torch.Tensor\n Tensor of inputs to feed the networks of size `n * d`.\n\n Returns\n -------\n loss: torch.Tensor\n Empirical Dirichlet energy up to dimensional constants.\n \"\"\"\n def to_diff(x):\n return torch.sum(module(x), dim=0)\n jac = autograd.functional.jacobian(to_diff, inputs, create_graph=True)\n loss = torch.mean(jac ** 2)\n return loss\n\n\ndef augmentation_diff(net: nn.Module, inputs: torch.Tensor, outputs: Union[None, torch.Tensor] = None,\n sigma_augmentation: float = 1, **kwargs):\n \"\"\"\n Compute average difference between inputs and augmented inputs.\n\n Parameters\n ----------\n net: torch.nn.Module\n Pytorch neural network module.\n inputs: torch.Tensor\n Tensor of inputs to feed the networks.\n outputs: torch.Tensor, optional\n To avoid recomputing outputs inside function scope. Default is None.\n sigma_augmentation: float, optional\n Noise level in augmentation. 
Standard deviation of the Gaussian.\n\n Returns\n -------\n loss: torch.Tensor\n Empirical average of augmentation differences.\n \"\"\"\n factory_kwargs = {'device': inputs.device, 'dtype': inputs.dtype}\n\n if outputs is None:\n outputs = net(inputs)\n\n aug = torch.randn(inputs.size(), **factory_kwargs)\n aug = sigma_augmentation * aug\n aug = inputs + aug\n aug_out = net(aug)\n\n diff = outputs - aug_out\n diff = diff ** 2\n loss = torch.mean(diff)\n return loss\n\n\ndef graph_Laplacian(net: nn.Module, inputs: torch.Tensor, outputs: Union[None, torch.Tensor] = None,\n sigma_rbf: float = 1, **kwargs):\n \"\"\"\n Compute graph Laplacian criterion on data.\n\n Parameters\n ----------\n net: torch.nn.Module\n Pytorch neural network module.\n inputs: torch.Tensor\n Tensor of inputs to feed the networks.\n outputs: torch.Tensor, optional\n To avoid recomputing outputs inside function scope. Default is None.\n sigma_rbf: float, optional\n Scale parameter in the graph Laplacian with rbf kernel.\n\n Returns\n -------\n loss: torch.Tensor\n Loss function based on the graph Laplacian.\n \"\"\"\n if outputs is None:\n outputs = net(inputs)\n\n diff = diff_matrix(outputs)\n sim_weights = get_rbf_weights(inputs, sigma=sigma_rbf)\n diff = sim_weights * diff\n loss = torch.mean(diff)\n return loss\n","repo_name":"facebookresearch/PSSL","sub_path":"src/pssl/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"34756546544","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2022/4/11 13:35\n# @Author : Wang Zixv\n# @Site : \n# @File : get_first_n_s_in_video.py\n# @Software: PyCharm\n# 使用python进行视频前几秒的视频进行裁剪\nimport os\nimport sys\nfrom moviepy.editor import *\n\n\ndef gen_video_name(save_folder_path):\n video_name_part = len(os.listdir(save_folder_path))\n video_name = str(len(os.listdir(save_folder_path))) + \".mp4\"\n save_path = os.path.join(save_folder_path, video_name)\n\ndef get_video(start_time, end_time, video_path, save_folder_path):\n \"\"\"\n 裁剪视频对应时间内的帧\n \"\"\"\n # clip = VideoFileClip(video_path).subclip(start_time, end_time)\n video = CompositeVideoClip([VideoFileClip(video_path).subclip(start_time, end_time)])\n # video_name = str(len(os.listdir(save_folder_path))) + \".\" + video_path.split(\".\")[-1]\n video_name_part = len(os.listdir(save_folder_path))\n video_name = str(len(os.listdir(save_folder_path))) + \".mp4\"\n save_path = os.path.join(save_folder_path, video_name)\n print(save_path)\n while os.path.exists(save_path):\n video_name_part += 1\n video_name = str(video_name_part) + \".mp4\"\n save_path = os.path.join(save_folder_path, video_name)\n if not os.path.exists(save_path):\n break\n print(\"123\")\n\n video.write_videofile(save_path, fps=30, threads=1, codec=\"libx264\")\n\n\ndef get_time_on_video_path(video_path):\n split = os.path.split(video_path)[-1]\n print(split)\n vide_n = split.split(\"_\")\n start = vide_n[0]\n end = vide_n[1]\n return start, end\n\ndef get_time_arear_video(video_folder,save_folder):\n for video_name in video_path_list:\n video_path = os.path.join(video_folder, video_name)\n start_time, end_time = get_time_on_video_path(video_path)\n print(\"00000000000000000000000000000\")\n print(start_time, end_time)\n get_video(start_time, end_time, video_path, save_folder)\n\ndef get_first_sec_in_video(video_folder,save_folder):\n for video_name in video_path_list:\n video_path = os.path.join(video_folder, 
video_name)\n start_time, end_time = 0,1\n get_video(start_time, end_time, video_path, save_folder)\n\n\nif __name__ == '__main__':\n #\n # video_folder = \"F:\\\\1.znzz\\\\03.OldMan_Care\\\\01.datasets\\\\datasets\\\\getUp\"\n video_folder =\"E:\\\\video\\\\test\"\n save_folder = video_folder + \"_split\"\n video_path_list = os.listdir(video_folder)\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n get_time_arear_video(video_folder, save_folder)\n\n","repo_name":"PepperTree-wang/python_tools","sub_path":"video_cut/split_video/get_first_n_s_in_video.py","file_name":"get_first_n_s_in_video.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15423801682","text":"import numpy as np\nfrom absl.testing import parameterized\nimport pva\n\nfrom tensorflow.keras import layers\nfrom tensorflow.python.ipu import test_utils as tu\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ipu import functional_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import momentum\nfrom tensorflow.python.training import optimizer as optimizer_lib\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import rmsprop\nfrom tensorflow.python.ipu import embedding_ops\nfrom tensorflow.python.ipu import ipu_compiler\nfrom tensorflow.python.ipu import ipu_infeed_queue\nfrom tensorflow.python.ipu import ipu_outfeed_queue\nfrom tensorflow.python.ipu import loops\nfrom tensorflow.python.ipu import utils\nfrom tensorflow.python.ipu.config import IPUConfig\nfrom tensorflow.python.ipu import gradient_accumulation as ga\nfrom tensorflow.python.ipu.optimizers import gradient_accumulation_optimizer as gao\nfrom tensorflow.python.ipu.tests import pipelining_test_util\nfrom tensorflow.compat.v1 import disable_v2_behavior\n\ndisable_v2_behavior()\n\n\ndef _gradient_accumulation_loop(test_wrapper,\n fwd_fn,\n inputs_fn,\n input_values,\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n num_iterations=None,\n replication_factor=1,\n minimum_remote_tensor_size=128,\n replicated_optimizer_state_sharding=False,\n assert_compute_sets_contain_list=None,\n reduction_method=None):\n g = ops.Graph()\n\n if num_iterations is None:\n num_iterations = repeat_count * num_batches_to_accumulate\n\n with g.as_default(), test_wrapper.test_session(graph=g) as session:\n dataset = dataset_fn()\n inputs = inputs_fn()\n infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset)\n outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()\n\n with variable_scope.variable_scope(\"ipu\", use_resource=True, reuse=False):\n\n def model(*args):\n loss = fwd_fn(*functional_ops._convert_to_list(args)) # pylint: disable=W0212\n enqueue_op = outfeed_queue.enqueue(loss)\n optimizer = optimizer_fn()\n if replication_factor > 1:\n opt = gao.CrossReplicaGradientAccumulationOptimizerV2( # pylint: 
disable=line-too-long\n optimizer,\n num_batches_to_accumulate,\n reduction_method=reduction_method,\n offload_weight_update_variables=replicated_optimizer_state_sharding or None, # pylint: disable=line-too-long\n replicated_optimizer_state_sharding=replicated_optimizer_state_sharding) # pylint: disable=line-too-long\n else:\n opt = gao.GradientAccumulationOptimizerV2(\n optimizer,\n num_batches_to_accumulate,\n False,\n reduction_method=reduction_method)\n outs = list(args[:len(args) - infeed_queue.number_of_tuple_elements])\n outs.append(enqueue_op)\n outs.append(opt.minimize(loss))\n return outs\n\n def my_net(*args):\n return loops.repeat(num_iterations,\n model,\n inputs=args,\n infeed_queue=infeed_queue)\n\n with ops.device(\"/device:IPU:0\"):\n loop_ret = ipu_compiler.compile(my_net, inputs=inputs)\n\n outfeed_op = outfeed_queue.dequeue()\n\n cfg = IPUConfig()\n if assert_compute_sets_contain_list is not None:\n report_helper = tu.ReportHelper()\n report_helper.set_autoreport_options(cfg)\n if utils.running_on_ipu_model():\n tu.enable_ipu_events(cfg)\n cfg.ipu_model.compile_ipu_code = True\n cfg.ipu_model.tiles_per_ipu = 128\n cfg.optimizations.minimum_remote_tensor_size = minimum_remote_tensor_size\n cfg.auto_select_ipus = replication_factor\n tu.add_hw_ci_connection_options(cfg)\n cfg.configure_ipu_system()\n utils.move_variable_initialization_to_cpu()\n\n session.run(variables.global_variables_initializer())\n session.run(infeed_queue.initializer)\n if assert_compute_sets_contain_list is not None:\n report_helper.clear_reports()\n session.run(loop_ret, feed_dict=dict(zip(inputs, input_values)))\n r = session.run(outfeed_op)\n if assert_compute_sets_contain_list is not None:\n report = pva.openReport(report_helper.find_report())\n test_wrapper.assert_compute_sets_contain_list(\n report, assert_compute_sets_contain_list)\n return r\n\n\ndef _compare_to_cpu(test_wrapper,\n fwd_fn,\n inputs_fn,\n input_values,\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n replication_factor=1,\n minimum_remote_tensor_size=128,\n replicated_optimizer_state_sharding=False,\n assert_compute_sets_contain_list=None,\n reduction_method=None):\n\n ga_losses = _gradient_accumulation_loop(\n test_wrapper,\n fwd_fn,\n inputs_fn,\n input_values,\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n replication_factor=replication_factor,\n minimum_remote_tensor_size=minimum_remote_tensor_size,\n replicated_optimizer_state_sharding=replicated_optimizer_state_sharding,\n assert_compute_sets_contain_list=assert_compute_sets_contain_list,\n reduction_method=reduction_method)\n\n cpu_losses = pipelining_test_util.PipelineTester._cpu_with_grad_accum( # pylint: disable=protected-access\n test_wrapper, [fwd_fn],\n inputs_fn,\n input_values,\n repeat_count,\n num_batches_to_accumulate * replication_factor,\n dataset_fn,\n optimizer_fn,\n reduction_method=reduction_method)\n\n cpu_losses = np.reshape(cpu_losses, np.shape(ga_losses))\n test_wrapper.assertAllClose(cpu_losses, ga_losses)\n\n\nclass GradientAccumulationTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n @tu.skip_on_hw\n @test_util.deprecated_graph_mode_only\n def testIterationsNotMultiple(self):\n def dataset_parser(value):\n a = value\n b = (value + 10.) 
/ 2.0\n return a, b\n\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(5, shape=[4, 4, 2])\n dataset = dataset.batch(batch_size=2, drop_remainder=True)\n return dataset.map(dataset_parser)\n\n def model(c, x, b):\n with variable_scope.variable_scope(\"vs\", use_resource=True):\n y = layers.Conv2D(2,\n 1,\n use_bias=True,\n kernel_initializer=init_ops.ones_initializer(),\n name='conv1')(x)\n y = y + b\n y = math_ops.reduce_sum(y) + c\n return y\n\n def inputs_fn():\n with ops.device('cpu'):\n return [array_ops.placeholder(np.float32, shape=[])]\n\n def optimizer_fn():\n return momentum.MomentumOptimizer(0.01, 0.9)\n\n with self.assertRaisesRegex(\n errors.FailedPreconditionError,\n 'Detected a gradient accumulation operation with 32'):\n _gradient_accumulation_loop(\n self,\n model,\n inputs_fn, [10.01],\n 3,\n 32,\n dataset_fn,\n optimizer_fn,\n 10,\n reduction_method=ga.GradientAccumulationReductionMethod.MEAN) # pylint: disable=line-too-long\n\n @tu.test_may_use_ipus_or_model(num_ipus=1)\n @test_util.deprecated_graph_mode_only\n def testCompare1(self):\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(7, shape=[4, 4, 2])\n dataset = dataset.batch(batch_size=2, drop_remainder=True)\n\n def dataset_parser(value):\n img = value / 7\n label = value[0][0][0][0]\n return img, label\n\n return dataset.map(dataset_parser)\n\n num_batches_to_accumulate = 20\n repeat_count = 2\n\n def optimizer_fn():\n return adam.AdamOptimizer()\n\n def fwd_fn(c, img, label):\n with variable_scope.variable_scope(\"part1\", use_resource=True):\n y = layers.Conv2D(\n 2,\n 1,\n use_bias=True,\n kernel_initializer=init_ops.constant_initializer(0.5),\n bias_initializer=init_ops.constant_initializer(0.5),\n name='conv1')(img)\n y = y * 20\n y = layers.Dense(2,\n kernel_initializer=init_ops.constant_initializer(0.5),\n bias_initializer=init_ops.constant_initializer(0.5))(y)\n return math_ops.reduce_sum(\n layers.Dense(2,\n kernel_initializer=init_ops.constant_initializer(0.5),\n bias_initializer=init_ops.constant_initializer(0.5))\n (y)) + c + label\n\n def inputs_fn():\n with ops.device('cpu'):\n return [array_ops.placeholder(np.float32, shape=[])]\n\n _compare_to_cpu(self,\n fwd_fn,\n inputs_fn, [10.01],\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n reduction_method=ga.GradientAccumulationReductionMethod.MEAN) # pylint: disable=line-too-long\n\n @tu.test_may_use_ipus_or_model(num_ipus=1)\n @test_util.deprecated_graph_mode_only\n def testCompare2(self):\n # Resnet like network.\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(100, shape=[4])\n dataset = dataset.batch(batch_size=64, drop_remainder=True)\n dataset = dataset.batch(batch_size=64, drop_remainder=True)\n dataset = dataset.batch(batch_size=2, drop_remainder=True)\n\n def dataset_parser(value):\n img = value\n label = math_ops.reduce_mean(img, axis=[1, 2, 3])\n return img, math_ops.cast(label, np.int32)\n\n return dataset.map(dataset_parser)\n\n num_batches_to_accumulate = 18\n repeat_count = 2\n\n def optimizer_fn():\n return gradient_descent.GradientDescentOptimizer(0.01)\n\n def fixed_padding(inputs, kernel_size):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = array_ops.pad(\n inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n return padded_inputs\n\n def block(name, first_stride, out_filters, count, x):\n\n for i in range(count):\n shape_in = x.shape\n stride = first_stride if (i == 0) else 1\n 
if stride > 1:\n x = fixed_padding(x, 3)\n sc = x\n\n with variable_scope.variable_scope(name + \"/\" + str(i) + \"/1\"):\n x = conv(x, 3, stride, out_filters)\n x = nn.relu(x)\n\n with variable_scope.variable_scope(name + \"/\" + str(i) + \"/2\"):\n x = conv(x, 3, 1, out_filters)\n\n # shortcut\n if stride != 1:\n sc = array_ops.strided_slice(sc, [0, 0, 0, 0],\n sc.shape,\n strides=[1, stride, stride, 1])\n pad = int(x.shape[3] - shape_in[3])\n if pad != 0:\n sc = array_ops.pad(sc, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])\n\n x = nn.relu(x + sc)\n\n return x\n\n def fc(x, num_units_out):\n return layers.Dense(\n num_units_out,\n kernel_initializer=init_ops.constant_initializer(0.01),\n bias_initializer=init_ops.constant_initializer(0.05))(x)\n\n def max_pool(x, ksize=3, stride=2):\n return layers.MaxPooling2D(ksize, stride, padding='SAME')(x)\n\n def conv(x, ksize, stride, filters_out):\n return layers.Conv2D(\n filters_out,\n ksize,\n stride,\n 'SAME',\n kernel_initializer=init_ops.constant_initializer(0.01),\n bias_initializer=init_ops.constant_initializer(0.05))(x)\n\n def fwd_fn(img, label):\n with variable_scope.variable_scope(\"part1\", use_resource=True):\n x = conv(img, 7, 2, 8)\n x = nn.relu(x)\n x = max_pool(x, ksize=3, stride=2)\n\n with variable_scope.variable_scope(\"part2\", use_resource=True):\n x = block(\"b\", 2, 32, 1, x)\n\n with variable_scope.variable_scope(\"part3\", use_resource=True):\n x = math_ops.reduce_mean(x, axis=[1, 2])\n x = fc(x, 100)\n loss = math_ops.reduce_mean(\n nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x,\n labels=label))\n return loss\n\n _compare_to_cpu(self,\n fwd_fn,\n lambda: [], [],\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n reduction_method=ga.GradientAccumulationReductionMethod.MEAN) # pylint: disable=line-too-long\n\n @tu.test_may_use_ipus_or_model(num_ipus=1)\n @test_util.deprecated_graph_mode_only\n def testCompare3(self):\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(10, shape=[4])\n dataset = dataset.batch(batch_size=2, drop_remainder=True)\n\n def dataset_parser(value):\n label = math_ops.reduce_mean(value, axis=[1])\n return math_ops.cast(value,\n np.int32), math_ops.cast(label / 10, np.int32)\n\n return dataset.map(dataset_parser)\n\n num_batches_to_accumulate = 20\n repeat_count = 2\n\n def optimizer_fn():\n return momentum.MomentumOptimizer(0.01, 0.8)\n\n def fwd_fn(idx, label):\n with variable_scope.variable_scope(\"part1\", use_resource=True):\n embedding = variable_scope.get_variable(\n \"c\",\n shape=[10, 1216],\n dtype=np.float32,\n initializer=init_ops.constant_initializer(10.01),\n trainable=True)\n x = embedding_ops.embedding_lookup(embedding, idx)\n\n logits = math_ops.reduce_sum(x, axis=[-1])\n loss = math_ops.reduce_mean(\n nn_ops.sparse_softmax_cross_entropy_with_logits(logits=logits,\n labels=label))\n return loss\n\n _compare_to_cpu(self,\n fwd_fn,\n lambda: [], [],\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n reduction_method=ga.GradientAccumulationReductionMethod.MEAN) # pylint: disable=line-too-long\n\n @tu.test_may_use_ipus_or_model(num_ipus=1)\n @test_util.deprecated_graph_mode_only\n def testCompare4(self):\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(7, shape=[4, 4])\n\n def dataset_parser(value):\n img = value\n label = value[0][0] % 4\n return img, math_ops.cast(label, np.int32)\n\n dataset = dataset.map(dataset_parser)\n\n return dataset.batch(batch_size=2, drop_remainder=True)\n\n 
num_batches_to_accumulate = 20\n repeat_count = 2\n\n def optimizer_fn():\n return adam.AdamOptimizer()\n\n def fwd_fn(x, label):\n with variable_scope.variable_scope(\"vs\", use_resource=True):\n weight = variable_scope.get_variable(\n \"w0\",\n shape=[4, 4],\n dtype=np.float32,\n initializer=init_ops.ones_initializer())\n x = math_ops.matmul(x, weight)\n\n with variable_scope.variable_scope(\"vs\", use_resource=True):\n weight = variable_scope.get_variable(\n \"w1\",\n shape=[4, 4],\n dtype=np.float32,\n initializer=init_ops.ones_initializer())\n x = math_ops.matmul(x, weight)\n\n with variable_scope.variable_scope(\"vs\", use_resource=True):\n weight = variable_scope.get_variable(\n \"w2\",\n shape=[4, 4],\n dtype=np.float32,\n initializer=init_ops.ones_initializer())\n x = math_ops.matmul(x, weight)\n\n with variable_scope.variable_scope(\"vs\", use_resource=True):\n weight = variable_scope.get_variable(\n \"w3\",\n shape=[4, 4],\n dtype=np.float32,\n initializer=init_ops.ones_initializer())\n x = math_ops.matmul(x, weight)\n\n # Ruse the weight here.\n with variable_scope.variable_scope(\"vs\", use_resource=True, reuse=True):\n weight = variable_scope.get_variable(\n \"w0\",\n shape=[4, 4],\n dtype=np.float32,\n initializer=init_ops.ones_initializer())\n x = math_ops.matmul(x, weight)\n logits = math_ops.reduce_mean(x, axis=[1])\n loss = math_ops.reduce_mean(\n nn_ops.sparse_softmax_cross_entropy_with_logits(logits=logits,\n labels=label))\n return loss\n\n _compare_to_cpu(self,\n fwd_fn,\n lambda: [], [],\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n reduction_method=ga.GradientAccumulationReductionMethod.MEAN) # pylint: disable=line-too-long\n\n @tu.test_may_use_ipus_or_model(num_ipus=1)\n @test_util.deprecated_graph_mode_only\n def testCompare5(self):\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(7, shape=[4, 4])\n\n def dataset_parser(value):\n img = value\n label = value[0][0] % 4\n return img, math_ops.cast(label, np.int32)\n\n dataset = dataset.map(dataset_parser)\n\n return dataset.batch(batch_size=2, drop_remainder=True)\n\n num_batches_to_accumulate = 20\n repeat_count = 2\n\n def optimizer_fn():\n return adam.AdamOptimizer()\n\n def fwd_fn(x, label):\n with variable_scope.variable_scope(\"vs\", use_resource=True):\n weight = variable_scope.get_variable(\n \"w0\",\n shape=[4, 4],\n dtype=np.float32,\n initializer=init_ops.ones_initializer())\n x = math_ops.matmul(x, weight)\n x = nn.relu(x)\n\n with variable_scope.variable_scope(\"vs\", use_resource=True, reuse=True):\n weight = variable_scope.get_variable(\n \"w0\",\n shape=[4, 4],\n dtype=np.float32,\n initializer=init_ops.ones_initializer())\n x = math_ops.matmul(x, weight)\n logits = math_ops.reduce_mean(x, axis=[1])\n loss = math_ops.reduce_mean(\n nn_ops.sparse_softmax_cross_entropy_with_logits(logits=logits,\n labels=label))\n return loss\n\n _compare_to_cpu(self,\n fwd_fn,\n lambda: [], [],\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n reduction_method=ga.GradientAccumulationReductionMethod.MEAN) # pylint: disable=line-too-long\n\n def _compare6(self, optimizer_fn, replicated_optimizer_state_sharding=False):\n dataset_size = 10\n num_batches_to_accumulate = 4\n repeat_count = 2\n embedding_size = 4\n\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(dataset_size, shape=[4])\n dataset = dataset.batch(batch_size=2, drop_remainder=True)\n\n def dataset_parser(value):\n label = math_ops.reduce_mean(value, axis=[1])\n 
return math_ops.cast(value,\n np.int32), math_ops.cast(label % 4, np.int32)\n\n return dataset.map(dataset_parser)\n\n def fwd_fn(idx, label):\n np.random.seed(1)\n embedding_shape = (dataset_size, embedding_size)\n embedding_initializer = np.random.normal(0, 1, embedding_shape).astype(\n np.float32)\n weights_shape = (embedding_size, embedding_size)\n weights_initializer = np.random.normal(0, 1,\n weights_shape).astype(np.float32)\n\n with variable_scope.variable_scope(\"part1\", use_resource=True):\n embedding = variable_scope.get_variable(\n \"c\",\n dtype=np.float32,\n initializer=embedding_initializer,\n trainable=True)\n\n weight = variable_scope.get_variable(\"w0\",\n dtype=np.float32,\n initializer=weights_initializer,\n trainable=True)\n\n x = embedding_ops.embedding_lookup(embedding, idx)\n x = math_ops.matmul(x, weight)\n\n logits = math_ops.reduce_sum(x, axis=[-1])\n loss = math_ops.reduce_mean(\n nn_ops.sparse_softmax_cross_entropy_with_logits(logits=logits,\n labels=label))\n return loss\n\n _compare_to_cpu(\n self,\n fwd_fn,\n lambda: [], [],\n repeat_count,\n num_batches_to_accumulate,\n dataset_fn,\n optimizer_fn,\n replication_factor=2,\n minimum_remote_tensor_size=8,\n replicated_optimizer_state_sharding=replicated_optimizer_state_sharding,\n assert_compute_sets_contain_list=['/ReduceScatter/']\n if replicated_optimizer_state_sharding else None,\n reduction_method=ga.GradientAccumulationReductionMethod.MEAN) # pylint: disable=line-too-long\n\n @parameterized.parameters({'replicated_optimizer_state_sharding': False}, {\n 'replicated_optimizer_state_sharding': True,\n })\n @tu.test_uses_ipus(num_ipus=2)\n @test_util.deprecated_graph_mode_only\n def testCompare6Momentum(self, replicated_optimizer_state_sharding):\n self._compare6(lambda: momentum.MomentumOptimizer(0.01, 0.8),\n replicated_optimizer_state_sharding)\n\n @tu.test_uses_ipus(num_ipus=2)\n @test_util.deprecated_graph_mode_only\n def testCompare6SDG(self):\n self._compare6(lambda: gradient_descent.GradientDescentOptimizer(0.01))\n\n @parameterized.parameters({'replicated_optimizer_state_sharding': False}, {\n 'replicated_optimizer_state_sharding': True,\n })\n @tu.test_uses_ipus(num_ipus=2)\n @test_util.deprecated_graph_mode_only\n def testCompare6Adam(self, replicated_optimizer_state_sharding):\n self._compare6(adam.AdamOptimizer, replicated_optimizer_state_sharding)\n\n @parameterized.parameters({'replicated_optimizer_state_sharding': False}, {\n 'replicated_optimizer_state_sharding': True,\n })\n @tu.test_uses_ipus(num_ipus=2)\n @test_util.deprecated_graph_mode_only\n def testCompare6RMS(self, replicated_optimizer_state_sharding):\n self._compare6(lambda: rmsprop.RMSPropOptimizer(0.01),\n replicated_optimizer_state_sharding)\n\n @parameterized.parameters(list(ga.GradientAccumulationReductionMethod))\n @tu.test_may_use_ipus_or_model(num_ipus=1)\n @test_util.deprecated_graph_mode_only\n def testGradientAccumulationDtype(self, reduction_method):\n gradient_accumulation_count = 8\n gradient_accumulation_dtype = np.float32\n\n x = np.finfo(np.float16).max\n y = np.array(0.0, dtype=np.float16)\n initial_w = np.array(1.0, dtype=np.float16)\n\n x = np.array([[0, 0, x, 0]], np.float16)\n y = np.array([[0.0]], dtype=np.float16)\n initial_w = np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float16)\n learning_rate = 2**-10\n\n features = np.repeat(x, gradient_accumulation_count, 0)\n labels = np.repeat(y, gradient_accumulation_count, 0)\n dataset = dataset_ops.Dataset.from_tensor_slices((features, labels))\n\n infeed_queue = 
ipu_infeed_queue.IPUInfeedQueue(dataset)\n grad_outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()\n\n class CastingGradientDescent(optimizer_lib.Optimizer): # pylint: disable=abstract-method\n \"\"\"Compute update using the dtype of the gradient, and then cast to\n the dtype of the variable.\"\"\"\n def __init__(self, outer):\n self.outer = outer\n super().__init__(use_locking=False, name=\"CastingGradientDescent\")\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n update_ops = []\n\n for (grad, var) in grads_and_vars:\n self.outer.assertEqual(grad.dtype, gradient_accumulation_dtype)\n update_ops.append(grad_outfeed_queue.enqueue(grad))\n delta = math_ops.cast(-learning_rate * grad, var.dtype)\n update_ops.append(var.assign_add(delta))\n\n return control_flow_ops.group(*update_ops)\n\n def model_iteration(features, labels):\n w = variable_scope.get_variable(name=\"w\", initializer=initial_w)\n partial = w * features\n loss = partial + labels\n\n def dtype_getter(var):\n self.assertEqual(var, w)\n return gradient_accumulation_dtype\n\n opt = gao.GradientAccumulationOptimizerV2(\n CastingGradientDescent(self),\n gradient_accumulation_count,\n dtype=dtype_getter,\n reduction_method=reduction_method)\n return opt.minimize(loss)\n\n def model():\n return loops.repeat(gradient_accumulation_count,\n model_iteration,\n infeed_queue=infeed_queue)\n\n def compiled_model():\n with ops.device(\"/device:IPU:0\"):\n return ipu_compiler.compile(model)\n\n train_op = compiled_model()\n\n dequeued_gradient = grad_outfeed_queue.dequeue()\n\n cfg = IPUConfig()\n if utils.running_on_ipu_model():\n tu.enable_ipu_events(cfg)\n cfg.ipu_model.compile_ipu_code = True\n cfg.ipu_model.tiles_per_ipu = 128\n cfg.auto_select_ipus = 1\n tu.add_hw_ci_connection_options(cfg)\n cfg.configure_ipu_system()\n utils.move_variable_initialization_to_cpu()\n\n with tu.ipu_session() as sess:\n sess.run(infeed_queue.initializer)\n sess.run(variables.global_variables_initializer())\n\n sess.run(train_op)\n [actual_accumulated_gradient] = sess.run(dequeued_gradient)\n\n # L(x) = w * x + y\n # dL(x)/dw = x\n # This would overflow in fp16:\n if reduction_method == ga.GradientAccumulationReductionMethod.SUM:\n expected_accumulated_gradient = gradient_accumulation_count * x.astype(\n gradient_accumulation_dtype)[0]\n else:\n expected_accumulated_gradient = x.astype(\n gradient_accumulation_dtype)[0]\n\n self.assertAllClose(expected_accumulated_gradient,\n actual_accumulated_gradient)\n\n sess.run(infeed_queue.deleter)\n sess.run(grad_outfeed_queue.deleter)\n\n def __makeGATestNetwork(self, reduction_method):\n def dataset_fn():\n dataset = tu.create_single_increasing_dataset(7, shape=[4, 4, 2])\n dataset = dataset.batch(batch_size=2, drop_remainder=True)\n\n def dataset_parser(value):\n img = value / 7\n label = value[0][0][0][0]\n return img, label\n\n return dataset.map(dataset_parser)\n\n num_batches_to_accumulate = 20\n repeat_count = 4\n optimizer = adam.AdamOptimizer(learning_rate=1.0,\n epsilon=1.0,\n beta1=0.5,\n beta2=0.5)\n\n def fwd_fn(c, img, label):\n with variable_scope.variable_scope(\"part1\", use_resource=True):\n y = layers.Conv2D(\n 2,\n 1,\n use_bias=True,\n kernel_initializer=init_ops.constant_initializer(0.5),\n bias_initializer=init_ops.constant_initializer(0.5),\n name='conv1')(img)\n y = y * 20\n y = layers.Dense(2,\n kernel_initializer=init_ops.constant_initializer(0.5),\n bias_initializer=init_ops.constant_initializer(0.5))(y)\n return math_ops.reduce_sum(\n layers.Dense(2,\n 
kernel_initializer=init_ops.constant_initializer(0.5),\n bias_initializer=init_ops.constant_initializer(0.5))\n (y)) + c + label\n\n def inputs_fn():\n with ops.device('cpu'):\n return [array_ops.placeholder(np.float32, shape=[])]\n\n g = ops.Graph()\n\n repeat_count = 2\n num_batches_to_accumulate = 4\n\n num_iterations = repeat_count * num_batches_to_accumulate\n\n with g.as_default(), self.test_session(graph=g):\n dataset = dataset_fn()\n inputs = inputs_fn()\n infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset)\n outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()\n\n with variable_scope.variable_scope(\"ipu\", use_resource=True,\n reuse=False):\n\n def model(*args):\n loss = fwd_fn(*functional_ops._convert_to_list(args)) # pylint: disable=W0212,E1120\n enqueue_op = outfeed_queue.enqueue(loss)\n opt = gao.GradientAccumulationOptimizerV2(\n optimizer,\n num_batches_to_accumulate,\n reduction_method=reduction_method)\n outs = list(args[:len(args) - infeed_queue.number_of_tuple_elements])\n outs.append(enqueue_op)\n outs.append(opt.minimize(loss))\n return outs\n\n def my_net(*args):\n return loops.repeat(num_iterations,\n model,\n inputs=args,\n infeed_queue=infeed_queue)\n\n with ops.device(\"/device:IPU:0\"):\n ipu_compiler.compile(my_net, inputs=inputs)\n\n @parameterized.parameters([\n ('SUM', ga.GradientAccumulationReductionMethod.SUM),\n ('sum', ga.GradientAccumulationReductionMethod.SUM),\n ('MEAN', ga.GradientAccumulationReductionMethod.MEAN),\n ('mean', ga.GradientAccumulationReductionMethod.MEAN),\n ('RUNNING_MEAN', ga.GradientAccumulationReductionMethod.RUNNING_MEAN),\n ('running_mean', ga.GradientAccumulationReductionMethod.RUNNING_MEAN),\n (ga.GradientAccumulationReductionMethod.SUM,\n ga.GradientAccumulationReductionMethod.SUM),\n (ga.GradientAccumulationReductionMethod.MEAN,\n ga.GradientAccumulationReductionMethod.MEAN),\n (ga.GradientAccumulationReductionMethod.RUNNING_MEAN,\n ga.GradientAccumulationReductionMethod.RUNNING_MEAN)\n ])\n @test_util.deprecated_graph_mode_only\n def testGAReduceMethodSupported(self, reduction_method,\n expected_reduction_method):\n with ops.device(\"/device:IPU:0\"):\n reduction_method = ga.GradientAccumulationReductionMethod.parse(\n reduction_method)\n self.assertEqual(reduction_method, expected_reduction_method)\n\n @parameterized.parameters(['Exp', 10, None])\n @test_util.deprecated_graph_mode_only\n def testGAReduceMethodInvalid(self, reduction_method):\n with self.assertRaisesRegex(\n ValueError, f\"Cannot parse {reduction_method} as one of \"\n \"GradientAccumulationReductionMethod.\"):\n reduction_method = ga.GradientAccumulationReductionMethod.parse(\n reduction_method)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n","repo_name":"graphcore/tensorflow","sub_path":"tensorflow/python/ipu/tests/gradient_accumulation_test.py","file_name":"gradient_accumulation_test.py","file_ext":"py","file_size_in_byte":31558,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"3"} +{"seq_id":"30919490195","text":"import os\n\nnumber = 1\nwith open(\"seed_Unaligned.FASTA\") as f:\n for line in f:\n if line[0] != \">\":\n filename = \"gene\"+str(number)+\"/gene\"+str(number)+\".fa\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename,\"w\") as out:\n out.write(line)\n\n\n 
number+=1\n","repo_name":"tijeco/ReciproFinder","sub_path":"splitTogene.py","file_name":"splitTogene.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74641864402","text":"import time\nimport typing as t\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as opt\n\n\nclass Episode:\n \"\"\"Episode history tracker from start to terminate state.\"\"\"\n\n def __init__(self):\n self.total_reward = 0\n self.states_history = []\n self.actions_history = []\n\n def append(self, state: np.ndarray, action: int, reward: float) -> None:\n self.states_history.append(state)\n self.actions_history.append(action)\n self.total_reward += reward\n\n\nclass ReplayBuffer:\n \"\"\"Replay buffer to store episodes and find 'elites'.\"\"\"\n\n def __init__(self, capacity: int):\n self.capacity = capacity\n self.last_episodes = []\n\n def append(self, episode: Episode) -> None:\n self.last_episodes.append(episode)\n self.last_episodes = self.last_episodes[:self.capacity]\n\n def get_elite_episodes(self, quantile: float) -> t.List[Episode]:\n min_elite_reward = np.quantile([episode.total_reward for episode in self.last_episodes], q=quantile)\n return [episode for episode in self.last_episodes if episode.total_reward >= min_elite_reward]\n\n\nclass Agent(nn.Module):\n \"\"\"RL algorithm to take actions in particular state.\"\"\"\n\n def __init__(self, state_dim: int, action_dim: int):\n super().__init__()\n self.action_dim = action_dim\n self.state_dim = state_dim\n\n self.network = nn.Sequential(\n nn.Linear(self.state_dim, 150),\n nn.ReLU(),\n nn.Linear(150, 75),\n nn.ReLU(),\n nn.Linear(75, self.action_dim)\n )\n self.softmax = nn.Softmax(dim=-1)\n self.loss_fn = nn.CrossEntropyLoss()\n self.optimizer = opt.Adam(self.network.parameters(), lr=0.05)\n\n def forward(self, state: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward flow to train agent.\"\"\"\n x = self.network(state)\n return x\n\n def action(self, state: np.ndarray) -> int:\n \"\"\"Take action in state with no learning.\"\"\"\n state_tensor = torch.FloatTensor(state).unsqueeze(0)\n\n with torch.no_grad():\n logits = self.network(state_tensor)\n action_probs = self.softmax(logits).detach().numpy()[0]\n action = np.random.choice(range(self.action_dim), p=action_probs)\n return action\n\n def update_policy(self, elite_episodes: t.List[Episode]) -> None:\n \"\"\"Update RL policy from 'elite' episodes in recent history.\"\"\"\n target = []\n train_data = []\n\n for episode in elite_episodes:\n for state, action in zip(episode.states_history, episode.actions_history):\n train_data.append(state)\n target.append(action)\n\n train_data = np.array(train_data)\n target = np.array(target)\n\n train_data_t = torch.FloatTensor(train_data)\n target_t = torch.LongTensor(target)\n\n self.optimizer.zero_grad()\n print(train_data_t.size())\n print(target_t.size())\n action_logits = self.network(train_data_t)\n loss = self.loss_fn(action_logits, target_t)\n loss.backward()\n self.optimizer.step()\n\n\ndef train_agent(\n env: gym.Env,\n agent: Agent,\n buffer: ReplayBuffer,\n quantile: float,\n n_epochs: int,\n n_episodes: int\n ) -> None:\n \"\"\"\n Train agent via cross entropy method (from elite episodes).\n\n Args:\n env: gym.Env - Environment for agent to act.\n agent: Agent - RL algorithm.\n buffer: ReplayBuffer - Buffer to store last episodes.\n quantile: float - Threshold for reward to detect elite episodes.\n n_epochs: int - 
How many times update RL policy after playing n_episodes.\n n_episodes: int - How many episodes agent needs to play to update policy.\n \"\"\"\n for _ in range(n_epochs):\n total_rewards = []\n for n in range(n_episodes):\n state = env.reset()\n episode = Episode()\n total_reward = 0\n\n while True:\n action = agent.action(state)\n new_state, reward, done, _ = env.step(action)\n episode.append(state, action, reward)\n state = new_state\n total_reward += reward\n\n if done:\n break\n\n buffer.append(episode)\n total_rewards.append(total_reward)\n\n mean_episode_reward = np.mean(total_rewards)\n print('#################', mean_episode_reward)\n if mean_episode_reward >= 480:\n break\n elite_episodes = buffer.get_elite_episodes(quantile=quantile)\n agent.update_policy(elite_episodes)\n\n\ndef eval_agent(env: gym.Env, agent: Agent) -> float:\n \"\"\"\n Evaluating agent on existing env.\n\n Args:\n env: gym.Env - Environment for agent to act.\n agent: Agent - RL algorithm.\n\n Returns:\n float - cumulative reward from start to terminate state.\n \"\"\"\n state = env.reset()\n total_reward = 0\n\n while True:\n action = agent.action(state)\n new_state, reward, done, _ = env.step(action)\n state = new_state\n total_reward += reward\n\n env.render()\n time.sleep(0.05)\n\n if done:\n break\n\n return total_reward\n\n\nif __name__ == '__main__':\n env = gym.make('CartPole-v1')\n\n action_dim = env.action_space.n\n state_dim = env.observation_space.shape[0]\n\n agent = Agent(state_dim, action_dim)\n buffer = ReplayBuffer(capacity=5_000)\n\n # train_agent(env, agent, buffer, quantile=0.99, n_epochs=10_000, n_episodes=50)\n # torch.save(agent.network.state_dict(), 'agents_store/cross_entropy/agent.pt')\n\n agent.network.load_state_dict(torch.load('agents_store/cross_entropy/agent.pt'))\n agent.network.eval()\n\n reward = eval_agent(env, agent)\n print(reward)\n","repo_name":"ivanmarkov97/rl_algorithms","sub_path":"cross_entropy/deep_cross_entropy.py","file_name":"deep_cross_entropy.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36437298895","text":"class Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n count, sumt = 0, 0\n maps = {}\n maps[0] = 1\n for i in range(len(nums)):\n sumt += nums[i]\n if sumt-k in maps:\n count += maps[sumt-k]\n maps[sumt] = maps[sumt]+1 if sumt in maps else 1\n\n return count\n","repo_name":"sinoyuco/leetcode_solutions","sub_path":"array/subarray_sum.py","file_name":"subarray_sum.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15420358482","text":"import collections\nimport csv\n\nfrom typing import (Any, Callable, Dict, IO, Iterable, List, Mapping, Optional,\n Sequence, Tuple)\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.util import tf_export\n\n# pylint: disable=g-import-not-at-top\ntry:\n from tensorflow.lite.python import metrics_portable as metrics_stub # type: ignore\nexcept ImportError:\n from tensorflow.lite.python import metrics_nonportable as metrics_stub # type: ignore\n# pylint: enable=g-import-not-at-top\n\n# Returns metrics based on difference of values for quantized/float ops.\n_DEFAULT_LAYER_DEBUG_METRICS = {\n 'num_elements': lambda diffs: diffs.size,\n 'stddev': np.std,\n 'mean_error': np.average,\n 'max_abs_error': lambda diffs: np.max(np.abs(diffs)),\n 'mean_squared_error': lambda diffs: 
np.average(diffs**2),\n}\n\n_NUMERIC_VERIFY_OP_NAME = 'NumericVerify'\n\n\ndef _get_quant_params(\n tensor_detail: Mapping[str, Any]) -> Optional[Tuple[float, int]]:\n \"\"\"Returns first scale and zero point from tensor detail, if present.\"\"\"\n quant_params = tensor_detail['quantization_parameters']\n if not quant_params:\n return None\n if quant_params['scales'] and quant_params['zero_points']:\n return (quant_params['scales'][0], quant_params['zero_points'][0])\n return None\n\n\n@tf_export.tf_export('lite.experimental.QuantizationDebugOptions')\nclass QuantizationDebugOptions:\n \"\"\"Debug options to set up a given QuantizationDebugger.\"\"\"\n\n def __init__(\n self,\n layer_debug_metrics: Optional[Mapping[str, Callable[[np.ndarray],\n float]]] = None,\n model_debug_metrics: Optional[Mapping[str, Callable[\n [Sequence[np.ndarray], Sequence[np.ndarray]], float]]] = None,\n layer_direct_compare_metrics: Optional[Mapping[str, Callable[\n [Sequence[np.ndarray], Sequence[np.ndarray], float, int],\n float]]] = None\n ) -> None:\n \"\"\"Initializes debugger options.\n\n Args:\n layer_debug_metrics: a dict to specify layer debug functions\n {function_name_str: function} where the function accepts result of\n NumericVerify Op, which is value difference between float and\n dequantized op results. The function returns single scalar value.\n model_debug_metrics: a dict to specify model debug functions\n {function_name_str: function} where the function accepts outputs from\n two models, and returns single scalar value for a metric. (e.g.\n accuracy, IoU)\n layer_direct_compare_metrics: a dict to specify layer debug functions\n {function_name_str: function}. The signature is different from that of\n `layer_debug_metrics`, and this one gets passed (original float value,\n original quantized value, scale, zero point). The function's\n implementation is responsible for correctly dequantize the quantized\n value to compare. Use this one when comparing diff is not enough.\n (Note) quantized value is passed as int8, so cast to int32 is needed.\n\n Raises:\n ValueError: when there are duplicate keys\n \"\"\"\n self.layer_debug_metrics = layer_debug_metrics\n self.model_debug_metrics = model_debug_metrics\n self.layer_direct_compare_metrics = layer_direct_compare_metrics\n\n keys = []\n for metrics in [\n layer_debug_metrics, model_debug_metrics, layer_direct_compare_metrics]:\n if metrics is not None:\n keys.extend(metrics.keys())\n if len(keys) != len(set(keys)):\n raise ValueError('Provided metrics have duplicate keys.')\n\n\n@tf_export.tf_export('lite.experimental.QuantizationDebugger')\nclass QuantizationDebugger:\n \"\"\"Debugger for Quantized TensorFlow Lite debug mode models.\n\n This can run the TensorFlow Lite converted models equipped with debug ops and\n collect debug information. 
This debugger calculates statistics from\n user-defined post-processing functions as well as default ones.\n \"\"\"\n\n def __init__(\n self,\n quant_debug_model_path: Optional[str] = None,\n quant_debug_model_content: Optional[bytes] = None,\n float_model_path: Optional[str] = None,\n float_model_content: Optional[bytes] = None,\n debug_dataset: Optional[Callable[[],\n Iterable[Sequence[np.ndarray]]]] = None,\n debug_options: Optional[QuantizationDebugOptions] = None) -> None:\n \"\"\"Runs the TFLite debugging model with given debug options.\n\n Args:\n quant_debug_model_path: Path to the quantized debug TFLite model file.\n quant_debug_model_content: Content of the quantized debug TFLite model.\n float_model_path: Path to float TFLite model file.\n float_model_content: Content of the float TFLite model.\n debug_dataset: a factory function that returns dataset generator which is\n used to generate input samples (list of np.ndarray) for the model. The\n generated elements must have same types and shape as inputs to the\n model.\n debug_options: Debug options to debug the given model.\n\n Raises:\n ValueError: If the debugger was unable to be created.\n\n Attributes:\n layer_statistics: results of error metrics for each NumericVerify op\n results. in {layer_name: {metric_name: metric}} format.\n model_statistics: results of error metrics for difference between float\n and quantized models. in {metric_name: metric} format.\n \"\"\"\n self._data_gen = debug_dataset\n self._debug_options = debug_options or QuantizationDebugOptions()\n\n input_data = next(iter(self._data_gen()))\n self._quant_interpreter = tf.lite.Interpreter(\n quant_debug_model_path,\n quant_debug_model_content,\n experimental_preserve_all_tensors=(\n self._debug_options.layer_direct_compare_metrics is not None))\n if self._debug_options.model_debug_metrics:\n self._float_interpreter = tf.lite.Interpreter(float_model_path,\n float_model_content)\n\n # TODO(b/177749613) : Fix the dependency on tf.lite._get_ops_details()\n # Following code is needed to get op's name from the output tensor index,\n # since NumericVerify op only provides its quantized input tensor index.\n self._defining_op = dict()\n for op_info in self._quant_interpreter._get_ops_details(): # pylint: disable=protected-access\n self._defining_op.update(\n {tensor_idx: op_info['index'] for tensor_idx in op_info['outputs']})\n\n self._numeric_verify_tensor_details = None\n self._numeric_verify_op_details = None\n if not self._get_numeric_verify_tensor_details():\n raise ValueError('Please check if the quantized model is in debug mode')\n\n self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy()\n if self._debug_options.layer_debug_metrics:\n self._layer_debug_metrics.update(self._debug_options.layer_debug_metrics)\n\n self.layer_statistics = None\n self.model_statistics = None\n\n self._metrics = metrics_stub.TFLiteMetrics()\n self._metrics.increase_counter_debugger_creation()\n\n def run(self) -> None:\n \"\"\"Runs models and gets metrics.\"\"\"\n self.layer_statistics = self._collect_layer_statistics()\n if self._debug_options.model_debug_metrics:\n self.model_statistics = self._collect_model_statistics()\n\n def _collect_layer_statistics(self) -> Dict[str, Dict[str, float]]:\n \"\"\"Collects layer statistics by applying layer debug metrics.\n\n For all data from the given RepresentativeDataset, collect statistics per\n example by getting the NumericVerify op results in _quant_interpreter\n and calculating layer debug metrics on the results.\n\n 
Returns:\n aggregated per-layer statistics of NumericVerify results.\n {layer_name: {metric_name: metric}}\n \"\"\"\n layer_statistics = collections.defaultdict(\n lambda: collections.defaultdict(list))\n\n initialize = True\n for tensor_data in self._data_gen():\n self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)\n initialize = False\n\n # Run the model.\n self._quant_interpreter.invoke()\n\n # Collect the statistics of this invoke result.\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n diffs = self._quant_interpreter.get_tensor(tensor_detail['index'])\n for metric_name, metric_fn in self._layer_debug_metrics.items():\n layer_statistics[tensor_name][metric_name].append(metric_fn(diffs))\n\n if self._debug_options.layer_direct_compare_metrics is not None:\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n op_idx = self._defining_op[tensor_detail['index']]\n op_detail = self._quant_interpreter._get_op_details(op_idx) # pylint: disable=protected-access\n q_idx, f_idx = op_detail['inputs']\n quant_input_detail = self._quant_interpreter._get_tensor_details( # pylint: disable=protected-access\n q_idx)\n for (metric_name, metric_fn\n ) in self._debug_options.layer_direct_compare_metrics.items():\n layer_statistics[tensor_name][metric_name].append(\n metric_fn(\n self._quant_interpreter.get_tensor(f_idx),\n self._quant_interpreter.get_tensor(q_idx),\n quant_input_detail['quantization_parameters']['scales'][0],\n quant_input_detail['quantization_parameters']['zero_points']\n [0]))\n\n # Calculate final aggregated metrics for each layer.\n for metrics in layer_statistics.values():\n for metric_name in metrics:\n metrics[metric_name] = np.nanmean(metrics[metric_name])\n\n return layer_statistics\n\n def _collect_model_statistics(self) -> Dict[str, float]:\n \"\"\"Collects model output metrics.\n\n For all data from the given RepresentativeDataset, collect all model output\n results from float model & quantized debug model, and calculate metrics\n by using model output functions. 
As a result, self.model_results is filled,\n\n where self.model_results[model_output_function_name] = `aggregated model\n output function value` (a scalar).\n\n Returns:\n aggregated per-model output discrepancy metrics.\n {metric_name: aggregated_metric}\n \"\"\"\n\n model_statistics = collections.defaultdict(list)\n\n initialize = True\n for tensor_data in self._data_gen():\n self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)\n self._set_input_tensors(self._float_interpreter, tensor_data, initialize)\n initialize = False\n\n # Run the models.\n self._quant_interpreter.invoke()\n self._float_interpreter.invoke()\n\n # Collect the output results from both models.\n float_tensor_data = self._get_output_tensors(self._float_interpreter)\n quant_tensor_data = self._get_output_tensors(self._quant_interpreter)\n\n # Calculate the metrics.\n for (metric_name,\n metric_fn) in self._debug_options.model_debug_metrics.items():\n model_statistics[metric_name].append(\n metric_fn(float_tensor_data, quant_tensor_data))\n\n # Calculate final aggregated metrics for each outputs.\n return {\n metric_name: np.mean(metric)\n for metric_name, metric in model_statistics.items()\n }\n\n def _set_input_tensors(self, interpreter: tf.lite.Interpreter,\n tensor_data: Sequence[np.ndarray],\n initialize: bool) -> None:\n \"\"\"Sets input tensors into TFLite model Interpreter.\n\n Args:\n interpreter: a tf.lite.Interpreter object with allocated tensors.\n tensor_data: a list of Numpy array data.\n initialize: set to true when input is first set for the interpreter, to\n set input shapes and allocate tensors.\n\n Raises:\n ValueError: when inputs can't be set, or size of provided inputs does not\n match size of model inputs.\n \"\"\"\n input_details = interpreter.get_input_details()\n if len(input_details) != len(tensor_data):\n raise ValueError(\n 'Number of inputs provided ({}) does not match number of inputs to '\n 'the model ({})'.format(len(tensor_data), len(input_details)))\n\n if initialize:\n for input_detail, tensor in zip(input_details, tensor_data):\n interpreter.resize_tensor_input(input_detail['index'], tensor.shape)\n interpreter.allocate_tensors()\n\n for input_detail, tensor in zip(input_details, tensor_data):\n if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8:\n quant_params = _get_quant_params(input_detail)\n if quant_params:\n scale, zero_point = quant_params\n tensor = np.round((tensor / scale) + zero_point).astype(np.int8)\n interpreter.set_tensor(input_detail['index'], tensor)\n\n def _get_output_tensors(self,\n interpreter: tf.lite.Interpreter) -> List[np.ndarray]:\n \"\"\"Returns output tensors of given TFLite model Interpreter.\n\n Args:\n interpreter: a tf.lite.Interpreter object with allocated tensors.\n\n Returns:\n a list of numpy arrays representing output tensor results.\n \"\"\"\n\n outputs = []\n for output_detail in interpreter.get_output_details():\n tensor = interpreter.get_tensor(output_detail['index'])\n if output_detail['dtype'] == np.int8:\n quant_params = _get_quant_params(output_detail)\n if quant_params:\n scale, zero_point = quant_params\n tensor = ((tensor.astype(np.float32) - zero_point) * scale).astype(\n np.float32)\n outputs.append(tensor)\n\n return outputs\n\n def _get_numeric_verify_tensor_details(self) -> List[str]:\n \"\"\"Returns all names of all tensors from NumericVerify op.\"\"\"\n # pylint: disable=protected-access\n if not self._numeric_verify_tensor_details:\n self._numeric_verify_tensor_details = []\n 
self._numeric_verify_op_details = {}\n for op_info in self._quant_interpreter._get_ops_details():\n if op_info['op_name'] == _NUMERIC_VERIFY_OP_NAME:\n self._numeric_verify_tensor_details.append(\n self._quant_interpreter._get_tensor_details(\n op_info['outputs'][0]))\n tensor_name = self._numeric_verify_tensor_details[-1]['name']\n self._numeric_verify_op_details[tensor_name] = op_info\n # pylint: enable=protected-access\n return self._numeric_verify_tensor_details\n\n def _get_operand_name_and_index(self,\n numeric_verify_name: str) -> Tuple[str, int]:\n \"\"\"Gets the index and name of NumericVerify Op's quantized input tensor.\n\n Args:\n numeric_verify_name: name of the NumericVerify op's output tensor. It has\n format of `NumericVerify/{quantized_tensor_name}:{quantized_tensor_idx}`\n\n Returns:\n Tuple of (tensor_name, tensor_idx) for quantized op's output tensor.\n \"\"\"\n tensor_name, tensor_idx = numeric_verify_name.rsplit(':', 1)\n return (tensor_name[len(_NUMERIC_VERIFY_OP_NAME) + 1:], int(tensor_idx))\n\n def layer_statistics_dump(self, file: IO[str]) -> None:\n \"\"\"Dumps layer statistics into file, in csv format.\n\n Args:\n file: file, or file-like object to write.\n \"\"\"\n # order of `fields` is the order of fields in csv.\n fields = ['op_name', 'tensor_idx'] + list(self._layer_debug_metrics.keys())\n if self._debug_options.layer_direct_compare_metrics is not None:\n fields += list(self._debug_options.layer_direct_compare_metrics.keys())\n fields += ['scale', 'zero_point', 'tensor_name']\n writer = csv.DictWriter(file, fields)\n writer.writeheader()\n for name, metrics in self.layer_statistics.items():\n data = metrics.copy()\n (data['tensor_name'], _) = self._get_operand_name_and_index(name)\n data['tensor_idx'] = self._numeric_verify_op_details[name]['inputs'][0]\n data['op_name'] = self._quant_interpreter._get_op_details( # pylint: disable=protected-access\n self._defining_op[data['tensor_idx']])['op_name']\n details = self._quant_interpreter._get_tensor_details(data['tensor_idx']) # pylint: disable=protected-access\n data['scale'], data['zero_point'] = (\n details['quantization_parameters']['scales'][0],\n details['quantization_parameters']['zero_points'][0])\n writer.writerow(data)\n","repo_name":"graphcore/tensorflow","sub_path":"tensorflow/lite/tools/optimize/debugging/python/debugger.py","file_name":"debugger.py","file_ext":"py","file_size_in_byte":16406,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"3"} +{"seq_id":"28087996103","text":"'''\nGiven an integer array nums sorted in non-decreasing order, return an array of the squares of each number sorted in non-decreasing order.\n\n \nExample 1:\nInput: nums = [-4,-1,0,3,10]\nOutput: [0,1,9,16,100]\nExplanation: After squaring, the array becomes [16,1,0,9,100].\nAfter sorting, it becomes [0,1,9,16,100].\n\nExample 2:\nInput: nums = [-7,-3,2,3,11]\nOutput: [4,9,9,49,121]\n\nConstraints:\n1 <= nums.length <= 104\n-104 <= nums[i] <= 104\nnums is sorted in non-decreasing order.\n \nFollow up: Squaring each element and sorting the new array is very trivial, could you find an O(n) solution using a different approach?\n'''\n\n\ndef sortedSquares(nums: [int]) -> [int]:\n ans = [0] * len(nums)\n i = 0\n j = len(nums) - 1\n k = len(nums) - 1\n while k >= 0:\n # print(k,ans,nums[i],abs(nums[j]))\n if abs(nums[i]) <= abs(nums[j]):\n ans[k] = nums[j] * nums[j]\n j -= 1\n else:\n ans[k] = nums[i] * nums[i]\n i += 1\n k -= 1\n # print(nums)\n return ans\n\nif __name__ == 
'__main__':\n nums = [-4,-1,0,3,10]\n print(sortedSquares(nums))","repo_name":"Karan-J/leetcode","sub_path":"leetcode-2022/SortedSquares.py","file_name":"SortedSquares.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4312194473","text":"from django.db import models\n\nclass Student(models.Model):\n name = models.CharField(max_length=50)\n slug = models.SlugField(max_length=50)\n gap = models.IntegerField()\n description = models.TextField()\n\nclass Organization(models.Model):\n name = models.CharField(max_length=200)\n slug = models.SlugField(max_length=200)\n description = models.TextField()\n","repo_name":"diN0bot/harmonika","sub_path":"webapp/harmonika/web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25067255146","text":"import MySQLdb as mdb\nimport sys\nimport csv\nimport numpy as np\nimport pandas as pd\nimport string as s\n\n \ndef build_population_df():\n # Build pandas data frame of Population by Community District\n # Data from City of New York, 2010\n \n path = '/Users/jamie/GitHub/nyc_data/'\n \n # Declare empty data frame\n n_community_districts = 59\n indicies = range(n_community_districts)\n columns = ['Borough','Community num', 'Community name', 'Population']\n Population_by_Community_df = pd.DataFrame(index=indicies,columns=columns)\n \n # Open file,load data into the data frame\n o = open(path+'nyc population by community district.csv','rU')\n csv_data = csv.reader(o)\n\n for i in range(n_community_districts):\n Population_by_Community_df.ix[i,:] = csv_data.next()\n Population_by_Community_df.ix[i,'Community num'] = \\\n Population_by_Community_df.ix[i,'Community num'].strip() #remove extra spaces\n \n Population_by_Community_df.ix[i,'Population'] = \\\n int(Population_by_Community_df.ix[i,'Population'].replace(\",\", \"\"))\n \n return Population_by_Community_df\n \ndef build_zip_to_community_df():\n # Build pandas data frame of Zip code to Community District mapping\n # Approximate mapping by Frank Donnelly, Geospatial Data Librarian, Baruch College CUNY\n # Source: http://guides.newman.baruch.cuny.edu/ld.php?content_id=7154885\n \n path = '/Users/jamie/GitHub/nyc_data/'\n # Open file, look at column names\n o = open(path+'nyc zip to community district.csv','rU')\n csv_data = csv.reader(o)\n file_col_names = csv_data.next()\n # print file_col_names\n\n # Declare empty data frame\n n_zips = 211\n indicies = range(n_zips)\n columns = ['Zip','PUMA num', 'Community name long', 'Community name short','Per in Com','Per of Com']\n Zip_to_community_mapping_df = pd.DataFrame(index=indicies,columns=columns)\n \n for i in range(n_zips):\n Zip_to_community_mapping_df.ix[i,:] = csv_data.next()\n return Zip_to_community_mapping_df\n \ndef parse_long_community_name(long_name):\n # Return borough, district numbers, short name\n # assumes the format \"NYC-- Community District --\"\n name_halfs = s.split(long_name,'--')\n short_name = name_halfs[1] #short name is following '--'\n \n # get borough and numbers\n first_half = name_halfs[0]\n split_first_half = first_half.split('Community District')\n borough = split_first_half[0].split('NYC-')[1].strip()\n\n # now get one or more numbers\n numbers = split_first_half[1].split('&')\n numbers_array = [n.split()[0] for n in numbers]\n \n return short_name, borough, numbers_array\n \ndef 
merge_nyc_community_info(Zip_to_community_mapping_df,Population_by_Community_df):\n # Now combine the useful data from these two sources\n # to build a data frame of Community names, Boroughs, ZIP codes, Population \n # then for each community I can rank the noise complaints per capita and look \n # at the types of noise/community district\n \n unique_communities = Zip_to_community_mapping_df['Community name long'].unique()\n \n n_communities = np.size(unique_communities) \n #some communities are merged here, so the count differs from other df\n \n # Build my community df and community zip codes dictionary\n indicies = range(n_communities)\n columns = ['Borough','Num 1','Num 2','Community name','Population']\n NYC_Community_df = pd.DataFrame(index=indicies,columns=columns)\n\n NYC_Community_zips_dict = {}\n \n for i in range(n_communities):\n # parse the long community names to get the short names, boroughs, and bourough numbers\n this_long_name = unique_communities[i]\n \n short_name, borough, numbers_array = parse_long_community_name(this_long_name)\n NYC_Community_df.ix[i,'Community name'] = short_name\n NYC_Community_df.ix[i,'Borough'] = borough\n NYC_Community_df.ix[i,'Num 1'] = numbers_array[0]\n if len(numbers_array) == 2: #for merged community numbers\n NYC_Community_df.ix[i,'Num 2'] = numbers_array[1]\n \n # Now add the populations. Some communities combine two districts\n this_borough_i = np.where(Population_by_Community_df['Borough'] == borough)[0]\n this_num_i = np.where(Population_by_Community_df['Community num'] == numbers_array[0])[0]\n if len(numbers_array) == 2:\n this_num2_i = np.where(Population_by_Community_df['Community num'] == numbers_array[1])[0]\n this_num_i = np.union1d(this_num_i,this_num2_i)\n this_pop_i = np.intersect1d(this_borough_i,this_num_i)\n \n NYC_Community_df.ix[i,'Population'] = Population_by_Community_df.ix[this_pop_i[0],'Population']\n if len(numbers_array) == 2:\n NYC_Community_df.ix[i,'Population'] = NYC_Community_df.ix[i,'Population'] +\\\n Population_by_Community_df.ix[this_pop_i[1],'Population']\n \n #build a dictionary of short_name: zip codes\n community_is = Zip_to_community_mapping_df['Community name short'] == short_name\n community_zips_list = Zip_to_community_mapping_df.ix[community_is,'Zip'].values.tolist()\n NYC_Community_zips_dict[short_name] = tuple(community_zips_list)\n \n return NYC_Community_df, NYC_Community_zips_dict\n ","repo_name":"jkfitzg/nyc_data","sub_path":"process_community_data.py","file_name":"process_community_data.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42812448249","text":"from typing import List\n\n\nclass Solution:\n def numUniqueEmails(self, emails: List[str]) -> int:\n s = set()\n for email in emails:\n arr = email.split('@')\n prefix = arr[0].split('+')[0]\n prefix = prefix.replace('.', '')\n s.add(prefix + '@' + arr[1])\n return len(s)\n\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.numUniqueEmails(\n [\"test.email+alex@leetcode.com\", \"test.email.leet+alex@code.com\"])\n print(result)\n","repo_name":"kenwoov/PlayLeetCode","sub_path":"Algorithms/Easy/929. 
Unique Email Addresses/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1173475488","text":"from ast import Return\nimport email\nfrom pydoc import cli\nfrom select import select\nfrom time import timezone\nfrom django.contrib import messages\nfrom servico.models import Servico\nfrom produto.models import Produto\nfrom cliente.models import Cliente\nfrom .models import OrdemServico\nfrom django.views.generic.list import ListView\nfrom django.views import View\nfrom django.http import HttpResponse\nfrom email import message\nfrom django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom datetime import date\nfrom .models import ItemPeca, ItemServico\n\n\nclass Dashboard(ListView):\n model = OrdemServico\n template_name = 'ordem_de_servico/dashboard.html'\n context_object_name = 'ordens'\n\n\nclass PreCriarOs(ListView):\n model = Cliente\n template_name = 'ordem_de_servico/add_cliente.html'\n context_object_name = 'clientes'\n paginate_by = 10\n\n\nclass AdicionarCliente(View):\n def get(self, *args, **kwargs):\n id_cliente = self.request.GET.get('id_cliente')\n info_cliente = get_object_or_404(Cliente, id=id_cliente)\n nome_cliente = info_cliente.nome\n sobrenome_cliente = info_cliente.sobrenome\n cpf_cliente = info_cliente.cpf\n telefone_cliente = info_cliente.telefone\n email_cliente = info_cliente.email\n id_cliente = info_cliente.pk\n\n self.request.session['cliente'] = {}\n self.request.session['cliente'] = {\n 'id_cliente': id_cliente,\n 'nome_cliente': nome_cliente,\n 'last_cliente': sobrenome_cliente,\n 'cpf_cliente': cpf_cliente,\n 'telefone_cliente': telefone_cliente,\n 'email_cliente': email_cliente\n }\n\n self.request.session.save()\n\n return redirect('os:criar')\n\n\nclass CriarOS(ListView):\n model = Produto\n template_name = 'ordem_de_servico/criar_os_servico.html'\n context_object_name = 'os_produtos'\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super(CriarOS, self).get_context_data(**kwargs)\n context.update({\n 'os_servicos': Servico.objects.order_by('nome_servico')\n })\n\n return context\n\n\nclass AdicionandoOS(View):\n def get(self, *args, **kwargs):\n\n http_referer = self.request.META.get(\n 'HTTP_REFERER',\n reverse('os:criar')\n )\n\n # carregando o Id pelo GET\n id_produto = self.request.GET.get('id_produto')\n id_servico = self.request.GET.get('id_servico')\n comentarios = self.request.GET.get('comentarios')\n\n # buscando o produto no banco de dados\n if id_produto:\n produto = get_object_or_404(Produto, id=id_produto)\n messages.success(\n self.request,\n f'{produto.nome_produto} adicionado com sucesso'\n )\n\n produto_id = produto.pk\n nome_produto = produto.nome_produto\n preco_produto = produto.preco_produto\n descricao_produto = produto.descricao_produto\n estoque_produto = produto.estoque\n\n if estoque_produto < 1:\n messages.error(\n self.request,\n 'Estoque insuficiente'\n )\n return redirect(http_referer)\n\n # buscando serviço no banco de dados\n if id_servico:\n servico = get_object_or_404(Servico, id=id_servico)\n messages.success(\n self.request,\n f'{servico.nome_servico} adicionado com sucesso'\n )\n\n servico_id = servico.pk\n nome_servico = servico.nome_servico\n preco_servico = servico.preco_servico\n descricao_servico = servico.descricao_servico\n\n if not self.request.session.get('carrinho_produto'):\n self.request.session['carrinho_produto'] = {}\n 
self.request.session.save()\n\n if not self.request.session.get('carrinho_servico'):\n self.request.session['carrinho_servico'] = {}\n self.request.session.save()\n\n carrinho_produto = self.request.session['carrinho_produto']\n carrinho_servico = self.request.session['carrinho_servico']\n\n # adicionando os produtos na ordem de serviço\n if id_produto != None:\n if id_produto in carrinho_produto:\n quantidade_produto_carrinho = carrinho_produto[id_produto]['quantidade']\n quantidade_produto_carrinho += 1\n\n if estoque_produto < quantidade_produto_carrinho:\n messages.error(\n self.request,\n f'Estoque insuficiente para {quantidade_produto_carrinho} x'\n f' no produto {nome_produto}. Adicionamos {estoque_produto} x'\n f' na ordem de serviço.'\n )\n quantidade_produto_carrinho = estoque_produto\n\n carrinho_produto[id_produto]['quantidade'] = quantidade_produto_carrinho\n carrinho_produto[id_produto]['preco_produto_os'] = preco_produto * \\\n quantidade_produto_carrinho\n else:\n carrinho_produto[id_produto] = {\n 'produto_id': produto_id,\n 'nome_produto': nome_produto,\n 'preco_produto': preco_produto,\n 'descricao_produto': descricao_produto,\n 'quantidade': 1,\n 'preco_produto_os': preco_produto\n }\n\n # adicionando os serviços na ordem de serviço\n if id_servico != None:\n if id_servico in carrinho_servico:\n quantidade_servico_os = carrinho_servico[id_servico]['quantidade']\n quantidade_servico_os += 1\n\n carrinho_servico[id_servico]['quantidade'] = quantidade_servico_os\n carrinho_servico[id_servico]['preco_servico_os'] = preco_servico * \\\n quantidade_servico_os\n else:\n carrinho_servico[id_servico] = {\n 'servico_id': servico_id,\n 'nome_servico': nome_servico,\n 'preco_servico': preco_servico,\n 'descricao_servico': descricao_servico,\n 'quantidade': 1,\n 'preco_servico_os': preco_servico\n }\n\n self.request.session.save()\n\n return redirect(http_referer)\n\n\nclass AdicionandoComent(View):\n def get(self, *args, **kwargs):\n http_referer = self.request.META.get(\n 'HTTP_REFERER',\n reverse('os:criar')\n )\n\n produto = self.request.session['carrinho_produto'] or None\n servico = self.request.session['carrinho_servico'] or None\n\n if produto == None and servico == None:\n messages.info(\n self.request, 'Crie sua ordem de serviço'\n )\n return redirect(http_referer)\n\n comentarios = self.request.GET.get('comentarios')\n veiculo = self.request.GET.get('veiculo')\n placa = self.request.GET.get('placa')\n km = self.request.GET.get('km')\n data = self.request.GET.get('data')\n # TODO: arrumar formato da data\n entrada = '{}-{}-{}'.format(date.today().day,\n date.today().month, date.today().year)\n\n self.request.session['carrinho_comentario'] = {\n 'veiculo': veiculo,\n 'placa': placa,\n 'km': km,\n 'entrada': entrada,\n 'data': data,\n 'comentarios': comentarios\n }\n\n self.request.session.save()\n\n return redirect('os:listar')\n\n\nclass RemoverItemOs(View):\n def get(self, *args, **kwargs):\n http_referer = self.request.META.get(\n 'HTTP_REFERER',\n reverse('os:pre_os')\n )\n\n id_produto_remove = self.request.GET.get('id_produto')\n id_servico_remove = self.request.GET.get('id_servico')\n\n if id_produto_remove:\n if not self.request.session['carrinho_produto']:\n return redirect(http_referer)\n\n if not id_produto_remove:\n return redirect(http_referer)\n\n if not id_produto_remove in self.request.session['carrinho_produto']:\n return redirect(http_referer)\n carrinho = self.request.session['carrinho_produto'][id_produto_remove]\n\n # TODO: verificar pois a mensagem 
aparece no local errado\n messages.success(\n self.request,\n f'Produto {carrinho[\"nome_produto\"]} removido com sucesso'\n )\n\n del self.request.session['carrinho_produto'][id_produto_remove]\n self.request.session.save()\n return redirect(http_referer)\n\n if id_servico_remove:\n if not self.request.session['carrinho_servico']:\n return redirect(http_referer)\n\n if not id_servico_remove:\n return redirect(http_referer)\n\n if not id_servico_remove in self.request.session['carrinho_servico']:\n return redirect(http_referer)\n\n carrinho = self.request.session['carrinho_servico'][id_servico_remove]\n # TODO: verificar pois a mensagem aparece no local errado\n messages.success(\n self.request,\n f'Produto {carrinho[\"nome_servico\"]} removido com sucesso'\n )\n\n del self.request.session['carrinho_servico'][id_servico_remove]\n self.request.session.save()\n return redirect(http_referer)\n\n return redirect(http_referer)\n\n\nclass ListarOs(View):\n def get(self, *args, **kwargs):\n\n total_produto = float()\n total_servico = float()\n\n for chave_1, valor_1 in self.request.session['carrinho_produto'].items():\n for chave_2, valor_2 in valor_1.items():\n if chave_2 == 'preco_produto_os':\n valor_2 = float(valor_2)\n total_produto += valor_2\n\n for chave_1, valor_1 in self.request.session['carrinho_servico'].items():\n for chave_2, valor_2 in valor_1.items():\n if chave_2 == 'preco_servico_os':\n valor_2 = float(valor_2)\n total_servico += valor_2\n\n total_os = total_produto + total_servico\n self.request.session['total'] = {\n 'valor_total': total_os\n }\n\n contexto = {\n 'carrinho_comentario': self.request.session.get('carrinho_comentario', {}),\n 'carrinho_produto': self.request.session.get('carrinho_produto', {}),\n 'carrinho_servico': self.request.session.get('carrinho_servico', {}),\n 'cliente': self.request.session.get('cliente', {}),\n 'total_os': self.request.session.get('total', {})\n }\n return render(self.request, 'ordem_de_servico/resumo_os.html', contexto)\n\n\nclass SalvarOs(View):\n template_name = 'ordem_de_servico/dashboard.html'\n\n def get(self, *args, **kwargs):\n\n if not self.request.session.get('cliente'):\n messages.error(self.request, 'Selecione o cliente.')\n return redirect('os:pre_os')\n\n if not self.request.session.get('carrinho_produto') and not self.request.session.get('carrinho_servico'):\n messages.error(self.request, 'Selecione o produto ou serviço')\n return redirect('os:pre_os')\n\n carrinho_produto = self.request.session['carrinho_produto']\n carrinho_servico = self.request.session['carrinho_servico']\n carrinho_comentario = self.request.session['carrinho_comentario']\n cliente = self.request.session['cliente']['id_cliente']\n valor_total = self.request.session['total']['valor_total']\n\n carrinho_produto_itens = [v for v in carrinho_produto]\n carrinho_servico_itens = [v for v in carrinho_servico]\n comentario_veiculo = carrinho_comentario['veiculo']\n comentario_placa = carrinho_comentario['placa']\n comentario_km = carrinho_comentario['km']\n comentario_data_entrada = carrinho_comentario['entrada']\n comentario_data_saida = carrinho_comentario['data']\n comentario_comentario = carrinho_comentario['comentarios']\n\n bd_produto = list(Produto.objects.filter(id__in=carrinho_produto))\n for valor in bd_produto:\n vid = str(valor.id)\n\n estoque = valor.estoque\n qtd_carrinho = carrinho_produto[vid]['quantidade']\n preco_unit = carrinho_produto[vid]['preco_produto']\n\n cliente_db = Cliente.objects.get(pk=cliente)\n\n if not 
self.request.session.get('carrinho_servico'):\n fechar_os = True\n\n os = OrdemServico(\n cliente=cliente_db,\n veiculo=comentario_veiculo or 'N.A',\n placa_veiculo=comentario_placa or 'N.A',\n km=comentario_km or 0,\n data_inicial=comentario_data_entrada,\n data_termino=comentario_data_saida or f'{date.today().year}-{date.today().month}-{date.today().day}',\n valor_total=valor_total,\n observacoes=comentario_comentario or '',\n os_concluida=fechar_os\n )\n\n os.save()\n\n ItemPeca.objects.bulk_create(\n [\n ItemPeca(\n ordem_servico=os,\n produto=v['nome_produto'],\n produto_id=v['produto_id'],\n preco=v['preco_produto'],\n quantidade=v['quantidade']\n ) for v in carrinho_produto.values()\n ]\n )\n\n ItemServico.objects.bulk_create(\n [\n ItemServico(\n ordem_servico=os,\n servico=v['nome_servico'],\n servico_id=v['servico_id'],\n preco=v['preco_servico']\n ) for v in carrinho_servico.values()\n ]\n )\n\n del self.request.session['carrinho_produto']\n del self.request.session['carrinho_servico']\n del self.request.session['cliente']\n del self.request.session['total']\n\n return redirect('os:dash')\n\n\n# TODO: Tentar fazer função imprimir\nclass Imprimir(View):\n pass\n\n\nclass ListarOrdens(ListView):\n # TODO: corrigir a ordem de exibição...os mais recentes primeiro\n model = OrdemServico\n template_name = 'ordem_de_servico/listar_os.html'\n context_object_name = 'ordem_de_servico'\n paginate_by = 10\n","repo_name":"pereiraelionai/Projeto-Django","sub_path":"ordem_de_servico/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14549,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10999972851","text":"import json\n\nimport math\nfrom django.conf import settings\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom file_management.syncing_names import fix_filenames\nfrom sendfile import sendfile\nimport os\nfrom rest_framework.decorators import api_view\nfrom database_items.models import DplusSession\nfrom raw_backend.jobs import start_new_job, check_file_status, get_exe_results, delete_job, create_metadata_file, \\\n get_job_status, modify_return_json, py_api_version, web_debug_version\n\n__author__ = \"DevoraW\"\n\nclass MyDecoder(json.JSONDecoder):\n def __init__(self, *args, **kwargs):\n json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)\n\n def object_hook(self, obj):\n if \"inf\" in obj.values() or \"-inf\" in obj.values():\n for key, value in obj.items():\n if value==\"inf\":\n obj[key]=math.inf\n elif value==\"-inf\":\n obj[key]=-math.inf\n return obj\n\ndef get_body_json(body_bytes):\n body_str = body_bytes.decode('utf-8')\n body_json = json.loads(body_str, cls=MyDecoder)\n return body_json\n\n\ndef metadata(request):\n folder_path = settings.MEDIA_ROOT\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n try:\n filename = folder_path + r\"/metadata.json\"\n f = open(filename, 'r', encoding='utf8')\n except FileNotFoundError:\n create_metadata_file(folder_path)\n f = open(filename, 'r', encoding='utf8')\n data = json.load(f)\n f.close()\n res_dict = {'result': data}\n return modify_return_json(res_dict)\n\ndef handle_error(e):\n error_dict={\n \"error\":\n {\n \"message\":str(e),\n \"debug_info\": \"pyInt version: \"+ str(py_api_version) +\" web version\" + web_debug_version\n }\n }\n if isinstance(e, ValueError):\n error_dict[\"error\"][\"code\"]=19\n jr = JsonResponse(error_dict)\n jr.status_code = 400\n else:\n 
error_dict[\"error\"][\"code\"] = 5\n jr = JsonResponse(error_dict)\n jr.status_code = 500\n return jr\n\n@api_view(['GET', 'PUT'])\n@csrf_exempt\ndef generate(request):\n Dsession = DplusSession.get_or_create_session(request)\n try:\n if request.method == 'PUT':\n body_json = get_body_json(request.body)\n args = fix_filenames(body_json, request.user)\n return start_new_job('Generate', Dsession, args)\n if request.method == 'GET':\n return get_exe_results(Dsession)\n except Exception as e:\n return handle_error(e)\n\n@api_view(['GET', 'PUT'])\n@csrf_exempt\ndef fit(request):\n Dsession = DplusSession.get_or_create_session(request)\n try:\n if request.method == 'PUT':\n body_json = get_body_json(request.body)\n args = fix_filenames(body_json, request.user)\n return start_new_job('Fit', Dsession, args)\n if request.method == 'GET':\n return get_exe_results(Dsession)\n except Exception as e:\n return handle_error(e)\n\n\n@api_view(['GET', 'DELETE'])\n@csrf_exempt\ndef job(request):\n Dsession = DplusSession.get_or_create_session(request)\n if request.method == 'DELETE':\n return delete_job(Dsession)\n if request.method == 'GET':\n return get_job_status(Dsession)\n\n\n\n@api_view(['GET'])\n@csrf_exempt\ndef pdb(request, modelptr):\n Dsession = DplusSession.get_or_create_session(request)\n #check_file_status(Dsession) #TODO\n ptr_string = '%08d.pdb' % (int(modelptr))\n filepath = os.path.join(Dsession.directory, 'pdb', ptr_string)\n if not os.path.isfile(filepath):\n return JsonResponse({\"error\":{\"code\":6, \"message\":\"could not find pdb for model \"+modelptr}}, status=404, reason=\"file not found\")\n # TODO: Check if something bad happened\n return sendfile(request, filepath, attachment=True, attachment_filename='returnedfile.pdb',\n mimetype='application/octet-stream')\n\n\n@api_view(['GET'])\n@csrf_exempt\ndef amplitude(request, modelptr):\n Dsession = DplusSession.get_or_create_session(request)\n #check_file_status(Dsession) #TODO\n ptr_string = '%08d.amp' % (int(modelptr))\n filepath = os.path.join(Dsession.directory, 'cache', ptr_string)\n if not os.path.isfile(filepath):\n return JsonResponse({\"error\":{\"code\":6, \"message\":\"could not find amplitude for model \"+modelptr}}, status=404, reason=\"file not found\")\n # TODO: Check if something bad happened\n return sendfile(request, filepath, attachment=True, attachment_filename='returnedfile.amp',\n mimetype='application/octet-stream')\n\n","repo_name":"uri-raviv-lab/dplus-dev","sub_path":"WebApplication/raw_backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"24592297990","text":"#!/usr/bin/env python3\n\"\"\"performs a valid convolution on grayscale images\"\"\"\nimport numpy as np\n\n\ndef convolve_grayscale_valid(images, kernel):\n \"\"\"\n Args:\n images: (m,h,w) containing multiple grayscale images\n m: is the number of images\n h: is the height in pixels of the images\n w: is the width in pixels of the images\n kernel: (kh,kw) containing the kernel for the convolution\n kh: is the height of the kernel\n kw: is the width of the kernel\n Return:\n a numpy.ndarray containing the convolved images\n \"\"\"\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n kh = kernel.shape[0]\n kw = kernel.shape[1]\n # Calculatin output shape\n W_out = w - kw + 1\n H_out = h - kh + 1\n output_matriz = np.zeros((m, H_out, W_out))\n for i in range(W_out):\n for j in range(H_out):\n # 
np.tensordot(a2D,a3D,((-1,),(-1,))).transpose(1,0,2)\n part_image = images[:, j:j + kh, i:i + kw]\n output_matriz[:, j, i] = np.tensordot(part_image,\n kernel,\n axes=2)\n return output_matriz\n","repo_name":"Luffy981/holbertonschool-machine_learning","sub_path":"math/0x04-convolutions_and_pooling/0-convolve_grayscale_valid.py","file_name":"0-convolve_grayscale_valid.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"22114259231","text":"import tkinter as tk\n\n# Create the main window\nroot = tk.Tk()\nroot.title(\"Simple GUI\")\n\n# Add a label to the window\nlabel = tk.Label(root, text=\"Hello, Tkinter!\", font=(\"TkDefaultFont\", 20))\nlabel.pack()\n\n# Start the GUI event loop\nroot.mainloop() ","repo_name":"rohithrajr/scratch","sub_path":"scratch_2.py","file_name":"scratch_2.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70780481683","text":"from enum import Enum\nimport hashlib\nimport inspect\nimport logging\nimport os\n\nlogger = logging.getLogger(name=\"precipy.identifiers\")\n\nmetadata_filename = \"metadata.pkl\"\n\nclass FileType(Enum):\n ANALYTICS = \"analytics\"\n METADATA = \"metadata\"\n TEMPLATE = \"template\"\n DOCUMENT = \"document\"\n\nclass GeneratedFile(object):\n def __init__(self, canonical_filename, h, file_type=FileType.ANALYTICS, cache_filepath=None):\n self.canonical_filename = canonical_filename\n self.h = h\n self.file_type = file_type\n self.cache_filepath = cache_filepath\n self.ext = os.path.splitext(canonical_filename)[1]\n self.public_urls = []\n\n def __repr__(self):\n return \" \" % self.canonical_filename\n\ndef hash_for_dict(info_dict):\n logger.debug(\"\")\n logger.debug(\"computing hash for dict from:\")\n\n for k in sorted(info_dict):\n v = info_dict[k]\n\n if isinstance(v, dict):\n logger.debug(\" %s:\" % k)\n for kk, vv in v.items():\n logger.debug(\" %s: %s\" % (kk, str(vv)))\n else:\n logger.debug(\" %s: %s\" % (k, v))\n\n description = u\";\".join(\"%s: %s\" % (k, info_dict)\n for k in sorted(info_dict))\n hashvalue = hashlib.sha256(description.encode('utf-8')).hexdigest()\n\n logger.debug(hashvalue)\n logger.debug(\"\")\n return hashvalue\n\ndef hash_for_fn(fn, kwargs, depends=None):\n import precipy.batch as batch\n import precipy.analytics_function as analytics_function\n return hash_for_dict({\n 'canonical_function_name' : fn.__name__,\n 'fn_source' : hash_for_src(inspect.getsource(fn)),\n 'depends' : depends,\n 'arg_values' : kwargs,\n 'batch_source' : hash_for_src(inspect.getsource(batch)),\n 'analytics_function_source' : hash_for_src(inspect.getsource(analytics_function))\n })\n\ndef hash_for_supplemental_file(canonical_filename, fn_h):\n return hash_for_dict({\n \"fn_hash\" : fn_h,\n \"filename\" : canonical_filename\n })\n\ndef hash_for_src(text):\n m = hashlib.md5()\n m.update(text.encode('utf-8'))\n return m.hexdigest()\n\ndef hash_for_template_text(text):\n m = hashlib.md5()\n m.update(text.encode('utf-8'))\n return m.hexdigest()\n\ndef hash_for_template_file(filepath):\n m = hashlib.md5()\n with open(filepath, 'rb') as f:\n m.update(f.read())\n return m.hexdigest()\n\ndef hash_for_document(template_hash, filter_name, filter_ext, filter_args):\n x = { \"template_hash\" : template_hash,\n \"filter_name\" : filter_name,\n \"filter_ext\" : filter_ext}\n if isinstance(filter_args, dict):\n x.update(filter_args)\n else:\n 
x['filter_args'] = str(filter_args)\n return hash_for_dict(x)\n\ndef hash_for_doc(canonical_filename, hash_args=None):\n import precipy.batch as batch\n analytics_frameinfo = inspect.stack()[2]\n frame = analytics_frameinfo.frame \n\n d = { \n 'canonical_filename' : canonical_filename,\n 'batch_source' : hash_for_src(inspect.getsource(batch)),\n 'frame_source' : hash_for_src(inspect.getsource(frame)),\n 'values' : inspect.getargvalues(frame).args\n }\n\n if hash_args is not None:\n d.update(hash_args)\n\n return hash_for_dict(d)\n","repo_name":"ananelson/precipy","sub_path":"precipy/identifiers.py","file_name":"identifiers.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12512185874","text":"#User function Template for python3\n\nclass Solution:\n \n #Function to return a list containing the DFS traversal of the graph.\n def dfsOfGraph(self, V, adj):\n # code here\n visited = set()\n src = 0\n res = []\n def dfs(visited, adj, node, res):\n res.append(node)\n visited.add(node)\n for child in adj[node]:\n if child not in visited:\n dfs(visited, adj, child, res)\n return res\n res = dfs(visited, adj, src, res)\n return res\n\n\n#{ \n # Driver Code Starts\nif __name__ == '__main__':\n T=int(input())\n while T>0:\n V,E=map(int,input().split())\n adj=[[] for i in range(V+1)]\n for i in range(E):\n u,v=map(int,input().split())\n adj[u].append(v)\n adj[v].append(u)\n ob=Solution()\n ans=ob.dfsOfGraph(V,adj)\n for i in range(len(ans)):\n print(ans[i],end=\" \")\n print()\n T-=1\n# } Driver Code Ends","repo_name":"piyushrs/GFG","sub_path":"dfs_of_graph.py","file_name":"dfs_of_graph.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26836833994","text":"from sqlite3 import Error\nimport sqlite3 as lite\n\nDB_PATH = \"/ip-camera/bot/db/data_users.db\"\n\n\ndef create_connection(db_file):\n \"\"\" create a database connection to a SQLite database \"\"\"\n conn = None\n try:\n conn = lite.connect(db_file)\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n\n\ndef create(db_file):\n conn = None\n try:\n conn = lite.connect(db_file)\n c = conn.cursor()\n\n c.execute(\"\"\"DROP TABLE IF EXISTS users\"\"\")\n c.execute(\n \"\"\"CREATE TABLE \"users\" (\n \"user_id\" INTEGER PRIMARY KEY ,\n \"user_name\" TEXT,\n \"isRunning\" INTEGER,\n \"isTracking\" INTEGER,\n \"msg_id\" INTEGER,\n \"email\" TEXT,\n \"camera_ip\" TEXT,\n \"pan\" REAL,\n \"tilt\" REAL,\n \"zoom\" REAL\n );\"\"\"\n )\n\n conn.commit()\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n\n\ndef new_users(user_id, username, isRunning, msg_id, data_users=DB_PATH, isTracking=False):\n con = lite.connect(data_users)\n cur = con.cursor()\n cur.execute(\"select user_id from users WHERE user_id = ?\", (user_id,))\n\n if not (user_id,) in cur.fetchall():\n con = lite.connect(data_users)\n cur = con.cursor()\n cur.execute(\n \"INSERT INTO users VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n (user_id, username, isRunning, isTracking, msg_id, \"NULL\", \"NULL\", \"NULL\", \"NULL\", \"NULL\"),\n )\n\n con.commit()\n cur.close()\n\n\ndef update_by_id(user_id, column_name, value, table=\"users\", data_users=DB_PATH):\n con = lite.connect(data_users)\n cur = con.cursor()\n cur.execute(\n f\"UPDATE {table} SET {column_name}=? 
WHERE user_id=?\", (value, user_id,)\n )\n con.commit()\n cur.close()\n\n\ndef select_by_id(user_id, column_name, table=\"users\", data_users=DB_PATH):\n\n con = lite.connect(data_users)\n cur = con.cursor()\n recs = cur.execute(\n f\"SELECT {column_name} FROM {table} WHERE user_id={user_id}\"\n ).fetchall()\n\n cur.close()\n return recs[0][0]\n\n\ndef insert_camera_by_ip(\n user_id, camera_ip, configuration, table=\"cameras\", data_users=DB_PATH\n):\n con = lite.connect(data_users)\n cur = con.cursor()\n recs = cur.execute(\n f\"select user_id, camera_ip from {table} WHERE user_id = ? AND camera_ip = ?\",\n (user_id, camera_ip),\n ).fetchall()\n\n if not (user_id, camera_ip) in recs:\n\n cur.execute(\n f\"INSERT INTO {table} VALUES(?, ?, ?, ?, ?)\",\n (\n user_id,\n camera_ip,\n configuration[\"pan\"],\n configuration[\"tilt\"],\n configuration[\"zoom\"],\n ),\n )\n con.commit()\n\n cur.close()\n\n\ndef select_conf_by_id(user_id, table=\"users\", data_users=DB_PATH):\n\n con = lite.connect(data_users)\n cur = con.cursor()\n\n recs = cur.execute(\n f\"SELECT pan, tilt, zoom FROM {table} WHERE user_id={user_id}\"\n ).fetchall()\n\n cur.close()\n return recs[0]\n\n\ndef update_camera_by_id(user_id, camera_ip, column_name, value, data_users=DB_PATH):\n con = lite.connect(data_users)\n cur = con.cursor()\n cur.execute(\n f\"UPDATE camera SET {column_name}=? WHERE user_id=? AND \",\n (value, user_id, camera_ip),\n )\n con.commit()\n cur.close()\n\n\nif __name__ == \"__main__\":\n db_path = repr(DB_PATH)\n # create_connection(db_path)\n # create(db_path)\n\n conn = lite.connect(\"db/data_users.db\")\n c = conn.cursor()\n sql = \"SELECT * FROM users \"\n recs = c.execute(sql).fetchall()\n print(recs)\n\n c.close()\n","repo_name":"suchusername/ip-camera","sub_path":"bot/db_tools.py","file_name":"db_tools.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"28033631129","text":"#----------------------------------------------------------\n# File panel_props.py\n#----------------------------------------------------------\nimport bpy\nimport mathutils\nimport math\nimport json\nimport os\nfrom pathlib import Path\nReport = bpy.data.texts[\"report.py\"].as_module().Report\nimport importlib\n\nbl_info = {\n \"name\": \"ClonkExport\",\n \"description\": \"Export mesh and skeleton files for OpenClonk (which mainly uses the Ogre format)\",\n \"author\": \"Richard Gerum\",\n \"version\": (1, 0, \"rc3\"),\n \"blender\": (2, 81, 0),\n \"location\": \"View3D > Mish > Clonk Export\",\n \"warning\": \"\", # used for warning icon and text in addons panel\n \"wiki_url\": \"https://github.com/rgerum/clonk-blender-exporter\",\n \"tracker_url\": \"https://github.com/rgerum/clonk-blender-exporter/issues\",\n \"support\": \"COMMUNITY\",\n \"category\": \"Import-Export\",\n}\n\ndef Trans_Identity():\n return mathutils.Matrix()\n \ndef Trans_Translate(x, y, z):\n return mathutils.Matrix.Translation((x, y, z))\n \ndef Trans_Rotate(angle, x, y, z):\n return mathutils.Matrix.Rotation(angle*math.pi/180, 4, (x, y, z))\n \ndef Trans_Scale(x, y=None, z=None):\n if y is None:\n y = x\n if z is None:\n z = x\n return mathutils.Matrix.Scale(x, 4, (1, 0, 0))*mathutils.Matrix.Scale(y, 4, (0, 1, 0))*mathutils.Matrix.Scale(z, 4, (0, 0, 1))\n\ndef Trans_Mul(a, b):\n return a*b\n\ndef applyTransformation(ob, trans):\n if trans is None:\n trans = Trans_Identity()\n else:\n trans = eval(trans)\n trans = trans*mathutils.Matrix(((1, 0, 0, 
0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1)))\n print(\"trans\", ob)\n print(trans)\n ob.location = trans.to_translation()\n ob.rotation_quaternion = trans.to_quaternion()\n ob.rotation_mode = \"QUATERNION\"\n ob.scale = trans.to_scale()\n print(ob.location)\n print(ob.rotation_quaternion)\n \n \n# material properties\ndef MaterialChanged(mat, context):\n for i in range(4):\n mat.diffuse_color[i] = mat.clonkDiffuse[i]\n if mat.clonkTexture is None:\n mat.use_nodes = False\n else:\n mat.use_nodes = True\n for node in mat.node_tree.nodes:\n image = getattr(node, \"image\", None)\n if image is not None:\n node.image = mat.clonkTexture\n break\n else:\n if len(mat.node_tree.nodes):\n output_node = mat.node_tree.nodes[0]\n else:\n output_node = mat.node_tree.nodes.new(type='ShaderNodeOutputMaterial')\n if len(mat.node_tree.nodes) > 1:\n diffuse_node = mat.node_tree.nodes[1]\n else: \n diffuse_node = mat.node_tree.nodes.new(type=\"ShaderNodeBsdfDiffuse\")\n if len(mat.node_tree.nodes) > 2:\n image_node = mat.node_tree.nodes[2]\n else: \n image_node = mat.node_tree.nodes.new(type=\"ShaderNodeTexImage\")\n mat.node_tree.links.new(diffuse_node.outputs[0], output_node.inputs[0])\n mat.node_tree.links.new(image_node.outputs[0], diffuse_node.inputs[0])\n image_node.image = mat.clonkTexture\n print(\"new nodes needed\")\n for area in bpy.context.screen.areas:\n if area.type == 'IMAGE_EDITOR' :\n area.spaces.active.image = mat.clonkTexture\n print(mat, context)\n \nbpy.types.Material.clonkAmbient = bpy.props.FloatVectorProperty(\n name=\"ambient\", \n subtype='COLOR', \n min=0,\n max=1,\n size=4,\n default=[0.5,0.5,0.5,1.0])\nbpy.types.Material.clonkDiffuse = bpy.props.FloatVectorProperty(\n name=\"diffuse\", \n subtype='COLOR', \n min=0,\n max=1,\n size=4,\n update=MaterialChanged,\n default=[1.0,1.0,1.0,1.0])\nbpy.types.Material.clonkSpecular = bpy.props.FloatVectorProperty(\n name=\"specular\", \n subtype='COLOR', \n min=0,\n max=1,\n size=4,\n default=[0.0,0.0,0.0,1.0])\nbpy.types.Material.clonkSpecularSize = bpy.props.IntProperty(\n name=\"specular size\", \n min=1,\n max=255,\n default=12)\nbpy.types.Material.clonkEmissive = bpy.props.FloatVectorProperty(\n name=\"emissive\", \n subtype='COLOR', \n min=0,\n max=1,\n size=4,\n default=[0.0,0.0,0.0,1.0])\nbpy.types.Material.clonkReceiveShadows = bpy.props.BoolProperty(\n name=\"receive_shadows\", \n default=True) \nbpy.types.Material.clonkTexture = bpy.props.PointerProperty(\n type=bpy.types.Image,\n name=\"texture\", \n update=MaterialChanged,\n )\n \nclass ClonkImageLoadOperator(bpy.types.Operator):\n\n \"\"\"Create render for all chracters\"\"\"\n bl_idname = \"clonk.add_image_texture\"\n bl_label = \"Open Image\"\n bl_options = {'REGISTER'}\n\n # Define this to tell 'fileselect_add' that we want a directoy\n filepath: bpy.props.StringProperty()\n\n def execute(self, context):\n mat = context.object.active_material\n mat.clonkTexture = bpy.data.images.load(self.filepath)\n print(\"Selected dir: '\" + self.filepath + \"'\")\n\n return {'FINISHED'}\n\n def invoke(self, context, event):\n # Open browser, take reference to 'self' read the path to selected\n # file, put path in predetermined self fields.\n # See: https://docs.blender.org/api/current/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add\n context.window_manager.fileselect_add(self)\n # Tells Blender to hang on for the slow user input\n return {'RUNNING_MODAL'}\n \nclass CLONK_PANEL_PT_material(bpy.types.Panel):\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n 
bl_context = \"material\"\n bl_label = \"Clonk Material\"\n \n def draw(self, context):\n mat = context.material\n self.layout.prop(mat, 'clonkReceiveShadows')\n self.layout.prop(mat, 'clonkAmbient')\n self.layout.prop(mat, 'clonkDiffuse') \n self.layout.prop(mat, 'clonkSpecular') \n self.layout.prop(mat, 'clonkSpecularSize') \n self.layout.prop(mat, 'clonkEmissive') \n row = self.layout.row()\n row.prop(mat, 'clonkTexture')\n row.operator(\"clonk.add_image_texture\", icon=\"FILE_FOLDER\", text=\"\")\n \n# Define an RNA prop for every object\nbpy.types.Object.clonkExportActionFile = bpy.props.StringProperty(\n name=\"Action.txt\",\n default=\"\",\n subtype='FILE_PATH')\n\nbpy.types.Object.clonkExportName = bpy.props.StringProperty(\n name=\"Export Mesh Name\",\n description=\"The name to be used when exporting the object, e.g. Graphics.mesh\",\n default=\"Graphics\")\n \nbpy.types.Object.clonkExportSkeletonName = bpy.props.StringProperty(\n name=\"Export Skeleton Name\",\n description=\"The name to be used when exporting the object, e.g. Clonk.skeleton\",\n default=\"Clonk\")\n\nbpy.types.Action.clonkActionDoExport = bpy.props.BoolProperty(\n name=\"Export\",\n description=\"Whether to export this action.\",\n default=True)\nbpy.types.Action.clonkActionExportName = bpy.props.StringProperty(\n name=\"Export Name\",\n description=\"When set export under a different name.\",\n default=\"\")\nbpy.types.Action.clonkActionStart = bpy.props.IntProperty(\n name=\"Start\",\n min=-1,\n description=\"Define the frame number where to start the action. When -1 select automatically.\",\n default=-1)\nbpy.types.Action.clonkActionEnd = bpy.props.IntProperty(\n name=\"End\",\n min=-1,\n description=\"Define the frame number where to end the action. When -1 select automatically.\",\n default=-1)\n \nbpy.types.Action.clonkAttachMesh = bpy.props.StringProperty(\n name=\"Attach Mesh\",\n description=\"The name of an object which should be attached to the Clonk during this action.\",\n default=\"\")\nbpy.types.Action.clonkAttachBone = bpy.props.StringProperty(\n name=\"Parent Bone\",\n description=\"Which bone of the Clonk to use.\",\n default=\"\")\nbpy.types.Action.clonkAttachBone2 = bpy.props.StringProperty(\n name=\"Child Bone\",\n description=\"Which bone of the tool to use.\",\n default=\"\")\nbpy.types.Action.clonkAttachTransformation = bpy.props.StringProperty(\n name=\"Transformation\",\n description=\"Optinal a transformation to apply when attaching the mesh.\",\n default=\"\")\nbpy.types.Action.clonkAttachAction = bpy.props.StringProperty(\n name=\"Action\",\n description=\"Optinal an action to select in the child mesh.\",\n default=\"\")\n \n# Button\nclass OBJECT_OT_Button(bpy.types.Operator):\n bl_idname = \"clonk.export_mesh\"\n bl_label = \"Export Mesh\"\n \n def execute(self, context):\n ob = context.object\n if ob.type == \"ARMATURE\":\n if len(ob.children):\n ob = ob.children[0]\n \n mesh = bpy.data.texts[\"mesh.py\"].as_module()\n\n target_folder = Path(bpy.data.filepath).parent / \"export\"\n materials = mesh.dot_mesh(ob, target_folder, overwrite=True)\n \n material = bpy.data.texts[\"material.py\"].as_module()\n\n material.writeMaterials(target_folder / \"Scene.material\", materials)\n Report.show()\n return{'FINISHED'}\n \nclass OBJECT_OT_Button2(bpy.types.Operator):\n bl_idname = \"clonk.export_skeleton_all\"\n bl_label = \"Export Skeleton\"\n \n def execute(self, context):\n ob = context.object\n if ob.type == \"ARMATURE\":\n if len(ob.children):\n ob = ob.children[0]\n \n skeleton = 
bpy.data.texts[\"skeleton.py\"].as_module()\n\n skeleton.dot_skeleton(ob, os.path.dirname(bpy.data.filepath)+\"/export\", overwrite=True)\n Report.show()\n return{'FINISHED'}\n \n# Property panel\nclass CLONK_PT_action_export(bpy.types.Panel):\n bl_label = \"Action Export\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n \n @classmethod\n def poll(cls, context):\n ob = bpy.context.object\n if not ob:\n return\n\n try:\n action = ob.animation_data.action\n except AttributeError:\n action = None\n if action is not None:\n return True\n \n def draw(self, context):\n ob = bpy.context.object\n if not ob:\n return\n layout = self.layout\n\n try:\n action = ob.animation_data.action\n except AttributeError:\n action = None\n if action is not None:\n self.layout.label(text=\"Name: \"+ob.animation_data.action.name)\n layout.prop(ob.animation_data.action, 'clonkActionDoExport')\n layout.prop(ob.animation_data.action, 'clonkActionExportName')\n layout.prop(ob.animation_data.action, 'clonkActionStart')\n layout.prop(ob.animation_data.action, 'clonkActionEnd')\n \nclass CLONK_PT_action(bpy.types.Panel):\n bl_label = \"Action Attach Display\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n \n @classmethod\n def poll(cls, context):\n ob = bpy.context.object\n if not ob:\n return\n\n try:\n action = ob.animation_data.action\n except AttributeError:\n action = None\n if action is not None:\n return True\n \n def draw(self, context):\n ob = bpy.context.object\n if not ob:\n return\n layout = self.layout\n\n try:\n action = ob.animation_data.action\n except AttributeError:\n action = None\n if action is not None:\n layout.prop(ob.animation_data.action, 'clonkAttachMesh')\n layout.prop(ob.animation_data.action, 'clonkAttachBone')\n layout.prop(ob.animation_data.action, 'clonkAttachBone2')\n layout.prop(ob.animation_data.action, 'clonkAttachTransformation')\n layout.prop(ob.animation_data.action, 'clonkAttachAction')\n \nclass CLONK_PT_export(bpy.types.Panel):\n bl_label = \"Clonk Export\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n \n @classmethod\n def poll(cls, context):\n if context.object is None:\n return\n if context.object.type == \"MESH\":\n return True\n if context.object.type == \"ARMATURE\":\n if len(context.object.children):\n obj = context.object.children[0]\n if obj.type == \"MESH\":\n return True\n \n def draw(self, context):\n ob = bpy.context.object\n if not ob:\n return\n layout = self.layout\n \n layout.label(text=\"Clonk Exporter v\"+\".\".join(str(s) for s in bl_info[\"version\"]))\n \n if ob.type == \"ARMATURE\":\n if len(ob.children):\n ob = ob.children[0]\n if ob.type == 'MESH':\n layout.prop(ob, 'clonkExportActionFile')\n layout.prop(ob, 'clonkExportName')\n layout.prop(ob, 'clonkExportSkeletonName')\n layout.operator(\"clonk.export_mesh\")\n layout.operator(\"clonk.export_skeleton_all\")\n \n\ndef getBoneFromObject(object_name, bone_name):\n # get the object given by object_name\n attachMesh = bpy.data.objects.get(object_name)\n \n # get the armature of this object\n if attachMesh:\n attachArmature = attachMesh.parent\n else:\n attachArmature = None\n \n # get the bone given by bone_name\n if attachArmature:\n attachBone = attachArmature.pose.bones.get(bone_name)\n else:\n attachBone = None\n return attachMesh, attachArmature, attachBone\n \n\ndef my_handler(scene):\n # get the currently selected object\n try:\n ob = bpy.context.object\n except AttributeError:\n return\n\n # test if the Armature and the CONTROL_body have the same action\n try:\n equal 
= (clonkArmature.animation_data.action == clonkArmatureHelper.animation_data.action)\n except AttributeError:\n equal = False\n \n # if they are not equal, make them equal!\n if not equal:\n # if the user has selected the Armature object, copy from this\n if ob == clonkArmature:\n print(\"Copy animation from Armature to helper\")\n clonkArmatureHelper.animation_data.action = clonkArmature.animation_data.action\n # if not, copy the other way around\n else:\n print(\"Copy animation form Helper to Armature\")\n clonkArmature.animation_data.action = clonkArmatureHelper.animation_data.action\n \n # try to get the current action of the Clonk's Armature\n try:\n # get the action\n action = clonkArmature.animation_data.action\n # and create a hash from the metadata\n hash = json.dumps([action.name, action.clonkAttachMesh, action.clonkAttachBone, action.clonkAttachBone2, action.clonkAttachTransformation, action.clonkAttachAction])\n except AttributeError:\n # if not set both to None\n action = None\n hash = None\n \n # if the hash has changed, we have to update the attached mesh\n if scene[\"attachHash\"] != hash:\n \n # if there is an old mesh, we have to remove it\n if scene[\"attachHash\"]:\n # get the metadata\n action_name, object_name, source_bone, target_bone, transformation, attach_action = json.loads(scene[\"attachHash\"])\n # get the associated objects\n attachMesh, attachArmature, attachBone = getBoneFromObject(object_name, target_bone)\n \n # if the bone has been found\n if attachBone is not None:\n # get the constraint list\n constraintList = attachBone.constraints\n # and remove the constraint\n if len(constraintList):\n constraintList.remove(constraintList[-1])\n # if the mesh has been found\n if attachMesh:\n # hide it in the layer 10\n #attachMesh.layers[10] = False\n attachMesh.hide_set(True)\n # and reset the transformation\n applyTransformation(attachMesh, None)\n \n # clear the hash\n scene[\"attachHash\"] = None\n \n # get the new metadata\n action_name, object_name, source_bone, target_bone, transformation, attach_action = json.loads(hash)\n \n # set the hash\n scene[\"attachHash\"] = hash\n \n # if an object name is given\n if object_name:\n # try to get the object\n attachMesh, attachArmature, attachBone = getBoneFromObject(object_name, target_bone)\n \n # print error messages if something has not been found\n if attachMesh is None:\n print(\"ERROR: mesh %s not found\" % object_name)\n elif attachArmature is None:\n print(\"ERROR: no armature found for mesh %s\" % object_name)\n elif attachBone is None:\n print(\"ERROR: bone %s not found in armature %s(%s)\" % (target_bone, attachArmature.name, object_name))\n \n # if all has been found, proceed\n if attachMesh and attachArmature and attachBone:\n print(attachMesh, attachMesh.hide_viewport)\n # show the mesh in layer 10\n #attachMesh.layers[10] = True\n attachMesh.hide_set(False)\n \n # if an action is given, \n if attach_action:\n # try to get it\n attach_action = bpy.data.actions.get(attach_action)\n # and apply it\n if attach_action:\n attachArmature.animation_data.action = attach_action\n # or reset the action\n else:\n try:\n attachArmature.animation_data.action = None\n except AttributeError:\n pass\n\n # if a transformation is given \n if action.clonkAttachTransformation:\n # try to apply it\n try:\n applyTransformation(attachMesh, action.clonkAttachTransformation)\n except Exception as err:\n # if not, print an error message\n print(\"ERROR: transform is not valid:\", action.clonkAttachTransformation)\n print(err)\n 
# and reset the transformation\n applyTransformation(attachMesh, None)\n else:\n # the default is also a reseted transformation\n applyTransformation(attachMesh, None)\n \n # get the constraint list of the bone\n constraintList = attachBone.constraints\n \n # and add a copy transfroms constraint with the target bone from the Clonk's Armature\n constraint = constraintList.new(type='COPY_TRANSFORMS')\n constraint.target = clonkArmature\n constraint.subtarget = action.clonkAttachBone\n\n\n#bpy.COPY_TRANSFORMS\n\n# = bpy.data.objects[\"ArmatureShovel\"].pose.bones[\"main\"].constraints.new(type='COPY_TRANSFORMS')\n\ndef getBonesForControl(control):\n if control.endswith(\".R\"):\n bones = getBonesForControl2(control.replace(\".R\", \".L\"))\n bones = [bone.replace(\".L\", \".R\") for bone in bones]\n else:\n bones = getBonesForControl2(control)\n return bones\n\ndef getBonesForControl2(control):\n # a foot will take the whole leg\n if control == \"mainfoot.L\":\n return [\"skeleton_leg_upper.L\", \"skeleton_leg_lower.L\", \"skeleton_foot_ball.L\", \"skeleton_foot_tip.L\"]\n # a hand takes thw whole arm\n if control == \"hand.L\":\n return [\"skeleton_arm_upper.L\", \"skeleton_arm_lower.L\", \"skeleton_arm_hand.L\"]\n # the head\n if control == \"head\":\n return [\"skeleton_head\"]\n # the eyes\n if control == \"eyes_target\":\n return [\"eye.L\", \"eye.R\"]\n \n # the thumb\n if control == \"thumb.L\":\n return [\"skeleton_hand_digit1.L\", \"skeleton_hand_digit2.L\", \"skeleton_hand_digit3.L\"]\n # index finger\n if control == \"index.L\":\n return [\"skeleton_hand_index1.L\", \"skeleton_hand_index2.L\", \"skeleton_hand_index3.L\"]\n # middle finger\n if control == \"middle.L\":\n return [\"skeleton_hand_middle1.L\", \"skeleton_hand_middle2.L\", \"skeleton_hand_middle3.L\"]\n # ring finger\n if control == \"ring.L\":\n return [\"skeleton_hand_ring1.L\", \"skeleton_hand_ring2.L\", \"skeleton_hand_ring3.L\"]\n # little finger\n if control == \"little.L\":\n return [\"skeleton_hand_small1.L\", \"skeleton_hand_small2.L\", \"skeleton_hand_small3.L\"]\n \n if control == \"body\":\n return [\"RootB\"]\n if control == \"body2\":\n return [\"skeleton_body\"]\n\n\ndef register():\n \n # Registration\n bpy.utils.register_class(ClonkImageLoadOperator) \n bpy.utils.register_class(CLONK_PANEL_PT_material)\n\n bpy.utils.register_class(OBJECT_OT_Button)\n bpy.utils.register_class(OBJECT_OT_Button2)\n bpy.utils.register_class(CLONK_PT_action_export)\n bpy.utils.register_class(CLONK_PT_action)\n bpy.utils.register_class(CLONK_PT_export)\n\n # initalize the attachHash if it is not present yet\n scene = bpy.context.scene\n try:\n scene[\"attachHash\"]\n except KeyError:\n scene[\"attachHash\"] = None\n\n # get the Armatures\n try:\n clonkArmature = bpy.data.objects[\"Armature\"]\n clonkArmatureHelper = bpy.data.objects[\"CONTROL_body\"]\n except KeyError:\n clonkArmature = None\n clonkArmatureHelper = None\n \n if scene[\"attachHash\"] is not None:\n scene[\"attachHash\"] = scene[\"attachHash\"]+\" \"\n if len(bpy.app.handlers.frame_change_pre):\n bpy.app.handlers.frame_change_pre.remove(bpy.app.handlers.frame_change_pre[-1])\n bpy.app.handlers.frame_change_pre.append(my_handler)\n\n\ndef unregister():\n bpy.utils.unregister_class(ClonkImageLoadOperator)\n bpy.utils.unregister_class(CLONK_PANEL_PT_material)\n \n bpy.utils.unregister_class(OBJECT_OT_Button)\n bpy.utils.unregister_class(OBJECT_OT_Button2)\n bpy.utils.unregister_class(CLONK_PT_action_export)\n bpy.utils.unregister_class(CLONK_PT_action)\n 
bpy.utils.unregister_class(CLONK_PT_export)\n\nif __name__ == \"__main__\":\n register()","repo_name":"rgerum/clonk-blender-exporter","sub_path":"scripts/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":22145,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"24159444873","text":"# voronoi.py\n# A script to plot a Voronoi representation of the system and to visualise\n# a specific data set\n\nimport sys\n\n# This code must be run with python3!\nif (sys.version_info < (3, 5)):\n print(\"This code must be run with Python version 3.5 or higher\")\n sys.exit(1)\n\nimport numpy as np\nimport scipy.spatial as spatial\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import rc\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\n\nargs = sys.argv\nif (len(args) != 22):\n print(\"usage: voronoi.py npoints lx ly xbuff ybuff Dr dt data_col data_min data_max tic_start tic_end tic_inc tstart tend tinc make_movie print_to_screen pos_file data_file out_file\")\n sys.exit(1)\n\nnpoints = int(args.pop(1))\nlx = int(args.pop(1))\nly = int(args.pop(1))\nxbuff = float(args.pop(1))\nybuff = float(args.pop(1))\nDr = float(args.pop(1))\ndt = float(args.pop(1))\ndata_col = int(args.pop(1))\ndata_min = float(args.pop(1))\ndata_max = float(args.pop(1))\ntic_start = float(args.pop(1))\ntic_end = float(args.pop(1))\ntic_inc = float(args.pop(1))\ntstart = int(args.pop(1))\ntend = int(args.pop(1))\ntinc = int(args.pop(1))\nmake_movie = bool(int(args.pop(1)))\nprint_to_screen = bool(int(args.pop(1)))\npos_file = args.pop(1)\ndata_file = args.pop(1)\nout_file = args.pop(1)\nxbuff *= lx\nybuff *= ly\nnframes = int((tend-tstart)//tinc+1)\n\nuse_label = 0 # 1\nuse_cbar = 1 # 1\n\nif (not make_movie):\n tend = tstart\n\n# Data arrays\npos = [[] for i in range(nframes)]\ndata_val = [[0.0 for j in range(npoints)] for i in range(nframes)]\nindex_map = [[] for i in range(nframes)]\ntime_map = [i*tinc+tstart for i in range(nframes)]\n\n# Useful functions for plotting\ndef add_point(index, x, y, frame):\n global pos, data_map, lx, ly, xbuff, ybuff\n if (x < lx+xbuff and x > -xbuff and y < ly+ybuff and y > -ybuff):\n pos[frame].append((x,y))\n index_map[frame].append(index)\n# data_map[frame][(x,y)] = val\n\nperiodic_loc = [(lx,-ly),(lx,0),(lx,ly),(0,-ly),(0,0),(0,ly),(-lx,-ly),\n (-lx,0),(-lx,ly)]\n\n\n# Read position data\nnlines = npoints + 2\nreader = open(pos_file, 'r')\n\nwhile True:\n # Read header section (including time info)\n for i in range(2):\n line = reader.readline()\n if (not line): break\n data = line.split()\n time = int(data[1])\n if (time > tend):\n break\n elif (time < tstart or (time-tstart) % tinc != 0):\n for i in range(npoints):\n line = reader.readline()\n else:\n frame = int((time-tstart)//tinc)\n for n in range(npoints):\n line = reader.readline()\n data = line.split()\n x = float(data[0])\n y = float(data[1])\n for pt in periodic_loc:\n add_point(n, x+pt[0], y+pt[1], frame)\n \nreader.close()\n\nif (data_col >= 0):\n data_reader = open(data_file, 'r')\n while True:\n # Read header section (including time info)\n for i in range(2):\n line = data_reader.readline()\n if (not line): break\n data = line.split()\n time = int(data[1])\n if (time > tend):\n break\n elif (time < tstart or (time-tstart) % tinc != 0):\n for n in range(npoints):\n 
data_reader.readline()\n else:\n frame = int((time-tstart)//tinc)\n for n in range(npoints):\n line = data_reader.readline()\n data = line.split()\n data_val[frame][n] = float(data[data_col])\n \n data_reader.close()\n\n# Make animation\n# Plot settings\nfontsize = 20\nnorm = mpl.colors.Normalize(vmin=data_min, vmax=data_max, clip=True)\nmapper = cm.ScalarMappable(norm=norm, cmap=cm.RdYlBu_r)\nmapper.set_array([])\nfig, ax = plt.subplots()\nax.set_xlim([0,lx])\nax.set_ylim([0,ly])\nax.tick_params(axis=\"both\", labelsize=fontsize)\n\nif (use_cbar):\n cbar = plt.colorbar(mapper)\n cbar.set_ticks(np.arange(tic_start, tic_end+tic_inc/2.0, tic_inc))\n cbar.ax.tick_params(labelsize=fontsize)\n\n#if (not make_movie):\n# Use Latex typesetting when not making movies\n# mpl.rcParams[\"text.latex.unicode\"] = True\n# mpl.rcParams[\"text.latex.preamble\"] = [\n# r'\\usepackage{amsmath}',\n# r'\\usepackage{amssymb}',\n# r'\\usepackage[scaled=1]{helvet}',\n# r'\\usepackage{sansmath}',\n# r'\\sansmath']\n# plt.rc(\"text\", usetex=True)\n# mpl.rcParams['axes.unicode_minus'] = False\n\nplt.rcParams['font.family'] = 'sans-serif'\nplt.rcParams['font.sans-serif'] = 'FreeSans' # A font close to Helvetica\n\n# Draw borders but no axes and ticks\nplt.tick_params(axis=\"both\", which=\"both\", bottom=False, top=False, \n labelbottom=False, right=False, left=False, labelleft=False)\n\n# Set plot margins\nplt.subplots_adjust(left=0.05,right=0.95,top=0.95,bottom=0.05)\n\n# Get the artist for plotting centre of Voronoi cells\nplt_pts, = ax.plot([],[], '.', markersize=5, color=\"black\") # Empty data\n\n# Get the artist for plotting the time label\nif (use_label):\n plt_time_txt = ax.text(0.45,0.005,\"\",fontsize=14,\n horizontalalignment=\"center\",\n transform=plt.gcf().transFigure)\n plt_time_txt.set_fontsize(fontsize)\n\n# Get the artist for plotting the polygons\npatches = PatchCollection([], linewidth=1.0)\nplt_polygons = ax.add_collection(patches)\n\ndef plot_data(frame):\n global pos, data_map, time_map\n\n # Plot centres of Voronoi cells\n vor = spatial.Voronoi(pos[frame])\n plt_pts.set_xdata(vor.points[:,0])\n plt_pts.set_ydata(vor.points[:,1])\n \n # Plot the Voronoi polygons\n colors = []\n polygons = []\n for r in range(len(vor.point_region)):\n region = vor.regions[vor.point_region[r]]\n if -1 in region: continue\n# pt = tuple(vor.points[r])\n poly = [vor.vertices[i] for i in region]\n polygons.append(Polygon(poly))\n if (data_col >= 0):\n colors.append(mapper.to_rgba(\n data_val[frame][index_map[frame][r]]))\n else:\n colors.append(mapper.to_rgba(index_map[frame][r]))\n plt_polygons.set_paths(polygons)\n plt_polygons.set_facecolor(colors)\n plt_polygons.set_edgecolor(\"black\")\n\n # Plot the time label\n if (use_label):\n plt_time_txt.set_text(r\"$D_rt = {:.1f}$\".format(time_map[frame]*Dr*dt))\n \n return plt_pts, plt_polygons,\n\nif (make_movie):\n if (print_to_screen):\n ani = FuncAnimation(fig, plot_data, np.arange(nframes), \n fargs=[], interval=1)\n plt.show()\n else:\n ani = FuncAnimation(fig, plot_data, np.arange(nframes), \n fargs=[], interval=1)\n Writer = animation.writers[\"ffmpeg\"]\n writer = Writer(fps=15, bitrate=1500)\n ani.save(out_file,writer=writer)\nelse:\n plot_data(0)\n if (print_to_screen):\n plt.show()\n else:\n plt.savefig(out_file, 
transparent=True)\n","repo_name":"mchchiang/phase_field_model","sub_path":"c/benjamin_model/analysis/src/voronoi.py","file_name":"voronoi.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"11947614126","text":"import argparse\nimport pytest\nimport re\nimport requests\n\ndef __validate_conn_pattern(conn: str) -> str:\n \"\"\"\n Validate connection information format is connect\n :valid formats:\n 127.0.0.1:32049\n user:passwd@127.0.0.1:32049\n :args:\n conn:str - REST connection information\n :params:\n pattern1:str - compiled pattern 1 (127.0.0.1:32049)\n pattern2:str - compiled pattern 2 (user:passwd@127.0.0.1:32049)\n :return:\n if fails raises Error\n if success returns conn\n \"\"\"\n pattern1 = re.compile(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n pattern2 = re.compile(r'^\\w+:\\w+@\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n\n if not pattern1.match(conn) and not pattern2.match(conn):\n raise argparse.ArgumentTypeError(f'Invalid REST connection format: {conn}')\n\n return conn\n\n@pytest.fixture\ndef parser()->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('conn', type=__validate_conn_pattern, default='23.239.12.151:32349', help=\"REST connection information (example: {user}:{password}@{ip}:{port})\")\n parser.add_argument('--timeout', type=int, default=30)\n\n\ndef execute_query(conn:str, auth:tuple, timeout:int, headers:dict)->requests.Request:\n \"\"\"\n Execute REST request against a node\n :args:\n conn:str - REST connection information\n auth:tuple - REST authentication information\n timeout:int - REST timeout\n headers:dict - REST headers\n :params:\n r:requests.Request - request return\n :exception:\n 1. query fails to execute\n 2. 
request returns a status_code != 200\n :return:\n raw request results\n \"\"\"\n try:\n r = requests.get(url=f\"http://{conn}\", auth=auth, timeout=timeout, headers=headers)\n except Exception as error:\n pytest.fail(f\"Failed to execute `{headers['command']}` against {conn} (Error: {error})\", pytrace=True)\n else:\n if int(r.status_code) != 200:\n pytest.fail(f\"Failed to execute `{headers['command']}` against {conn} (Network Error: {r.status_code})\", pytrace=True)\n return r\n\n\ndef validate_status(conn:str, auth:tuple, timeout:int)->bool:\n \"\"\"\n Validate whether the node is accessible (via REST)\n :args:\n conn:str - REST connection information\n auth:tuple - REST authentication information\n timeout:int - REST timeout\n :params:\n status:bool\n headers:dict - REST headers\n :return:\n if success returns True\n else returns False\n if fails returns an exception\n \"\"\"\n status = True\n headers = {\n \"command\": \"get status\",\n \"User-Agent\": \"AnyLog/1.23\"\n }\n\n r = execute_query(conn=conn, auth=auth, timeout=timeout, headers=headers)\n if 'running' in r.text and 'not' not in r.text:\n status = True\n\n return status","repo_name":"AnyLog-co/deployment-scripts","sub_path":"test/pytests/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27450605908","text":"from __future__ import absolute_import, division\n\nimport numpy as np\nimport pkg_resources\nimport random\n\nfrom builtins import range, str, zip\nfrom deap import base, creator, tools\nfrom multiprocessing import Pool\n\nfrom .addm import aDDM\nfrom .util import load_data_from_csv, convert_item_values\n\n\n# Global variables.\ndataTrials = []\n\n\ndef evaluate(individual):\n \"\"\"\n Computes the negative log likelihood of the global data set given the\n parameters of the aDDM.\n Args:\n individual: list containing the 3 model parameters, in the following\n order: d, theta, sigma.\n Returns:\n A list containing the negative log likelihood for the global data set and\n the given model.\n \"\"\"\n d = individual[0]\n theta = individual[1]\n sigma = individual[2]\n model = aDDM(d, sigma, theta) \n\n logLikelihood = 0\n for trial in dataTrials:\n try:\n likelihood = model.get_trial_likelihood(trial)\n except:\n print(u\"An exception occurred during the likelihood \" +\n \"computations for model \" + str(model.params) + u\".\")\n raise\n if likelihood != 0:\n logLikelihood += np.log(likelihood)\n\n print(u\"NLL for \" + str(individual) + u\": \" + str(-logLikelihood))\n if logLikelihood != 0:\n return -logLikelihood,\n else:\n return float(\"inf\"),\n\n\ndef main(lowerBoundD=0.0001, upperBoundD=0.09, lowerBoundSigma=0.001,\n upperBoundSigma=0.9, lowerBoundTheta=0, upperBoundTheta=1,\n expdataFileName=None, fixationsFileName=None, trialsPerSubject=100,\n popSize=18, numGenerations=20, crossoverRate=0.5, mutationRate=0.3,\n subjectIds=[], numThreads=9, verbose=False):\n \"\"\"\n Args:\n lowerBoundD: float, lower search bound for parameter d.\n upperBoundD: float, upper search bound for parameter d.\n lowerBoundSigma: float, lower search bound for parameter sigma.\n upperBoundSigma: float, upper search bound for parameter sigma.\n lowerBoundTheta: float, lower search bound for parameter theta.\n upperBoundTheta: float, upper search bound for parameter theta.\n expdataFileName: string, path of experimental data file.\n fixationsFileName: string, path of fixations file.\n trialsPerSubject: 
int, number of trials from each subject to be used in\n the analysis. If smaller than 1, all trials are used.\n popSize: int, number of individuals in each population.\n numGenerations: int, number of generations.\n crossoverRate: float, crossover rate.\n mutationRate: float, mutation rate.\n subjectIds: list of strings corresponding to the subject ids. If not\n provided, all existing subjects will be used.\n numThreads: int, size of the thread pool.\n verbose: boolean, whether or not to increase output verbosity.\n \"\"\"\n global dataTrials\n\n # Load experimental data from CSV file.\n if verbose:\n print(u\"Loading experimental data...\")\n if not expdataFileName:\n expdataFileName = pkg_resources.resource_filename(\n u\"addm_toolbox\", u\"data/expdata.csv\")\n if not fixationsFileName:\n fixationsFileName = pkg_resources.resource_filename(\n u\"addm_toolbox\", u\"data/fixations.csv\")\n data = load_data_from_csv(expdataFileName, fixationsFileName,\n convertItemValues=convert_item_values)\n\n # Get correct subset of trials.\n subjectIds = ([str(subj) for subj in subjectIds] if subjectIds\n else list(data))\n for subjectId in subjectIds:\n numTrials = (trialsPerSubject if trialsPerSubject >= 1\n else len(data[subjectId]))\n trialSet = np.random.choice(\n [trialId for trialId in range(len(data[subjectId]))],\n numTrials, replace=False)\n dataTrials.extend([data[subjectId][t] for t in trialSet])\n\n creator.create(u\"FitnessMin\", base.Fitness, weights=(-1.0,))\n creator.create(u\"Individual\", list, fitness=creator.FitnessMin)\n\n toolbox = base.Toolbox()\n\n # Create thread pool.\n pool = Pool(numThreads)\n toolbox.register(u\"map\", pool.map)\n\n # Create individual.\n toolbox.register(u\"attr_d\", random.uniform, lowerBoundD, upperBoundD)\n toolbox.register(u\"attr_sigma\", random.uniform, lowerBoundSigma,\n upperBoundSigma)\n toolbox.register(u\"attr_theta\", random.uniform, lowerBoundTheta,\n upperBoundTheta)\n toolbox.register(u\"individual\", tools.initCycle, creator.Individual,\n (toolbox.attr_d, toolbox.attr_theta, toolbox.attr_sigma),\n n=1)\n\n # Create population.\n toolbox.register(u\"population\", tools.initRepeat, list, toolbox.individual)\n pop = toolbox.population(n=popSize)\n\n # Create operators.\n toolbox.register(u\"mate\", tools.cxUniform, indpb=0.4)\n toolbox.register(u\"mutate\", tools.mutGaussian, mu=0,\n sigma=[0.0005, 0.05, 0.005], indpb=0.4)\n toolbox.register(u\"select\", tools.selTournament, tournsize=3)\n toolbox.register(u\"evaluate\", evaluate)\n\n # Evaluate the entire population.\n try:\n fitnesses = list(map(toolbox.evaluate, pop))\n except:\n print(u\"An exception occurred during the first population evaluation.\")\n raise\n bestFit = float(\"inf\")\n bestInd = None\n for ind, fit in zip(pop, fitnesses):\n ind.fitness.values = fit\n\n # Get best individual.\n currFit = fit[0] if isinstance(fit, tuple) else fit\n if currFit < bestFit:\n bestInd = ind\n\n for g in range(numGenerations):\n if verbose:\n print(u\"Generation \" + str(g) + u\"...\")\n\n # Select the next generation individuals.\n offspring = toolbox.select(pop, len(pop))\n # Clone the selected individuals.\n offspring = list(map(toolbox.clone, offspring))\n\n # Apply crossover and mutation on the offspring.\n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < crossoverRate:\n toolbox.mate(child1, child2)\n del child1.fitness.values\n del child2.fitness.values\n\n for mutant in offspring:\n if random.random() < mutationRate:\n toolbox.mutate(mutant)\n del 
mutant.fitness.values\n\n # Evaluate the individuals which are valid but have an invalid fitness.\n invalidInd = list()\n for ind in offspring:\n if (ind[0] < lowerBoundD or\n ind[0] > upperBoundD or\n ind[1] < lowerBoundTheta or\n ind[1] > upperBoundTheta or\n ind[2] < lowerBoundSigma or\n ind[2] > upperBoundSigma):\n ind.fitness.values = float(\"inf\"),\n elif not ind.fitness.valid:\n invalidInd.append(ind)\n try:\n fitnesses = list(map(toolbox.evaluate, invalidInd))\n except:\n print(u\"An exception occurred during the population evaluation \"\n \"for generation \" + str(g) + u\".\")\n raise\n for ind, fit in zip(invalidInd, fitnesses):\n ind.fitness.values = fit\n\n # The population is entirely replaced by the offspring.\n pop[:] = offspring\n\n # Update best individual.\n for ind in pop:\n if ind.fitness.values[0] < bestFit:\n bestFit = ind.fitness.values[0]\n bestInd = ind\n\n print(u\"Best individual: \" + str(bestInd))\n print(u\"Fitness of best individual: \" + str(bestFit))\n","repo_name":"goptavares/aDDM-Toolbox","sub_path":"addm_toolbox/genetic_algorithm_optimize.py","file_name":"genetic_algorithm_optimize.py","file_ext":"py","file_size_in_byte":7583,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"32921657105","text":"#!/bin/python\nimport math\nimport json\nfrom singlestatus import samples\nfrom singlestatus import display\n\nfrom singlestatus import common\n\n\nclass Cell:\n def __init__(self, board, r, c):\n self.board = board\n self.r = r\n self.c = c\n\n def __eq__(self, other):\n assert(isinstance(other, Cell))\n return ((self.board == other.board)\n and (self.r == other.r)\n and (self.c == other.c))\n\n def get(self):\n return self.board[self.r][self.c]\n\n def contains(self, val):\n return self.get().find(\"%d\" % val) > -1\n\n def set(self, val):\n self.board[self.r][self.c] = val\n\n def is_solved(self):\n return (len(self.get().strip()) == 1)\n\n def remove_from_set(self, vals):\n curr = self.get()\n for c in vals:\n curr = curr.replace(c, '')\n return curr\n\n def compare_and_reduce(self, other):\n if self.is_solved():\n return 0\n if other.is_solved():\n curr = self.get()\n cell = self.remove_from_set(other.get().strip())\n assert(len(cell.strip()) != 0)\n self.set(cell)\n if curr != cell:\n reduced = 1 + self.reduce_solved()\n return reduced\n return 0\n\n def solve(self, p):\n if self.is_solved():\n return\n solve_set = common.FULL_SET.replace(p, '')\n self.set(self.remove_from_set(solve_set))\n self.reduce_solved()\n\n def reduce_solved(self):\n reduced = 0\n if self.is_solved():\n return 0\n for other in BlockCellIterator(self.board, self.r, self.c):\n reduced = reduced + self.compare_and_reduce(other)\n for other in ColCellIterator(self.board, self.c):\n reduced = reduced + self.compare_and_reduce(other)\n for other in RowCellIterator(self.board, self.r):\n reduced = reduced + self.compare_and_reduce(other)\n return reduced\n\n\n# Iterates through all the Cells in a given Row\nclass RowCellIterator:\n def __init__(self, board, row):\n self.board = board\n self.row = row\n self.col = 0\n\n def reset(self):\n self.col = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.col < common.DIM:\n val = Cell(self.board, self.row, self.col)\n self.col = self.col + 1\n return val\n else:\n raise StopIteration\n\n\n# Iterates through all the Cells in a given Column\nclass ColCellIterator:\n def __init__(self, board, col):\n self.board = board\n self.row = 0\n self.col = col\n\n def 
reset(self):\n self.row = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.row < common.DIM:\n val = Cell(self.board, self.row, self.col)\n self.row = self.row + 1\n return val\n else:\n raise StopIteration\n\n\n# Iterates through all the Cells in the same block\nclass BlockCellIterator:\n def __init__(self, board, row, col):\n self.board = board\n self.sg_row = math.floor(row / common.BASIS) * common.BASIS\n self.sg_col = math.floor(col / common.BASIS) * common.BASIS\n self.block_row = 0\n self.block_col = 0\n\n def reset(self):\n self.block_row = 0\n self.block_col = 0\n\n def get_row(self):\n return self.sg_row + self.block_row\n\n def get_col(self):\n return self.sg_col + self.block_col\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.block_row >= common.BASIS:\n raise StopIteration\n cell = Cell(self.board, self.get_row(), self.get_col())\n self.block_col = self.block_col + 1\n\n if (self.block_col >= common.BASIS):\n self.block_col = 0\n self.block_row = self.block_row + 1\n return cell\n\n\n# Iterates through all the Cells one the board\nclass BoardCellIterator:\n def __init__(self, board):\n self.board = board\n self.row = 0\n self.col = 0\n\n def reset(self):\n self.col = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.col == common.DIM:\n self.col = 0\n self.row += 1\n if self.row == common.DIM:\n raise StopIteration\n val = Cell(self.board, self.row, self.col)\n self.col += 1\n return val\n\n\ndef block_for_rc(row, col):\n sg_row = math.floor(row / common.BASIS)\n sg_col = math.floor(col / common.BASIS)\n return [sg_row, sg_col]\n\n\n# select k from n. Number of elements will be\n# n!/(k! - (n-k)!)\ndef gen_subset_indexes(n, k):\n subsets = []\n max = 1 << n\n for i in range(max):\n indexes = []\n for x in range(9):\n if (i >> x) & 1 == 1:\n indexes.append(x)\n if len(indexes) == k:\n subsets.append(indexes)\n return subsets\n\n\n# generates all subsets of length k\ndef gen_subsets(allset, k):\n subsets = []\n indexes = gen_subset_indexes(len(allset), k)\n for i in indexes:\n subset = []\n for j in i:\n subset.append(allset[j])\n subsets.append(subset)\n return subsets\n\n\nclass TupleKey:\n def __init__(self, house, location, tuple):\n self.house = house\n self.location = location\n self.tuple = tuple\n\n def __eq__(self, other):\n if not isinstance(other, TupleKey):\n return False\n return ((self.house == other.house) and\n (self.location == other.location) and\n (self.tuple == other.tuple))\n\n def __hash__(self):\n return hash(self.__str__())\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"%s %s %s\" % (self.house, self.location, self.tuple)\n\n\ndef append_to_tuple_map(tuple_map, r, c, tuple, board):\n def no_singletons(board, itr):\n ok = True\n for cell in itr:\n if not ok:\n break\n for digit in tuple:\n if cell.get() == digit:\n ok = False\n break\n return ok\n\n tuple_str = \"\"\n for i in tuple:\n tuple_str += i\n keys = []\n\n if no_singletons(board, RowCellIterator(board, r)):\n keys.append(TupleKey('r', r, tuple_str))\n\n if no_singletons(board, ColCellIterator(board, c)):\n keys.append(TupleKey('c', c, tuple_str))\n\n if no_singletons(board, BlockCellIterator(board, r, c)):\n keys.append(TupleKey('b', block_for_rc(r, c), tuple_str))\n\n for key in keys:\n if tuple_map.get(key) is None:\n tuple_map[key] = []\n tuple_map[key].append((r, c))\n\n\ndef generate_hidden_tuple_map(board, k):\n tuples = gen_subsets(common.FULL_SET, k)\n tuple_map = dict()\n for cell 
in BoardCellIterator(board):\n if len(cell.get()) == 1:\n continue\n for tuple in tuples:\n for digit in tuple:\n if digit in cell.get():\n append_to_tuple_map(\n tuple_map, cell.r, cell.c, tuple, board)\n break\n return tuple_map\n\n\ndef find_hidden_tuples(board, k):\n tuple_map = generate_hidden_tuple_map(board, k)\n found = dict()\n for (key, values) in tuple_map.items():\n if len(values) == k:\n found[key] = values\n return found\n\n\ndef reduce_hidden_tuples(board, found, n):\n for (key, cells) in found.items():\n digits_in = key.tuple\n digits_out = common.FULL_SET\n for c in digits_in:\n digits_out = digits_out.replace(c, '')\n\n itr = None\n if key.house == 'b':\n itr = BlockCellIterator(board, cells[0][0], cells[0][1])\n elif key.house == 'r':\n itr = RowCellIterator(board, cells[0][0])\n elif key.house == 'c':\n itr = ColCellIterator(board, cells[0][1])\n else:\n assert(itr is not None)\n\n for cell in itr:\n ct = (cell.r, cell.c)\n if ct in cells:\n cell.set(cell.remove_from_set(digits_out))\n else:\n cell.set(cell.remove_from_set(digits_in))\n\n\ndef find_and_reduce_hidden_tuples(board, n):\n found = find_hidden_tuples(board, n)\n reduce_hidden_tuples(board, found, n)\n\n\ndef remove_from_set(cell_set, vals):\n for c in vals:\n cell_set = cell_set.replace(c, '')\n return cell_set\n\n\ndef populate_full_board():\n board = []\n for row in range(common.DIM):\n cur_row = []\n for col in range(common.DIM):\n cur_row.append(common.FULL_SET)\n board.append(cur_row)\n return board\n\n\ndef initialize_board(board, puzzle_array):\n for cell in BoardCellIterator(board):\n p = puzzle_array[cell.r][cell.c]\n if p > '0':\n cell.solve(p)\n\n\ndef reduce_solved(board):\n reduced = 0\n for target in BoardCellIterator(board):\n reduced = reduced + target.reduce_solved()\n return reduced\n\n\ndef reduce_singleton_in_section(itr, test_val):\n found = None\n for other in itr:\n if other.contains(test_val):\n if found is None:\n found = other\n else:\n found = None\n return\n if found is not None:\n found.solve(\"%d\" % test_val)\n\n\ndef reduce_singletons_in_section(itr):\n for test_val in range(common.DIM):\n reduce_singleton_in_section(itr, test_val)\n itr.reset()\n\n\ndef reduce_naked_pairs_in_section(itr):\n firsts = {}\n second = None\n\n for cell in itr:\n val = cell.get()\n if len(val) == 2:\n if firsts.get(val) is None:\n firsts[val] = cell\n else:\n second = cell\n break\n if second is None:\n return\n val = second.get()\n first = firsts[val]\n itr.reset()\n for cell in itr:\n if cell == first:\n continue\n if cell == second:\n continue\n new_value = cell.remove_from_set(val)\n cell.set(new_value)\n\n\ndef reduce_singletons(board):\n visit_all(board, reduce_singletons_in_section)\n\n\ndef reduce_naked_pairs(board):\n visit_all(board, reduce_naked_pairs_in_section)\n\n\ndef visit_all(board, visitor):\n for col in range(common.DIM):\n visitor(ColCellIterator(board, col))\n\n for row in range(common.DIM):\n visitor(RowCellIterator(board, row))\n\n for r in range(common.BASIS):\n for c in range(common.BASIS):\n visitor(\n BlockCellIterator(board, r * common.BASIS, c * common.BASIS))\n\n\ndef x_wing(board):\n xmap = dict()\n for row in range(common.DIM):\n valmap = dict()\n for cell in RowCellIterator(board, row):\n if cell.is_solved():\n continue\n for val in cell.get():\n if valmap.get(val) is None:\n valmap[val] = []\n valmap[val].append(cell)\n xmap[row] = valmap\n\n pairs_by_val = dict()\n for row, matches in xmap.items():\n for val, cells in matches.items():\n if (len(cells)) == 2:\n if 
pairs_by_val.get(val) is None:\n pairs_by_val[val] = []\n pairs_by_val[val].append(cells)\n\n reduced = 0\n for val, rows in pairs_by_val.items():\n pos_dict = dict()\n for r in rows:\n tup = (r[0].c, r[1].c)\n o = pos_dict.get(tup)\n if o is None:\n pos_dict[tup] = r\n else:\n for mc in r:\n for target in ColCellIterator(board, mc.c):\n if target.r == r[0].c:\n continue\n if target.r == r[1].c:\n continue\n if target.r == o[0].c:\n continue\n if target.r == o[1].c:\n continue\n if val in target.get():\n target.set(target.remove_from_set(val))\n reduced = reduced + 1 + target.reduce_solved()\n return reduced\n\n\ndef solve_puzzle(puzzle):\n board = populate_full_board()\n puzzle_array = common.puzzle_to_array(puzzle)\n initialize_board(board, puzzle_array)\n reduced = reduce_solved(board)\n reduced = 10\n while reduced > 0:\n reduced = reduced - 1\n #x_wing(board)\n reduce_naked_pairs(board)\n reduce_singletons(board)\n reduced = reduce_solved(board)\n find_and_reduce_hidden_tuples(board, 4)\n\n return board\n\n\ndef solve_sample():\n display.draw_puzzle(samples.sample_puzzle)\n sample_board = solve_puzzle(samples.sample_puzzle)\n display.draw_board(sample_board)\n\n\ndef solve_from_file():\n solved_map = dict()\n infile = open(\"data/sample_sudoku_board_inputs.csv\")\n line = infile.readline()\n for line in infile:\n solved_map[line] = solve_puzzle(line)\n print(json.dumps(solved_map))\n\n\nsolve_from_file()\n#solve_sample()\n","repo_name":"admiyo/singlestatus","sub_path":"singlestatus/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":13040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36768104330","text":"import types\nimport numpy as np\nimport pandas as pd\nfrom ....data.materials.CompositionEntry import CompositionEntry\nfrom ....data.materials.util.LookUpData import LookUpData\n\nclass ElementPairPropertyAttributeGenerator:\n \"\"\"Class to generate attributes based on the properties of constituent\n binary systems.\n Computes the minimum, maximum and range of all pairs in\n the material, and the fraction-weighted mean and variance of all pairs.\n Variance is defined as the mean absolute deviation from the mean over all\n pairs. If an entry has only one element, the value of NaN is used for all\n attributes.\n\n Attributes\n ----------\n elemental_pair_properties : list\n Elemental properties to be associated with this class for the generation of\n features.\n pair_lookup-data : dict\n Dictionary containing the property name as the key and a list of floats as\n the value.\n\n \"\"\"\n\n elemental_pair_properties = []\n pair_lookup_data = {}\n\n def load_pair_lookup_data(self):\n \"\"\"Function to load the property values into `self.lookup_data` for the\n computation of features.\n\n \"\"\"\n\n self.pair_lookup_data = LookUpData.load_pair_properties(\n self.elemental_pair_properties)\n\n def add_elemental_pair_property(self, property):\n \"\"\"Function to add an elemental pair property to be used to compute\n features.\n\n Parameters\n ----------\n property : str\n Property to be added.\n\n \"\"\"\n\n if property not in self.elemental_pair_properties:\n self.elemental_pair_properties.append(property)\n\n def add_elemental_pair_properties(self, properties):\n \"\"\"Function to provide a list of elemental pair properties to be used to\n compute features.\n\n Parameters\n ----------\n properties : array-like\n Properties to be included. 
A list of strings containing property\n names.\n\n \"\"\"\n\n for prop in properties:\n self.add_elemental_pair_property(prop)\n\n def remove_elemental_pair_property(self, property):\n \"\"\"Function to remove an elemental pair property from the list of\n elemental properties.\n\n Parameters\n ----------\n property : str\n Property to be removed.\n\n \"\"\"\n\n if property in self.elemental_pair_properties:\n self.elemental_pair_properties.remove(property)\n\n def remove_elemental_pair_properties(self, properties):\n \"\"\"Function to remove a list of elemental pair properties from the\n list of elemental properties.\n\n Parameters\n ----------\n properties : array-like\n Properties to be removed. A list of strings containing property\n names.\n\n \"\"\"\n\n for prop in properties:\n self.remove_elemental_pair_property(prop)\n\n def generate_features(self, entries):\n \"\"\"Function to generate features as mentioned in the class description.\n\n Parameters\n ----------\n entries : array-like\n Compositions for which features are to be generated. A list of\n CompositionEntry's.\n\n Returns\n ----------\n features : DataFrame\n Features for the given entries. Pandas data frame containing the\n names and values of the descriptors.\n\n Raises\n ------\n ValueError\n If no elemental properties are set.\n If input is not of type list.\n If items in the list are not CompositionEntry instances.\n\n \"\"\"\n\n # Initialize lists of feature values and headers for pandas data frame.\n feat_values = []\n feat_headers = []\n\n # Make sure that there is at least one elemental pair property provided.\n if not self.elemental_pair_properties:\n raise ValueError(\"No elemental property is set. Add at least one \"\n \"property to compute meaningful descriptors.\")\n\n # If the dictionary containing the property values is empty,\n # load values into it.\n if not self.pair_lookup_data:\n self.load_pair_lookup_data()\n\n # Raise exception if input argument is not of type list of\n # Composition Entry's.\n if not isinstance(entries, list):\n raise ValueError(\"Argument should be of type list of \"\n \"CompositionEntry's\")\n elif (entries and not isinstance(entries[0], CompositionEntry)):\n raise ValueError(\"Argument should be of type list of \"\n \"CompositionEntry's\")\n\n # Insert header names here.\n n_statistics = 5\n for prop in self.elemental_pair_properties:\n feat_headers.append(\"binary_max_\" + prop)\n feat_headers.append(\"binary_min_\" + prop)\n feat_headers.append(\"binary_range_\" + prop)\n feat_headers.append(\"binary_mean_\" + prop)\n feat_headers.append(\"binary_variance_\" + prop)\n\n for entry in entries:\n tmp_list = []\n elem_ids = entry.get_element_ids()\n elem_fractions = entry.get_element_fractions()\n\n if len(elem_fractions) == 1:\n for i in range(n_statistics):\n tmp_list.append(np.nan)\n feat_values.append(tmp_list)\n continue\n\n pair_weights = []\n for i in range(len(elem_fractions)):\n for j in range(i):\n pair_weights.append(elem_fractions[i]*elem_fractions[j])\n\n total_sum = sum(pair_weights)\n for i in range(len(pair_weights)):\n pair_weights[i] /= total_sum\n\n # Look up values for each pair property.\n for prop in self.elemental_pair_properties:\n tmp_prop = []\n\n for i in range(len(elem_fractions)):\n e_i = elem_ids[i]\n for j in range(i):\n e_j = elem_ids[j]\n idx_1 = max(e_i, e_j)\n idx_2 = min(e_i, e_j)\n tmp_prop.append(self.pair_lookup_data[prop][idx_1][\n idx_2])\n\n max_ = max(tmp_prop)\n min_= min(tmp_prop)\n range_ = max_ - min_\n mean_ = np.average(tmp_prop, 
weights=pair_weights)\n variance_ = np.average([abs(x - mean_) for x in tmp_prop],\n weights=pair_weights)\n tmp_list.append(max_)\n tmp_list.append(min_)\n tmp_list.append(range_)\n tmp_list.append(mean_)\n tmp_list.append(variance_)\n\n\n feat_values.append(tmp_list)\n\n features = pd.DataFrame(feat_values, columns=feat_headers)\n return features\n","repo_name":"hachmannlab/chemml","sub_path":"chemml/chem/magpie_python/attributes/generators/composition/ElementPairPropertyAttributeGenerator.py","file_name":"ElementPairPropertyAttributeGenerator.py","file_ext":"py","file_size_in_byte":6923,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"3"} +{"seq_id":"9178089579","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom IPython.display import clear_output, display\n\nclass LiveFigure():\n\tdef __init__(self, fig=None, figsize=None, sleep=True):\n\t\tif fig is None:\n\t\t\tself.fig, self.axes = plt.subplots(1, 1, figsize=figsize)\n\t\t\tself.axes = [self.axes]\n\t\telse:\n\t\t\tself.fig = fig\n\t\t\tself.axes = self.fig.axes\n\t\tif not isinstance(self.fig.axes, list):\n\t\t\tself.axes = [self.fig.axes]\n\n\t\tself.sleep = sleep\n\n\tdef update(self, data, axes=None):\n\t\tif axes is None:\n\t\t\taxes = np.arange(len(self.axes))\n\n\t\tfor (x, y, label, title), ax_idx in zip(data, axes):\n\t\t\tself.axes[ax_idx].cla()\n\t\t\tself.axes[ax_idx].plot(np.transpose(x), np.transpose(y))\n\t\t\tself.axes[ax_idx].set_title(title)\n\t\t\tself.axes[ax_idx].legend(label)\n\t\tclear_output(wait=True)\n\t\tdisplay(self.fig)\n\t\tif self.sleep:\n\t\t\tplt.pause(0.5)","repo_name":"alex-bene/pytorch-utils","sub_path":"pytorchUtils/LiveFigure.py","file_name":"LiveFigure.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"23948823254","text":"from guillotina import testing\nfrom guillotina.tests.fixtures import ContainerRequesterAsyncContextManager\n\nimport json\nimport pytest\n\n\ndef base_settings_configurator(settings):\n if 'applications' in settings:\n settings['applications'].append('guillotina_chat')\n else:\n settings['applications'] = ['guillotina_chat']\n\n\ntesting.configure_with(base_settings_configurator)\n\n\nclass guillotina_chat_Requester(ContainerRequesterAsyncContextManager): # noqa\n\n async def __aenter__(self):\n await super().__aenter__()\n resp = await self.requester(\n 'POST', '/db/guillotina/@addons',\n data=json.dumps({\n 'id': 'guillotina_chat'\n })\n )\n return self.requester\n\n\n@pytest.fixture(scope='function')\nasync def guillotina_chat_requester(guillotina):\n return guillotina_chat_Requester(guillotina)\n","repo_name":"robystar/guillotina_chat","sub_path":"guillotina_chat/tests/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33697286323","text":"'''\nInit file which indidates which cells are to be used for each spreadsheet.\nThis is represented as a dictionary, with a key of the museum name and a\nvalue of the list of relevant columns.\n\nWARNING: The key must exactly match the museum spreadsheet file name with the\nfile extension dropped. 
For example: the key for penn-museum.csv would be\npenn-museum\n\nvalue array represents: \n[artifact name, country, acquisition date, created date, description of artifact]\n'''\n\ncolumn_args = {\n 'canada-science-and-technology-museums': [1,6,0,9,2],\n 'cleveland-museum-of-art': [5,13,1,10,3],\n 'cooper-hewitt-smithsonian-design-museum': [24,34,35,2,5],\n 'metropolitan-museum-of-art': [5,31,0,22,6],\n 'museum-of-modern-art': [0,4,15,8,9],\n 'penn-museum': [3,6,12,10,14],\n 'minneapolis-institute-of-art': ['title', 'country', 'creditline', 'dated', 'title']\n\n}","repo_name":"madicooley/museum_vis","sub_path":"data/data_init.py","file_name":"data_init.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30729212955","text":"\"\"\"Code for aligning all exons to each of the reads.\"\"\"\n\nimport tempfile\n\nimport fire\nimport pandas as pd\nfrom Bio import SeqIO\n\nfrom gencdna.blast.alignment_output import AlignmentOutputParser\nfrom gencdna.config.alignment_output import USEARCH_COLUMN_NAMES\nfrom gencdna.external_calls import BinaryExecutable\n\n\ndef align_exons_vs_single_read(\n exons_fasta_file: str,\n read_fasta_file: str,\n config: str,\n) -> pd.DataFrame:\n align = BinaryExecutable('blastn', config)\n align_output = align.run(\n '-subject_besthit',\n '-query',\n exons_fasta_file,\n '-subject',\n read_fasta_file,\n )\n return (\n AlignmentOutputParser(USEARCH_COLUMN_NAMES, align_output)\n .output_as_dataframe()\n .sort_values(by=['subject_id', 'query_id'])\n )\n\n\ndef align_exons_vs_reads(\n exons_fasta_file: str,\n reads_fasta_file: str,\n config: str = 'config/blast.yml',\n) -> pd.DataFrame:\n alignment_outputs: list[pd.DataFrame] = []\n for read in SeqIO.parse(reads_fasta_file, 'fasta'):\n with tempfile.NamedTemporaryFile() as single_read_fasta:\n SeqIO.write(read, single_read_fasta.name, 'fasta')\n alignment_outputs.append(\n align_exons_vs_single_read(\n exons_fasta_file=exons_fasta_file,\n read_fasta_file=single_read_fasta.name,\n config=config,\n ),\n )\n return pd.concat(alignment_outputs)\n\n\ndef main(\n input_exons_fasta: str,\n input_reads_fasta: str,\n alignment_output: str,\n config: str = 'config/blast.yml',\n) -> None:\n alignment_output_df = align_exons_vs_reads(\n exons_fasta_file=input_exons_fasta,\n reads_fasta_file=input_reads_fasta,\n config=config,\n )\n alignment_output_df.to_csv(alignment_output, sep='\\t', index=False)\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n","repo_name":"igor-sb/gencdna","sub_path":"gencdna/file_api/align_exons_to_reads.py","file_name":"align_exons_to_reads.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28792700707","text":"import os\nimport re\nimport nltk\nimport string\nimport random\nfrom nltk.text import Text\nfrom statistics import mode\nfrom nltk.corpus import stopwords\nfrom nltk.classify import ClassifierI\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import sent_tokenize\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.metrics import f1_score, precision_score, accuracy_score, recall_score\n\nrelevantWords = []\ntotalReviews = []\n\ndef getRelevantWords():\n\n global relevantWords\n global 
totalReviews\n stop_words = list(set(stopwords.words('english')))\n rePunctuation = r'[^(a-zA-Z)\\s]'\n\n #Read in all the lines for the negative reviews\n neg_data_files = os.listdir('neg')\n original_neg_data = [open('neg/'+ fileName, 'r').read() for fileName in neg_data_files]\n\n #Read in all the lines for the positive reviews\n pos_data_files = os.listdir('pos')\n original_pos_data = [open('pos/'+ fileName, 'r').read() for fileName in pos_data_files]\n\n #Preprocess the negative words: strip out punctuation and all stop words. Add all adjetives to the list of relevant words\n for review in original_neg_data:\n\n totalReviews.append((review, \"N\"))\n\n punctuationless_review = re.sub(rePunctuation,'', review)\n\n tokenized_review = word_tokenize(punctuationless_review)\n\n stoppless_review = [word for word in tokenized_review if not word in stop_words]\n\n pos_tagged_review = nltk.pos_tag(stoppless_review)\n\n for word in pos_tagged_review:\n pos = word[1]\n if( pos == \"JJ\" or pos == \"JJR\" or pos == \"JJS\" or pos == \"RB\" or pos == \"RBR\" or pos == \"RBS\" ):\n relevantWords.append(word[0])\n\n #Preprocess the positive words: strip out punctuation and all stop words. Add all adjetives to the list of relevant words\n for review in original_pos_data:\n\n totalReviews.append((review, \"P\"))\n\n punctuationless_review = re.sub(rePunctuation,'', review)\n\n tokenized_review = word_tokenize(punctuationless_review)\n\n stoppless_review = [word for word in tokenized_review if not word in stop_words]\n\n pos_tagged_review = nltk.pos_tag(stoppless_review)\n\n #Only consider adjectives and adverbs\n for word in pos_tagged_review:\n pos = word [1]\n if( pos == \"JJ\" or pos == \"JJR\" or pos == \"JJS\" or pos == \"RB\" or pos == \"RBR\" or pos == \"RBS\" ):\n relevantWords.append(word[0])\n\n\n#for every word in our bag of words check to see if that word is in the review. Set word to true or false depending\ndef find_features(review, word_features):\n words = word_tokenize(review)\n features = {}\n for word in word_features:\n features[word] = (word in words)\n return features\n\n#Given a classifier, calculate the accuracy, fscore and recall of that classifier\ndef runClassifier(classifier, classifierType, testing_set):\n\n actual_sentiment = [review[1] for review in testing_set]\n predicted_sentimet = [classifier.classify(review[0]) for review in testing_set]\n\n accuracy = accuracy_score(actual_sentiment, predicted_sentimet, normalize=True)*100\n fscore = f1_score(actual_sentiment, predicted_sentimet, average = \"macro\")\n recall = recall_score(actual_sentiment, predicted_sentimet, average = \"macro\")\n precision = precision_score(actual_sentiment, predicted_sentimet, average = \"macro\")\n print(classifierType + \"accuracy percent is: \", accuracy)\n print(classifierType + \"fscore is: \", fscore)\n print(classifierType + \"recall is \", recall)\n print(classifierType + \"recall is \", precision)\n \n\n print('\\n\\n')\nif __name__ == \"__main__\":\n print('\\nThis model tests how the size of feature sets influences performance. \\n This will take a several minutes to run to completion. \\n It will take longer than the baseline model. 
\\n It has to train and test six feature sets on 3 machine learning models.\\n', flush=True)\n print('\\n Please refer to vectorSizingResults.png to see what an example of the completed program produces to std out\\n', flush=True)\n getRelevantWords()\n\n #Get the frequency distributuion of the words to be used in the feature sets\n bag_of_words = nltk.FreqDist(relevantWords)\n\n\n print(\"------------------------------------------------------------------------\\n\", flush=True) \n for i in range(5, 11):\n num_words = i * 1000\n print(\"Now beggining creation of feature set of size \", num_words, flush=True)\n word_features = list(bag_of_words.keys())[:num_words]\n featuresets = [(find_features(review, word_features), sentiment) for (review, sentiment) in totalReviews ]\n \n \n print('\\nCreating testing and training sets.\\n', flush=True)\n random.shuffle(featuresets)\n \n training_set = featuresets[:1500]\n testing_set = featuresets[1500:]\n\n\n logRegressionClassifier = SklearnClassifier(LogisticRegression()).train(training_set)\n\n print(\"Now testing on Naive Bayes Classifier.... \", flush=True)\n naiveBayesClassifier = nltk.NaiveBayesClassifier.train(training_set)\n runClassifier(naiveBayesClassifier, \"Naive Bayes Classifier \", testing_set)\n\n\n print(\"Now testing on Logistic Regression Classifier.... \", flush=True)\n logRegressionClassifier = SklearnClassifier(LogisticRegression()).train(training_set)\n runClassifier(logRegressionClassifier, \"Logistic Regression Classifier \", testing_set)\n\n\n print(\"Now testing on Support Vector Classifier.... \", flush=True) \n supportVectorClassifier = SklearnClassifier(SVC()).train(training_set)\n runClassifier(supportVectorClassifier, \"Support Vector Classifier \", testing_set)\n\n print(\"------------------------------------------------------------------------ \\n\", flush=True) \n","repo_name":"kimyaBuckner/sentimentAnalysis","sub_path":"incremental.py","file_name":"incremental.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18330863161","text":"import sys\nimport math\nimport bisect\nfrom heapq import heapify, heappop, heappush\nfrom collections import deque, defaultdict, Counter\nfrom functools import lru_cache\nfrom itertools import accumulate, combinations, permutations\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\nMOD99 = 998244353\n\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\nSMI = lambda: input().split()\nSLI = lambda: list(SMI())\nEI = lambda m: [NLI() for _ in range(m)]\n\n\ndef main():\n N, K = NMI()\n A = NLI()\n INF = 10**10\n ans = INF\n D = deque()\n C = [0] * (K+1)\n now = 0\n\n for a in A:\n D.append(a)\n if 1 <= a <= K:\n if C[a] == 0:\n now += 1\n C[a] += 1\n\n while D:\n if D[0] > K:\n D.popleft()\n elif C[D[0]] > 1:\n C[D[0]] -= 1\n D.popleft()\n else:\n break\n\n if now == K:\n ans = min(ans, len(D))\n\n if ans == INF:\n ans = 0\n\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"AOJ/DSL_3_B.py","file_name":"DSL_3_B.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40293271233","text":"from aiogram.filters.callback_data import CallbackData\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\n\n\nclass 
CallbackVote(CallbackData, prefix=\"vt\"):\n action: str\n current_photo_count: str\n current_photo_id: str\n amount_photos: str\n group_id: str\n\n\nclass Actions:\n next = \"➡️\"\n next_text = \"n\"\n prev = \"⬅️\"\n prev_text = \"pr\"\n no_like = \"🤍\"\n no_like_text = \"nl\"\n like = \"❤️\"\n like_text = \"l\"\n amount = \"/\"\n count = \"-\"\n finish = \"Отправить голос 🏁\"\n finish_text = \"f\"\n\n\nclass KeyboardButtons:\n def __init__(\n self,\n group_id: str,\n current_photo_id: str,\n c_photo_count: str,\n amount_photos: str,\n ) -> None:\n self.actions = Actions()\n self.button_next = InlineKeyboardButton(\n text=self.actions.next,\n callback_data=CallbackVote(\n action=self.actions.next_text,\n current_photo_id=current_photo_id,\n current_photo_count=c_photo_count,\n amount_photos=amount_photos,\n group_id=group_id,\n ).pack(),\n )\n self.button_prev = InlineKeyboardButton(\n text=self.actions.prev,\n callback_data=CallbackVote(\n action=self.actions.prev_text,\n current_photo_id=current_photo_id,\n current_photo_count=c_photo_count,\n amount_photos=amount_photos,\n group_id=group_id,\n ).pack(),\n )\n self.no_like = InlineKeyboardButton(\n text=self.actions.no_like,\n callback_data=CallbackVote(\n action=self.actions.no_like_text,\n current_photo_id=current_photo_id,\n current_photo_count=c_photo_count,\n amount_photos=amount_photos,\n group_id=group_id,\n ).pack(),\n )\n self.like = InlineKeyboardButton(\n text=self.actions.like,\n callback_data=CallbackVote(\n action=self.actions.like_text,\n current_photo_id=current_photo_id,\n current_photo_count=c_photo_count,\n amount_photos=amount_photos,\n group_id=group_id,\n ).pack(),\n )\n self.amount = InlineKeyboardButton(\n text=c_photo_count + self.actions.amount + amount_photos,\n callback_data=CallbackVote(\n action=self.actions.count,\n current_photo_id=current_photo_id,\n current_photo_count=c_photo_count,\n amount_photos=amount_photos,\n group_id=group_id,\n ).pack(),\n )\n self.finish = InlineKeyboardButton(\n text=self.actions.finish,\n callback_data=CallbackVote(\n action=self.actions.finish_text,\n current_photo_id=current_photo_id,\n current_photo_count=c_photo_count,\n amount_photos=amount_photos,\n group_id=group_id,\n ).pack(),\n )\n\n\nclass Keyboard:\n def __init__(\n self,\n current_photo_id: str,\n current_photo_count: str,\n amount_photos: str,\n group_id: str,\n ) -> None:\n self.buttons = KeyboardButtons(\n group_id, current_photo_id, current_photo_count, amount_photos\n )\n self.keyboard_start = InlineKeyboardMarkup(\n inline_keyboard=[\n [self.buttons.amount, self.buttons.button_next],\n [self.buttons.no_like],\n ]\n )\n self.keyboard_start_liked = InlineKeyboardMarkup(\n inline_keyboard=[\n [self.buttons.amount, self.buttons.button_next],\n [self.buttons.like],\n ]\n )\n self.keyboard_vote = InlineKeyboardMarkup(\n inline_keyboard=[\n [\n self.buttons.button_prev,\n self.buttons.amount,\n self.buttons.button_next,\n ],\n [self.buttons.no_like],\n ]\n )\n self.keyboard_vote_liked = InlineKeyboardMarkup(\n inline_keyboard=[\n [\n self.buttons.button_prev,\n self.buttons.amount,\n self.buttons.button_next,\n ],\n [self.buttons.like],\n ]\n )\n self.keyboard_end = InlineKeyboardMarkup(\n inline_keyboard=[\n [self.buttons.button_prev, self.buttons.amount],\n [self.buttons.no_like],\n [self.buttons.finish],\n ]\n )\n self.keyboard_end_liked = InlineKeyboardMarkup(\n inline_keyboard=[\n [self.buttons.button_prev, self.buttons.amount],\n [self.buttons.like],\n [self.buttons.finish],\n ]\n )\n\n @classmethod\n def 
fromcallback(cls, cb: CallbackVote):\n return cls(\n group_id=cb.group_id,\n current_photo_count=cb.current_photo_count,\n current_photo_id=cb.current_photo_id,\n amount_photos=cb.amount_photos,\n )\n","repo_name":"vlle/Photoshnaya","sub_path":"app/utils/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"74319389842","text":"with open('input.txt') as line:\n matchhistory = [x.strip('\\n') for x in line]\n\n # Part 1\n result1 = {'A X':4, 'A Y':8, 'A Z':3, 'B X':1, 'B Y':5, 'B Z':9, 'C X':7, 'C Y':2, 'C Z':6} \n points1 = sum([result1[match] for match in matchhistory])\n print(points1)\n\n # Part 2 \n result2 = {'A X':3, 'A Y':4, 'A Z':8, 'B X':1, 'B Y':5, 'B Z':9, 'C X':2, 'C Y':6, 'C Z':7} \n points2 = sum([result2[match] for match in matchhistory])\n print(points2)","repo_name":"Chengro123/Advent-of-Code-2022","sub_path":"day02/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37631605005","text":"def parse(text) -> dict:\r\n the_mark = text.find('?')\r\n new_text = text[the_mark + 1:]\r\n if new_text == '' or '?' not in text:\r\n the_dict = {}\r\n else:\r\n final_text = new_text.split('&')\r\n if '' in final_text:\r\n final_text.remove('')\r\n new_list = []\r\n for value in final_text:\r\n split_text = value.split('=', 1)\r\n new_list.append(split_text)\r\n the_dict = dict(new_list)\r\n return the_dict\r\n\r\nif __name__ == '__main__':\r\n assert parse('https://example.com/path/to/page?name=ferret&color=purple') == {'name': 'ferret', 'color': 'purple'}\r\n assert parse('https://example.com/path/to/page?name=ferret&color=purple&') == {'name': 'ferret', 'color': 'purple'}\r\n assert parse('http://example.com/') == {}\r\n assert parse('http://example.com/?') == {}\r\n assert parse('http://example.com/?name=Dima') == {'name': 'Dima'}\r\n\r\n\r\ndef parse_cookie(text) -> dict:\r\n if text == '':\r\n the_dict = {}\r\n else:\r\n new_text = text.split(';')\r\n if '' in new_text:\r\n new_text.remove('')\r\n new_list = []\r\n for value in new_text:\r\n split_text = value.split('=', 1)\r\n new_list.append(split_text)\r\n the_dict = dict(new_list)\r\n return the_dict\r\n\r\nif __name__ == '__main__':\r\n assert parse_cookie('name=Dima;') == {'name': 'Dima'}\r\n assert parse_cookie('') == {}\r\n assert parse_cookie('name=Dima;age=28;') == {'name': 'Dima', 'age': '28'}\r\n assert parse_cookie('name=Dima=User;age=28;') == {'name': 'Dima=User', 'age': '28'}","repo_name":"YoshioYabusaki/python-lesson-Yoshio","sub_path":"Python course June 21/H.W. Yoshio 01.py","file_name":"H.W. 
Yoshio 01.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23871617565","text":"\"\"\"Test Docker API.\"\"\"\n\nimport asyncio\nfrom unittest.mock import ANY\n\nfrom aiohttp.test_utils import TestClient\n\nfrom supervisor.coresys import CoreSys\nfrom supervisor.jobs.const import ATTR_IGNORE_CONDITIONS, JobCondition\nfrom supervisor.jobs.decorator import Job\n\n\nasync def test_api_jobs_info(api_client: TestClient):\n \"\"\"Test jobs info api.\"\"\"\n resp = await api_client.get(\"/jobs/info\")\n result = await resp.json()\n\n assert result[\"data\"][ATTR_IGNORE_CONDITIONS] == []\n assert result[\"data\"][\"jobs\"] == []\n\n\nasync def test_api_jobs_options(api_client: TestClient, coresys: CoreSys):\n \"\"\"Test jobs options api.\"\"\"\n resp = await api_client.post(\n \"/jobs/options\", json={ATTR_IGNORE_CONDITIONS: [JobCondition.HEALTHY]}\n )\n result = await resp.json()\n assert result[\"result\"] == \"ok\"\n\n resp = await api_client.get(\"/jobs/info\")\n result = await resp.json()\n assert result[\"data\"][ATTR_IGNORE_CONDITIONS] == [JobCondition.HEALTHY]\n\n assert coresys.jobs.save_data.called\n\n\nasync def test_api_jobs_reset(api_client: TestClient, coresys: CoreSys):\n \"\"\"Test jobs reset api.\"\"\"\n resp = await api_client.post(\n \"/jobs/options\", json={ATTR_IGNORE_CONDITIONS: [JobCondition.HEALTHY]}\n )\n result = await resp.json()\n assert result[\"result\"] == \"ok\"\n\n resp = await api_client.get(\"/jobs/info\")\n result = await resp.json()\n assert result[\"data\"][ATTR_IGNORE_CONDITIONS] == [JobCondition.HEALTHY]\n\n assert coresys.jobs.save_data.called\n assert coresys.jobs.ignore_conditions == [JobCondition.HEALTHY]\n\n coresys.jobs.save_data.reset_mock()\n resp = await api_client.post(\"/jobs/reset\")\n result = await resp.json()\n assert result[\"result\"] == \"ok\"\n\n assert coresys.jobs.ignore_conditions == []\n coresys.jobs.save_data.assert_called_once()\n\n\nasync def test_jobs_tree_representation(api_client: TestClient, coresys: CoreSys):\n \"\"\"Test jobs are correctly represented in a tree.\"\"\"\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.event = asyncio.Event()\n\n @Job(name=\"test_jobs_tree_outer\")\n async def test_jobs_tree_outer(self):\n \"\"\"Outer test method.\"\"\"\n coresys.jobs.current.progress = 50\n await self.test_jobs_tree_inner()\n\n @Job(name=\"test_jobs_tree_inner\")\n async def test_jobs_tree_inner(self):\n \"\"\"Inner test method.\"\"\"\n await self.event.wait()\n\n @Job(name=\"test_jobs_tree_alt\", cleanup=False)\n async def test_jobs_tree_alt(self):\n \"\"\"Alternate test method.\"\"\"\n coresys.jobs.current.stage = \"init\"\n await self.test_jobs_tree_internal()\n coresys.jobs.current.stage = \"end\"\n\n @Job(name=\"test_jobs_tree_internal\", internal=True)\n async def test_jobs_tree_internal(self):\n \"\"\"Internal test method.\"\"\"\n await self.event.wait()\n\n test = TestClass(coresys)\n asyncio.create_task(test.test_jobs_tree_outer())\n asyncio.create_task(test.test_jobs_tree_alt())\n await asyncio.sleep(0)\n\n resp = await api_client.get(\"/jobs/info\")\n result = await resp.json()\n assert result[\"data\"][\"jobs\"] == [\n {\n \"name\": \"test_jobs_tree_outer\",\n \"reference\": None,\n \"uuid\": ANY,\n \"progress\": 50,\n \"stage\": None,\n \"done\": False,\n \"child_jobs\": [\n {\n \"name\": 
\"test_jobs_tree_inner\",\n \"reference\": None,\n \"uuid\": ANY,\n \"progress\": 0,\n \"stage\": None,\n \"done\": False,\n \"child_jobs\": [],\n },\n ],\n },\n {\n \"name\": \"test_jobs_tree_alt\",\n \"reference\": None,\n \"uuid\": ANY,\n \"progress\": 0,\n \"stage\": \"init\",\n \"done\": False,\n \"child_jobs\": [],\n },\n ]\n\n test.event.set()\n await asyncio.sleep(0)\n\n resp = await api_client.get(\"/jobs/info\")\n result = await resp.json()\n assert result[\"data\"][\"jobs\"] == [\n {\n \"name\": \"test_jobs_tree_alt\",\n \"reference\": None,\n \"uuid\": ANY,\n \"progress\": 0,\n \"stage\": \"end\",\n \"done\": True,\n \"child_jobs\": [],\n },\n ]\n","repo_name":"home-assistant/supervisor","sub_path":"tests/api/test_jobs.py","file_name":"test_jobs.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":1510,"dataset":"github-code","pt":"3"} +{"seq_id":"39496286104","text":"from app import app\r\nfrom flask import render_template, url_for\r\nfrom forms import Registration, Login\r\nfrom flask import flash, redirect\r\napp.config['SECRET_KEY'] = '124hjok83241f924f293d123dgs3'\r\nposts = [\r\n {\r\n 'author': {'username': 'John'},\r\n 'body': 'Beautiful day in Portland!'\r\n },\r\n {\r\n 'author': {'username': 'Jane'},\r\n 'body': 'Beautiful day in Portland!' \r\n }\r\n]\r\nusername = {'username': 'Miguel'}\r\n#sets the URL to the def function and will display when connection is made to the \"/XXXX\" \r\n@app.route('/')\r\n@app.route('/index')\r\n#defines index to be used on the @app.route above\r\ndef index():\r\n return render_template('index.html', title = 'Home', post = posts, user = username)\r\n@app.route('/about')\r\ndef about():\r\n return render_template('about.html',)\r\n@app.route('/register', methods = ['GET', 'POST'])\r\ndef register():\r\n form = Registration()\r\n if form.validate_on_submit():\r\n flash(f'Account Created for {form.username.data}!','success')\r\n return redirect(url_for('index'))\r\n return render_template('register.html', title = 'Register', form=form)\r\n@app.route('/login')\r\ndef login():\r\n form = Login()\r\n return render_template('login.html', title = 'Login', form=form)","repo_name":"Darkness6917/Website","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5851482324","text":"\"\"\"\n File name: solver.py\n Author: Scott Whitney\n Date created: 12/10/21\n Date last modified: 12/10/21\n Python Version: 3.9.7\n\n Comments:\n My first attempt at a backtracking algorithm in python to \n help solve a Sudoku grid.\n\"\"\"\n\n\ndef issafe(grid: list[list[int]], row: int, col: int, num: int):\n \"\"\"Check if it is safe to place a number in the grid.\n\n Args:\n grid (list[list[int]]): The Sudoku grid.\n row (int): Row number in grid.\n col (int): Column number in grid.\n num (int): Number to check placement of.\n\n Returns:\n bool: True if okay to place, otherwise False.\n \"\"\"\n if usedinrow(grid, row, num):\n return False\n\n if usedincolumn(grid, col, num):\n return False\n\n if usedinbox(grid, row - row % 3, col - col % 3, num):\n return False\n\n return True\n\n\ndef usedinrow(grid: list[list[int]], row: int, num: int):\n for i in range(9):\n if grid[row][i] == num:\n return True\n return False\n\n\ndef usedincolumn(grid: list[list[int]], col: int, num: int):\n for i in range(9):\n if grid[i][col] == num:\n return True\n return False\n\n\ndef 
usedinbox(grid: list[list[int]], row: int, col: int, num: int):\n for i in range(3):\n for j in range(3):\n if grid[i + row][j + col] == num:\n return True\n return False\n\n\ndef findnextemptycell(grid: list[list[int]], grid_loc: list[int]):\n \"\"\"Finds the next empty cell in the Sudoku grid.\n\n Args:\n grid (list[list[int]]): The Sudoku grid.\n grid_loc (list[int]): The location of the grid cell, when found.\n\n Returns:\n bool: True if empty cell found, otherwise False.\n \"\"\"\n for row in range(9):\n for col in range(9):\n if grid[row][col] == 0:\n grid_loc[0] = row\n grid_loc[1] = col\n return True\n return False\n\n\ndef solve(grid: list[list[int]]):\n print('Attemping to solve...')\n\n emptycell = [0, 0]\n\n if not findnextemptycell(grid, emptycell):\n return True # no more empty cells, solved!\n\n row = emptycell[0]\n col = emptycell[1]\n\n # for numbers 1 to 9\n for num in range(1, 10):\n if issafe(grid, row, col, num):\n # try placement\n grid[row][col] = num\n\n if solve(grid):\n return True\n\n # placement wasn't safe, try again.\n grid[row][col] = 0\n\n return False\n\n\ndef printgrid(grid: list[list[int]]):\n \"\"\"Prints the grid out on the screen.\n\n Args:\n grid (list[list[int]]): The Sudoku grid.\n \"\"\"\n for i in range(9):\n for j in range(9):\n print(grid[i][j], end='')\n print(',', end='')\n print()\n\n\ndef main():\n\n # 0 means unassigned cells\n grid = [[3, 0, 6, 5, 0, 8, 4, 0, 0],\n [5, 2, 0, 0, 0, 0, 0, 0, 0],\n [0, 8, 7, 0, 0, 0, 0, 3, 1],\n [0, 0, 3, 0, 1, 0, 0, 8, 0],\n [9, 0, 0, 8, 6, 3, 0, 0, 5],\n [0, 5, 0, 0, 9, 0, 6, 0, 0],\n [1, 3, 0, 0, 0, 0, 2, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 7, 4],\n [0, 0, 5, 2, 0, 6, 3, 0, 0]]\n\n expected_grid = [[3, 1, 6, 5, 7, 8, 4, 9, 2],\n [5, 2, 9, 1, 3, 4, 7, 6, 8],\n [4, 8, 7, 6, 2, 9, 5, 3, 1],\n [2, 6, 3, 4, 1, 5, 9, 8, 7],\n [9, 7, 4, 8, 6, 3, 1, 2, 5],\n [8, 5, 1, 7, 9, 2, 6, 4, 3],\n [1, 3, 8, 9, 4, 7, 2, 5, 6],\n [6, 9, 2, 3, 5, 1, 8, 7, 4],\n [7, 4, 5, 2, 8, 6, 3, 1, 9]]\n\n solve(grid)\n\n if (grid == expected_grid):\n print('Solved!')\n printgrid(grid)\n\n else:\n print('Solve failed!')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"puppetsw/Sudoku_Solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19605858439","text":"from django import forms\n\nfrom feedsione.news.models import *\nfrom feedsione.users.models import *\n\nclass FolderCreateForm(forms.ModelForm):\n class Meta:\n model = Folder\n fields = ['name', ]\n exclude = ('user', )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n super(FolderCreateForm, self).__init__(*args, **kwargs)\n\n def clean_name(self):\n name = self.cleaned_data['name']\n if Folder.objects.filter(user=self.user, name=name).exists():\n raise forms.ValidationError(\"You already have created a folder with same name.\")\n return name\n\n\nclass FeedCreateForm(forms.ModelForm):\n class Meta:\n model = Feed\n fields = [\n 'feed_url',\n 'title',\n 'frequency',\n ]\n\n def clean_frequency(self):\n frequency = self.cleaned_data['frequency']\n if frequency < 10:\n raise forms.ValidationError('Frequency has to be equal or greater than 10 minutes')\n return frequency\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n super(FeedCreateForm, self).__init__(*args, **kwargs)\n self.fields['folders'] = forms.ModelMultipleChoiceField(queryset=Folder.objects.filter(user=self.user), 
required=False, widget=forms.CheckboxSelectMultiple)\n\n def save(self, commit=True):\n feed = super(FeedCreateForm, self).save()\n folders = self.cleaned_data.get('folders')\n\n for folder in folders:\n FeedSubscription.objects.create(feed=feed, folder=folder)\n\n return feed\n\n\nclass MarkReadForm(forms.Form):\n day = forms.IntegerField(required=True, min_value=0)\n\n\nclass ArticleFilterForm(forms.Form):\n\n SORTING_CHOICE = (\n ('-date_published', 'Newest first'),\n ('date_published', 'Oldest first'),\n )\n\n sorting = forms.ChoiceField(choices=SORTING_CHOICE, label='SORTING', widget=forms.Select)\n\n\nclass UserArticleForm(forms.Form):\n article_slug = forms.SlugField(required=True)\n def clean_article_slug(self):\n slug = self.cleaned_data.get('article_slug')\n if not Article.objects.filter(slug=slug).exists():\n raise forms.ValidationError('Non existent article.')\n return slug\n\nclass MarkAsReadArticleForm(UserArticleForm):\n is_read = forms.BooleanField(required=False)\n\nclass ReadLaterArticleForm(UserArticleForm):\n is_read_later = forms.BooleanField(required=False)\n\nclass SaveArticleForm(UserArticleForm):\n is_saved = forms.BooleanField(required=False)\n","repo_name":"thhoang-tdh/feedsione","sub_path":"feedsione/news/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8094049171","text":"from urllib.request import urlopen\nfrom urllib.parse import quote_plus\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport csv\n\n\nclass GooglePlace(object):\n def __init__(\n self,\n baseUrl=\"https://www.google.com/search?tbs=lrf:!1m4!1u3!2m2!3m1!1e1!2m1!1e3!3sIAE,lf:1,lf_ui:4&tbm=lcl&sxsrf=AOaemvLlmSgd_57QwWo0ESSFQQZXIXZ1ag:1635680689664&q=\",\n window=True,\n dropna=True,\n ):\n self.input = input('search : ')\n self.url = baseUrl + self.input\n self.window = window\n self.dropna = dropna\n\n if self.window:\n \"\"\"\n Chromedriver Path\n 1. Linux : '/path/to/chromedriver'\n 2. 
Windows : ex) 'C:\\path\\to\\chromedriver.exe'\n \"\"\"\n self.driver = webdriver.Chrome('./chromedriver')\n self.driver.get(self.url)\n else:\n \"\"\"\n WARNING : headless모드로 실행시 봇으로 간주되서 제재당할 수 있음.\n Bypass : 아래 user-agent에 자신의 크롬브라우저 user-agent 값으로 수정한다.\n ## user-agent 확인하는 방법 : 크롬창을 하나 열어서 'Ctrl + Shift + i' 를 누르면 콘솔창이 뜨는데 거기에서 Console 탭 클릭 -> navigator.userAgent 입력후 엔터\n \"\"\"\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('window-size=1920x1080')\n options.add_argument(\"disable-gpu\")\n options.add_argument(\"user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\")\n self.driver = webdriver.Chrome('./chromedriver', chrome_options=options)\n self.driver.get(self.url)\n self._parse()\n\n def _parse(self):\n f = open(f'{self.input}.csv', 'w', encoding='utf-8', newline='')\n csvWriter = csv.writer(f)\n\n page_number = 0\n end = 0\n while True:\n try:\n html = self.driver.page_source\n soup = BeautifulSoup(html, 'lxml')\n btn = self.driver.find_element_by_xpath('//*[@id=\"pnnext\"]')\n except:\n btn = None\n\n page_number += 1\n print(f'\\rpage_number : {page_number} page', end='')\n\n names = []\n details = []\n\n place_names = soup.find_all(class_='dbg0pd eDIkBe')\n place_details = soup.find_all(class_='rllt__details')\n place_info = zip(place_names, place_details)\n\n for n_idx, d_idx in place_info:\n if n_idx.span and d_idx.div:\n names.append(n_idx.span.text)\n details.append(d_idx.div.text)\n else:\n pass\n\n searchList = dict(zip(names, details))\n for name, detail in searchList.items():\n if self.dropna:\n if detail.find('·') == -1:\n pass\n else:\n address = detail.split('·')[0]\n phone = detail.split('·')[1]\n else:\n if detail.find('·') == -1:\n if detail.startswith('0'):\n address = 'null'\n phone = detail\n else:\n address = detail\n phone = 'null'\n else:\n address = detail.split('·')[0]\n phone = detail.split('·')[1]\n csvWriter.writerow([name, address, phone])\n \n if end:\n self.driver.close()\n break\n\n if self.window:\n if bool(btn):\n btn.click()\n else:\n end = 1\n else:\n if bool(btn):\n btn.send_keys(Keys.ENTER)\n else:\n end = 1\n time.sleep(2.5)\n\n f.close()\n print('\\ndone')\n\n\nif __name__ == '__main__':\n scraper = GooglePlace(window=True, dropna=True)\n","repo_name":"diversocean/crawler","sub_path":"collector/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71193251923","text":"def countX(L, R, X):\n count = 0\n X = str(X)\n for i in range(L + 1, R):\n s = str(i)\n if X in s:\n count += s.count(X)\n return count\n\nprint(countX(18,81,9))","repo_name":"JoyalPeter/GeekForGeeksPOTD","sub_path":"December 2023/Dec 6/MyApproach.py","file_name":"MyApproach.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11408118574","text":"import tensorflow as tf\nimport tensorflow.keras.layers as kl\n\n\nclass RFFT(tf.keras.layers.Layer):\n \"\"\"Keras wrapper for tf.signal.rfft with length normalization.\n \"\"\"\n\n def __init__(self):\n super(RFFT, self).__init__()\n\n def call(self, inputs):\n norm_divisor = tf.cast(inputs.get_shape().as_list()[-1], 'float32')\n return tf.abs(tf.signal.rfft(inputs)) / norm_divisor\n\n\ndef make_base_model(chunk_size):\n \"\"\"Returns a model that transforms audio clips into feature 
vectors.\n\n The resulting model takes a batch of 1D audio clips and produces\n a batch of sequences of feature vectors. Each sequence is the same length\n as the input clips. During inference, either the final feature vectors or\n the mean features over each sequence or a more sophisticated limit\n approximation may be used as the fingerprint.\n\n :param int: chunk_size: number of samples in FFT chunks\n :return: TensorFlow Keras model\n\n Note: LSTM is being used without the Bidirectional wrapper to entertain\n the idea that the model may be used for real-time speech analysis. Were it\n bidirectional, a full clip of audio would be necessary to run the model.\n Since it is not bidirectional, it is possible to run the model on a live\n stream of audio.\n \"\"\"\n model = tf.keras.Sequential()\n\n model.add(kl.Reshape([-1, chunk_size]))\n model.add(RFFT())\n model.add(kl.Conv1D(chunk_size, kernel_size=1,\n activation=kl.LeakyReLU()))\n model.add(kl.Conv1D(chunk_size // 2, kernel_size=1,\n activation=kl.LeakyReLU()))\n model.add(kl.LSTM(units=128,\n return_sequences=True))\n model.add(kl.LSTM(units=128,\n return_sequences=True))\n model.add(kl.Conv1D(64, kernel_size=1,\n activation=kl.LeakyReLU()))\n\n return model\n\n\ndef make_inference_model(base_model):\n model = tf.keras.Sequential()\n\n model.add(base_model)\n model.add(kl.GlobalAveragePooling1D())\n\n return model\n","repo_name":"wwilliamcook/VoiceNet","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43557067151","text":"\nnames = ['윤나은', '김현주', '장현지', '이지선', '박선주']\n\n\"\"\"\nenumerate ex1 : basic\n\"\"\"\n\nnames_asc = sorted(names)\nstud_dict = dict()\nfor num, name in enumerate(names_asc):\n stud_dict[num+1] = name\n\nprint(stud_dict)\n\n\n\"\"\"\nenumerate ex2 : set start number\n\"\"\"\n\nnames_asc = sorted(names)\nstud_dict = dict()\nfor num, name in enumerate(names_asc, 10):\n stud_dict[num] = name\n\nprint(stud_dict)\n\n\"\"\"\nenumerate ex2 : use dict comprehension\n\"\"\"\n\nstud_dict = { num : name for num, name in enumerate(sorted(names), 100)}\nprint(stud_dict)\n","repo_name":"jhryu1208/Python-Note-Repo","sub_path":"Intermediate/review/20_enumerate.py","file_name":"20_enumerate.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73157056402","text":"###########################################################################\n###########################################################################\n# Author: Yaron Khazai 2021\n# Version: 20210625.1\n###########################################################################\n###########################################################################\nimport packages.requests as requests \nimport json\nfrom datetime import datetime\nimport time\nimport packages.yaml as yaml\nfrom pprint import pprint\n\nconfig=[]\n\ndef read_yaml(file_path):\n with open(file_path, \"r\") as f:\n return yaml.safe_load(f)\n\ndef get_machine_offset():\n offset = time.timezone if (time.localtime().tm_isdst == 0) else time.altzone\n return offset / 60 / 60 * -1\n\ndef log(message,level = 5):\n global config\n if level= i:\n entry[\"direction\"] = direction_name[idx]\n break\n idx += 1\n last_y = my_value[\"y\"]\n last_x = my_value[\"x\"]\n\n entries.append(entry)\n cnt += 1\n\n #entries_json = json.dumps(entries)\n if (entries):\n nightscoutURL= 
config[\"nightscout\"][\"URL\"] + \"/api/v1/entries?token=\" + config[\"nightscout\"][\"token\"]\n log (\"trying to call %s with %s\"%(nightscoutURL,json.dumps(entries)),9)\n x = requests.post( nightscoutURL, json=entries)\n if x.status_code != 200 :\n log (\"error calling nightscout got http response : %s\" %( str(x.status_code)),1)\n return {\n 'statusCode': x.status_code,\n 'body': json.dumps('Data was not loaded! ')\n }\n else:\n return {\n 'statusCode': 200,\n 'body': json.dumps('Hello from gmns-Bridge last entry got : %s UTC'% (last_x))\n }\n else:\n log (\"No Data retrived from Glucologweb\",4)\n\nif __name__ == \"__main__\":\n print (lambda_handler(None , None))\n","repo_name":"yaronkhazai/gmns-bridge","sub_path":"src/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"9936432858","text":" #!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom datetime import datetime\r\nimport ast\r\nimport csv\r\nimport urllib2\r\nimport urllib\r\nimport json\r\nimport os\r\nimport requests\r\nfrom oauthlib.oauth2 import BackendApplicationClient\r\nimport pprint\r\nimport requests_oauthlib\r\nimport sys\r\nimport time\r\nimport os, ssl\r\nfrom ckanext.odatabcn.api import CustomApi\r\nfrom pylons import config\r\n#from api import CustomApi\r\nif (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)): \r\n ssl._create_default_https_context = ssl._create_unverified_context\r\n \r\nlog = __import__('logging').getLogger(__name__)\r\n\r\n'''\r\nObtain a dataset from the BSM API manager\r\n'''\r\n\r\nclass BsmApi (CustomApi):\r\n\r\n PROVIDER_NAME = 'bsm'\r\n\r\n resource = None\r\n app_token = None\r\n user_token = None\r\n user_id = None\r\n user_key = None\r\n user_secret = None\r\n user_token = None\r\n username = None\r\n email = None\r\n\r\n def __init__(self, resource, app_token, consumer_key, consumer_secret, user_token, user_id, user_key, user_secret, username, email):\r\n \r\n super(BsmApi, self).__init__()\r\n \r\n self.resource = resource\r\n self.app_token = app_token\r\n self.consumer_key = consumer_key\r\n self.consumer_secret = consumer_secret\r\n self.user_token = user_token\r\n self.user_id = user_id\r\n self.user_key = user_key\r\n self.user_secret = user_secret\r\n self.username = username\r\n self.email = email\r\n \r\n if app_token:\r\n self.app_token = ast.literal_eval(app_token)\r\n \r\n if user_token:\r\n self.user_token = ast.literal_eval(user_token)\r\n \r\n \r\n def execute(self):\r\n if (not self.user_key): \r\n self.registerUser()\r\n\r\n return self.getResource()\r\n \r\n \r\n def registerUser(self):\r\n pprint.pprint(\"registerUser\")\r\n pprint.pprint(self.username)\r\n url_signup = config.get('ckanext.odatabcn.api.bsm.url.signup')\r\n \r\n self.username = super(BsmApi, self).getUsername(self.username)\r\n pprint.pprint(self.username)\r\n password = super(BsmApi, self).randomString()\r\n api_name, api_version, api_provider = self.getApiInfo()\r\n data = {\r\n 'user': {\r\n 'username': self.username,\r\n 'email': self.email,\r\n 'password': password \r\n },\r\n 'api': {\r\n 'apiName': api_name,\r\n 'apiVersion': api_version,\r\n 'apiProvider': api_provider\r\n }\r\n }\r\n\r\n response = self.sendData(url_signup, data, 'POST')\r\n\r\n if response.status_code == 201 or response.status_code == 200:\r\n json_response = response.json()\r\n self.user_key = 
json_response['consumerKey']\r\n self.user_secret = json_response['consumerSecret']\r\n super(BsmApi, self).saveUser(self.user_id, self.PROVIDER_NAME, self.user_key, self.user_secret)\r\n \r\n def subscribeUser(self):\r\n url_subscription = config.get('ckanext.odatabcn.api.bsm.url.subscription').replace('{username}', self.username)\r\n \r\n api_name, api_version, api_provider = self.getApiInfo()\r\n data = {\r\n 'api': {\r\n 'apiName': api_name,\r\n 'apiVersion': api_version,\r\n 'apiProvider': api_provider\r\n }\r\n }\r\n \r\n response = self.sendData(url_subscription, data, 'PUT')\r\n\r\n return response.status_code\r\n \r\n \r\n def sendData(self, url, data, method='POST'):\r\n\r\n session = self.getAppSession()\r\n\r\n headers = {'Content-type': 'application/json'}\r\n \r\n if (method == 'POST'):\r\n response = session.post(url, data=json.dumps(data), headers=headers, verify=False)\r\n else:\r\n response = session.put(url, data=json.dumps(data), headers=headers, verify=False)\r\n\r\n \r\n pprint.pprint(response)\r\n return response\r\n \r\n \r\n def getAppSession(self):\r\n if not self.app_token or ('expires_at' in self.app_token and time.time() > self.app_token['expires_at']):\r\n client = BackendApplicationClient(client_id=self.consumer_key)\r\n session = requests_oauthlib.OAuth2Session(client=client)\r\n \r\n self.app_token = session.fetch_token(\r\n token_url=config.get('ckanext.odatabcn.api.bsm.url.token'),\r\n client_id=self.consumer_key,\r\n client_secret=self.consumer_secret,\r\n verify=False\r\n )\r\n \r\n super(BsmApi, self).saveAppToken(self.app_token, self.PROVIDER_NAME)\r\n \r\n else:\r\n session = requests_oauthlib.OAuth2Session(token = {'access_token': self.app_token['access_token']})\r\n \r\n return session\r\n \r\n def getUserSession(self):\r\n \r\n pprint.pprint(self.user_token)\r\n pprint.pprint(self.user_key)\r\n pprint.pprint(self.user_secret)\r\n if not self.user_token or ('expires_at' in self.user_token and time.time() > self.user_token['expires_at']):\r\n client_user = BackendApplicationClient(client_id=self.user_key)\r\n session = requests_oauthlib.OAuth2Session(client=client_user)\r\n\r\n self.user_token = session.fetch_token(\r\n token_url=config.get('ckanext.odatabcn.api.bsm.url.token'),\r\n client_id=self.user_key,\r\n client_secret=self.user_secret,\r\n verify=False\r\n )\r\n \r\n super(BsmApi, self).saveUserToken(self.user_token, self.user_id, self.PROVIDER_NAME)\r\n else:\r\n session = requests_oauthlib.OAuth2Session(token = {'access_token': self.user_token['access_token']})\r\n \r\n return session\r\n \r\n \r\n def getResource(self):\r\n user_session = self.getUserSession()\r\n\r\n resource_response = user_session.get(self.resource['url'], verify=False)\r\n\r\n if not resource_response.status_code == 403:\r\n return resource_response.content, resource_response.status_code, resource_response.headers\r\n elif self.subscribeUser() == 200:\r\n self.getResource()\r\n \r\n def getApiInfo(self):\r\n url_parts = self.resource['url'].split('/')\r\n api_name = url_parts[-3]\r\n api_version = url_parts[-2]\r\n return api_name, api_version, self.resource['token_provider']","repo_name":"AjuntamentdeBarcelona/ckanext-odatabcn","sub_path":"ckanext/odatabcn/api_bsm.py","file_name":"api_bsm.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"19669721800","text":"#!/usr/bin/env python\n# By Robin Lennox - twitter.com/robberbear\n\nimport os\nimport re\nimport 
shutil\nimport subprocess\nimport time\n\nimport netifaces\n\nfrom lib.Layout import colour\nfrom lib.PortCheck import check_port, portquiz_scan, traceroute_port_check, openPorts, possiblePorts\nfrom lib.ProtocolCheck import check_icmp\nfrom lib.SetupTunnel import openPort, checkTunnel, udp2rawTunnel\n\n# Import Colour Scheme\nG, Y, B, R, W = colour()\n\n\ndef replaceText(filename, origText, replaceText, ):\n s = open(filename).read()\n s = s.replace(str(origText), str(replaceText))\n f = open(filename, 'w')\n f.write(s)\n f.close()\n\n\ndef successMessage(ipAddr, port, sshuser):\n print(W + \"------------------------------\" + W)\n if sshuser:\n print(W + \"[!] Port forward using: ssh -f -N -D 8123 {0}@{1} -p{2} -i /home/{0}/.ssh/id_rsa\".format(sshuser, ipAddr, port,) + W)\n else:\n print(W + \"[!] Port forward example: ssh -f -N -D 8123 root@{0} -p{1}\".format(ipAddr, port,) + W)\n print(W + \"[!] Check it's working using: curl --proxy socks5h://localhost:8123 http://google.com\" + W)\n print(W + \"------------------------------\" + W)\n\n\ndef getInterfaces():\n interface_list = netifaces.interfaces()\n # Get Wireless Interfaces\n return filter(lambda x: 'wl' in x, interface_list)\n\n\ndef check_ports(aggressive, config, verbose, ):\n global callbackPort\n callbackPort = []\n\n if config.getboolean('SCAN','PORTQUIZ') is True:\n print(B + \"\\n[-] Running test for commonly open ports.\" + W)\n\n if verbose:\n print(Y + \"[*] Checking for open ports using portquiz.net\" + W)\n \n check_port(aggressive, config, portquiz_scan, verbose, )\n else:\n if verbose:\n print(W + \"[!] Config: Skipping portquiz.net scan\" + W)\n\n # Portquiz might be blocked so try traceroute\n if not openPorts:\n if config.getboolean('SCAN','TRACEROUTE') is True:\n if verbose:\n print(R + \"[*] portquiz.net returned no open ports\" + W)\n print(B + \"\\n[-] Running test for commonly open ports.\" + W)\n print(Y + \"[*] Checking for open ports using traceroute\" + W)\n check_port(aggressive, config, traceroute_port_check, verbose, )\n else:\n if verbose:\n print(W + \"[!] 
Config: Skipping traceroute scan\" + W)\n\n if openPorts:\n callbackPort = openPorts\n print(G + \"[+] {0} open port/s found\".format(len(callbackPort)) + W)\n else:\n print(R + \"[x] No open port found.\" + W)\n\n if possiblePorts:\n print(Y + \"[*] {0} possible port/s found\".format(len(possiblePorts)) + W)\n\n\ndef checkSSHStatus(callbackIP, callbackSSHPort):\n checkSSHFile = '/opt/breakout/lib/checkSSH.sh'\n try:\n subprocess.check_output(\n 'netstat -tnpa | grep \\'ESTABLISHED.*ssh\\' | grep {0} | grep {1}'.format(callbackIP, callbackSSHPort), shell=True)\n if os.path.isfile(checkSSHFile):\n with open(checkSSHFile) as f:\n for line in f:\n if str(re.findall(r\"(?<=callbackIP=')(.*)(?=')\", line))[2:-2]:\n ip = str(re.findall(\n r\"(?<=callbackIP=')(.*)(?=')\", line))[2:-2]\n\n if str(re.findall(r\"(?<=callbackPort=')(.*)(?=')\", line))[2:-2]:\n port = int(\n str(re.findall(r\"(?<=callbackPort=')(.*)(?=')\", line))[2:-2])\n\n print(\n Y + \"[*] Checking existing SSH port {0} is open on {1}\".format(port, ip, ) + W)\n if not openPort(port, ip) or not checkTunnel(ip, port):\n return False\n else:\n return True\n else:\n return False\n except:\n print(R + \"[x] Existing tunnel {0} is down\".format(callbackIP) + W)\n return False\n\n\ndef callbackTCP(callbackIP, config, sshuser, tunnelPassword, nameserver, verbose, ):\n status = False\n tunnelType = None\n attemptPort = None\n if callbackPort:\n print(B + \"\\n[-] Attempting to create TCP tunnel.\" + W)\n if config.getboolean('TUNNEL','TCP') is True:\n for attemptPort in callbackPort:\n count = 0\n stopCount = 100\n if verbose:\n print(\n Y + \"[*] Calling back to IP {0} on port {1}\".format(callbackIP, attemptPort,) + W)\n while (count < stopCount and status is False):\n if openPort(attemptPort, callbackIP):\n count = stopCount\n if checkTunnel(callbackIP, attemptPort):\n print(G + \"[+] SSH is Open\" + W)\n successMessage(callbackIP, attemptPort, sshuser)\n status = True\n tunnelType = 'Open Port'\n return callbackIP, attemptPort, tunnelType, status\n else:\n print(R + \"\\n[x] Port {0} open on IP {1} but unable to connect via SSH\".format(\n attemptPort, callbackIP,) + W)\n else:\n if verbose:\n print(\n B + \"[-] Waiting for port {0} to be open on IP {1}\".format(attemptPort, callbackIP,) + W)\n count = count + 1\n\n if count == stopCount:\n print(R + \"\\n[x] Port {0} not open on IP {1} after {2} attempts\".format(\n attemptPort, callbackIP, stopCount) + W)\n else:\n if verbose:\n print(W + \"[!] Config: Skipping TCP tunnel\" + W)\n else:\n print(\n R + \"[x] Can't attempt TCP Tunnel, no ports found open on IP {0}\".format(callbackIP,) + W)\n\n return callbackIP, attemptPort, tunnelType, status\n\ndef callbackNonTCP(callbackIP, config, sshuser, tunnelPassword, nameserver, verbose, ):\n print(B + \"\\n[-] Attempting to create Non TCP tunnel.\" + W)\n tunnelIP = '127.0.0.1'\n localPort = 3322\n tunnelType = None\n status = False\n if config.getboolean('TUNNEL','FAKETCP') is True:\n # Non TCP Tunnels\n tunnelType = 'faketcp'\n tunnelPort = 4001\n listenPort = 8856\n status = setupNonTCPTunnel(status, callbackIP, nameserver, tunnelIP,\n tunnelType, tunnelPort, localPort, listenPort, sshuser, tunnelPassword, verbose,)\n else:\n if verbose:\n print(W + \"[!] 
Config: Skipping fakeTCP tunnel\" + W)\n\n if status is False:\n if config.getboolean('TUNNEL','UDP') is True:\n tunnelType = 'udp'\n tunnelPort = 4003\n listenPort = 8857\n status = setupNonTCPTunnel(status, callbackIP, nameserver, tunnelIP,\n tunnelType, tunnelPort, localPort, listenPort, sshuser, tunnelPassword, verbose, )\n else:\n if verbose:\n print(W + \"[!] Config: Skipping UDP tunnel\" + W)\n\n if status is False:\n if config.getboolean('TUNNEL','ICMP') is True:\n if check_icmp():\n if verbose:\n print(G + \"[+] ICMP is enabled\" + W)\n tunnelType = 'icmp'\n tunnelPort = 4000\n listenPort = 8855\n status = setupNonTCPTunnel(status, callbackIP, nameserver, tunnelIP, tunnelType, tunnelPort, localPort,\n listenPort, sshuser, tunnelPassword, verbose, )\n else:\n print(\n R + \"[x] Can't attempt {0} Tunnel, {0} is disabled\\n\".format(tunnelType) + W)\n status = False\n else:\n if verbose:\n print(W + \"[!] Config: Skipping ICMP tunnel\" + W)\n\n return tunnelIP, localPort, tunnelType, status\n\n\ndef setupNonTCPTunnel(status, callbackIP, nameserver, tunnelIP, tunnelType, tunnelPort, localPort, listenPort, sshuser, tunnelPassword, verbose, ):\n if not status:\n print(\n Y + \"[*] Trying a Udp2raw-tunnel using {0}.\".format(tunnelType) + W)\n if udp2rawTunnel(callbackIP, tunnelIP, tunnelType, tunnelPort, localPort, listenPort, tunnelPassword, verbose, ):\n if checkTunnel(tunnelIP, tunnelPort):\n print(\n G + \"[+] A Udp2raw-tunnel {0} tunnel can be setup!\".format(tunnelType) + W)\n print(\n B + \"[-] An {0} Tunnel is not as fast as a TCP Tunnel\".format(tunnelType) + W)\n successMessage(tunnelIP, tunnelPort, sshuser)\n status = True\n else:\n print(\n R + \"[x] {0} Enabled but unable to create {0} Tunnel\".format(tunnelType) + W)\n status = False\n else:\n print(\n R + \"[x] {0} Enabled but unable to create {0} Tunnel\".format(tunnelType) + W)\n status = False\n\n return status\n\n\ndef writeFile(fileName, timeStamp, ethernetUp, usedGatewayWifi, successfulConnection):\n if successfulConnection:\n subprocess.check_output('rm /opt/breakout/logs/tunnels.txt > /dev/null 2>&1', shell=True,\n stderr=subprocess.STDOUT)\n with open(fileName, 'a') as file:\n file.write(\"{0} Ethernet_Up={1} Tried_WiFi_Gateway={2} Successful_Connection={3} \\n\".format(\n timeStamp, ethernetUp, usedGatewayWifi, successfulConnection,))\n\n\ndef defaultRoute(interface):\n try:\n # Will attempt to stay connected to Wifi\n gateway = subprocess.check_output(\n 'cat /var/lib/dhcp/dhclient.leases | awk \\'/{0}/,/routers/\\' | grep -o -P \\'(?<=routers ).*(?=;)\\' | uniq'.format(interface,), shell=True, stderr=subprocess.STDOUT)\n # Cleanup String\n gateway = gateway.rsplit()[0]\n # Delete old route\n subprocess.check_output(\n \"route del -net 0.0.0.0 netmask 0.0.0.0 gw {0} dev {1} > /dev/null 2>&1\".format(\n gateway, interface, ),\n shell=True, stderr=subprocess.STDOUT)\n\n print(W + \"[!] 
Set default route to connect to the internet on interface {0} via gateway {1}\".format(\n interface, gateway, ) + W)\n subprocess.check_output(\n \"ip route add default via {0} dev {1} > /dev/null 2>&1\".format(gateway, interface, ), shell=True,\n stderr=subprocess.STDOUT)\n return True\n except:\n print(\n R + \"[x] No DHCP information found for interface {0}.\" + W).format(interface,)\n\n\ndef is_interface_up(interface, verbose):\n if \"down\" in subprocess.check_output('cat /sys/class/net/{0}/operstate'.format(interface), shell='True').decode('utf-8'):\n return False\n else:\n return True\n\n\ndef setupGateways(ethernetInterface, ethernetUp, gatewayWifi, successfulConnection, timeout, ):\n # Create file if not exist\n open(\"/opt/breakout/logs/tunnels.txt\", \"a\")\n # gatewayWifi = defaultRoute(ethernetInterface)\n totalAttempts = subprocess.check_output('awk -v d1=\"$(date -d@\"$(( $(date +%s)-{0}))\" \"+%b %_d %H:%M\")\" -v d2=\"$(date \"+%b %_d %H:%M\")\" \\'$0 > d1 && $0 < d2 || $0 ~ d2\\' /opt/breakout/logs/tunnels.txt | grep Successful_Connection=False | wc -l'.format(timeout),shell=True, stderr=subprocess.STDOUT).decode('utf-8').rstrip().lstrip()\n if ethernetUp and int(totalAttempts) > 20:\n print(R + \"[!] Unable to tunnel resetting routing tables and rebooting\" + W)\n\n # Clear routing table and applying default settings.\n subprocess.check_output(\"rm /opt/breakout/logs/tunnels.txt > /dev/null 2>&1'\", shell=True,\n stderr=subprocess.STDOUT)\n subprocess.check_output(\"rm /opt/breakout/lib/checkSSH.sh > /dev/null 2>&1\", shell=True,\n stderr=subprocess.STDOUT)\n subprocess.check_output(\n \"rm /etc/motd > /dev/null 2>&1\", shell=True, stderr=subprocess.STDOUT)\n subprocess.check_output(\"ip route flush table main && udhcpc -i {0}\".format(\n ethernetInterface), shell=True, stderr=subprocess.STDOUT)\n\n elif ethernetUp and totalAttempts and int(totalAttempts) > 5:\n gatewayWifi = True\n print(R + \"[!] 
Unable to tunnel out using current default routes\" + W)\n interfaces = getInterfaces()\n for wirelessInterface in interfaces:\n print(\n B + \"[-] Trying to route internet traffic via interface {0}\".format(wirelessInterface,) + W)\n # Reset the default interface\n subprocess.check_output(\"ifconfig {0} down\".format(\n ethernetInterface), shell=True, stderr=subprocess.STDOUT)\n time.sleep(10)\n\n writeFile('/opt/breakout/logs/tunnels.txt', time.strftime(\"%b %-d %H:%M:%S\"), ethernetUp, gatewayWifi,\n successfulConnection)\n\n\ndef setupAutoTunnel(checkSSHLOC, gatewayWifi, sshuser, tunnelIP, tunnelPort, tunnelType, ):\n shutil.copy('/opt/breakout/lib/checkSSH.bak', checkSSHLOC)\n replaceText(checkSSHLOC, 'SET_IP', tunnelIP)\n replaceText(checkSSHLOC, 'SET_PORT', tunnelPort)\n replaceText(checkSSHLOC, 'SET_USER', sshuser)\n replaceText(checkSSHLOC, 'TUNNEL_TYPE', tunnelType)\n replaceText(checkSSHLOC, 'GATEWAY_WIFI', gatewayWifi)\n print(G + \"[+] Setup remote tunnel configuration file\" + W)\n\n\ndef checkInterfaces(currentSSID, verbose):\n ethernetUp = True\n wirelessUp = False\n\n interface_list = netifaces.interfaces()\n # Get Ethernet Interfaces\n for interface in interface_list:\n if interface.startswith('e'):\n ethernetUp = is_interface_up(interface, verbose)\n ethernetInterface = interface\n\n if \"NOT CONNECTED\" not in currentSSID and interface.startswith('w'):\n wirelessUp = is_interface_up(interface, verbose)\n\n return ethernetUp, ethernetInterface, wirelessUp\n\n\ndef currentSSHTunnel(checkSSHLOC, config, isPi, ethernetUp, gatewayWifi, successfulConnection,verbose ):\n tunnelOpen = True\n checkForTunnel = config.getboolean('TUNNEL','CHECKEXISTING')\n\n if checkForTunnel is False:\n tunnelOpen = False\n if verbose:\n print(W + \"[!] Config: Skipping checking existing tunnel\" + W)\n elif os.path.isfile(checkSSHLOC):\n # Extract callback IP\n with open(checkSSHLOC, 'r') as file:\n for line in file:\n if \"callbackIP=\" in line:\n callbackSSHIP = line.split('=')[1]\n callbackSSHIP = line[12:-2]\n if \"callbackPort=\" in line:\n callbackSSHPort = line.split('=')[1]\n\n if checkSSHStatus(callbackSSHIP, callbackSSHPort):\n print(\n G + \"[+] Tunnel already open and working on {0}\".format(callbackSSHIP) + W)\n\n if isPi:\n # Make the power LED Flash to show the connection is active to C&C\n subprocess.check_output(\"sh -c 'echo timer >/sys/class/leds/led1/trigger'\", shell=True,\n stderr=subprocess.STDOUT)\n\n successfulConnection = True\n writeFile('/opt/breakout/logs/tunnels.txt', time.strftime(\"%b %-d %H:%M:%S\"), ethernetUp, gatewayWifi,\n successfulConnection)\n else:\n tunnelOpen = False\n else:\n tunnelOpen = False\n\n return tunnelOpen\n\ndef checkSSH(checkSSHLOC):\n process = subprocess.Popen(\"rc-status --crashed\".split(),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if \"sshd\" in str(process.communicate()):\n print(R + \"[!] 
SSH crashed!\" + W)\n subprocess.Popen(\"rc-service sshd stop && rc-service sshd start\".split(), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n subprocess.Popen(\"bash {0}\".format(checkSSHLOC).split(\n ), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(Y + \"[*] Setting up SSH\" + W)\n time.sleep(10)\n\ndef quickScan(callbackPort,callbackIP,config, sshuser, verbose):\n status = False\n quickScanStatus = config.getboolean('SCAN','QUICK')\n if quickScanStatus is True:\n if openPort(callbackPort, callbackIP) and checkTunnel(callbackIP, callbackPort):\n if verbose:\n print(\n Y + \"[*] Quick check if port {0} is accessible.\".format(callbackPort) + W)\n print(G + \"[+] SSH tunnel possible!\" + W)\n successMessage(callbackIP, callbackPort, sshuser)\n status = True\n else:\n if verbose:\n print(\n R + \"[!] Quick check failed, Port {0} not accessible.\".format(callbackPort) + W)\n else:\n if verbose:\n print(W + \"[!] Config: Skipping Quick Scan\" + W)\n \n return status\n\ndef initialiseTunnel(aggressive, callbackIP, config, currentSSID, tunnelPassword, isPi, nameserver, sshuser, tunnel, verbose,):\n checkSSHLOC = '/opt/breakout/lib/checkSSH.sh'\n successfulConnection = False\n #30 Mins\n timeout = '1800'\n\n ethernetUp, ethernetInterface, wirelessUp = checkInterfaces(\n currentSSID, verbose)\n checkSSH(checkSSHLOC)\n\n if ethernetUp == False and wirelessUp == False:\n print(R + \"[!] No Interface is up.\" + W)\n if isPi:\n # Reset Heartbeat\n subprocess.check_output(\n \"sh -c 'echo input >/sys/class/leds/led1/trigger'\", shell=True, stderr=subprocess.STDOUT)\n quit()\n\n # Check if Gateway is set\n try:\n if os.path.isfile('/opt/breakout/logs/tunnels.txt'):\n gatewayWifi = subprocess.check_output(\n 'tail -n 1 /opt/breakout/logs/tunnels.txt | awk \\'{print $5}\\' | cut -f2 -d\\'=\\'', shell=True, stderr=subprocess.STDOUT).rstrip().decode('utf-8')\n\n else:\n gatewayWifi = False\n except:\n gatewayWifi = False\n\n if currentSSHTunnel(checkSSHLOC, config, isPi, ethernetUp, gatewayWifi, successfulConnection, verbose) is False:\n # Check which Gateway to use Ethernet or WiFi\n setupGateways(ethernetInterface, ethernetUp, gatewayWifi,\n successfulConnection, timeout, )\n\n if isPi:\n # Reset Heartbeat\n subprocess.check_output(\n \"sh -c 'echo input >/sys/class/leds/led1/trigger'\", shell=True, stderr=subprocess.STDOUT)\n\n # Kill all open SSH\n command = \"killall ssh > /dev/null 2>&1\"\n subprocess.Popen(command.split(), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n callbackPort = config.get('SCAN','CALLBACKPORT')\n \n tunnelIP = callbackIP\n tunnelPort = callbackPort\n tunnelType = 'Open Port'\n\n tunnelStatus = quickScan(callbackPort,callbackIP,config,sshuser,verbose) \n if tunnelStatus is False:\n check_ports(aggressive, config, verbose, )\n\n if all(v is None for v in [callbackIP, nameserver]):\n print(\n Y + \"[*] Unable to create tunnel as no nameserver or callback IP was provided.\" + W)\n else:\n tunnelIP, tunnelPort, tunnelType, tunnelStatus = callbackTCP(callbackIP, config, sshuser, tunnelPassword, nameserver, verbose, )\n if tunnelStatus is False:\n tunnelIP, tunnelPort, tunnelType, tunnelStatus = callbackNonTCP(\n callbackIP, config, sshuser, tunnelPassword, nameserver, verbose, )\n\n if tunnelStatus is False:\n print(R + '[!] 
Tunnel not possible, as no possible tunnels to the callback server could be found' + W)\n pass\n elif tunnel is True:\n setupAutoTunnel(checkSSHLOC, gatewayWifi, sshuser,\n tunnelIP, tunnelPort, tunnelType, )\n attemptSSHTunnel = subprocess.check_output(\n 'bash {0}'.format(checkSSHLOC), shell=True).decode('utf-8')\n # Allow time for tunnel to start over low latancy.\n waitTime = config.getint('TUNNEL','WAITTIME')\n print(\n Y + \"[*] Waiting {0} seconds for tunnel to start\".format(waitTime) + W)\n time.sleep(waitTime)\n print(G + \"{0}\".format(attemptSSHTunnel) + W)","repo_name":"robinlennox/breakout","sub_path":"lib/CreateTunnel.py","file_name":"CreateTunnel.py","file_ext":"py","file_size_in_byte":20321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28082637545","text":"# useful references\n# http://effbot.org/tkinterbook/entry.htm\n# http://effbot.org/tkinterbook/grid.htm <--- grid manager\n# http://effbot.org/zone/tkinter-callbacks.htm <-- passing argument to tk button\n# http://stackoverflow.com/questions/550050/removing-the-tk-icon-on-python-tkinter-windows\n# http://stackoverflow.com/questions/17005961/displaying-images-with-tkinter\n# http://code.activestate.com/recipes/438123-file-tkinter-dialogs/\n\n\nimport os\nimport sys\nimport tkinter as tk\nimport tkinter.filedialog\nimport tkinter.font\nfrom PIL import Image, ImageTk\nimport shutil\nimport csv\n\n\ndef createInputTextBox(parent, inputDir):\n global entryBoxInputText\n global strInputDir\n strInputDir = tk.StringVar()\n entryBoxInputText = tk.Entry(parent, textvariable=strInputDir, font=customFont, width=20)\n #entryBoxInputText.see(tk.END) #want to show rightmost, w/o scroll bar (justify=tk.RIGHT does not affect display)\n strInputDir.set(inputDir)\n\n\ndef buttonSetInputTextBox():\n newInputDir = dirSelect( strInputDir.get() )\n #print newInputDir\n if newInputDir != None:\n strInputDir.set(newInputDir)\n\n\ndef createOutputTextBox(parent, outputDir):\n global entryBoxOutputText\n global strOutputDir\n strOutputDir = tk.StringVar()\n entryBoxOutputText = tk.Entry(parent, textvariable=strOutputDir, font=customFont, width=20)\n strOutputDir.set(outputDir)\n\n\ndef buttonSetOutputTextBox():\n newOutputDir = dirSelect( strOutputDir.get() )\n #print newOutputDir\n if newOutputDir != None:\n strOutputDir.set(newOutputDir)\n\n\ndef dirSelect(startDirectory):\n #root.withdraw() #use to hide tkinter window\n dirname = None\n #tempdir = tkFileDialog.askdirectory(parent=root,initialdir=\"/\",title='Please select a directory')\n tempdir = tkinter.filedialog.askdirectory(parent=root, initialdir=startDirectory, title='Please select a directory')\n if len(tempdir) > 0:\n #print \"You chose %s\" % tempdir\n dirname = tempdir\n return dirname\n\n\ndef setRunParameters(makeWallet):\n #print 'set run parameters'\n\n makeWallet.set_inputdir( strInputDir.get() )\n makeWallet.set_outputdir( strOutputDir.get() )\n\n wh_tile = list(map(int, [grid_w.get(), grid_h.get()] ))\n makeWallet.set_wh_tilePattern( wh_tile )\n\n wh_resolution = list(map(int, [res_w.get(), res_h.get()] ))\n makeWallet.set_long_short_print_px( wh_resolution )\n\n# hw_paperDims = makeWallet.get_dictPaperSizes()[ paperSizeString.get() ]\n# hw_paperDims = map(float, hw_paperDims)\n# makeWallet.set_hw_paperDims( hw_paperDims )\n makeWallet.set_hw_paperDims( paperSizeString.get() )\n\n makeWallet.set_applyCrop( convertToBoolean(varApplyCrop.get()) )\n makeWallet.set_resizeForPrinting( bool(varResizePrint.get()) 
)\n\n\ndef convertToBoolean(strText):\n if strText.upper() == 'TRUE':\n return True\n elif strText.upper() == 'FALSE':\n return False\n else:\n return strText.upper()\n\n\ndef runMakeWallets(makeWallet): #call from GUI; no arguments\n\n setRunParameters(makeWallet)\n\n inputdir = makeWallet.get_inputdir()\n outputdir = makeWallet.get_outputdir()\n\n #files = os.listdir(imgdir + 'originalPictures/')\n files = os.listdir(inputdir)\n\n #process each jpg in directory\n for fname in files:\n if fname.lower().endswith('.jpg'):\n print('processing: ' + fname)\n writeToLog('processing: ' + fname)\n makeWallet.processImage(inputdir, fname, outputdir, 'wallet-')\n\n if bool(varSaveParameters.get()) == True:\n print('update inputs')\n updateRunParameters( makeWallet.get_paramFile() , makeWallet)\n\n print('done')\n writeToLog('done')\n\n\ndef quit():\n #root.quit()\n root.destroy()\n\n\ndef module_path():\n #http://stackoverflow.com/questions/729583/getting-file-path-of-imported-module\n #currentDirectory = os.path.dirname( inspect.getsourcefile(local_function) )\n\n #http://stackoverflow.com/questions/17398426/error-when-compiling-shitil-with-pyinstaller\n #currentDirectory = os.path.dirname( sys.argv[0] ) # needed for .exe approach.\n\n #http://stackoverflow.com/questions/2292703/how-can-i-get-the-executables-current-directory-in-py2exe\n #currentDirectory = os.path.realpath( os.path.dirname(sys.argv[0]) )\n #print currentDirectory\n\n\n # determine if application is a script file or frozen exe\n # http://stackoverflow.com/questions/404744/determining-application-path-in-a-python-exe-generated-by-pyinstaller\n if getattr(sys, 'frozen', False):\n currentDirectory = os.path.dirname(sys.executable)\n #print 'frozen: ' + currentDirectory\n elif __file__:\n currentDirectory = os.path.dirname(__file__)\n #print 'file: ' + currentDirectory\n\n return currentDirectory\n\n\ndef preview(makeWallet):\n\n setRunParameters(makeWallet)\n\n # override resolution for preview\n orig_wh_resolution = makeWallet.get_long_short_print_px()\n makeWallet.set_long_short_print_px( previewRes )\n\n fname = 'preview.jpg'\n #print 'processing: ' + fname\n inputdir = os.path.join(module_path(), paramDir() )\n outputdir = inputdir\n\n makeWallet.processImage(inputdir, fname, outputdir, 'wallet-')\n\n # restore original resolution\n makeWallet.set_long_short_print_px( orig_wh_resolution )\n\n newimg = Image.open( os.path.join(inputdir,'wallet-preview.jpg') )\n newimg.thumbnail(previewRes, Image.ANTIALIAS) #operates on original; maintains aspect ratio; only shrinks image\n im = ImageTk.PhotoImage(newimg) # Keep a reference, prevent garbage collector\n\n display.configure(image=im)\n display.image = im\n\n\ndef writeToLog(msg):\n # http://www.tkdocs.com/tutorial/text.html\n numlines = outputLog.index('end - 1 line').split('.')[0]\n outputLog['state'] = 'normal'\n # *deletes* data if it exceeds outputLogMaxLines\n# if int(numlines)==outputLogMaxLines:\n# outputLog.delete(1.0, 2.0)\n if outputLog.index('end-1c')!='1.0':\n outputLog.insert('end', '\\n')\n outputLog.insert('end', msg)\n outputLog.see(tk.END) #instead of delete, just keep scrolling to end\n outputLog.update_idletasks() #forces refresh\n outputLog['state'] = 'disabled'\n\n\ndef updateRunParameters(filename, makeWallet):\n currPath = os.path.join(module_path(), paramDir())\n\n # backup current input parameters\n # http://stackoverflow.com/questions/6996603/how-do-i-delete-a-file-or-folder-in-python\n try:\n # if a previous backup exists, delete it\n os.remove( 
os.path.join(currPath, filename.replace('.csv', '.bak')) )\n except:\n pass\n\n # copy current input file to a \".bak\" version\n inputFilePath = os.path.join(currPath,filename)\n shutil.copy( inputFilePath, inputFilePath.replace('.csv', '.bak') )\n\n # open input file\n f = open( os.path.join(currPath, filename), 'w')\n writer = csv.writer(f, delimiter=',', quotechar='\"')\n\n # output current values to the input file\n writer.writerow(['applyCrop', str(makeWallet.get_applyCrop()) ])\n writer.writerow(['resizeForPrinting', str(makeWallet.get_resizeForPrinting()) ])\n writer.writerow(['hw_paperDims', str(makeWallet.get_hw_paperDims()) ])\n writer.writerow(['wh_tilePattern'] + makeWallet.get_wh_tilePattern() )\n writer.writerow(['long_short_print_px'] + makeWallet.get_long_short_print_px() )\n writer.writerow(['inputdir', str(makeWallet.get_inputdir()) ])\n writer.writerow(['outputdir', str(makeWallet.get_outputdir()) ])\n\n # exit\n f.close()\n\n\ndef buildGUI(makeWallet):\n global root\n root = tk.Tk()\n #root.geometry(\"300x200+300+300\")\n #root.iconbitmap(default='transparent.ico')\n root.title(\"makeWallets\")\n root.wm_resizable(0,0) #makes window NOT resizeable\n\n global customFont\n customFont = tkinter.font.Font(family=\"Helvetica\", size=9)\n\n global buttonFont\n buttonFont = tkinter.font.Font(family=\"Helvetica\", size=6)\n\n #section to select file input / output directories\n inputDir = makeWallet.get_inputdir()\n outputDir = makeWallet.get_outputdir()\n\n tk.Label(root, text=\"input\", font=customFont).grid(row=0,column=0,sticky=tk.E)\n createInputTextBox(root, inputDir)\n entryBoxInputText.grid(row=0, column=1, columnspan=2, sticky=tk.E+tk.W)\n buttonSetInputDirectory = tk.Button(root, text=\"...\", font=buttonFont, height=1, command=buttonSetInputTextBox ) #cannot pass arguments\n buttonSetInputDirectory.grid(row=0, column=3)\n\n tk.Label(root, text=\"output\", font=customFont).grid(row=1,column=0,sticky=tk.E)\n createOutputTextBox(root, outputDir)\n entryBoxOutputText.grid(row=1, column=1, columnspan=2, sticky=tk.E+tk.W)\n buttonSetOutputDirectory = tk.Button(root, text=\"...\", font=buttonFont, height=1, command=buttonSetOutputTextBox ) #cannot pass arguments\n buttonSetOutputDirectory.grid(row=1, column=3)\n\n #section to specify run parameters\n tk.Label(root, text='grid pattern', font=customFont).grid(row=2,column=0,columnspan=2)\n global grid_w\n global grid_h\n grid_w = tk.Entry(root, width=4, font=customFont)\n grid_w.grid(row=3,column=0)\n grid_w.insert(0, makeWallet.get_wh_tilePattern()[0]) #initialize\n grid_h = tk.Entry(root, width=4, font=customFont)\n grid_h.grid(row=4,column=0)\n grid_h.insert(0, makeWallet.get_wh_tilePattern()[1]) #initialize\n tk.Label(root, text='w', font=customFont).grid(row=3,column=1,sticky=tk.W)\n tk.Label(root, text='h', font=customFont).grid(row=4,column=1,sticky=tk.W)\n\n tk.Label(root, text='resolution', font=customFont).grid(row=2,column=2,columnspan=2)\n global res_w\n global res_h\n res_w = tk.Entry(root, width=4, font=customFont)\n res_w.grid(row=3,column=2)\n res_w.insert(0, makeWallet.get_long_short_print_px()[0]) #initialize\n res_h = tk.Entry(root, width=4, font=customFont)\n res_h.grid(row=4,column=2)\n res_h.insert(0, makeWallet.get_long_short_print_px()[1]) #initialize\n tk.Label(root, text='w', font=customFont).grid(row=3,column=3,sticky=tk.W)\n tk.Label(root, text='h', font=customFont).grid(row=4,column=3,sticky=tk.W)\n\n #print size drop down menu\n tk.Label(root, text='print size', 
font=customFont).grid(row=5,column=0,columnspan=2,sticky=tk.E)\n\n paperSizeOptions = list(makeWallet.get_dictPaperSizes().keys())\n\n\n global paperSizeString\n paperSizeString = tk.StringVar(root)\n paperSizeString.set( makeWallet.get_hw_paperDims() ) # default value (NOTE: dictionary key order is not guaranteed / defined)\n\n paperSize = tk.OptionMenu(*(root, paperSizeString) + tuple(paperSizeOptions))\n paperSize.config(font=customFont,width=6)\n paperSize.grid(row=5,column=2,columnspan=2,sticky=tk.W)\n\n #apply crop (convert to drop down - need \"SQUARE\" option - not Boolean\n tk.Label(root, text='crop image', font=customFont).grid(row=6,column=0,columnspan=2,sticky=tk.E)\n global varApplyCrop #make global to keep ref; else garbage collect; check does not display\n varApplyCrop = tk.StringVar(root)\n varApplyCrop.set( str(makeWallet.get_applyCrop()) )\n optApplyCrop = tk.OptionMenu(root, varApplyCrop, \"True\", \"False\", \"Square\")\n optApplyCrop.config(font=customFont,width=6)\n optApplyCrop.grid(row=6,column=2,columnspan=2,sticky=tk.W)\n\n global varResizePrint\n varResizePrint = tk.IntVar(root)\n varResizePrint.set(makeWallet.get_resizeForPrinting())\n chkResizePrint = tk.Checkbutton(root, text=\"resize for print\", font=customFont, variable=varResizePrint)\n chkResizePrint.grid(row=7,column=0,columnspan=2,sticky=tk.W)\n\n global varSaveParameters\n varSaveParameters = tk.IntVar(root)\n varSaveParameters.set(0) #defaults 'OFF'\n chkResizePrint = tk.Checkbutton(root, text=\"save inputs\", font=customFont, variable=varSaveParameters)\n chkResizePrint.grid(row=7,column=2,columnspan=2,sticky=tk.E)\n\n #section for output message display\n # http://www.tkdocs.com/tutorial/text.html\n global outputLog\n global outputLogMaxLines\n outputLogMaxLines = 4\n outputLog = tk.Text(root, font=customFont, height=outputLogMaxLines, wrap='none', pady=5)\n outputLog.grid(row=8,column=0,columnspan=6)\n\n #section to display preview image\n original = Image.open( os.path.join(paramDir() ,'preview.jpg') )\n global previewRes\n previewRes = 450,300 #w,h\n original.thumbnail(previewRes, Image.ANTIALIAS) #operates on original; maintains aspect ratio; only shrinks image\n\n global im\n im = ImageTk.PhotoImage(original) # Keep a reference, prevent garbage collector\n\n global display\n display = tk.Label(root, width=previewRes[0], height=previewRes[1], image=im)\n display.grid(row=0, column=4, columnspan=2, rowspan=6, pady=5, padx=5)\n\n #section for run / cancel buttons\n buttonGO = tk.Button(root, text=\"GO\", width=10, font=customFont, command=lambda: runMakeWallets(makeWallet) ) #cannot pass arguments\n buttonGO.grid(row=7, column=4, sticky=tk.E, pady=5)\n\n buttonCancel = tk.Button(root, text=\"close\", width=10, font=customFont, command=quit ) #cannot pass arguments\n buttonCancel.grid(row=7, column=5, sticky=tk.W)\n\n buttonPreview = tk.Button(root, text=\"preview\", width=10, font=customFont, command=lambda: preview(makeWallet) ) #cannot pass arguments\n buttonPreview.grid(row=6, column=4, columnspan=2)\n\n\ndef paramDir():\n # name of directory holding gui parameters / inputs / preview image\n return 'inp'\n\n\ndef runGUI(makeWallet):\n buildGUI(makeWallet)\n preview(makeWallet) #initialize preview screen\n root.mainloop() # Start the event loop\n\n\nif __name__ == \"__main__\":\n print('launch this from 
makeWallets.py')\n\n","repo_name":"beRto-/make_wallet_prints","sub_path":"makeWalletsGUI.py","file_name":"makeWalletsGUI.py","file_ext":"py","file_size_in_byte":13558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32011915240","text":"#This is my Model. This is the Data. These are the methods that act direct on the data.\r\n#There will be three classes. \r\n# the will be a Log7to8, which keeps track of what the program is doing\r\n# there will be a NetworkObject class, which is a Hostname, IP, \r\n# and a list of Usernames with privledge level, password7, plaintext, secret8\r\n#There will be a NetworkObjectGroup- do i need a class for this? it is really a list of\r\n# network Objects and file operations, maybe sorting.\r\n#\r\nimport datetime\r\nimport os.path\r\n\r\n#from convert7to8PKG.cisco7decrypt import decode\r\n\r\ndef changeDict(self, tempDict, keystring=\"\", valuestring=\"\"):\r\n \r\n #if keystring is not blank, load keys in loadDict\r\n if keystring:\r\n keystring = keystring.upper()\r\n keystring = keystring.strip()\r\n keystring = keystring.replace(\" \",\"\") #remove internal spaces\r\n #add values to the Dictionary\r\n keylist=keystring.split(',')\r\n \r\n for key in keylist:\r\n tempDict[key] = \"\"\r\n \r\n \r\n #if valuestring is not blank, load values in tempDict\r\n if valuestring:\r\n valuestring = valuestring.upper()\r\n valuestring = valuestring.strip()\r\n valuestring = valuestring.replace(\" \",\"\") #remove internal spaces\r\n #add values to the Dictionary\r\n valuelist=valuestring.split(',')\r\n index = 0\r\n\r\n for key in tempDict:\r\n tempDict[key]= valuelist[index]\r\n index += 1\r\n #end for loop through dict keys \r\n\r\n\r\n\r\nclass NetworkObject:\r\n \"\"\"Network object will have 'HOSTNAME','IPADDRESS','LOG','VERBOSE','ORIGUSERNAME','TESTUSERNAME', \\\r\n 'PASSWORD7','PLAINTEXT','SECRET8','CHANGE','VERIFIED','NOTES-AND-ERRORS'\r\n \"\"\" \r\n\r\n def __init__ (self, netobjDict):\r\n \"\"\"Network Object is the basic data object for this program. Hostname should be unique in a network.\r\n Hostname is manditory. IP is manditory. A hostname can have multiple IP's, but an IP can only \r\n be assigned to one hostname. Log and verbose are optional. Login username and password for the \r\n device (IP) are handled by the main program and never saved to a file. 
A network object might not \r\n have any password 7's, in which case \"No Password 7\" will be in the notes field.\"\"\"\r\n \r\n self.netobjDict=netobjDict\r\n #print(\"IP Address is \",self.netobjDict['IPADDRESS'])\r\n #print(\"Verbose messages is \", self.netobjDict['VERBOSE'])\r\n #print(\"Log messages is \", self.netobjDict['LOG'])\r\n #print(\"Username is \", self.netobjDict['ORIGUSERNAME'])\r\n\r\n def checkUsername(self, username):\r\n pass\r\n \r\n def addUsername(self, username):\r\n pass\r\n\r\n def prepareUsernameCommand(self):\r\n \r\n self.netobjDict['TESTUSERNAME'] = self.netobjDict['ORIGUSERNAME'] + '_TEST'\r\n self.usernamecommand = 'username {0} algorithm-type sha256 secret {1}' \\\r\n .format(self.netobjDict['ORIGUSERNAME'],self.netobjDict['PLAINTEXT'] )\r\n self.testusernamecommand = 'username {0} priv 15 algorithm-type sha256 secret {1}' \\\r\n .format(self.netobjDict['TESTUSERNAME'],self.netobjDict['PLAINTEXT'] )\r\n for key in self.netobjDict:\r\n print(\"key \", key, \"value \", self.netobjDict[key])\r\n \r\n print(self.testusernamecommand, \" \\n to be used on testrouter\")\r\n # return(testusernamecommand)\r\n return(self.testusernamecommand)\r\n # username newuser privilege 15 algorithm-type sha256 secret plaintext \r\n # username {} priv 15 algorithm-type sha256 secret {} \r\n\r\n\r\n\r\n\r\n def showPass7(self):\r\n return(self.netobjDict['PASSWORD7'])\r\n\r\n def setPlaintext(self, plaintext): \r\n self.netobjDict['PLAINTEXT'] = plaintext\r\n\r\n def showPlaintext(self):\r\n return(self.netobjDict['PLAINTEXT'])\r\n\r\n\r\nclass NetworkObjectGroup:\r\n \"\"\"main instantiates Network Object Group Object (netobjgroup)\r\n input needs initial dictionary, from InitializeModel (passed through main)\r\n Create list with up to max_elements from filename. Find length of file.\r\n create pointer for list, create pointer for file if longer than max_elements rows\r\n load next max_elements rows if needed\r\n keep track of updates to plaintext, secret 8, notes, etc.\r\n update log/verbose if selected.\r\n \"\"\"\r\n max_elements = 1024\r\n\r\n def __init__(self, initDict):\r\n \"\"\"\r\n initial dictionary from main(this is objDict from init.model). \r\n create list of up to max_elements rows from filename. create pointer for list\r\n return dict with new row data.\r\n \"\"\"\r\n\r\n self.netObjCount = 0\r\n self.ipindex=0\r\n self.iplist = initDict['IPADDRESS']\r\n self.netObjList=[]\r\n\r\n \r\n # print(\"Network Object will be \", \"netobj_\" + self.netObjCount)\r\n\r\n #now i need to ssh into ip address and find out how many usernames are there.\r\n\r\n def createNetObjs(self,netObjDict):\r\n newNetObj = NetworkObject(netObjDict)\r\n \r\n testuserstring = newNetObj.prepareUsernameCommand()\r\n self.netObjList.append(newNetObj)\r\n print(\"length of NetObjList is \", len(self.netObjList))\r\n return(testuserstring)\r\n\r\n \r\n def showNetObjs(self): \r\n \r\n for netobj in self.netObjList:\r\n print(netobj.netobjDict['ORIGUSERNAME'])\r\n \r\n\r\n\r\n \r\n\r\nclass Logmessages():\r\n \"\"\"\r\n timestamp = datetime()\r\n timestamp = datetime.now()\r\n print (\"timestamp is \", timestamp.now())\r\n \"\"\"\r\n pass\r\n \r\n\r\n\r\nclass InitializeModel():\r\n \"\"\" This class sets up the model. 
\r\n Other Classes in the Model are Logmessages, Network Object, and Network Object Group\r\n \"\"\"\r\n \r\n\r\n def __init__(self, cliDict):\r\n \"\"\"init prepares the default dict and details file\"\"\"\r\n\r\n print(\"initializing Model\")\r\n #self.filename = filename\r\n #print(\"filename is \",self.filename)\r\n self.cliDict=cliDict\r\n self.objdict = {'HOSTNAME':'','IPADDRESS':'','LOG':'','VERBOSE':'','ORIGUSERNAME':'','TESTUSERNAME':'', \\\r\n 'PASSWORD7':'','PLAINTEXT':'','SECRET8':'','CHANGE':'','VERIFIED':'','NOTES-AND-ERRORS':''} \r\n #testing\r\n #self.path = 'e:/dougsprogs/convert7to8/convert728/'\r\n \r\n\r\n ##Main checks to see if Filename is blank\r\n #if filename :#filename is not blank.\r\n #self.checkFilename()\r\n #if filename is blank, create the default dict\r\n #else: #filename is blank \"\"\r\n self.loadDictRow()\r\n #loadDictValue(key=\"IPADDRESS\", value=str(ipaddress))\r\n #now check to create the default empty file\r\n #checkFilename()\r\n \r\n \r\n def loadDictRow(self, valuestring=\"\"):\r\n \r\n \r\n #if valuestring is not blank, load values in objDict\r\n if valuestring:\r\n valuestring = valuestring.upper()\r\n valuestring = valuestring.strip()\r\n valuestring = valuestring.replace(\" \",\"\") #remove internal spaces\r\n valuestring=valuestring.split(',')\r\n #add values to the Dictionary\r\n \r\n index = 0\r\n\r\n for key in self.objdict:\r\n \r\n self.objdict[key]= valuestring[index]\r\n index += 1\r\n #end for loop through dict keys\r\n else: \r\n for key in self.objdict:\r\n if key in self.cliDict:\r\n self.objdict[key] = self.cliDict[key]\r\n print(\"assigned key/value \", key, \" \", self.objdict[key])\r\n \r\n def getHeaderDict(self):\r\n \"\"\"\r\n Make sure headers in filenname are correct. create a dict to pass back to main.\r\n \"\"\"\r\n #put the headers into a dict\r\n \r\n print(\"opening \",self.filename)\r\n with open(self.filename, 'r') as readfile:\r\n headers = readfile.readline()\r\n firstrow = readfile.readline()\r\n if not firstrow:\r\n print(\"first line after headers is blank\")\r\n self.loadDictRow(keystring=headers)\r\n else: #assume first row after headers is test router\r\n print(\"load test router row\") \r\n self.loadDictRow(keystring = headers, valuestring = firstrow) \r\n \r\n # check for headers\r\n miscount=0\r\n for key in self.dataheader:\r\n if not key in self.objdict:\r\n print(\"missing key !\", key)\r\n miscount += 1\r\n\r\n if miscount == 0:\r\n print(\"all Columns found. Thank you.\")\r\n # elif (miscount == 11) and (\"IPADDRESS\" in ):\r\n # print(\"Found IP Address column. 
program will add additional columns\")\r\n elif miscount > 11:\r\n print(\"Could not locate Header Row\")\r\n elif miscount > 0:\r\n print(\"some columns missing, will add additional columns\")\r\n \r\n \r\n #end file check on filename \r\n\r\n def loadMaxIPlist(self, filename):\r\n \"\"\"this will load up to first 500 ip's into dict-ip address list and give a count of number\r\n of ip addresses in list\"\"\"\r\n #I need to put this in a try/catch block later \r\n \r\n maxIPlist=10\r\n linecount=0 \r\n iplist=[]\r\n with open(filename, 'r') as infile:\r\n element = infile.readline()\r\n while element:\r\n \r\n linecount +=1\r\n if linecount < maxIPlist:\r\n iplist.append(element)\r\n element = infile.readline()\r\n \r\n self.objdict['IPADDRESS']=iplist\r\n print(\"Loaded \", linecount, \" ip addresses\")\r\n\r\n return(linecount) \r\n\r\n def checkFilename(self):\r\n \"\"\"don't create the network object group until filename is checked\r\n is there a special path?\r\n does file exist? \r\n does file have expected headers?\r\n is second row the test router?\r\n how many rows is the file? can I load in memory or take it in chunks?\r\n \"\"\"\r\n \r\n #all this should be in the view\r\n\r\n print(\"working directory \", self.path) \r\n print(\"If you'd like to use another directory/folder, please include the full path with the filename.\")\r\n #should i let users change working directory or just put it in the file path\r\n print(\"checking filename \", self.filename)\r\n\r\n if not os.path.isfile(self.filename):\r\n print(\"this is not an existing file\")\r\n createYN = (input(\"create it? y/n \")).upper()\r\n if createYN=='Y':\r\n self.createFile()\r\n self.getHeaderDict()\r\n\r\n else: # create file = NO\r\n headerDict = {} #create an empty dictionary\r\n self.loadDictRow(keystring = '') #this will create keys but not values\r\n\r\n else:\r\n \"\"\"\r\n Check to see if the first row is headers, and second row is Test Router\r\n \"\"\"\r\n print(\"this is an existing file\")\r\n self.getHeaderDict()\r\n #end method checkFilename \r\n\r\n \r\n\r\n def createFile(self):\r\n print (\"this will create a file \", self.filename)\r\n print (\"headers will be \", self.dataheader)\r\n writestring = \"\"\r\n \r\n for element in self.dataheader:\r\n writestring = writestring + str(element) + \",\"\r\n\r\n writestring=writestring[:-1] + '\\n' \r\n print(\"default working directory is \", os.getcwd())\r\n print(\"current working directory is \", self.path)\r\n print(\"writestring is \", writestring)\r\n with open(self.filename, 'a') as outfile:\r\n outfile.write(writestring)\r\n #end method createFile\r\n \r\n#End Class InitializeModel\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"starting from model\")\r\n testfilename = \"testIPlist2.csv\"\r\n #testfilename=\"e:/dougsprogs/Convert728/convert7to8PKG/testdata728.csv\"\r\n #testfilename=\"e:\\dougsprogs\\Convert728\\convert7to8PKG\\\\testdata728.csv\"\r\n #testfilename=\"testdata728.csv\" #this is a real file with some data\r\n #testfilename=\"testblank728.csv\" #this is a real file with no data\r\n #testfilename=\"testempty728.csv\" #this is not an existing file\r\n #testfilename=\"\" #this is an empty filename\r\n #dataobject=convert_model(testfilename)\r\n #password7=\"111918160405041E007A7A\"\r\n #plaintext=dataobject.decrypt(password7)\r\n #print(\"plaintext is \",plaintext)\r\n #testobj=NetworkObject(ip=\"192.168.20.1\", verbose=True)\r\n #checkthis=InitializeModel(testfilename)\r\n #checkthis.checkFilename()\r\n #tempDict = 
checkthis.retObjDict()\r\n # testobj = NetworkObjectGroup(filename = \"\")\r\n\r\n #2020 feb 1st testing Network Objects\r\n testdict = {'HOSTNAME':'testhostname','IPADDRESS':'192.168.20.1','LOG':'n','VERBOSE':'n','ORIGUSERNAME':\r\n 'Username01','TESTUSERNAME':'', 'PASSWORD7':'053B071C325B411B1D5546','PLAINTEXT':'password','SECRET8':'','CHANGE':'y','VERIFIED':'','NOTES-AND-ERRORS':''}\r\n testnetobjgroup = NetworkObjectGroup(testdict)\r\n testnetobjgroup.createNetObjs(testdict)\r\n testnetobjgroup.createNetObjs(testdict)\r\n testnetobjgroup.createNetObjs(testdict)\r\n testnetobjgroup.createNetObjs(testdict)\r\n testnetobjgroup.showNetObjs()","repo_name":"soupwork/Cisco7to8converter","sub_path":"model728.py","file_name":"model728.py","file_ext":"py","file_size_in_byte":13584,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"14199090323","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom . import views\nurlpatterns = [\n path('', views.home),\n path('admin/', admin.site.urls),\n path('modules/', views.mdf_index),\n url(r'modules/(?P[A-Za-z0-9]{5})', views.mdf_test),\n path('staff/', views.staff_index),\n url(r'staff/(?P\\d+)', views.staff_member),\n path('courses/', views.course_index),\n url(r'courses/(?P[A-Za-z]+)_(?P[A-Za-z]+)', views.course),\n path('predecessors/', views.pre_index),\n url(r'predecessors/(?P[A-Za-z0-9]{5})', views.dependancies),\n path('successors/', views.succ_index),\n url(r'successors/(?P[A-Za-z0-9]{5})', views.successors),\n]\n","repo_name":"AMFagan/MDFDatabase","sub_path":"MDFDatabase/MDFDatabase/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27209942441","text":"import requests\nimport pprint\n\nroutes = input('What route would you like to view? 
')\n\n# response = requests.get(f'https://developer.trimet.org/ws/v2/vehicles?appID=D065A3A5DAE4622752786CEB9&routes={routes}')\nresponse = requests.get(f'https://developer.trimet.org/ws/v2/vehicles', params={'appID': 'D065A3A5DAE4622752786CEB9', 'routes': routes})\n# print(response)\n# print(response.url)\n# print(response.status_code)\n# print(response.encoding)\n# print(response.headers)\n# pprint.pprint(response.text)\n# pprint.pprint(response.json())\n\ntrains_and_buses = response.json()['resultSet']['vehicle']\n# pprint.pprint(trains_and_buses)\n\nprint(f\"there are {len(trains_and_buses)} results:\")\nfor vehicle in trains_and_buses:\n    print(f\"{vehicle['type']} {vehicle['signMessageLong']} -- ({vehicle['latitude']},{vehicle['longitude']})\")\n","repo_name":"PdxCodeGuild/class_HB2","sub_path":"code/merritt/requests_example.py","file_name":"requests_example.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"14968034892","text":"from sentence_transformers import SentenceTransformer, CrossEncoder, util\nfrom src.utils import toTextList\nfrom tqdm import trange\n\nclass Embeddings:\n    def __init__(self, model_name='all-MiniLM-L6-v2', threshold=0.3):\n        self.model = SentenceTransformer(model_name)\n        self.top_k = 32 #Number of passages we want to retrieve with the bi-encoder\n        self.cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2') \n        self.docs_embeddings = None\n    \n    def encode(self, prompt):\n        if isinstance(prompt, str):\n            return self.model.encode(prompt, convert_to_tensor=True).reshape((1,-1))\n        return self.model.encode(prompt, convert_to_tensor=True, show_progress_bar=True)\n\n    def setDocs(self, path):\n        self.docs = toTextList(path)\n        self.docs_embeddings = self.encode(self.docs).cuda()\n    \n    def __call__(self,keyword, top=2):\n        q_embeddings = self.encode(keyword).cuda()\n        hits = util.semantic_search(q_embeddings, self.docs_embeddings, top_k=self.top_k)[0]\n        ##### Re-Ranking #####\n        # Now, score all retrieved passages with the cross_encoder\n        cross_inp = [[keyword, self.docs[hit['corpus_id']]] for hit in hits]\n        cross_scores = self.cross_encoder.predict(cross_inp)\n\n        # Sort results by the cross-encoder scores\n        for idx in trange(len(cross_scores)):\n            hits[idx]['cross-score'] = cross_scores[idx]\n\n        hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)\n        returning_docs = []\n        for hit in hits[0:top]:\n            print(hit['score'])\n            returning_docs.append(self.docs[hit['corpus_id']].replace(\"\\n\", \" \"))\n        return returning_docs\n    \nif __name__ == \"__main__\":\n    x = [\"Hi, my name is Melih. 
\"]*10000\n model = Embeddings()\n emb = model.encode(x)\n print(emb.shape)","repo_name":"melih-unsal/DocGPT","sub_path":"src/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"43393913109","text":"class Thailandpackage:\n def detail(self):\n print(\"태국 패키지 3박 5일] 방콕, 파타야 여행 (야시장 투어) 50만원\")\n\n#모듈 직접 실행 (모듈이 잘 실행되는지 확인)\nif __name__ == \"__main__\": #__name__ 이 만약 __main__이면\n print(\"Thailand 모듈을 직접 실행\")\n print(\"이 문장은 모듈을 직접 실행할 때만 실행됩니다.\")\n trip_to = Thailandpackage()\n trip_to.detail()\nelse: \n print(\"Thailand 외부에서 모듈 호출\") # __name__ 정보를 활용해서 직접 모듈내에서 실행하는건지 외부에서 모듈을 가져다 쓰는건지 확인할 수 있다","repo_name":"sinbak/Code-backup","sub_path":"python_part/module/travel/thailand.py","file_name":"thailand.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36744792795","text":"import os, sys\nimport time\nimport stat\nimport shutil\n\nfrom . import testspec\nfrom .paramset import ParameterSet\nfrom .testspec import TestSpec\n\nversion = 35\n\n\nclass TestListWriter:\n\n def __init__(self, filename):\n \"\"\n self.filename = filename\n\n def start(self, **file_attrs):\n \"\"\n datestamp = repr( [ time.ctime(), time.time() ] )\n\n remove_attrs_with_None_for_a_value( file_attrs )\n\n with open( self.filename, 'w' ) as fp:\n fp.write( '#VVT: Version = '+str(version)+'\\n' )\n fp.write( '#VVT: Start = '+datestamp+'\\n' )\n fp.write( '#VVT: Attrs = '+repr( file_attrs )+'\\n\\n' )\n\n def addIncludeFile(self, include_filename):\n \"\"\n with open( self.filename, 'a' ) as fp:\n fp.write( '#VVT: Include = '+include_filename+'\\n' )\n\n def includeFileCompleted(self, include_filename):\n \"\"\n with open( self.filename, 'a' ) as fp:\n fp.write( '#VVT: Completed = '+include_filename+'\\n' )\n\n def append(self, tcase, extended=False):\n \"\"\n with open( self.filename, 'a' ) as fp:\n fp.write( test_to_string( tcase, extended ) + '\\n' )\n\n def finish(self):\n \"\"\n datestamp = repr( [ time.ctime(), time.time() ] )\n\n with open( self.filename, 'a' ) as fp:\n fp.write( '\\n#VVT: Finish = '+datestamp+'\\n' )\n\n\nclass TestListReader:\n\n def __init__(self, tcasefactory, filename):\n \"\"\n self.fact = tcasefactory\n self.filename = filename\n\n self.vers = None\n self.start = None\n self.attrs = {}\n self.finish = None\n\n self.incl = set()\n self.tests = {}\n\n def read(self):\n \"\"\n for key,val in self._iterate_file_lines():\n try:\n if key == 'Version':\n self.vers = int( val )\n elif key == 'Start':\n self.start = eval( val )[1]\n elif key == 'Attrs':\n self.attrs = eval( val )\n elif key == 'Include':\n self.incl.add( val )\n elif key == 'Completed':\n if val in self.incl:\n self.incl.remove( val )\n elif key == 'Finish':\n self.finish = eval( val )[1]\n else:\n tcase = string_to_test( val, self.fact )\n self.tests[ tcase.getSpec().getID() ] = tcase\n\n except Exception:\n pass\n\n assert self.vers in [32, 33, 34, 35], \\\n 'corrupt test list file or older format: '+str(self.filename)\n\n for incl_file in self.incl:\n self._read_include_file( incl_file )\n\n def getFileVersion(self):\n \"\"\n return self.vers\n\n def getStartDate(self):\n \"\"\n return self.start\n\n def getFinishDate(self):\n \"\"\n return self.finish\n\n def getAttr(self, name, *default):\n \"\"\n if len(default) > 0:\n return self.attrs.get( name, default[0] )\n return 
self.attrs[name]\n\n def getAttrs(self):\n \"\"\n return dict( self.attrs.items() )\n\n def getTests(self):\n \"\"\"\n Returns dictionary mapping (file name, execute dir) to TestCase object.\n \"\"\"\n return self.tests\n\n def scanForFinishDate(self):\n \"\"\"\n If the file has a finish date it is returned, otherwise None.\n \"\"\"\n finish = None\n\n for key,val in self._iterate_file_lines():\n try:\n if key == 'Finish':\n finish = eval( val )[1]\n except Exception:\n pass\n\n return finish\n\n def _iterate_file_lines(self):\n \"\"\n with open( self.filename, 'r' ) as fp:\n\n for line in fp:\n\n line = line.strip()\n\n try:\n if line.startswith( '#VVT: ' ):\n n,v = line[5:].split( '=', 1 )\n yield ( n.strip(), v.strip() )\n\n elif line:\n yield ( None, line )\n\n except Exception:\n pass\n\n def _read_include_file(self, fname):\n \"\"\n if not os.path.isabs( fname ):\n # include file is relative to self.filename\n fname = os.path.join( os.path.dirname( self.filename ), fname )\n\n if os.path.exists( fname ):\n\n tlr = TestListReader( self.fact, fname )\n tlr.read()\n self.tests.update( tlr.getTests() )\n\n\ndef file_is_marked_finished( filename ):\n \"\"\n finished = False\n\n try:\n tlr = TestListReader( None, filename )\n if tlr.scanForFinishDate() != None:\n finished = True\n except Exception:\n pass\n\n return finished\n\n\ndef remove_attrs_with_None_for_a_value( attrdict ):\n \"\"\n for k,v in list( attrdict.items() ):\n if v == None:\n attrdict.pop( k )\n\n\ndef test_to_string( tcase, extended=False ):\n \"\"\"\n Returns a string with no newlines containing the file path, parameter\n names/values, and attribute names/values.\n \"\"\"\n tspec = tcase.getSpec()\n tstat = tcase.getStat()\n\n assert tspec.getName() and tspec.getRootpath() and tspec.getFilepath()\n\n testdict = {}\n\n idtraits = tspec.getIDTraits()\n if idtraits:\n testdict['idtraits'] = idtraits\n\n testdict['name'] = tspec.getName()\n testdict['root'] = tspec.getRootpath()\n testdict['path'] = tspec.getFilepath()\n testdict['keywords'] = tspec.getKeywords( include_implicit=False )\n\n if tspec.isAnalyze():\n testdict['paramset'] = tspec.getParameterSet().getParameters()\n else:\n testdict['params'] = tspec.getParameters()\n\n testdict['attrs'] = tstat.getAttrs()\n\n if extended:\n insert_extended_test_info( tcase, testdict )\n\n s = repr( testdict )\n\n return s\n\n\ndef string_to_test( strid, factory ):\n \"\"\"\n Creates and returns a partially filled TestSpec object from a string\n produced by the test_to_string() method.\n \"\"\"\n testdict = eval( strid.strip() )\n\n idtraits = testdict.get( 'idtraits', {} )\n\n name = testdict['name']\n root = testdict['root']\n path = testdict['path']\n\n tspec = TestSpec( name, root, path, idtraits )\n\n if 'paramset' in testdict:\n pset = tspec.getParameterSet()\n for T,L in testdict['paramset'].items():\n pset.addParameters( T, L )\n tspec.setIsAnalyze()\n else:\n tspec.setParameters( testdict['params'] )\n\n tspec.setKeywordList( testdict['keywords'] )\n\n tcase = factory.new( tspec )\n tstat = tcase.getStat()\n\n for k,v in testdict['attrs'].items():\n tstat.setAttr( k, v )\n\n check_load_extended_info( tcase, testdict )\n\n return tcase\n\n\ndef insert_extended_test_info( tcase, testdict ):\n \"\"\n if tcase.hasDependent():\n testdict['hasdependent'] = True\n\n depL = tcase.getDepDirectories()\n if len( depL ) > 0:\n testdict['depdirs'] = depL\n\n\ndef check_load_extended_info( tcase, testdict ):\n \"\"\n if testdict.get( 'hasdependent', False ):\n 
tcase.setHasDependent()\n\n depL = testdict.get( 'depdirs', None )\n if depL:\n for pat,xdir in depL:\n tcase.addDepDirectory( pat, xdir )\n","repo_name":"sandialabs/vvtest","sub_path":"libvvtest/testlistio.py","file_name":"testlistio.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"21675278132","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import LinearSVC\nfrom sklearn import datasets, svm, metrics\nfrom sklearn.model_selection import train_test_split\n\n# In this application :\n# apply linear and non linear separators on toy data and on images\n\n\n# ######################################################\n# useful function to plot SVM boundary\n# ######################################################\ndef plot_boundary(clf, X, y):\n \"\"\"\n Function to plot a boundary decision\n \"\"\"\n h = 0.002\n x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1\n y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n\n plt.figure()\n plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)\n\n plt.scatter(X[:, 0], X[:, 1], c=y, s = 100)\n plt.title('score : ' + str(clf.score(X,y)))\n plt.xlabel('$x_1$')\n plt.ylabel('$x_2$')\n\n\n# ######################################################\n# Linear Classifier on toy data\n# ######################################################\n\n# 1) Generate a separable training set and visualize it\nnp.random.seed(21)\nX = np.random.rand(50,2)\n# put linearly separable labels\ny = X[:,0] > 0.5\nplt.scatter(X[:,0],X[:,1], c = y, s = 100)\nplt.savefig('linear_data.png')\n\n# 2) Apply a linear classifier (use LinearSVC with C=1)\nclf = LinearSVC(C=1)\n# TO FILL\nprint(\"training scores:\",clf.score(X,y))\nplot_boundary(clf,X,y)\nplt.savefig('linear_SVC.png')\n\n\n#3) Add some noise in the dataset\nnp.random.seed(2)\nX = np.random.rand(100,2)\ny = X[:,0] > 0.5\nlines_noise = np.random.choice(range(len(y)), 10)\ny[lines_noise] = 1 - y[lines_noise]\nplt.close()\nplt.scatter(X[:,0],X[:,1], c = y, s = 100)\nplt.savefig('linear_data_noise.png')\n\n# Evaluate the effect of regularization coefficient\nfor C in [10**x for x in range(-5,6)]:\n # Fit a linear SVC with growing C\n # TO FILL\n print(\"training score for C = %.3f : %.3f\"%(C,clf.score(X,y)))\n plt.clf()\n plot_boundary(clf,X,y)\n plt.title('C = ' + str(C))\n plt.savefig('boundary_C_%.3f.png'%C)\n\n# ######################################################\n# Non linear Classifier on toy data\n# ######################################################\n\n# 3) Use non linear data (moon)\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import make_moons\n\n# Creation of a training set\nX, y = make_moons(noise = 0.1, random_state=1, n_samples=40)\nplt.clf()\nplt.scatter(X[:,0],X[:,1], c = y, s = 100)\nplt.savefig('moon_train_data.png')\n# Creation of a testing set\nX_test, y_test = make_moons(noise = 0.1, random_state=321, n_samples=20)\nplt.scatter(X_test[:,0],X_test[:,1], c = y_test, s = 100)\nplt.savefig('moon_test_data.png')\n\n# Try a SVM with polynomial kernel of degree 3 (and initial coef0 = 1, Better suited for these data)\n# and evaluate the effect of the margin\nfor C in [10**x for x in range(-2,8)]:\n # TO FILL\n print(\"training score:\",clf.score(X,y), \". 
testing score:\", clf.score(X_test,y_test))\n plot_boundary(clf,X,y)\n plt.scatter(clf.support_vectors_[:,0],clf.support_vectors_[:,1], c = 'green', s = 200, marker='*')\n plt.savefig('moon_polynomial_kernel_C_%.3f.png'%C)\n\n# Try a SVM with gaussian kernel (rbf)\n# and evaluate the effect of the margin\nfor C in [10**x for x in range(-3,5)]:\n # TO FILL\n print(\"training score:\",clf.score(X,y), \". testing score:\", clf.score(X_test,y_test))\n plot_boundary(clf,X,y)\n plt.scatter(clf.support_vectors_[:,0],clf.support_vectors_[:,1], c = 'green', s = 200, marker='*')\n plt.savefig('moon_gaussian_kernel_C_%.3f.png'%C)\n\n\n\n# ######################################################\n# Apply SVM on satellite images\n# ######################################################\n# Loading data\nimport pickle\n# load sat data (3 bands)\nwith open('./images/sat_image_R.pkl', 'rb') as handle:\n sat_dataR = pickle.load(handle)\nwith open('./images/sat_image_G.pkl', 'rb') as handle:\n sat_dataG = pickle.load(handle)\nwith open('./images/sat_image_B.pkl', 'rb') as handle:\n sat_dataB = pickle.load(handle)\n# load sat classes\nwith open('./images/sat_label.pkl', 'rb') as handle:\n sat_classes = pickle.load(handle)\n\n# verify labels\ntable_of_labels=np.unique(sat_classes)\nnumber_of_labels = len(table_of_labels)\nprint(number_of_labels , 'labels : ' , table_of_labels)\n# reshape data\n# verify the shape and reshape data to fit with\n# a matrix (N x D) of N points in D dimension\nchannels=3\nsize_x,size_y=sat_dataR.shape\nsat_data=np.zeros((size_x,size_y,channels))\nsat_data[:,:,0]=sat_dataR\nsat_data[:,:,1]=sat_dataG\nsat_data[:,:,2]=sat_dataB\n# Visualization of the image\nplt.clf()\nplt.imshow(sat_data.astype(np.float64)/255)\nplt.show()\n# Visualization of the labels\nplt.clf()\nplt.imshow(sat_classes)\nplt.show()\n# Creation of the dataset (data of size (size_x*size_y,channels)\n# and labels (size_x*size_y,1))\n# TO FILL\n# Extract train data\nnb_data_train=len(labels)\nX=np.zeros((0,channels))\nY=np.zeros((0,1))\nfor i in range(1,number_of_labels):\n index_label=labels==table_of_labels[i]\n number_of_training_samples_i=len(np.where(index_label)[0])\n X=np.concatenate((X,data[np.where(index_label),:][0]),axis=0)\n Y=np.concatenate((Y,table_of_labels[i]*np.ones((number_of_training_samples_i,1))))\n# split test train\n# TO FILL\n#\n# PERFORM CLASSIFICATION\n# TO FILL\n\n\n# Print out performances\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (classification, metrics.classification_report(y_test, y_predict)))\ny_image = classification.predict(data)\n\n# Reshape image and save\nlabel_image=y_image.reshape(size_x,size_y)\nplt.clf()\nplt.imshow(label_image)\nplt.title('classification result')\nplt.show()\nplt.savefig('classification_result.png')\n\n# ######################################################\n# Problem ???? 
Some classes are imbalanced\n# ######################################################\n# Print the number of classes\nnumber_of_training_samples_1=len(np.where(labels==1)[0])\nnumber_of_training_samples_2=len(np.where(labels==2)[0])\nnumber_of_training_samples_0=len(np.where(labels==0)[0])\nprint('number of samples in class 0 : %d'%number_of_training_samples_0)\nprint('number of samples in class 1 : %d'%number_of_training_samples_1)\nprint('number of samples in class 2 : %d'%number_of_training_samples_2)\n# Modify the number of samples in class 0\nnumber_of_training_samples_0=int(0.5*(number_of_training_samples_1+number_of_training_samples_2))\n# Recreate the dataset\nX=np.zeros((0,channels))\nY=np.zeros((0,1))\nfor i in range(0,number_of_labels):\n if i==0:\n # index with label 0\n index_label=np.where(labels==table_of_labels[i])\n # choose randomly number_of_training_samples_0 inside\n index_for_training = np.random.randint(0,len(index_label[0]),number_of_training_samples_0)\n X=np.concatenate((X,data[index_label[0][index_for_training],:]),axis=0)\n Y=np.concatenate((Y,table_of_labels[i]*np.ones((number_of_training_samples_0,1))))\n else:\n index_label=labels==table_of_labels[i]\n number_of_training_samples_i=len(np.where(index_label)[0])\n X=np.concatenate((X,data[np.where(index_label),:][0]),axis=0)\n Y=np.concatenate((Y,table_of_labels[i]*np.ones((number_of_training_samples_i,1))))\n\n# split test train\n# TO FILL\n#\n# PERFORM CLASSIFICATION\n# TO FILL\n\n\n# Print out performances\n\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (classification, metrics.classification_report(y_test, y_predict)))\n\n\ny_image = classification.predict(data)\nlabel_image=y_image.reshape(size_x,size_y)\nplt.clf()\nplt.imshow(label_image)\nplt.title('classification result with balanced labels')\nplt.savefig('classification_result_balanced.png')\n\n\n# ######################################################\n# Apply on time series\n# ######################################################\nimport tslearn\nfrom tslearn.barycenters import \\\n euclidean_barycenter, \\\n dtw_barycenter_averaging, \\\n dtw_barycenter_averaging_subgradient, \\\n softdtw_barycenter\nfrom tslearn.datasets import CachedDatasets\n\n# ######################################################\n# Barycenter computation\n# ######################################################\n\n# fetch the example data set\nnp.random.seed(0)\nX_train, y_train, _, _ = CachedDatasets().load_dataset(\"Trace\")\nX = X_train[y_train == 1]\nlength_of_sequence = X.shape[1]\n\n# Useful function to plot series and eventually barycenter\ndef plot_series(X,barycenter=None):\n # plot all points of the data set\n for series in X:\n plt.plot(series.ravel(), \"k-\", alpha=.2)\n # plot the given barycenter of them\n if barycenter is not None:\n plt.plot(barycenter.ravel(), \"r-\", linewidth=2)\n\n# Visualize series, Euclidian and DTW barycenters\nplt.clf()\nax1 = plt.subplot(3, 1, 1)\nplt.title(\"Series\")\nplot_series(X)\nax2 = plt.subplot(3, 1, 2)\nplt.title(\"Eucledian Barycenter\")\nplot_series(X,euclidean_barycenter(X))\nax3 = plt.subplot(3, 1, 3)\nplt.title(\"DTW Barycenter\")\nplot_series(X,dtw_barycenter_averaging(X, max_iter=50, tol=1e-3))\nplt.show()\n\n\n# ######################################################\n# KNN search based on DTW\n# ######################################################\n\n\nfrom tslearn.neighbors import KNeighborsTimeSeries\nfrom tslearn.datasets import CachedDatasets\n\nseed = 0\nnp.random.seed(seed)\nX_train, 
y_train, X_test, y_test = CachedDatasets().load_dataset(\"Trace\")\n\nn_queries = 2\nn_neighbors = 4\n\nknn = KNeighborsTimeSeries(n_neighbors=n_neighbors)\nknn.fit(X_train)\nind = knn.kneighbors(X_test[:n_queries], return_distance=False)\n\nplt.figure()\nfor idx_ts in range(n_queries):\n plt.subplot(n_neighbors + 1, n_queries, idx_ts + 1)\n plt.plot(X_test[idx_ts].ravel(), \"k-\")\n plt.xticks([])\n for rank_nn in range(n_neighbors):\n plt.subplot(n_neighbors + 1, n_queries,\n idx_ts + (n_queries * (rank_nn + 1)) + 1)\n plt.plot(X_train[ind[idx_ts, rank_nn]].ravel(), \"r-\")\n plt.xticks([])\n\n\nplt.suptitle(\"Queries (in black) and their nearest neighbors (red)\")\nplt.show()\n\n# ######################################################\n# Apply SVM on time series\n# ######################################################\n\n\nfrom tslearn.datasets import CachedDatasets\nfrom tslearn.preprocessing import TimeSeriesScalerMinMax\nfrom tslearn.svm import TimeSeriesSVC\n\nnp.random.seed(0)\nX_train, y_train, X_test, y_test = CachedDatasets().load_dataset(\"Trace\")\n# visualization of the time series\nplt.clf()\nplot_series(X_train)\nplt.show()\n# Homogeneieze data\nX_train = TimeSeriesScalerMinMax().fit_transform(X_train)\nX_test = TimeSeriesScalerMinMax().fit_transform(X_test)\nclf = TimeSeriesSVC(kernel=\"gak\")\nclf.fit(X_train, y_train)\nprint(\"Correct classification rate:\", clf.score(X_test, y_test))\n\nn_classes = len(set(y_train))\nplt.clf()\nplt.figure()\nsupport_vectors = clf.support_vectors_\nfor i, cl in enumerate(set(y_train)):\n plt.subplot(n_classes, 1, i + 1)\n plt.title(\"Support vectors for class %d\" % cl)\n for ts in support_vectors[i]:\n plt.plot(ts.ravel())\n\nplt.tight_layout()\nplt.show()\n","repo_name":"Ruphai/UBS","sub_path":"Machine Learning/Unsupervised Learning/python/classif/classif.py","file_name":"classif.py","file_ext":"py","file_size_in_byte":11215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22327802353","text":"#!/usr/bin/env/python3\n# coding:utf-8\n\"\"\"\n# @Time : 2022/1/6 15:43\n# @Author : feifeigao\n# @File : 164.py\n\"\"\"\n\n\ndef maximumGap(nums) -> int:\n if len(nums) < 2: return 0\n l1 = sorted(nums)\n l2=[]\n print(l1)\n for i in range(1,len(nums)):\n # print(l1[i])\n cha=l1[i]-l1[i-1]\n # print(cha)\n if cha>=0:\n l2.append(cha)\n # print(l2)\n print(max(l2))\n return max(l2)\n\nif __name__ == '__main__':\n nums=[3,6,9,1]\n nums=[4,7,21,3,2,36,58]\n nums=[4,7,21,36,58]\n nums=[1, 1, 1, 1]\n nums=[100,3,2,1]\n maximumGap(nums)","repo_name":"gaofeifei/shualeetcode","sub_path":"164.py","file_name":"164.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34839366095","text":"\"\"\"\nExercício Python 096: Faça um programa que tenha uma função chamada área(), \nque receba as dimensões de um terreno retangular \n(largura e comprimento) e mostre a área do terreno.\n\"\"\"\n\ndef are(x, y):\n print(f\"a dimenção do terreno e igual a {x*y} metros Quadrados \")\n\n\nl = float(input(\"digite a largura do terreno: \"))\nc = float(input(\"digite o comprimento do terreno\"))\nare(l,c)","repo_name":"iagocarvalho07/PYTHON","sub_path":"mundo 3/desafio96.py","file_name":"desafio96.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72456936403","text":"from pymongo import MongoClient\r\nfrom 
random import randint\r\n\r\nclient = MongoClient('mongodb://localhost:27017/')\r\ndb = client.test\r\ncollection = db.zak\r\ncount = collection.find().count()\r\n\r\n# generate small db\r\n# db.zakupki.find({}).limit(228113).forEach(function(x){db.zak.insert(x)})\r\n\r\n\r\ndef GetOrderNames(amount=None):\r\n if amount is None:\r\n amount = count\r\n results = collection.find({}, {\"orderName\": 1})\r\n\r\n for _ in range(amount):\r\n n = results.next()\r\n yield n['orderName'].encode('utf-8')\r\n\r\ndef GetLots(amount=None):\r\n if amount is None:\r\n amount = count\r\n results = collection.find({}, {\"lots.lot.subject\": 1})\r\n for _ in range(amount):\r\n n = results.next()\r\n yield n[\"lots\"][\"lot\"][\"subject\"].encode(\"utf-8\")\r\n\r\n\r\ndef GetOrderNamesRandom(amount):\r\n for _ in range(amount):\r\n randInt = randint(1, amount)\r\n n = collection.find({}, {\"orderName\": 1}).limit(-1).skip(randInt).next()\r\n yield n['orderName'].encode('utf-8')\r\n\r\n\r\ndef GetLotsRandom(amount):\r\n for _ in range(amount):\r\n randInt = randint(1, count)\r\n n = collection.find({}, {\"lots.lot.subject\": 1}).limit(-1).skip(randInt).next()\r\n yield n[\"lots\"][\"lot\"][\"subject\"].encode(\"utf-8\")\r\n\r\ndef GetRandomItem():\r\n randInt = randint(1, count)\r\n result = collection.find().limit(-1).skip(randInt).next()\r\n return result\r\n\r\n#f = open(\"OrderNames.txt\", \"w\")\r\n#results = collection.find({}, {\"orderName\": 1, \"id\": 1})\r\n#for _ in range(count):\r\n# n = results.next()\r\n# idQ = str(n[\"id\"])\r\n# s = n[\"orderName\"].encode(\"utf-8\").strip().replace(\"\"\"\r\n#\"\"\", \"
\")\r\n# f.write(idQ+\"\\t\"+s+\"\\n\")\r\n#\r\n#f = open(\"Lots.txt\", \"w\")\r\n#\r\n#results = collection.find({}, {\"lots.lot.subject\": 1, \"id\": 1})\r\n#for _ in range(count):\r\n# n = results.next()\r\n# idQ = str(n[\"id\"])\r\n# s = n[\"lots\"][\"lot\"][\"subject\"].encode(\"utf-8\").strip().replace(\"\"\"\r\n#\"\"\", \"
\")\r\n# f.write(idQ+\"\\t\"+s+\"\\n\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"bellal89/SynSearch","sub_path":"getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38704070272","text":"\n\ntxt_files = open('praise_song_for_the_day.txt') \npoem_words = {} # makes an empty dictionary\nfor item in txt_files: \n item = item.lower() # make all letters in file lowercase\n item = item.strip() # takes out the leading spaces in the text\n poem = item.split() # seperates the words by white spaces\n \n for word in poem: # loops over each word in the text\n if word in poem_words: # This checks to see if the word is in the dictionary\n poem_words[word] = poem_words[word] + 1 # If the word is in the dictionary increase the count by 1\n else: \n poem_words[word] = 1 # if the word is not in the dictionary add it.\n \nfor key in (poem_words.keys()): \n print(key.rjust(20) , \"|\", poem_words[key], '*' * poem_words[key] ) \n\n\n########################## Remove Stop Words####################\n \n #def remove_from_list(list_of_items, item_to_remove):\n # items = [item for item in list_of_items if item is not item_to_remove] # Only add item to new list if item is not the item we want to remove\n #return items\n\n #def remove_from_list(list_of_items, item_to_remove):\n # newlist = []\n # for item in words_list:\n # if item != words_to_remove:\n # newlist.append(item)\n # return newlist\n\n\n\n\n","repo_name":"momentum-team-2/py-word-frequency-rh017548","sub_path":"testpage2.py","file_name":"testpage2.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3952113762","text":"\"\"\"Easy access to Databricks databases via databricks-sql-connector\"\"\"\n\nimport typing\nfrom warnings import warn\n\nimport mara_db.dbs\n\n\ndef databricks_cursor_context(db: typing.Union[str, mara_db.dbs.DatabricksDB]) \\\n -> 'databricks.sql.client.Cursor':\n warn('Function databricks_cursor_context(db) is deprecated. 
Please use mara_db.dbs.cursor_context(db) instead.',\n category=DeprecationWarning)\n\n if isinstance(db, str):\n db = mara_db.dbs.db(db)\n\n assert (isinstance(db, mara_db.dbs.DatabricksDB))\n\n return mara_db.dbs.cursor_context(db)\n","repo_name":"mara/mara-db","sub_path":"mara_db/databricks.py","file_name":"databricks.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"3"} +{"seq_id":"7719700777","text":"from typing import List, Union\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import models\n\nfrom dhdrnet.dataset import RCDataset\nfrom dhdrnet.model import DHDRNet\nfrom dhdrnet.util import ROOT_DIR\n\nfigdir = ROOT_DIR / \"figures\"\n\n\nclass RCNet(DHDRNet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.Dataset = RCDataset\n self.feature_extractor = models.mobilenet_v2(\n pretrained=False, num_classes=self.num_classes\n )\n self.feature_extractor.classifier = nn.Sequential(\n nn.Dropout(0.5),\n nn.Linear(\n self.feature_extractor.last_channel,\n self.feature_extractor.last_channel // 2,\n ),\n nn.BatchNorm1d(self.feature_extractor.last_channel // 2),\n nn.Linear(self.feature_extractor.last_channel // 2, self.num_classes),\n )\n self.criterion = nn.MSELoss()\n\n exp_min = -3\n exp_max = 6\n exp_step = 0.25\n self.exposures: np.ndarray = np.linspace(\n exp_min, exp_max, int((exp_max - exp_min) / exp_step + 1)\n )\n\n def common_step(self, batch):\n mid_exposures, ground_truth_images, names, _labels = batch\n predicted_ev_idx = torch.argmax(self(mid_exposures), dim=1)\n predicted_ev = self.exposures[predicted_ev_idx.cpu().numpy()]\n predicted_fused = torch.stack(\n [\n self.transform(\n Image.fromarray(self.generator.get_reconstruction(name, 0.0, pev))\n )\n for pev, name in zip(predicted_ev, names)\n ]\n )\n predicted_fused = predicted_fused.to(device=\"cuda:0\")\n predicted_fused.requires_grad_(True)\n loss = F.mse_loss(predicted_fused, ground_truth_images)\n return loss\n\n def train_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n return DataLoader(\n self.train_data,\n batch_size=self.batch_size,\n num_workers=8,\n pin_memory=True,\n )\n\n def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n return DataLoader(\n self.val_data,\n batch_size=self.batch_size,\n pin_memory=True,\n num_workers=8,\n )\n\n def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:\n return DataLoader(\n self.test_data,\n batch_size=self.batch_size,\n pin_memory=True,\n num_workers=8,\n )\n\n def forward(self, x):\n x = self.feature_extractor(x)\n return x\n","repo_name":"smsegal/DHDRNet","sub_path":"dhdrnet/reconstruction_model.py","file_name":"reconstruction_model.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25877405144","text":"# In[ ]:\nimport io\nfrom TwitterAPI import TwitterAPI\nimport pymongo\nfrom pymongo import MongoClient\nimport json\nimport datetime as dt\nimport time\nimport DataCleansing\n\n#In[]:\n#เรียกใช้ database ใน mongo\nclient = MongoClient('mongodb://localhost:27017/University')\n# client = MongoClient('mongodb://superadmin:4rFV5tGB@0.0.0.0:27017/University?authSource=admin')\ndb = client.University\n#สร้างดัชนี (Index) ให้กับ data\ndb.tweet_raw_data.create_index([('id_str', 
pymongo.ASCENDING)], unique=True)\n\n#In[]:\n#ดึงข้อมูล keyword ที่อยู่ใน json\nwith io.open(\"keywordSearch.json\",encoding=\"utf-8\") as json_file:\n twData = json.load(json_file)\n\n#In[]:\n#ประกาศ API ที่ใช้งาน\napi = []\nwith io.open(\"searchAPI.json\",encoding=\"utf-8\") as json_file:\n apiData = json.load(json_file)\n\nfor allApi,token in apiData.items():\n api.append(TwitterAPI(token['consumer_key'], token['consumer_secret'], token['access_token_key'], token['access_token_secret']))\n\n#In[]:\ndef getRawTwitter():\n rawData = []\n for i in range(len(api)):\n try:\n t0 = time.time()\n for key, value in twData.items():\n for val in value:\n kw = val +\" -filter:retweets\"\n r = api[i].request('search/tweets', {'q':kw,'lang':'th','tweet_mode':'extended','count':'100'})\n for item in r:\n try:\n db.tweet_raw_data.insert_one(\n {\n 'university':key,\n 'keyword':val,\n 'id_str': item['id_str'],\n 'data':item,\n 'timeUpdate':dt.datetime.today(),\n 'addData':'incomplete'\n })\n except pymongo.errors.DuplicateKeyError:\n rawData.append((pymongo.UpdateOne(\n {\n 'id_str':item['id_str']\n },\n {\n '$set': {\n 'data':item,\n 'timeUpdate':dt.datetime.today(),\n 'addData':'incomplete'\n }\n },upsert=True)))\n if(len(rawData)>0):\n db.tweet_raw_data.bulk_write(rawData,ordered=False)\n rawData = []\n DataCleansing.tweetSearchCleansing()\n time.sleep(20)\n t1 = time.time()\n print('API-',i+1,': %f'%(t1-t0))\n except:\n print('Next API')\n next\n\n# In[ ]:\nif __name__ == '__main__':\n while True:\n getRawTwitter() \n","repo_name":"chanapat1752/Senior-Project-Picktoptrend.com","sub_path":"src/crawler/TwitterSearch.py","file_name":"TwitterSearch.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71019771601","text":"\"\"\"\nA test of how a python iterator passes back items\n\"\"\"\n\nl = []\nfor i in range(8):\n l.append(i)\n\nfor num in l:\n num += 10\n print(\"num inside first iterator = \" + str(num))\n\nfor num in l:\n print(\"num after first iterator = \" + str(num))\n","repo_name":"gcallah/OOP2","sub_path":"code/misc/py_list.py","file_name":"py_list.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"73435654801","text":"import sys\n\nline = list(map(int, sys.stdin.readline().strip().split(\" \")))\nN = line[0]\nM = line[1]\nline = list(map(int, sys.stdin.readline().strip().split(\" \")))\n\n# i = 0\n# j = M\n# res = sum(line[i:j]) // M\nres = 0\n# while i < N-M + 1:\n# j = i + M\n# while j < N:\n# if sum(line[i:j]) / (j-i) > res:\n# res = sum(line[i:j]) / (j-i)\n# j += 1\n# i += 1\nfor i in range(0, N-M+1):\n for j in range(i+M-1, N):\n res = max(res, sum(line[i:j+1])/(j-i+1))\n\n\nprint(\"%.3f\" %res)\n\n","repo_name":"xiaomojie/NowCoder","sub_path":"面试与笔试/笔试/360-2/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26269045094","text":"import logging\n\nimport grpc\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport models\nfrom decorators import catch_not_found_\n\nfrom .. 
import crud\nfrom ..protos import blog_pb2\nfrom ..protos import blog_pb2_grpc\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TagBlogServicer(blog_pb2_grpc.BlogServicer):\n \"\"\"Servicer to provide tag methods that implements blog server.\"\"\"\n\n def GetTags(\n self, request: blog_pb2.GetTagsRequest, context: grpc.ServicerContext\n ) -> blog_pb2.GetTagsResponse:\n \"\"\"Returns a list of tags.\"\"\"\n logger.info(\"GetTags\")\n tags = crud.get_all_tags(request.limit, request.offset)\n return blog_pb2.GetTagsResponse(\n tags=(self._get_tag_schema_from_(tag) for tag in tags)\n )\n\n @catch_not_found_(\"Tag\")\n def GetTagBySlug(\n self,\n request: blog_pb2.GetTagBySlugRequest,\n context: grpc.ServicerContext,\n ) -> blog_pb2.GetTagBySlugResponse:\n \"\"\"Returns a tag by slug.\"\"\"\n logger.info(\"GetTagBySlug\")\n tag = crud.get_tag_by_slug(request.slug)\n return blog_pb2.GetTagBySlugResponse(\n tag=self._get_tag_schema_from_(tag),\n posts=(\n self._get_post_list_schema_from_(post) for post in tag.posts\n ),\n )\n\n def _get_tag_schema_from_(self, tag: models.Tag) -> blog_pb2.TagSchema:\n \"\"\"Returns a TagSchema from the given Tag model.\"\"\"\n return blog_pb2.TagSchema(\n title=tag.title,\n slug=tag.slug,\n )","repo_name":"Gubchik123/ITish-gRPC","sub_path":"blog/grpc_servicer/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21603638123","text":"def isPrime(n):\n if n==2 or n==3: return True\n if n%2==0 or n<2: return False\n for i in range(3, int(n**0.5)+1, 2): # only odd numbers\n if n%i==0:\n return False\n\n return True\n\n\ndef verificare(f):\n fread = f.readlines()\n #print(fread)\n mul3 = 0\n nr = 0\n nrcrt = {}\n\n for i in fread:\n mul3 += 1\n x = 0\n if mul3 % 3 == 0 and i.strip().split()[0][0] != \".\":\n raise Exception(\"Linia \", mul3, \" nu incepe cu .\")\n\n for j in range(len(i.strip().replace(\" \",\"\").split()[0])):\n if i.strip().replace(\" \",\"\").split()[0][j] >= \"0\" and i.strip().replace(\" \",\"\").split()[0][j] <= \"9\":\n #print(\"Numar: \", i.strip().replace(\" \",\"\").split()[0][j])\n nr = nr + int(i.strip().replace(\" \",\"\").split()[0][j])\n\n for j in range(len(i.split())):\n if i.split()[j].isalpha():\n x += len(i.split()[j])\n\n nrcrt[mul3] = x\n #print(nrcrt[mul3], \" \", mul3)\n if nrcrt[mul3] > 124:\n raise Exception(\"Numarul caracterelor de pe linia \", mul3, \" este mai mare decat 124\")\n\n if isPrime(nr) == False:\n raise Exception(\"Suma numerelor (\", nr,\") nu este prima!\")\n\n print(\"OK\")\n\n\n\n\ndef main():\n #f = open(\"file.txt\", \"r\")\n #verificare(f)\n l = [10,20,30]\n for idx, elem in enumerate(l): print(elem,\" \", idx)\n\nmain()","repo_name":"ImCODEX/Uni","sub_path":"Semester1/FP/PP_Muster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5205460085","text":"from src.AirLine import airLine\nimport csv\n\nclass influncedAirLine(airLine):\n def __init__(self, line_info):\n super(influncedAirLine, self).__init__(line_info)\n print(\"..a\")\n\n\n def printTestInfo(self):\n print(self.LineIF)\n\n\nif __name__ == '__main__':\n with open(\"../Scenario/Xiahang_Airline.csv\") as f:\n data = csv.reader(f)\n head = next(data)\n airlineSet = []\n airlineSet.append(airLine(next(data)))\n test_airline = airLine(next(data))\n infair = 
influncedAirLine(airlineSet[0].reconstruct_list())\n infair.printTestInfo()\n print(test_airline.__class__)\n test_airline.__class__ = influncedAirLine\n print(test_airline.__class__)\n test_airline.printTestInfo()\n print(test_airline.LineFlyPeriod)\n","repo_name":"aquablue1/XiahangFlightOptimize","sub_path":"src/InfluencedAirLine.py","file_name":"InfluencedAirLine.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69955043603","text":"import pickle\r\nimport os\r\n\r\n\r\ndef read_entries():\r\n file_read = open(\"db.p\", \"rb\")\r\n stud = pickle.load(file_read)\r\n for i in stud['students']:\r\n print('Stud number: %s Name: %s Group: %s Age: %d'%(stud['students'][i]['number'],\r\n stud['students'][i]['name'],\r\n stud['students'][i]['group'],\r\n stud['students'][i]['age']))\r\n file_read.close()\r\n\r\n\r\ndef empty_file():\r\n if os.stat(\"db.p\").st_size == 0:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef check_existence(id):\r\n if not empty_file():\r\n file_read = open(\"db.p\", \"rb\")\r\n stud = pickle.load(file_read)\r\n file_read.close()\r\n count = 0\r\n for i in stud['students']:\r\n if stud['students'][i]['number'] == id:\r\n count+=1\r\n if count == 0:\r\n return False\r\n else:\r\n return True\r\n return False\r\n\r\n\r\ndef menu():\r\n choice = '0'\r\n while choice != '6':\r\n\r\n print()\r\n print(\"Main Choice: Choose 1 of 4 choices\")\r\n print(\"1.Add students\")\r\n print(\"2.Delete student\")\r\n print(\"3.Find student\")\r\n print(\"4.Display all students\")\r\n\r\n choice = input(\"Please make a choice: \")\r\n print()\r\n\r\n if choice == \"1\":\r\n if empty_file():\r\n student_arr = {'students': {}}\r\n max_id = 0\r\n else:\r\n file_read = open(\"db.p\", \"rb\")\r\n stud = pickle.load(file_read)\r\n student_arr = stud\r\n max_id = max(student_arr['students'].keys())\r\n file_read.close()\r\n\r\n n_students = int(input('Number of students: '))\r\n l_student_num = 0\r\n while 0 != n_students:\r\n max_id += 1\r\n temp = {}\r\n student = input(\"Student number: \")\r\n if check_existence(student) or student == l_student_num:\r\n print('!!!Student number must be unique!!!')\r\n break\r\n l_student_num = student\r\n temp.update({'number': student})\r\n student = input(\"Name: \")\r\n temp.update({'name': student})\r\n student = input(\"Group: \")\r\n temp.update({'group': student})\r\n student = int(input(\"Age: \"))\r\n temp.update({'age': student})\r\n student_arr['students'][max_id] = temp\r\n n_students -= 1\r\n\r\n file_write = open(\"db.p\", \"wb\")\r\n pickle.dump(student_arr, file_write)\r\n\r\n file_write.close()\r\n\r\n print('Success!')\r\n print()\r\n op = input('Wanna continue?')\r\n if op == 'N' or op == 'n':\r\n break\r\n elif choice == \"2\":\r\n if empty_file():\r\n print('There are no entries!')\r\n else:\r\n\r\n file_read = open(\"db.p\", \"rb\")\r\n stud = pickle.load(file_read)\r\n number = input('Delete student with number: ')\r\n \r\n for id in stud['students']:\r\n if stud['students'][id]['number'] == number:\r\n del stud['students'][id]\r\n print('Success!')\r\n break\r\n\r\n file_write = open(\"db.p\", \"wb\")\r\n pickle.dump(stud, file_write)\r\n file_read.close()\r\n file_write.close()\r\n\r\n print()\r\n op = input('Wanna continue?')\r\n if op == 'N' or op == 'n':\r\n break\r\n elif choice == \"3\":\r\n\r\n if empty_file():\r\n print('There are no entries!')\r\n else:\r\n file_read = open(\"db.p\", \"rb\")\r\n stud = 
pickle.load(file_read)\r\n file_read.close()\r\n number = input('Please enter student number: ')\r\n for i in stud['students']:\r\n if stud['students'][i]['number'] == number:\r\n print('Number: %s Name: %s Age: %d'%(stud['students'][i]['number'],\r\n stud['students'][i]['name'],\r\n stud['students'][i]['age']))\r\n break\r\n\r\n print()\r\n op = input('Wanna continue?')\r\n if op == 'N' or op == 'n':\r\n break\r\n elif choice == \"4\":\r\n if empty_file():\r\n print('There are no entries!')\r\n else:\r\n read_entries()\r\n print()\r\n op = input('Wanna continue?')\r\n if op == 'N' or op == 'n':\r\n break\r\n else:\r\n print(\"EXIT\")\r\n\r\n\r\nmenu()","repo_name":"eKarakoleva/Python-BMSTU-labs","sub_path":"DB_program.py","file_name":"DB_program.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28847243154","text":"import os\nimport yaml\nfrom yacs.config import CfgNode\nfrom fluidlab.utils.misc import get_src_dir\nfrom fluidlab.configs.default_config import get_default_cfg\n\ndef make_cls_config(self, cfg=None):\n _cfg = self.default_config()\n if cfg is not None:\n if isinstance(cfg, str):\n _cfg.merge_from_file(cfg)\n else:\n _cfg.merge_from_other_cfg(cfg)\n return _cfg\n\ndef merge_dict(a, b):\n if b is None:\n return a\n import copy\n a = copy.deepcopy(a)\n for key in a:\n if key in b:\n if not isinstance(b[key], dict):\n a[key] = b[key]\n else:\n assert not isinstance(a[key], list)\n a[key] = merge_dict(a[key], b[key])\n for key in b:\n if key not in a:\n raise ValueError(\"Key is not in dict A!\")\n return a\n\ndef merge_lists(a, b):\n outs = []\n assert isinstance(a, list) and isinstance(b, list)\n for i in range(len(a)):\n assert isinstance(a[i], dict)\n x = a[i]\n if i < len(b):\n x = merge_dict(a[i], b[i])\n outs.append(x)\n return outs\n\ndef list_to_cfg(l):\n # convert dict in lists to CfgNode\n l_new = []\n for x in l:\n x_new = CfgNode(x)\n x_new.freeze()\n l_new.append(x_new)\n return l_new\n\n\ndef load_config(cfg_file_name=None):\n cfg = get_default_cfg()\n cfg.set_new_allowed(True)\n if cfg_file_name is not None:\n cfg.merge_from_file(os.path.join(get_src_dir(), cfg_file_name))\n cfg.freeze()\n return cfg","repo_name":"zhouxian/FluidLab","sub_path":"fluidlab/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"3"} +{"seq_id":"24360118788","text":"import tensorflow as tf\r\nimport numpy as np\r\nfrom TextRNN import TextRNN\r\nfrom data_util_zhihu import load_data_multilabel_new, create_voabulary, create_voabulary_label\r\nfrom tflearn.data_utils import pad_sequences # 这里使用的tflearn高级封装,可以进行数据的pad\r\nimport os\r\nfrom gensim.models.word2vec import Word2Vec\r\nfrom gensim.models import KeyedVectors # 使用加载二进制的保存文件\r\nimport pickle\r\n\r\nnum_classes = 1999\r\nlearning_rate = 0.01\r\nbatch_size = 128 # 计算机显存不够时调节小点\r\n# todo decay参数放弃使用\r\ndecay_steps = 12000\r\ndecay_rate = 0.9\r\nckpt_dir = 'text_rnn_checkpoint/'\r\nsequence_length = 100\r\nembed_size = 100\r\nis_training = True\r\nnum_epochs = 60\r\nvalidation_every = 1\r\nuse_embedding = True\r\ntraing_data_path = './data/train-zhihu4-only-title-all.txt'\r\nword2vec_model_path = './data/zhihu-word2vec-title-desc.bin-100.txt'\r\n\r\n# 1. load data(X:list of lint,y:int) 2.create seession 3.feed data 4.training (5.validation) (6.predict)\r\n\r\ndef main(_):\r\n # 1. load data\r\n if 1 == 1:\r\n # 1. 
get vocabulary of label.\r\n # trainX, trainY, testX, testY = None, None, None, None\r\n vocabulary_word2index, vocabulary_index2word = create_voabulary(simple='simple',word2vec_model_path=word2vec_model_path,name_scope='rnn')\r\n vocab_size = len(vocabulary_word2index)\r\n print('rnn_model.vocab_size:',vocab_size)\r\n vocabulary_word2index_label, vocabulary_index2word_label = create_voabulary_label(name_scope='rnn',voabulary_label=traing_data_path)\r\n train, test, _ = load_data_multilabel_new(vocabulary_word2index,vocabulary_word2index_label,multi_label_flag=False,traning_data_path=traing_data_path)\r\n trainX, trainY = train\r\n testX, testY = test\r\n\r\n # 2. data preprocessing.Sequence padding\r\n print('start padding & transform to one hot ...')\r\n trainX = pad_sequences(trainX,maxlen=sequence_length,value=0.0) # padding to max length\r\n testX = pad_sequences(testX,maxlen=sequence_length,value=0.0)\r\n\r\n print('trainX[0]:',trainX[0],)\r\n # convert labels to binary vector\r\n print('end padding & transform to one hot ...')\r\n\r\n # 2. create session\r\n config = tf.ConfigProto()\r\n config.gpu_options.allow_growth = True # 动态的分配gpu空间,需要多少占用多少\r\n with tf.Session(config=config) as sess:\r\n # instantiate mdoel\r\n textRNN = TextRNN(num_classes,learning_rate,batch_size,decay_steps,decay_rate,sequence_length,\r\n vocab_size,embed_size,is_training)\r\n saver = tf.train.Saver()\r\n if os.path.exists(ckpt_dir + 'checkpoint'):\r\n print('Restoring Variables from Checkpoint for rnn model.')\r\n saver.restore(sess,tf.train.latest_checkpoint(ckpt_dir)) # todo 怎么找到最近保存的文件的?\r\n else:\r\n print('Initializing Variables')\r\n sess.run(tf.global_variables_initializer())\r\n if use_embedding: # load pre_trained word embedding\r\n assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,textRNN,word2vec_model_path=word2vec_model_path)\r\n curr_epoch = sess.run(textRNN.epoch_step)\r\n\r\n # 3.feed data & training\r\n number_of_training_data = len(trainX)\r\n for epoch in range(curr_epoch,num_epochs):\r\n loss, acc, counter = 0.0, 0.0, 0\r\n for start, end in zip(range(0,number_of_training_data,batch_size),range(batch_size,number_of_training_data,batch_size)):\r\n if epoch == 0 and counter == 0:\r\n print('trainX[start:end]:',trainX[start:end])\r\n curr_loss, curr_acc, _ = sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.train_op],feed_dict={textRNN.input_x:trainX[start:end],textRNN.input_y:trainY[start:end],\r\n textRNN.dropout_keep_prob:1.0})\r\n loss, counter, acc = loss + curr_loss, counter + 1,acc + curr_acc\r\n if counter % 500 ==0:\r\n print('Epoch {} \\tBatch {}\\tTrain Loss:{:.3}\\tTrain Accuracy:{:.3}'.format(epoch,counter,loss/float(counter),acc/float(counter)))\r\n # epoch increament\r\n print('going to increament epoch counter ...')\r\n sess.run(textRNN.epoch_increament)\r\n # 4.validation\r\n print(epoch,validation_every,(epoch % validation_every == 0))\r\n if epoch % validation_every == 0:\r\n eval_loss, eval_acc = do_eval(sess,textRNN,testX,testY,batch_size,vocabulary_index2word_label)\r\n print('Epoch {} Validation Loss: {:.3} \\tValidation Accuracy:{:.3}'.format(epoch,eval_loss,eval_acc))\r\n # save model to checkpoint\r\n save_path = ckpt_dir + 'model.ckpt'\r\n # saver.save(sess,save_path,global_step=epoch)\r\n saver.save(sess,save_path,global_step=textRNN.global_step)\r\n # 5. 
test in testData and report accuracy\r\n test_loss,test_acc = do_eval(sess,textRNN,testX,testY,batch_size,vocabulary_index2word_label)\r\n pass\r\n\r\ndef assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,textRNN,word2vec_model_path=None):\r\n print('using pre-trained word embedding.started.word2vec_model_path:',word2vec_model_path)\r\n word2vec_model = KeyedVectors.load_word2vec_format(word2vec_model_path,binary=True)\r\n # word-id 字典的生成,但是对于gensim的设计特点可以直接使用字典模式\r\n word_embedding_2dlist = [[]] * vocab_size # create an empty embedding lsit\r\n word_embedding_2dlist[0] = np.zeros(embed_size)\r\n bound = np.sqrt(6.0) / np.sqrt(vocab_size) # todo ?\r\n count_exist = 0\r\n count_not_exist = 0\r\n for i in range(1,vocab_size): # loop each word\r\n word = vocabulary_index2word[i] # get a word\r\n embedding = None\r\n try:\r\n embedding = word2vec_model[word]\r\n except Exception:\r\n embedding = None\r\n if embedding is not None: # the word exist a emebdding\r\n word_embedding_2dlist[i] = embedding\r\n count_exist = count_exist + 1 # assign array to this word\r\n else: # no embedding for this word\r\n word_embedding_2dlist[i] = np.random.uniform(-bound,bound,embed_size)\r\n count_not_exist = count_not_exist + 1 # init a random value for the word\r\n word_embedding_final = np.array(word_embedding_2dlist) # convert to 2d array\r\n word_embedding = tf.constant(word_embedding_final,dtype=tf.float32) # convert to tensor\r\n t_assign_embedding = tf.assign(textRNN.Embedding,word_embedding)\r\n sess.run(t_assign_embedding)\r\n print('word. exits embedding: {} ;word not exist embedding:{}'.format(count_exist,count_not_exist))\r\n print('using pre-trained word embedding.ended...')\r\n\r\n\r\n# 在验证集上做验证,报告损失,精确度\r\ndef do_eval(sess,textRNN,evalX,evalY,batch_size,vocabulary_index2word_label):\r\n number_examples = len(evalX)\r\n print(number_examples)\r\n eval_loss, eval_acc, eval_counter = 0.0,0.0,0\r\n for start, end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):\r\n curr_eval_loss, logits, curr_eval_acc = sess.run([textRNN.loss_val,textRNN.logits,textRNN.accuracy],\r\n feed_dict={textRNN.input_x:evalX[start:end],textRNN.input_y:evalY[start:end],\r\n textRNN.dropout_keep_prob:1})\r\n eval_loss, eval_acc, eval_counter = eval_loss + curr_eval_loss,eval_acc + curr_eval_acc, eval_counter + 1\r\n\r\n return eval_loss/float(eval_counter), eval_acc/float(eval_counter)\r\n\r\n\r\n# 从logits中提取前五\r\ndef get_label_using_logits(logits,vocabulary_index2word_label,top_number=1):\r\n print('get_label_using_logits.logits:',logits) # 1-d array:array([-5.69036102, -8.54903221, -5.63954401, ..., -5.83969498,-5.84496021, -6.13911009], dtype=float32)\r\n index_list = np.argsort(logits)[-top_number]\r\n index_list = index_list[::-1]\r\n return index_list\r\n\r\n# 统计预测的准确率\r\ndef calculate_accuracy(labels_predicted,labels,eval_counter):\r\n label_nozero = []\r\n labels = list(labels)\r\n for index, label in enumerate(labels):\r\n if label > 0:\r\n label_nozero.append(index)\r\n if eval_counter < 2:\r\n print('labels_predicted: {} ; labels_nozero: {}'.format(labels_predicted,label_nozero))\r\n count = 0\r\n label_dict = {x:x for x in label_nozero}\r\n for label_predict in labels_predicted:\r\n flag = label_dict.get(label_predict,None)\r\n if flag is not None:\r\n count = count + 1\r\n return count / len(labels)\r\n\r\nif __name__ == '__main__':\r\n 
main(1)","repo_name":"LiaoBoWen/MyProject","sub_path":"Leaning/Model-studying/6_TextRNN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8872,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"70092308241","text":"import time\nimport sys\nfrom sniffer import sniffer_main\nfrom ProfileClass import main_v2\n\n# Creates a dataset file \n# Two arguments passed command line, first dataset name and second dataset generation timer\ndef main():\n\n\t# Generate a live dataset with 5 min traffic\n\telapsed_time = 0\n\tstart_time = time.time()\n\tbyte_set = []\n\n\tprint('Capturing traffic and defining your profile ...')\n\twhile (elapsed_time < 1*10):\n\t\tbyte_set.append(sniffer_main(1,1))\n\t\telapsed_time = time.time() - start_time\n\t\n\t#print(*byte_set, sep = \", \") \n\tcreateDat('live.dat', byte_set)\n\n\t# Get evaluation\n\tresult = main_v2(normalizeDataset('live.dat'))\n\t#Block websites\n\thosts_path = \"/etc/hosts\"\n\tredirect = \"127.0.0.1\"\n\tif(result == 'Kid'):\n\t\twebsite_list = [\"www.bet.pt\",\"bet.pt\",\"www.ebay.com\",\"ebay.com\"]\n\telif(result == 'Teenager'):\n\t\twebsite_list = [\"www.bet.pt\",\"bet.pt\"]\n\telse:\n\t\twebsite_list = [\"www.youtube.com\",\"youtube.com\"]\n\twhile True:\n\n\t\twith open(hosts_path, 'r') as in_file:\n\t\t\tdata= in_file.readlines()\n\t\tin_file.close()\n\t\twith open(hosts_path, 'w') as out_file:\n\t\t\tout_file.writelines(data[:12])\n\t\t\tfor website in website_list:\n\t\t\t\tout_file.write(redirect + \" \" + website + \"\\n\")\n\n\t\tout_file.close()\n\t\tbreak\n\n# Creates a .dat file with the dataset generated\ndef createDat(name,data):\n\n\tf = open(name, \"w\")\n\tfor i in data:\n\t\tf.write(str(i[0])+' '+str(i[1])+'\\n')\n\t#nRows,nCols = data.shape;\n\t#print(nRows, nCols)\n\t#f.write(data)\n\tf.close()\n\n# Normalize dataset\ndef normalizeDataset(name):\n\t\n\t# Counts number of lines\n\twith open(name) as f:\n \t\tsize=sum(1 for _ in f)\n\t#print(size) \n\n\t# Replicates lines until the there are exactly 6000 lines\n\tf1 = open('live_normalize.dat', \"w\")\n\twhile(size < 6000):\n\t\twith open(name, \"r\") as f:\n\t\t\tfor line in f:\n\t\t\t\tif(size < 6000):\n\t\t\t\t\tf1.write(line)\n\t\t\t\t\tsize = size+1\n\n\tf1.close()\n\treturn 'live_normalize.dat'\n\nmain()\n","repo_name":"Clawerz/WhoIsWho","sub_path":"WhoIsWho.py","file_name":"WhoIsWho.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10234990577","text":"import numpy as np\nimport cv2\nimport time \nfrom matplotlib import pyplot as plt\nimport imutils\nfrom collections import deque\nimport argparse\nimport pandas as pd\nimport random\n\n\ncap = cv2.VideoCapture('cardi.MP4')\nap = argparse.ArgumentParser()\n\n#arguments to start with\nap.add_argument(\"-b\", \"--buffer\", type=int, default=5000,\n help=\"max buffer size\")\nargs = vars(ap.parse_args())\n\n# create background subtractor\nfgbg = cv2.createBackgroundSubtractorMOG2() \n\n# where the centroids will be stored\npts = deque(maxlen=args[\"buffer\"])\ncounter = 0\n(dX, dY) = (0, 0)\ndirection = \"\"\n\n#setting variables before the image processing\nframes_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(\n cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\nwidth = int(width)\nheight = int(height)\n\nprint(frames_count, fps, width, height)\n\n# creates a pandas data frame 
with the number of rows the same length as frame count\ndf = pd.DataFrame(index=range(int(frames_count)))\ndf.index.name = \"Frames\"\n\n\n\nframenumber = 0 # keeps track of current frame\ncarids = [] # blank list to add car ids\ntotalcars = 0 # keeps track of total cars\n\n\n#capturing data\nwhile(True):\n\n\n# Capture two frames\n ret, frame1 = cap.read() # first image\n \n time.sleep(1/25) # slight delay\n ret, frame2 = cap.read() # second image \n image = cv2.resize(frame1, (0, 0), None, 1,1) \n\n \n#getting the difference as the basis for movement\n diff = cv2.absdiff(frame1,frame2)\n mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n th =25\n imask = mask > th\n canvas = np.zeros_like(frame2, np.uint8)\n canvas[imask] = frame1[imask]\n mask = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)\n #canvas = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n # transforms\n fgmask = fgbg.apply(mask) \n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30,30)) \n #dilation = cv2.dilate(fgmask, kernel) \n \n closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)\n #opening = cv2.morphologyEx(closing, cv2.MORPH_ERODE, kernel)\n mask =closing\n \n# variable for contours\n ret,thresh = cv2.threshold(mask,0,255,0)\n\n# creates contours/blobs\n im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n# use convex hull to create polygon around contours\n hull = [cv2.convexHull(c) for c in contours]\n\n# draw contours\n cv2.drawContours(mask, hull, -1, (0, 255, 0), 2)\n \n cxx = np.zeros(len(contours))\n cyy = np.zeros(len(contours))\n\n# line created to stop counting contours, needed as cars in distance become one big contour\n lineypos = 400\n cv2.line(image, (-100, lineypos), (width, -120), (255, 0, 0), 3) # blue\n lineypos2 = -700\n cv2.line(image, (-150, lineypos2), (width, 700), (0, 255, 0), 3) # green\n cv2.line(image, (-150, -100), (width, 1800), (255, 255,0), 3)\n\n#creating centroids and boxes\n for j in range(len(contours)): \n\n if hierarchy[0, j, 3] == -1:\n cnt=contours[j]\n \n area = cv2.contourArea(cnt)\n \n if 500 < area < 50000:\n x,y,w,h = cv2.boundingRect(cnt)\n cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)\n#getting variables for the centroids\n cx = int(x + w/2)\n cy = int(y + h/2)\n cen = (cx,cy)\n cv2.circle(image, (cx,cy), 7, (255,0,0), -1)\n\n cv2.putText(image, str(cx) + \",\" + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,\n .5, (0, 0, 255), 1)\n \n\n cxx[j] = cx\n cyy[j] = cy\n pts.appendleft(cen)\n \n#this is for plotting the past centroid positions\n\n for i in np.arange(1, len(pts)):\n # if either of the tracked points are None, ignore\n # them\n if pts[i - 1] is None or pts[i] is None:\n continue\n \n # draw the centroid tracker\n cv2.circle(image, (pts[i - 1]), 2, (0,0,255), -1)\n\n\n#drawing hte current centroid\n \n cxx = cxx[cxx != 0]\n cyy = cyy[cyy != 0] \n minx_index2 = []\n miny_index2 = []\n maxrad = 30\n\n# if there are centroids in the specified area\n if len(cxx): \n if not carids: # if carids is empty\n \n # loops through all centroids\n for i in range(len(cxx)): \n carids.append(i) \n\n df[str(carids[i])] = \"\"\n df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]\n \n\n totalcars = carids[i] + 1 \n else:\n dx = np.zeros((len(cxx), len(carids))) \n dy = np.zeros((len(cyy), len(carids))) \n\n for i in range(len(cxx)): \n\n for j in range(len(carids)): \n\n # acquires centroid from previous frame for specific carid\n oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]\n\n # acquires current frame 
centroid that doesn't necessarily line up with previous frame centroid\n curcxcy = np.array([cxx[i], cyy[i]])\n\n if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows\n\n continue # continue to next carid\n\n else: # calculate centroid deltas to compare to current frame position later\n\n dx[i, j] = oldcxcy[0] - curcxcy[0]\n dy[i, j] = oldcxcy[1] - curcxcy[1]\n\n for j in range(len(carids)): # loops through all current car ids\n\n sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the deltas wrt to car ids\n\n # finds which index carid had the min difference and this is true index\n correctindextrue = np.argmin(np.abs(sumsum))\n minx_index = correctindextrue\n miny_index = correctindextrue\n\n # acquires delta values of the minimum deltas in order to check if it is within radius later on\n mindx = dx[minx_index, j]\n mindy = dy[miny_index, j]\n\n if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):\n # checks if minimum value is 0 and checks if all deltas are zero since this is empty set\n # delta could be zero if centroid didn't move\n\n continue # continue to next carid\n\n else:\n\n # if delta values are less than maximum radius then add that centroid to that specific carid\n if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:\n\n # adds centroid to corresponding previously existing carid\n df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]\n minx_index2.append(minx_index) # appends all the indices that were added to previous carids\n miny_index2.append(miny_index)\n\n for i in range(len(cxx)): # loops through all centroids\n\n # if centroid is not in the minindex list then another car needs to be added\n if i not in minx_index2 and miny_index2:\n\n df[str(totalcars)] = \"\" # create another column with total cars\n totalcars = totalcars + 1 # adds another total car the count\n t = totalcars - 1 # t is a placeholder to total cars\n carids.append(t) # append to list of car ids\n df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id\n\n elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:\n # checks if current centroid exists but previous centroid does not\n # new car to be added in case minx_index2 is empty\n\n df[str(totalcars)] = \"\" # create another column with total cars\n totalcars = totalcars + 1 # adds another total car the count\n t = totalcars - 1 # t is a placeholder to total cars\n carids.append(t) # append to list of car ids\n df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id\n\n # The section below labels the centroids on screen\n\n currentcars = 0 # current cars on screen\n currentcarsindex = [] # current cars on screen carid index\n\n for i in range(len(carids)): # loops through all carids\n\n if df.at[int(framenumber), str(carids[i])] != '':\n # checks the current frame to see which car ids are active\n # by checking in centroid exists on current frame for certain car id\n\n currentcars = currentcars + 1 # adds another to current cars on screen\n currentcarsindex.append(i) # adds car ids to current cars on screen\n\n for i in range(currentcars): # loops through all current car ids on screen\n\n # grabs centroid of certain carid for current frame\n curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]\n\n # grabs centroid of certain carid for previous frame\n oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]\n\n if curcent: # if there is a current 
centroid\n\n # On-screen text for current centroid\n #cv2.putText(image, \"Centroid\" + str(curcent[0]) + \",\" + str(curcent[1]),\n #(int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)\n\n cv2.putText(image, \"ID:\" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),\n cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)\n if oldcent: # checks if old centroid exists\n # adds radius box from previous centroid to current centroid for visualization\n xstart = oldcent[0] - maxrad\n ystart = oldcent[1] - maxrad\n xwidth = oldcent[0] + maxrad\n yheight = oldcent[1] + maxrad\n #cv2.rectangle(image, (int(xstart), int(ystart)), (int(xwidth), int(yheight)), (0, 125, 0), 1)\n\n \n\n\n\n\n\n framenumber = framenumber + 1\n cv2.imshow('Intersection Flow Prediction',image)\n if cv2.waitKey(1) & 0xFF == ord('q'): \n break\n\ncap.release()\ncv2.destroyAllWindows()\ndf.to_csv('grounddata1.csv', sep=',')\n","repo_name":"swenceslao/cardi-IntersectionFlowCounter","sub_path":"cartracker.py","file_name":"cartracker.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44672459351","text":"import time\nimport numpy as np\nimport pandas as pd\nimport keras\nimport random\n\nfrom scipy import sparse\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\n\nfrom keras.layers import Dropout, Flatten,Activation,Input,Embedding\nfrom keras.models import Model\nfrom keras.layers.merge import dot\nfrom keras.optimizers import Adam\nfrom keras.layers import Dense , merge\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nfrom sklearn.model_selection import train_test_split\n\n\nnp.random.seed(123)\n# load data\ndef loadData():\n ratings = pd.read_csv('./data/rating.csv', parse_dates=['timestamp'])\n movies = pd.read_csv('./data/movie.csv')\n merge_ratings_movies = pd.merge(movies, ratings, on='movieId', how='inner')\n del merge_ratings_movies[\"userId\"]\n movies_avg_ratings = merge_ratings_movies.groupby('movieId').mean()\n movies_ratings=pd.merge(movies, movies_avg_ratings, on='movieId', how='inner')\n return ratings, movies_ratings\n\n# for test convenience, only use num% of data\ndef cutData(num, ratings):\n rand_userIds = np.random.choice(ratings['userId'].unique(), size=int(len(ratings['userId'].unique())*num), replace=False)\n ratings = ratings.loc[ratings['userId'].isin(rand_userIds)]\n return ratings\n\ndef splitData(ratings):\n users = ratings.userId.unique()\n movies = ratings.movieId.unique()\n\n userid2idx = {o:i for i,o in enumerate(users)}\n movieid2idx = {o:i for i,o in enumerate(movies)}\n\n train_ratings, test_ratings= train_test_split(ratings, test_size=0.2, random_state=42)\n train_ratings_ori = train_ratings.copy()\n test_ratings_ori = test_ratings.copy()\n \n train_ratings['userId'] = train_ratings['userId'].apply(lambda x: userid2idx[x])\n train_ratings['movieId'] = train_ratings['movieId'].apply(lambda x: movieid2idx[x])\n test_ratings['userId'] = test_ratings['userId'].apply(lambda x: userid2idx[x])\n test_ratings['movieId'] = test_ratings['movieId'].apply(lambda x: movieid2idx[x])\n\n return train_ratings, test_ratings, train_ratings_ori, test_ratings_ori\n \ndef findTfidfMatrix(movies_ratings): \n tf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3),max_features=10000, min_df=0, stop_words='english')\n 
tfidf_matrix = tf.fit_transform(movies_ratings['genres'])\n return tfidf_matrix\n\ndef findSimilar(tfidf_matrix, movies_ratings):\n cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)\n results = {}\n\n for idx, row in movies_ratings.iterrows():\n similar_indices = cosine_similarities[idx].argsort()[:-100:-1]\n similar_items = [(cosine_similarities[idx][i], movies_ratings['movieId'][i]) for i in similar_indices]\n\n results[row['movieId']] = similar_items[1:]\n return results\n\ndef cbSuggest(item_id, amount, results, movies_ratings):\n rating_sum = 0\n recs = results[item_id]\n for rec in recs:\n if rec[1]!=item_id:\n index = movies_ratings[movies_ratings['movieId'] == rec[1]].index\n rating = movies_ratings.iloc[index]['rating']\n rating_sum += float(rating)\n amount -= 1\n if amount <= 0:\n break\n return rating_sum/5\n\ndef cbPredict(test_ratings_ori, results, movies_ratings):\n cb_pre_ratings = []\n for index, row in test_ratings_ori.iterrows():\n pre_rateing = cbSuggest(row['movieId'], 5, results, movies_ratings)\n cb_pre_ratings.append(pre_rateing)\n cb_pre_ratings = np.asarray(cb_pre_ratings)\n cb_pre_ratings_reshape = np.reshape(cb_pre_ratings, (cb_pre_ratings.shape[0], 1))\n return cb_pre_ratings_reshape\n\ndef embeddingNNModel(ratings):\n n_movies=len(ratings['movieId'].unique())\n n_users=len(ratings['userId'].unique())\n n_latent_factors=50 # hyperparamter to deal with. \n\n user_input=Input(shape=(1,),name='user_input',dtype='int64')\n user_embedding=Embedding(n_users,n_latent_factors,name='user_embedding')(user_input)\n user_vec =Flatten(name='FlattenUsers')(user_embedding)\n user_vec=Dropout(0.40)(user_vec)\n\n movie_input=Input(shape=(1,),name='movie_input',dtype='int64')\n movie_embedding=Embedding(n_movies,n_latent_factors,name='movie_embedding')(movie_input)\n movie_vec=Flatten(name='FlattenMovies')(movie_embedding)\n movie_vec=Dropout(0.40)(movie_vec)\n\n sim=dot([user_vec,movie_vec],name='Simalarity-Dot-Product',axes=1)\n nn_inp=Dense(96,activation='relu')(sim)\n nn_inp=Dropout(0.4)(nn_inp)\n nn_inp=Dense(1,activation='relu')(nn_inp)\n nn_model =keras.models.Model([user_input, movie_input],nn_inp)\n return nn_model\n\ndef cfFit(model, train_ratings, epochs, batch_size):\n model.compile(optimizer=Adam(lr=1e-4),loss='mse')\n model.fit([train_ratings.userId,train_ratings.movieId], train_ratings.rating, epochs=epochs, batch_size=batch_size, verbose=1)\n return model\n\ndef cfPredict(model, test_ratings):\n pre_ratings = model.predict([test_ratings.userId,test_ratings.movieId])\n return pre_ratings\n\ndef finalPredict1(cbPre, cfPre):\n finalPre = 0.25 * cbPre + 0.75 * cfPre\n return finalPre\n\ndef finalPredict2(cbPre, cfPre):\n finalPre = 0.1 * cbPre + 0.9 * cfPre\n return finalPre\n\ndef rmse(prediction, ground_truth):\n return sqrt(mean_squared_error(prediction, ground_truth))\n\nstart = time.time()\nratings, movies_ratings = loadData()\n#ratings_cut = cutData(0.3, ratings)\ntrain_ratings, test_ratings, train_ratings_ori, test_ratings_ori = splitData(ratings)\ntfidf_matrix = findTfidfMatrix(movies_ratings)\nresults = findSimilar(tfidf_matrix, movies_ratings)\ncb_pre_ratings = cbPredict(test_ratings_ori, results, movies_ratings)\nnn_model = embeddingNNModel(ratings)\nbatch_size = 512\nepochs = 10\nnew_model = cfFit(nn_model, train_ratings, epochs, batch_size)\ncf_pre_ratings = cfPredict(new_model, test_ratings)\nfinal_pre_ratings1 = finalPredict1(cb_pre_ratings, cf_pre_ratings)\nfinal_pre_ratings2 = finalPredict2(cb_pre_ratings, cf_pre_ratings)\nRMSE = 
rmse(cf_pre_ratings, test_ratings.rating)\nRMSE1 = rmse(final_pre_ratings1, test_ratings.rating)\nRMSE2 = rmse(final_pre_ratings2, test_ratings.rating)\nprint (f'Batch_size is: {batch_size}, epochs is: {epochs}, Neural Network RMSE is: {RMSE}')\nprint (f'cbPre is 0.25, cfPre is 0.75, Hybrid RMSE is: {RMSE1}')\nprint (f'cbPre is 0.1, cfPre is 0.9, Hybrid RMSE is: {RMSE2}')\nend = time.time()\nprint(f\"Runtime of the program is {end - start}\")","repo_name":"Charliewei6/cmpe255-team3-project","sub_path":"hybrid.py","file_name":"hybrid.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70942827922","text":"from treelib import Node, Tree\nfrom copy import copy\nfrom itertools import *\nfrom sys import getsizeof\nfrom datetime import datetime\n\n\nclass Task:\n def __init__(self, r: int):\n self.step = 1\n self.r = r\n self.d1 = 8\n self.d2 = 11\n self.d3 = 12\n self.d4 = 16\n self.d5 = 7\n self.d6 = 10\n self.d7 = 14\n self.d8 = 7\n\n self.c1 = 30\n self.c2 = 11\n self.c3 = 23\n self.c4 = 5\n\n self.start_date = datetime.now()\n\n def overload(self, count_of_workers: int, result: int, count: int):\n result += count * self.c1\n return count_of_workers, result, count\n\n def sleep(self, count_of_workers: int, result: int, count: int):\n result += count * self.c2\n return count_of_workers, result, count\n\n def hire(self, count_of_workers: int, result: int, count: int):\n result += count * self.c3\n count_of_workers += count\n return count_of_workers, result, count\n\n def unhire(self, count_of_workers: int, result: int, count: int):\n result += count * self.c4\n count_of_workers -= count\n return count_of_workers, result, count\n\n def __show(self, other: dict, day, total_nodes: int) -> None:\n print(' | '.join(\n [f'{item}: {other[item]} bytes' for item in list(\n other.keys())]) + f' | Day: {day} | Total nodes: {total_nodes} | Elapsed: {int((datetime.now() - self.start_date).total_seconds())} sec.',\n end='\\r')\n\n def __verbose(self, items):\n items = [x.__name__ for x in items]\n verbose_view = []\n if 'overload' in items:\n c = items.count(\"overload\")\n verbose_view.append(f'{c if c != 1 else \"\"}C1')\n if 'sleep' in items:\n c = items.count(\"sleep\")\n verbose_view.append(f'{c if c != 1 else \"\"}C2')\n if 'hire' in items:\n c = items.count(\"hire\")\n verbose_view.append(f'{c if c != 1 else \"\"}C3')\n if 'unhire' in items:\n c = items.count(\"unhire\")\n verbose_view.append(f'{c if c != 1 else \"\"}C4')\n\n verbose_view = '+'.join(verbose_view)\n if not verbose_view:\n verbose_view = ''\n\n return verbose_view\n\n def calculate(self, days_up_to=8):\n total_memory = 0\n tree = Tree()\n price = 0\n count_of_workers = self.r\n tree.create_node(\"Root\", f'd0_0', data={'cnt': count_of_workers, 'price': price}) # Day 0 or Root\n days = enumerate(([v for v in vars(self) if v.startswith('d')]))\n\n for di, day in days:\n print()\n need_workers = getattr(self, day)\n prev_day = f'd{di}'\n di += 1\n current_day = f'd{di}'\n\n if di == 1:\n methods = []\n\n if count_of_workers < need_workers:\n methods.append(self.overload)\n methods.append(self.hire)\n elif count_of_workers > need_workers:\n methods.append(self.sleep)\n methods.append(self.unhire)\n\n delta = abs(count_of_workers - need_workers)\n tries = product(methods, repeat=delta)\n tries = [[y.__name__ for y in x] for x in tries]\n tries = [tuple(sorted(x)) for x in tries]\n tries = list(set(tries))\n tries = [[getattr(self, y) for y 
in x] for x in tries]\n # tries = list(set([tuple(sorted([y.__name__ for y in x])) for x in product(methods, repeat=delta)]))\n\n for i, item in enumerate(tries):\n tmp_res = copy(price)\n tmp_cow = copy(count_of_workers)\n for x in item:\n tmp_cow, tmp_res, _ = x(tmp_cow, tmp_res, self.step)\n\n verbose_view = self.__verbose(item)\n\n tree.create_node(str(f'C:{tmp_cow}|P:{tmp_res}|V:{verbose_view}|N:{day}_{i}|R:{need_workers}'),\n f'{day}_{i}',\n data={'cnt': tmp_cow, 'price': tmp_res, 'total_price': tmp_res, 'vw': verbose_view}, parent='d0_0')\n\n self.__show({'Tree': getsizeof(tree.nodes), 'Steps': getsizeof(tries)}, current_day,\n len(tree.nodes))\n else:\n nodes_names = [x for x in tree.nodes if prev_day in x]\n epoch = 0\n for nd_i, nname in enumerate(nodes_names):\n pnode = tree.get_node(nname)\n pcnt = pnode.data['cnt']\n pprice = pnode.data['price']\n ptotal_price = pnode.data['total_price']\n\n delta = abs(pcnt - need_workers)\n\n methods = []\n\n if pcnt < need_workers:\n methods.append(self.overload)\n methods.append(self.hire)\n elif pcnt > need_workers:\n methods.append(self.sleep)\n methods.append(self.unhire)\n\n tries = product(methods, repeat=delta)\n tries = [[y.__name__ for y in x] for x in tries]\n tries = [tuple(sorted(x)) for x in tries]\n tries = list(set(tries))\n tries = [[getattr(self, y) for y in x] for x in tries]\n\n for i, item in enumerate(tries):\n tmp_res = copy(pprice)\n tmp_cow = copy(pcnt)\n tmp_tp = copy(ptotal_price)\n\n new_price = 0\n for x in item:\n tmp_cow, new_price, _ = x(tmp_cow, new_price, self.step)\n\n verbose_view = self.__verbose(item)\n\n tree.create_node(\n str(f'C:{tmp_cow}|P:{new_price}|V:{verbose_view}|N:{current_day}_{epoch}|R:{need_workers}'),\n f'{current_day}_{epoch}',\n data={'cnt': tmp_cow, 'price': new_price, 'total_price': new_price + tmp_tp, 'vw': verbose_view}, parent=nname)\n epoch += 1\n\n self.__show({'Tree': getsizeof(tree.nodes), 'Steps': getsizeof(tries)}, current_day,\n len(tree.nodes))\n if di == days_up_to:\n break\n\n print()\n print('Tree built. Resolving min path...')\n\n last_nodes = [x for x in tree.nodes if x.startswith(f'd{days_up_to}_')]\n vals = [tree.get_node(x).data['total_price'] for x in last_nodes]\n min_val = min(vals)\n min_name = last_nodes[vals.index(min_val)]\n node = tree.get_node(min_name)\n fullpath = [node.data['vw']]\n tmp = node\n while True:\n try:\n cur = tree.parent(tmp.identifier)\n fullpath.append(cur.data['vw'])\n tmp = cur\n except:\n break\n\n fullpath.reverse()\n\n print(f'Min price is: {min_val} | Tag: {f\"({node.tag})\"} | Name: {min_name} | FullPath={\" + \".join(fullpath)}')\n\n # tree.to_graphviz('result')\n\n # tree.show()\n\n\nt = Task(10)\nt.calculate(8)\n","repo_name":"icYFTL/IO_DP","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42980126550","text":"from typing import Tuple, List\n\nimport torch\n\nfrom base import BaseModule\n\n\ndef detach(device, X):\n if isinstance(X, torch.Tensor):\n return X.detach().to(device)\n elif isinstance(X, (Tuple, List)):\n return [x.detach().to(device) if isinstance(x, torch.Tensor) else x for x in X]\n else:\n raise RuntimeError(\n f\"Invalid type for X. 
received: {type(X)}, expected: Tensor, Tuple[Tensor], List[Tensor].\")\n\n\ndef param_in_function(function, *params):\n    from inspect import signature\n    parameters = signature(function).parameters\n    flag = True\n    for param in params:\n        flag = flag and (param in parameters)\n\n    return flag\n\n\ndef get_batch_size(X) -> int:\n    \"\"\"Get the size of a batch from the input tensor X\n\n    Args:\n        X: batch tensor given by the DataLoader\n\n    Returns:\n        Size of a batch\n\n    Raises:\n        RuntimeError if X is an invalid type, expected: Tensor,List,Tuple\n    \"\"\"\n    if isinstance(X, torch.Tensor):\n        return X.size(0)\n    elif isinstance(X, (List, Tuple)):\n        sizes = [elem.size(0) for elem in X if isinstance(elem, torch.Tensor)]\n        return sizes[0]\n    else:\n        raise RuntimeError(f\"Invalid type: {type(X)}\")\n\n\ndef assert_tensor_requires_grad(tensor, train: bool):\n    \"\"\"Assert that tensor contains or doesn't contain gradient based on whether the trainer is training or validating\n\n    Args:\n        tensor: output of the train_step or val_step\n        train: define if the Trainer is training or validating\n\n    Raises:\n        AssertionError if requires_grad != train\n    \"\"\"\n    if isinstance(tensor, torch.Tensor):\n        assert tensor.requires_grad == train\n    elif isinstance(tensor, (List, Tuple)):\n        for elem in tensor:\n            if isinstance(elem, torch.Tensor):\n                assert elem.requires_grad == train\n\n\ndef move_to_device(X, y, device):\n    \"\"\" Move input and target to device\n\n    Args:\n        X: batch input returned by the DataLoader\n        y: batch target returned by the DataLoader\n\n    Raises:\n        RuntimeError if X or y is an invalid type\n    \"\"\"\n    if isinstance(X, torch.Tensor):\n        X = X.to(device)\n    elif isinstance(X, Tuple):\n        X = tuple([elem.to(device) if isinstance(elem, torch.Tensor) else elem for elem in X])\n    elif isinstance(X, List):\n        X = [elem.to(device) if isinstance(elem, torch.Tensor) else elem for elem in X]\n    else:\n        raise RuntimeError(f\"Invalid type: {type(X)}\")\n\n    if isinstance(y, torch.Tensor):\n        y = y.to(device)\n    elif isinstance(y, Tuple):\n        y = tuple([elem.to(device) if isinstance(elem, torch.Tensor) else elem for elem in y])\n    elif isinstance(y, List):\n        y = [elem.to(device) if isinstance(elem, torch.Tensor) else elem for elem in y]\n    else:\n        raise RuntimeError(f\"Invalid type: {type(y)}\")\n\n    return X, y\n\ndef forward(model : BaseModule, X):\n    \"\"\"Define the base forward\n\n    Args:\n        X: input\n\n    Returns:\n        output\n\n    Raises:\n        RuntimeError if X is an invalid type, expected: Tensor,List,Tuple\n    \"\"\"\n    if isinstance(X, torch.Tensor):\n        return model.train_step(X) if model.training else model.val_step(X)\n    elif isinstance(X, (List, Tuple)):\n        return model.train_step(*X) if model.training else model.val_step(*X)\n    else:\n        raise RuntimeError(f\"Invalid type: {type(X)}\")","repo_name":"alessandrodicosola/SuperSampling","sub_path":"src/base/trainer/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"12615161763","text":"\"\"\"\nTheme aware pipeline template tags.\n\"\"\"\n\n\nfrom django import template\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\nfrom pipeline.templatetags.pipeline import JavascriptNode, StylesheetNode\nfrom pipeline.utils import guess_type\n\nfrom openedx.core.djangoapps.theming.helpers_static import get_static_file_url\n\nregister = template.Library() # pylint: disable=invalid-name\n\n\nclass ThemeStylesheetNode(StylesheetNode):\n    \"\"\"\n    
Overrides StyleSheetNode from django pipeline so that stylesheets are served based on the applied theme.\n    \"\"\"\n    def render_css(self, package, path):\n        \"\"\"\n        Override render_css from django-pipeline so that stylesheet urls are based on the applied theme\n        \"\"\"\n        template_name = package.template_name or \"pipeline/css.html\"\n        context = package.extra_context\n        context.update({\n            'type': guess_type(path, 'text/css'),\n            'url': mark_safe(get_static_file_url(path))\n        })\n        return render_to_string(template_name, context)\n\n\nclass ThemeJavascriptNode(JavascriptNode):\n    \"\"\"\n    Overrides JavascriptNode from django pipeline so that js files are served based on the applied theme.\n    \"\"\"\n    def render_js(self, package, path):\n        \"\"\"\n        Override render_js from django-pipeline so that js file urls are based on the applied theme\n        \"\"\"\n        template_name = package.template_name or \"pipeline/js.html\"\n        context = package.extra_context\n        context.update({\n            'type': guess_type(path, 'text/javascript'),\n            'url': mark_safe(get_static_file_url(path))\n        })\n        return render_to_string(template_name, context)\n\n\n@register.tag\ndef stylesheet(parser, token): # pylint: disable=unused-argument\n    \"\"\"\n    Template tag to serve stylesheets from django-pipeline. This definition uses the theming aware ThemeStyleSheetNode.\n    \"\"\"\n    try:\n        _, name = token.split_contents()\n    except ValueError:\n        raise template.TemplateSyntaxError( # lint-amnesty, pylint: disable=raise-missing-from\n            '%r requires exactly one argument: the name of a group in the PIPELINE[\"STYLESHEETS\"] setting' %\n            token.split_contents()[0]\n        )\n    return ThemeStylesheetNode(name)\n\n\n@register.tag\ndef javascript(parser, token): # pylint: disable=unused-argument\n    \"\"\"\n    Template tag to serve javascript from django-pipeline. 
This definition uses the theming aware ThemeJavascriptNode.\n \"\"\"\n try:\n _, name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError( # lint-amnesty, pylint: disable=raise-missing-from\n '%r requires exactly one argument: the name of a group in the PIPELINE[\"JAVASCRIPT\"] setting' %\n token.split_contents()[0]\n )\n return ThemeJavascriptNode(name)\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/theming/templatetags/theme_pipeline.py","file_name":"theme_pipeline.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"29218541237","text":"import requests\nfrom maps.b2bgeo.ya_courier.backend.test_lib.util import (\n api_path_with_company_id,\n env_get_request\n)\nfrom maps.b2bgeo.ya_courier.backend.test_lib.util_sharing import (\n UserKind,\n)\nfrom maps.b2bgeo.ya_courier.backend.test_lib.conftest import skip_if_remote\n\n\ndef _get_depots(system_env_with_db, company_id, caller=None):\n return env_get_request(\n system_env_with_db,\n api_path_with_company_id(system_env_with_db, 'depots', company_id=company_id),\n caller=caller\n )\n\n\ndef _check_shared_depots(sharing_env, user_kind, company_idx, depot_names):\n \"\"\"\n Testing that a user of company 0 (the user is specified by user_kind)\n can see exactly the depots specified by depot_names.\n\n depot_names is a list of names of depots of a company (specified by\n company_idx) expected to be visible by the user.\n depot_names equal to None means that the user is expected to have\n \"Forbidden access\" when requesting depots from the company company_idx.\n \"\"\"\n\n response = _get_depots(\n sharing_env['dbenv'],\n sharing_env['companies'][company_idx]['id'],\n caller=sharing_env['companies'][0]['users'][user_kind])\n\n if depot_names is None:\n assert response.status_code == requests.codes.forbidden\n else:\n assert response.status_code == requests.codes.ok\n depot_ids = [j['id'] for j in response.json()]\n\n assert len(set(depot_names)) == len(depot_names)\n expected_depot_ids = []\n for depot_name in depot_names:\n expected_depot_ids.append(sharing_env['companies'][company_idx]['depots'][depot_name]['id'])\n\n assert sorted(depot_ids) == sorted(expected_depot_ids)\n\n\n@skip_if_remote\ndef test_sharing_depots(env_with_default_sharing_setup):\n sharing_env = env_with_default_sharing_setup\n\n # Requesting depots of company 0 by users of company 0\n\n company_idx = 0\n # Admin can see a depot of his own company even if access to the depot is not granted to him\n _check_shared_depots(sharing_env, UserKind.admin, company_idx, depot_names=['', '0', '1', '2', '0;1', '0;2', '1;2', '0;1;2', '1,2', '0,1,2', '0;1,2'])\n # Manager cannot see a depot of his own company if access to the depot is not granted to him\n _check_shared_depots(sharing_env, UserKind.trusted_manager, company_idx, depot_names=[])\n # Manager cannot see a depot of his own company if access to the depot is not granted to him\n _check_shared_depots(sharing_env, UserKind.manager, company_idx, depot_names=[])\n # App user has no permission to request depots even of his own company\n _check_shared_depots(sharing_env, UserKind.app, company_idx, depot_names=None)\n\n # Requesting depots of company 1 by users of company 0\n\n company_idx = 1\n # Admin can see a depot of an other company if the depot of the company has orders shared\n # with admin's company\n _check_shared_depots(sharing_env, UserKind.admin, company_idx, 
depot_names=['0', '0;1', '0;2', '0;1;2', '0,1,2', '0;1,2'])\n # Manager can see a depot of an other company if the depot of the company has orders shared\n # with manager's company and access to the company is granted to him\n _check_shared_depots(sharing_env, UserKind.trusted_manager, company_idx, depot_names=['0', '0;1', '0;2', '0;1;2', '0,1,2', '0;1,2'])\n # Manager has no permission to request depots of an other company if access to the company is not granted to him\n _check_shared_depots(sharing_env, UserKind.manager, company_idx, depot_names=None)\n # App user has no permission to request depots of an other company\n _check_shared_depots(sharing_env, UserKind.app, company_idx, depot_names=None)\n\n # Requesting depots of company 2 by users of company 0\n\n company_idx = 2\n # Admin has no permission to request depots of an other company if the company has no orders shared with admin's company\n _check_shared_depots(sharing_env, UserKind.admin, company_idx, depot_names=None)\n # Manager has no permission to request depots of an other company if the company has no orders shared with manager's company\n _check_shared_depots(sharing_env, UserKind.trusted_manager, company_idx, depot_names=None)\n # Manager has no permission to request depots of an other company if the company has no orders shared with manager's company\n _check_shared_depots(sharing_env, UserKind.manager, company_idx, depot_names=None)\n # App user has no permission to request depots of an other company\n _check_shared_depots(sharing_env, UserKind.app, company_idx, depot_names=None)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/incremental_tests/test_sharing_depots.py","file_name":"test_sharing_depots.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13241635935","text":"#!/usr/bin/env python\n#\n# Author: Yang Gao \n#\nimport unittest\nimport numpy as np\nimport symtensor as st\n\ndef make_kpts(lattice, nmp):\n ks_each_axis = []\n for n in nmp:\n ks = np.arange(n, dtype=float) / n\n ks_each_axis.append(ks)\n arrays = [np.asarray(x) for x in ks_each_axis]\n nd = len(arrays)\n dims = [nd] + [len(x) for x in arrays]\n out = np.ndarray(dims)\n shape = [-1] + [1] * nd\n for i, arr in enumerate(arrays):\n out[i] = arr.reshape(shape[:nd-i])\n scaled_kpts = out.reshape(nd,-1).T\n gvec = get_reciprocal_vectors(lattice)\n kpts = np.dot(scaled_kpts, gvec)\n return kpts\n\ndef get_reciprocal_vectors(lattice):\n b = np.linalg.inv(lattice.T)\n return 2*np.pi * b\n\ndef get_kconserv(lattice, kpts):\n nkpts = kpts.shape[0]\n a = lattice / (2*np.pi)\n kconserv = np.zeros((nkpts,nkpts,nkpts), dtype=int)\n kvKLM = kpts[:,None,None,:] - kpts[:,None,:] + kpts\n for N, kvN in enumerate(kpts):\n kvKLMN = np.einsum('wx,klmx->wklm', a, kvKLM - kvN, optimize=True)\n kvKLMN_int = np.rint(kvKLMN)\n mask = np.einsum('wklm->klm', abs(kvKLMN - kvKLMN_int)) < 1e-9\n kconserv[mask] = N\n return kconserv\n\nlattice = np.eye(3)*5\nkpts = make_kpts(lattice, [2,2,1])\ngvec = get_reciprocal_vectors(lattice)\nnkpts, nocc, nvir = len(kpts), 3, 5\nkconserv = get_kconserv(lattice, kpts)\nthresh = 1e-6\nkshift=2\nsym_phys = ['++--', [kpts,]*4, None, gvec]\nsym_chem = ['+-+-', [kpts,]*4, None, gvec]\nsym_t1 = ['+-',[kpts,]*2, None, gvec]\nsym_eom = ['++-', [kpts,]*3, kpts[kshift], gvec]\nsym_s = ['+', [kpts], kpts[kshift], gvec]\nclass PBCNUMPYTest(unittest.TestCase):\n\n def test_222(self):\n A = st.random([nocc,nocc],sym_t1)\n B = 
st.random([nocc,nvir],sym_t1)\n C = st.random([nvir,nvir],sym_t1)\n A_dense, B_dense, C_dense = A.make_dense(), B.make_dense(), C.make_dense()\n\n X = st.einsum('ACac,ICic->IAia', C_dense, B_dense)\n X = st.einsum('IAia,IA->Iia', X, A.get_irrep_map())\n\n X1 = st.einsum('ac,ic->ia', C, B)\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffIAia', A_dense, B_dense)\n Y = st.einsum('IAia,IA->Iia', Y, A.get_irrep_map())\n Y1 = st.einsum('ki,ka->ia', A, B)\n diff = (Y1-Y).norm() / np.sqrt(Y.size)\n self.assertTrue(diffIJABijab', A_dense, B_dense)\n X = st.einsum('IJABijab,JB->IJAijab', X, A.get_irrep_map())\n\n X1 = st.einsum('ia,jb->ijab', A, B)\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffIAia', A_dense, B_dense)\n X = st.einsum('IAia,IA->Iia', X, A.get_irrep_map())\n\n X1 = st.einsum('kc,kica->ia', A, B)\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffklij', B, A)\n X = st.einsum('KCLJkclj,ICic->KLIJklij', B_dense, A_dense)\n X = st.einsum('KLIJklij,KLIJ->KLIklij', X, X1.get_irrep_map())\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffklij', B, A)\n Y = st.einsum('LCKIlcki,JCjc->KLIJklij', B_dense, A_dense)\n Y = st.einsum('KLIJklij,KLIJ->KLIklij', Y, Y1.get_irrep_map())\n diff = (Y1-Y).norm() / np.sqrt(Y.size)\n self.assertTrue(diffakic', C, A)\n Z = st.einsum('KCADkcad,IDid->AKICakic', C_dense, A_dense)\n Z = st.einsum('AKICakic,AKIC->AKIakic', Z, Z1.get_irrep_map())\n\n\n diff = (Z1-Z).norm() / np.sqrt(Z.size)\n self.assertTrue(diffki', A, B)\n X = st.einsum('KCLDkcld,ILCDilcd->KIki', A_dense, B_dense)\n X = st.einsum('KIki,KI->Kki', X, X1.get_irrep_map())\n\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffia', C, B)\n Y = st.einsum('KDACkdac,IKCDikcd->IAia', C_dense, B_dense)\n Y = st.einsum('IAia,IA->Iia', Y, Y1.get_irrep_map())\n\n\n diff = (Y1-Y).norm() / np.sqrt(Y.size)\n self.assertTrue(diffklij', B, C)\n X = st.einsum('KCLDkcld,IJCDijcd->KLIJklij', B_dense, C_dense)\n X = st.einsum('KLIJklij,KLIJ->KLIklij', X, X1.get_irrep_map())\n\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffakic', B, C)\n Y = st.einsum('LDKCldkc,ILDAilda->AKICakic', B_dense, C_dense)\n Y = st.einsum('AKICakic,AKIC->AKIakic', Y, Y1.get_irrep_map())\n diff = (Y1-Y).norm() / np.sqrt(Y.size)\n self.assertTrue(diffijab', D, C)\n Z = st.einsum('ABCDabcd,IJCDijcd->IJABijab', D_dense, C_dense)\n Z = st.einsum('IJABijab,IJAB->IJAijab', Z, Z1.get_irrep_map())\n\n\n diff = (Z1-Z).norm() / np.sqrt(Z.size)\n self.assertTrue(diffijb', klij, klb)\n X = st.einsum('KLIJklij,KLBklb->IJBijb', klij_dense, klb_dense)\n X = st.einsum('IJBijb,IJB->IJijb', X, X1.get_irrep_map())\n\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffijb', lbdj, ild)\n Y = st.einsum('LBDJlbdj,ILDild->IJBijb', lbdj_dense, ild_dense)\n Y = st.einsum('IJBijb,IJB->IJijb', Y, Y1.get_irrep_map())\n\n\n diff = (Y1-Y).norm() / np.sqrt(Y.size)\n self.assertTrue(diffc', lkdc, kld)\n X = st.einsum('LKDClkdc,KLDkld->Cc', lkdc_dense, kld_dense)\n X = st.einsum('Cc,C->c', X, X1.get_irrep_map())\n\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffc', kldc, kld)\n Y = st.einsum('KLDCkldc,KLDkld->Cc', kldc_dense, kld_dense)\n Y = st.einsum('Cc,C->c', Y, Y1.get_irrep_map())\n\n\n diff = (Y1-Y).norm() / np.sqrt(Y.size)\n self.assertTrue(diffi',ki, k)\n X = st.einsum('KIki,Kk->Ii', ki_dense, k_dense)\n X = st.einsum('Ii,I->i', X, X1.get_irrep_map())\n\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffi', ld, ild)\n X = 
st.einsum('LDld,ILDild->Ii', ld_dense, ild_dense)\n X = st.einsum('Ii,I->i', X, X1.get_irrep_map())\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffijb', c, ijcb)\n X = st.einsum('Cc,IJCBijcb->IJBijb', c_dense, ijcb_dense)\n X = st.einsum('IJBijb,IJB->IJijb',X,X1.get_irrep_map())\n\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffijb', bd, ijd)\n X = st.einsum('BDbd,IJDijd->IJBijb', bd_dense, ijd_dense)\n X = st.einsum('IJBijb,IJB->IJijb', X, X1.get_irrep_map())\n\n\n diff = (X1-X).norm() / np.sqrt(X.size)\n self.assertTrue(diffijb', ki, kjb)\n Y = st.einsum('KIki,KJBkjb->IJBijb', ki_dense, kjb_dense)\n Y = st.einsum('IJBijb,IJB->IJijb', Y, Y1.get_irrep_map())\n\n diff = (Y1-Y).norm() / np.sqrt(Y.size)\n self.assertTrue(diff', ijab_dense, ijba_dense)\n X1 = st.einsum('ijab,ijba->', ijab, ijba)\n\n diff = abs(X-X1)\n self.assertTrue(diff>> from moviepy.io.downloader import download_website\n >>>\n >>> download_website(\n ... \"http://localhost:8000/media/chaplin.mp4\",\n ... \"media/chaplin-copy.mp4\",\n ... )\n >>>\n \"\"\"\n if os.path.exists(filename) and not overwrite: # pragma: no cover\n return\n\n if \".\" in url:\n with urllib.request.urlopen(url) as req, open(filename, \"wb\") as f:\n shutil.copyfileobj(req, f, 128)\n\n else:\n try:\n subprocess_call([\"youtube-dl\", url, \"-o\", filename])\n except OSError as e:\n raise OSError(\n (\n \"Error running youtube-dl.\\n%sA possible reason is that\"\n \" youtube-dl is not installed on your computer. Install it \"\n \" with 'pip install youtube_dl'.\"\n )\n % ((e.message + \"\\n\" if hasattr(e, \"message\") else \"\"))\n )\n","repo_name":"Zulko/moviepy","sub_path":"moviepy/video/io/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":11113,"dataset":"github-code","pt":"3"} +{"seq_id":"4558584864","text":"import unittest\nfrom typing import List\n\n# WRONG. 
Doesn't work with negative values in some negative scenarios\n# [10, -1, -10, 1, 2, 3] would exclude 10 because discarding 10 - 1 - 10 sounds\n# like an improvement (0 if discard -11 if not)\nclass Solution:\n # gist of the solution:\n # move left-to-right to search the start\n # do the same right-to-left\n # The result we are looking for is a subarray, we search for the left index\n # and the right index of that subarray.\n # To find the left index of the max subarray we start with the left of the\n # entire array as the candidate left index of the subarray.\n # Then we move to the right looking for a left index that would result\n # in a better subarray than the current candidate.\n # To determine if an index is an improvement we track the sum of the values\n # between the existing best candidate and the current index\n # (the sum of the value we would exclude if we were to use the index) :\n # if the sum is negative it means it's worth it to exclude the value before\n # the current index from the result => we have found a better candidate.\n # We go through this search until we reached the end of the array.\n # The same process is then repeated with the right index.\n # at the end return the sum of the subarray\n\n def maxSubArray(self, nums: List[int]) -> int:\n assert len(nums) > 0\n\n # my approach doesn't work when the numbers are all non-positive :(\n max_element = max(nums)\n if max_element <= 0:\n return max_element\n\n # candidate for left of solution, index included in the solution\n candidate_left = 0\n # candidate for right of solution, index excluded from the solution\n candidate_right = len(nums)\n\n # look for the best candidate_left\n\n # i : index potentially better than the current candidate_left.\n i = candidate_left + 1\n # keep a sum of the values between candidate_left and\n # before the current index(i).\n # If it ever becomes negative it means it's better to not use\n # the values between candidate_left and the left of i :\n # i becomes the new candidate\n improvement = 0\n\n while i < candidate_right:\n improvement += nums[i-1]\n # changing the candidate or not if the improvement is zero won't\n # affect the return value (the sum) but it affects the size of\n # the subarray : move forward if 0 to have a smaller subarray\n # (faster for the final sum).\n if improvement <= 0:\n candidate_left = i\n improvement = 0\n i += 1\n\n # look for the best candidate_right\n i = candidate_right - 1\n improvement = 0\n while i > candidate_left:\n improvement += nums[i]\n if improvement <= 0:\n candidate_right = i\n improvement = 0\n i -= 1\n\n return sum(nums[candidate_left:candidate_right])\n\n\n# brute-force\n# test all possible subarrays\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n assert len(nums) > 0\n best_max = float('-inf') # best so far\n for from_ in range(0, len(nums)): # from included\n max_current_from = 0 # best seen so far for current from_\n for to in range(from_+1, len(nums)+1): # to excluded\n # instead of doing a sum(nums[from, to]) :\n # the best possible subarray for [from, to) is either\n # the new (last) element (as if throwing away previous start)\n # or\n # the previous best + this one (as if keep previous start)\n max_current_from = max(nums[to-1], max_current_from + nums[to-1])\n if max_current_from > best_max:\n best_max = max_current_from\n return best_max\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n assert len(nums) > 0\n best_max_so_far = float('-inf')\n max_sum_ending_with_x = 0\n for x in nums:\n 
# the max subarray ending with the current x is either\n # -only x\n # -the best subarray ending at the x of theprevious iteration + x\n max_sum_ending_with_x = max(x, max_sum_ending_with_x + x)\n best_max_so_far = max(best_max_so_far, max_sum_ending_with_x)\n return best_max_so_far\n\n\nclass TestSolution(unittest.TestCase):\n def setUp(self):\n self.s = Solution()\n\n def assertMaxSubArray(self,\n expected: int,\n nums: List[int]) -> None:\n self.assertEqual(expected, self.s.maxSubArray(nums))\n\n def test_examples(self):\n self.assertMaxSubArray(6, [-2, 1, -3, 4, -1, 2, 1, -5, 4])\n self.assertMaxSubArray(1, [1])\n self.assertMaxSubArray(23, [5, 4, -1, 7, 8])\n\n def test_negative(self):\n self.assertMaxSubArray(-1, [-1, -2, -3])\n self.assertMaxSubArray(-1, [-2, -1, -3])\n self.assertMaxSubArray(-1, [-3, -2, -1])\n\n def test_take_all_in_solution(self):\n self.assertMaxSubArray(3, [1, 2])\n self.assertMaxSubArray(6, [1, 2, 3])\n self.assertMaxSubArray(2, [1, 0, 0, 0, 1])\n\n def test_increasing_or_decreasing(self):\n self.assertMaxSubArray(15, [1, 2, 3, 4, 5])\n self.assertMaxSubArray(15, [-1, 0, 1, 2, 3, 4, 5])\n self.assertMaxSubArray(15, [5, 4, 3, 2, 1])\n self.assertMaxSubArray(15, [5, 4, 3, 2, 1, 0, -1])\n\n def test_increasing_then_decreasing(self):\n self.assertMaxSubArray(9, [1, 2, 3, 2, 1])\n self.assertMaxSubArray(8, [1, 2, 3, 2])\n self.assertMaxSubArray(4, [0, 1, 2, 1, 0])\n self.assertMaxSubArray(4, [-1, 0, 1, 2, 1, 0, -1])\n\n def test_decreasing_then_increasing(self):\n self.assertMaxSubArray(11, [3, 2, 1, 2, 3])\n self.assertMaxSubArray(6, [2, 1, 0, 1, 2])\n self.assertMaxSubArray(1, [1, 0, -1, 0, 1])\n self.assertMaxSubArray(4, [3, 1, 0, -1, 0, 1])\n self.assertMaxSubArray(4, [1, 0, -1, 0, 1, 3])\n\n def test_min_max_items_value(self):\n min_value = -10 ** 5\n max_value = 10 ** 5\n self.assertMaxSubArray(min_value, [min_value])\n self.assertMaxSubArray(max_value, [max_value])\n self.assertMaxSubArray(min_value, [min_value] * 2)\n self.assertMaxSubArray(max_value * 2, [max_value] * 2)\n self.assertMaxSubArray(max_value, [min_value, max_value])\n\n # @unittest.skip(\"slow test\")\n def test_max_size(self):\n self.assertMaxSubArray(0, [0] * (3 * 10 ** 4))\n\n def test_must_not_always_skip_negative(self):\n # correct to exclude -1\n self.assertMaxSubArray(0, [-1, -2, -3, 0, -4, -5])\n # error to exclude -1\n self.assertMaxSubArray(-1, [-1, -2, -3, -4, -5])\n # correct to exclude -5\n self.assertMaxSubArray(-1, [-5, -4, -3, -2, -1])\n\n # must not exclude a negative value if there only are\n # lower values after?\n\n # error to exclude 10 (and before)\n self.assertMaxSubArray(10, [10, -1, -10, 1, 2, 3])\n\n # must not exclude a set of values if the value after are\n # lower than the sum of values before (improvement)\n # reformulated :\n # there must be a value higher or equal to improvement\n\n # look on both left and right for each iteration?\n # move the one that gives the greater benefit?\n self.assertMaxSubArray(4, [-10, 0, 1, 2, 1, 0, -1])\n","repo_name":"tburette/leetcode","sub_path":"problems/53MaximumSubarray.py","file_name":"53MaximumSubarray.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23284441674","text":"import torch.nn as nn\nfrom typing import List, NamedTuple, Optional\n# typing library provides support for type hints, type aliases, new types, simplifying complex signatures\n# type checking verifies and enforces the constraints of the types, ensuring 
that the program is type-safe\nfrom torch import Tensor\n\n# So, this is an application: could have been done alternatively as:\n# class EncoderOut(NamedTuple):\n# encoder_out: Tensor\n# src_lengths: Tensor\n# Also, Optional is type hint for Union[..., None]\n\nEncoderOut = NamedTuple(\n \"EncoderOut\",\n [\n (\"encoder_out\", Tensor), # T x B x C\n (\"encoder_padding_mask\", Tensor), # B x T\n (\"encoder_embedding\", Tensor), # B x T x C\n (\"encoder_states\", Optional[List[Tensor]]), # List[T x B x C]\n (\"src_tokens\", Optional[Tensor]), # B x T\n (\"src_lengths\", Optional[Tensor]), # B x 1\n ],\n)\n\n# This is the encoder template\nclass FairseqEncoder(nn.Module):\n \"\"\"Base class for encoders.\"\"\"\n \n # The constructor takes in a dictionary\n def __init__(self, dictionary):\n super().__init__()\n self.dictionary = dictionary\n\n # Forward takesn in source tokens and source lengths\n def forward(self, src_tokens, src_lengths=None, **kwargs):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): lengths of each source sentence of shape\n `(batch)`\n \"\"\"\n raise NotImplementedError\n\n # reorder the forward method's output\n def reorder_encoder_out(self, encoder_out, new_order):\n \"\"\"\n Reorder encoder output according to `new_order`.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n `encoder_out` rearranged according to `new_order`\n \"\"\"\n raise NotImplementedError\n\n # Okay, why is this here: 1 milion is the maximum length\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return 1e6 # an arbitrary large number\n\n # Don't know what this does: why is this required?\n def upgrade_state_dict(self, state_dict):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n return state_dict\n","repo_name":"vyraun/long-tailed","sub_path":"fairseq/models/fairseq_encoder.py","file_name":"fairseq_encoder.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"2661744884","text":"#Author:Huangliang \n#Time:2018/5/25\nimport tkinter as tk\nfrom tkinter.filedialog import *\nimport tkinter.messagebox\nimport Final_Fantasy_Driver as FFD\nimport demo_training as DT\nimport threading\n\nwindow = tk.Tk()\nwindow.title('Pedestrians Detector')\nwindow.geometry('300x210')\n\ndef tick():\n import winsound\n winsound.PlaySound('./prompt_tone/3.wav', winsound.SND_ASYNC)\ndef tick1():\n import winsound\n winsound.PlaySound('./prompt_tone/1.wav', winsound.SND_ASYNC)\ndef Choose():\n tick()\n choose = var.get()\n if choose == 1:\n DEMOTRAINING()\n elif choose == 2:\n VIDEODETECTION()\n\ndef DEMOTRAINING():\n window1 = tk.Toplevel()\n window1.title('设置路径')\n window1.geometry('400x150')\n path1 = tk.StringVar()\n path2 = tk.StringVar()\n\n def selectPath1():\n tick1()\n fd = LoadFileDialog(window1) # 创建打开文件对话框\n filename = fd.go() # 显示打开文件对话框,并获取选择的文件名称\n print(filename)\n path1.set(filename) # 全局变量\n print(path1.get())\n\n def selectPath2():\n tick1()\n fd = LoadFileDialog(window1) # 创建打开文件对话框\n filename = fd.go() # 显示打开文件对话框,并获取选择的文件名称\n print(filename)\n path2.set(filename) # 全局变量\n print(path2.get())\n\n def training():\n tick()\n newfilename1 = path1.get()\n newfilename2 = path2.get()\n newfilename1 = newfilename1.replace('\\\\','/')\n newfilename2 = newfilename2.replace('\\\\','/')\n 
print(newfilename1)\n print(newfilename2)\n window1.destroy()\n\n # 显示训练日志\n window2 = tk.Toplevel()\n window2.title('训练中...')\n window2.geometry('400x600')\n\n\n try:\n # 创建子线程进行训练,不卡主线程去刷新UI\n new_thread_1 = threading.Thread(target=DT.main, args=(newfilename1, newfilename2, window2,))\n new_thread_1.setDaemon(True)\n new_thread_1.start()\n\n except:\n print(\"Error: unable to start training\")\n\n window2.mainloop()\n\n l0 = Label(window1, text=\"请保证样本集文件夹中存在.lst图片名列表文件\\n\").grid(row=0, column=1)\n l1 = Label(window1, text=\"POS目标路径:\").grid(row=1, column=0)\n et1 = Entry(window1, textvariable=path1).grid(row=1, column=1)\n bt1 = Button(window1, text=\"路径选择\", command=selectPath1).grid(row=1, column=2)\n l1 = Label(window1, text=\"NEG目标路径:\").grid(row=2, column=0)\n et2 = Entry(window1, textvariable = path2).grid(row = 2, column = 1)\n bt2 = Button(window1, text=\"路径选择\", command=selectPath2).grid(row=2, column=2)\n bt3 = Button(window1, text=\" 确定 \", command=training).grid(row=3, column=2)\n\n window1.mainloop()\n\ndef VIDEODETECTION():\n window2 = tk.Toplevel()\n window2.title('设置路径')\n window2.geometry('290x105')\n path3 = tk.StringVar()\n def selectPath1():\n tick1()\n fd = LoadFileDialog(window2) # 创建打开文件对话框\n filename = fd.go() # 显示打开文件对话框,并获取选择的文件名称\n path3.set(filename) # 全局变量\n\n def detection():\n tick()\n filename = path3.get()\n newfilename = filename.replace('\\\\', '/')\n print(newfilename)\n FFD.main(newfilename)\n tick()\n tk.messagebox.showinfo('提示', '视频处理完成')\n\n l0 = Label(window2, text=\"请选择视频文件的路径\\n\").grid(row=0, column=1)\n l1 = Label(window2, text=\"原始视频路径:\").grid(row=1, column=0)\n et1 = Entry(window2, textvariable=path3).grid(row=1, column=1)\n bt1 = Button(window2, text=\"路径选择\", command=selectPath1).grid(row=1, column=2)\n bt3 = Button(window2, text=\" 确定 \", command=detection).grid(row=2, column=2)\n window2.mainloop()\n\nvar = tk.IntVar()\nl1 = Label(window,text='\\n欢迎使用视频行人检测系统\\n\\n请选择您需要的功能\\n').pack()\nr1 = tk.Radiobutton(window, text='训练样本',variable=var, value=1)\nr1.pack()\nr2 = tk.Radiobutton(window, text='检测视频',variable=var, value=2)\nr2.pack()\nbt = tk.Button(window, text='确定', width=15, height=2, command=Choose)\nbt.pack()\n\nwindow.mainloop()\n","repo_name":"yellowei/AI","sub_path":"xingrenjianceguidesign/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34099231596","text":"t = int(input().strip())\n\nfor _ in range(t):\n\n n, c, q = map(int, input().split())\n s = input().strip()\n \n currLen = n\n\n seg = [(0, currLen, 0)] # [lowerbound, upperbound), start(l)\n\n def find_orgn(sg, x):\n for i in sg:\n if i[1] > x:\n return i[2] + x - i[0]\n\n for _ in range(c):\n l, r = map(lambda x: int(x) - 1, input().split())\n seg.append((currLen, currLen + r - l + 1, l))\n currLen += r - l + 1\n\n # print(seg)\n # for i in range(currLen):\n # k = i\n # while k >= n:\n # k = find_orgn(seg, k)\n # print(s[k], end='')\n # print()\n\n for _ in range(q):\n k = int(input().strip()) - 1\n while k >= n:\n k = find_orgn(seg, k)\n print(s[k])\n\n","repo_name":"xxu-mzwyt/competitive-programming-solutions","sub_path":"Codeforces/Codeforces Round #807 (Div. 
2)/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11545572107","text":"#Some of this code is adapted from Hannah Nesser.\n\nfrom datetime import datetime,timedelta\nfrom glob import glob\nimport pickle\nimport os.path\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nimport observation_operators as obsop\n\ndata_vars = ['time', 'start_time', 'midpoint_time', 'time_components', 'value',\n'latitude', 'longitude', 'altitude', 'assimilation_concerns',\n'obspack_id']\n\n#Make a filter function (output function) with start and end date and 2-value (min,max) tuples for lat and lon bounds\ndef make_filter_fxn(start_date,end_date,lat_bounds=None,lon_bounds=None):\n\t# Define a filtering function\n\tdef filter_obspack(data):\n\t# Define the variables to keep\n\t\t# Subset variables\n\t\tdata = data[data_vars]\n\t\t# Subset for time and location\n\t\tdata = data.where((data['time'].dt.date >= start_date.date()) & (data['time'].dt.date <= end_date.date()), drop=True)\n\t\tif lat_bounds is not None:\n\t\t\tdata = data.where((data['latitude'] >= lat_bounds[0]) & (data['latitude'] <= lat_bounds[1]), drop=True)\n\t\tif lon_bounds is not None:\n\t\t\tdata = data.where((data['longitude'] >= lon_bounds[0]) & (data['longitude'] <= lon_bounds[1]),drop=True)\n\t\t# Save out a platform variable\n\t\tplatform = data.attrs['dataset_project'].split('-')[0]\n\t\tdata['platform'] = xr.DataArray([platform]*len(data.obs), dims=('obs'))\n\t\t# Correct to local timezone if it's an in situ or surface observation\n\t\tif (len(data.obs) > 0) and (platform in ['surface', 'tower']):\n\t\t\tutc_conv = data.attrs['site_utc2lst']\n\t\t\tif int(utc_conv) != utc_conv:\n\t\t\t\tprint('UTC CONVERSION FACTOR IS NOT AN INTEGER : ', data.attrs['dataset_name'])\n\t\t\tdata['utc_conv'] = xr.DataArray(utc_conv*np.ones(len(data.obs)),dims=('obs'))\n\t\t\t# data['time_ltc'] = dc(data['time']) + np.timedelta64(int(utc_conv), 'h')\n\t\telse:\n\t\t\tdata['utc_conv'] = xr.DataArray(np.zeros(len(data.obs)), dims=('obs'))\n\t\t\t# data['time_ltc'] = dc(data['time'])\n\t\treturn data\n\treturn filter_obspack\n\n#Prepare raw obspack data for input into GEOSChem\n#Take raw obspack data from raw_obspack_dir --> process from start_date to end_date --> output to gc_obspack_dir\ndef prep_obspack(raw_obspack_dir,gc_obspack_dir,filename_format,start_date,end_date):\n\t# Get a list of the files\n\tfiles = glob(f'{raw_obspack_dir}/*.nc')\n\tfiles = [f for f in files if f.split('/')[-1][:11] != 'obspack_ch4']\n\tfiles.sort()\n\t#Make filter function\n\tfilter_obspack = make_filter_fxn(start_date,end_date)\n\t## Iterate through the files and see which are relevant to the domain\n\tfiltered_files = []\n\tfor i, f in enumerate(files):\n\t\top = xr.open_dataset(f)\n\t\t# Only use files in the needed time, latitude, and longitude\n\t\t# ranges\n\t\ttry:\n\t\t\top = filter_obspack(op)\n\t\texcept ValueError:\n\t\t\tcontinue\n\t\texcept KeyError:\n\t\t\tprint(f)\n\t\t# If the file is empty, continue through the loop\n\t\tif len(op.obs) == 0:\n\t\t\tcontinue\n\t\t# If the file still has observations, append it to conus_files\n\t\tfiltered_files.append(op)\n\t# Now combine all the files\n\tobspack = xr.concat(filtered_files,dim='obs')\n\t# Check for the sampling strategy\n\t## Get the time in hours of each sample\n\tobspack['obs_length'] = (obspack['time'] - obspack['start_time'])\n\tobspack['obs_length'] = 
obspack['obs_length'].dt.seconds*2/(60*60)\n\t## Convert that to the sampling strategy flag\n\t## ss = place holder for sampling strategy\n\tobspack['ss'] = xr.DataArray(999*np.ones(len(obspack.obs)), dims=('obs'))\n\t## Closest to 4 hours\n\tobspack['ss'] = obspack['ss'].where(obspack['obs_length'] > 5.25, 1)\n\t## Closest to 90 minutes\n\tobspack['ss'] = obspack['ss'].where(obspack['obs_length'] > 2.75, 3)\n\t## Closest to 1 hour\n\tobspack['ss'] = obspack['ss'].where(obspack['obs_length'] > 1.25, 2)\n\t## Closest to instantaneous\n\tobspack['ss'] = obspack['ss'].where(obspack['obs_length'] > 0.5, 4)\n\t## Cast to int\n\tobspack['ss'] = obspack['ss'].astype(int)\n\t# Rename and add attributes\n\tobspack = obspack.rename({'ss' : 'CT_sampling_strategy'})\n\tobspack['CT_sampling_strategy'].attrs = {'_FillValue' : -9,'long_name' : 'model sampling strategy','values' : 'How to sample model. 1=4-hour avg; 2=1-hour avg; 3=90-min avg; 4=instantaneous'}\n\t# Other clean up\n\tobspack.attrs = {}\n\tobspack = obspack.drop(['obs_length', 'start_time', 'midpoint_time'])\n\t#Drop data without sampling strategy\n\tinds_to_drop = np.where(obspack['CT_sampling_strategy'].values==999)[0]\n\tobspack=obspack.drop_isel(obs=inds_to_drop)\n\t# And iterate through the unique days\n\tname_str = filename_format.split('YYYYMMDD.nc')[0]\n\tdelta = end_date - start_date # returns timedelta\n\tfor i in range(delta.days + 1):\n\t\tday = start_date + timedelta(days=i)\n\t\t# Subset for that day\n\t\tdaily = obspack.where((obspack['time'].dt.month == day.month) & (obspack['time'].dt.day == day.day) & (obspack['time'].dt.year == day.year), drop=True)\n\t\t# If there is no data, continue\n\t\tif len(daily.obs) == 0:\n\t\t\tcontinue\n\t\t# Data type fix\n\t\tdaily['obspack_id'] = daily['obspack_id'].astype('S200')\n\t\tdaily['platform'] = daily['platform'].astype('S50')\n\t\t# Time fix\n\t\tdaily['time'].encoding['units'] = 'seconds since 1970-01-01 00:00:00 UTC'\n\t\tdaily['time'].encoding['calendar'] = 'proleptic_gregorian'\n\t\t# daily['time_ltc'].encoding['units'] = 'seconds since 1970-01-01 00:00:00 UTC'\n\t\t# daily['time_ltc'].encoding['calendar'] = 'proleptic_gregorian'\n\t\t# One last rename\n\t\t# daily = daily.rename({'value' : 'obs'})\n\t\t# Otherwise, save out\n\t\tprint(f'Saving {day.year:04d}-{day.month:02d}-{day.day:02d}')\n\t\tdaily.to_netcdf(f'{gc_obspack_dir}/{name_str}{day.year:04d}{day.month:02d}{day.day:02d}.nc',unlimited_dims=['obs'])\n\ndef filter_postprocess_obspack_from_file(data):\n\treturn data[['obspack_id', 'value', 'altitude', 'latitude', 'longitude', 'time', 'utc_conv', 'platform']]\n\nclass ObsPack_Translator(obsop.Observation_Translator):\n\tdef __init__(self,verbose=1):\n\t\tsuper().__init__(verbose)\n\t#Save dictionary of dates for later use\n\tdef initialReadDate(self):\n\t\tsourcedir = self.spc_config['gc_obspack_path']\n\t\tobs_list = glob(f'{sourcedir}/*.nc')\n\t\tobs_list.sort()\n\t\tobs_dates = [datetime.strptime(obs.split('/')[-1][-11:-3], \"%Y%m%d\") for obs in obs_list]\n\t\twith open(f\"{self.scratch}/obspack_dates.pickle\", 'wb') as handle:\n\t\t\tpickle.dump(obs_dates, handle)\n\t\treturn obs_dates\n\t#Timeperiod is two datetime objects\n\tdef globObs(self,species,timeperiod, interval=None):\n\t\tsourcedir = self.spc_config['gc_obspack_path']\n\t\tif os.path.exists(f\"{self.scratch}/obspack_dates.pickle\"):\n\t\t\twith open(f\"{self.scratch}/obspack_dates.pickle\", 'rb') as handle:\n\t\t\t\tobs_dates = pickle.load(handle)\n\t\telse:\n\t\t\tobs_dates = 
self.initialReadDate()\n\t\tobs_list = glob(f'{sourcedir}/*.nc')\n\t\tobs_list.sort()\n\t\tif interval:\n\t\t\tobs_list = [obs for obs,t in zip(obs_list,obs_dates) if (t>=timeperiod[0]) and (t=timeperiod[0]) and (t dict:\n arabic_w = u'\\u0621-\\u063A\\u0641-\\u064A' # ARABIC ALPHABET\n arabic_w += u'\\u067E\\u0686\\u0698\\u06A9\\u06AF\\u06CC\\u0654' # EXTENDED ARABIC LETTERS\n\n # preserve order where mapping across languages may be applicable, e.g. numbers or punctuation marks\n ranges = \\\n {'punctuation': {'fa': r'\"\\/\\؟!٪()،؛:.', 'en': r'\"\\/\\?!%(),;:.'},\n 'numerals': {'fa': '۰۱۲۳۴۵۶۷۸۹', 'en': '0123456789', 'ar': '٠١٢٣٤٥٦٧٨٩'},\n 'alphabet': {'fa': arabic_w, 'en': r'[a-zA-Z]'},\n 'diacritics': {'fa': u'\\u0610-\\u061A\\u064B-\\u065F'}}\n\n return ranges[boundary] if boundary else ranges\n\n def get_punctuation(self) -> str:\n return self.punctuation\n\n def get_numerals(self) -> str:\n return self.digits\n\n def get_alphabet(self) -> str:\n return self.alphabet\n\n def is_supported(self, lang: str) -> bool:\n return True if lang in self.supported_languages else False\n\n def trim_whitespace(self, text: str) -> str:\n return re.sub('(\\s){2,}', '\\g<1>', text)\n\n def filter_xml_tags(self, text: str) -> str:\n return re.sub(r'<.*?>', '', text)\n\n def filter_url(self, text: str, rep: str = ''):\n return re.sub(r'https?:\\/\\/\\S+\\/?', rep, text)\n\n def localize_punc(self, text: str, sep: str = ' ') -> str:\n out_charset = self.get_punctuation()\n punct_marks = self.get_unicode_range('punctuation')\n # a list of punctuation marks not used by the current locale\n in_charsets = [punct_marks[lang] for lang in punct_marks if lang != self.locale]\n\n for i in range(len(in_charsets)):\n tbl = str.maketrans(in_charsets[i], out_charset)\n text = text.translate(tbl)\n\n if sep:\n text = re.sub('(?', text)\n text = re.sub('([' + out_charset + ']){1,3}(?!'+ sep +')', '\\g<0>' + sep, text)\n\n return text\n\n def localize_digits(self, text: str) -> str:\n out_charset = self.get_numerals()\n numerals = self.get_unicode_range('numerals')\n\n # a list of digit characters not used by the current locale\n in_charsets = [numerals[lang] for lang in numerals if lang != self.locale]\n\n for i in range(len(in_charsets)):\n tbl = str.maketrans(in_charsets[i], out_charset)\n text = text.translate(tbl)\n\n return text\n\n\nclass Persianizer(Normalizer):\n\n def localize_punc(self, text: str, sep: str = ' '):\n tbl = str.maketrans('«»', '\"\"')\n text = text.translate(tbl)\n return super(Persianizer, self).localize_punc(text)\n\n @staticmethod\n def get_affixes(affix_type: str = 'all') -> dict:\n affixes = \\\n {'suffix_INFL': ['های*', 'ها ای', 'ای+', 'ی+', 'مان', 'تان', 'شان', 'تری?', 'ترین',\n 'هایم', 'هایت', 'هایش', 'هایمان', 'هایتان', 'هایشان', 'ام', 'ات', 'اش'],\n 'suffix_LEX': ['ای', 'اید', 'ام', 'ایم', 'اند', 'جاتی?', 'آوری?', 'نشینی?', 'کننده', 'کنندگی', 'کنندگان',\n 'پاشی?', 'پوشی?', 'پوشانی?', 'شناسی?', 'شناسانی?', 'پذیری?', 'پذیرانی?', 'ناپذیری?',\n 'شکنی?', 'شکنانی?', 'فشانی?', 'سازی?', 'آلودی?', 'آمیزی?', 'زدای*', 'خوردگی', 'زدگی',\n 'انگیزی?', 'خیزی?', 'سوزی?', 'پراکنی', 'خوری', 'افکنی?', 'دانی?', 'گرفتگی', 'المللی?',\n 'پروری?', 'پریشی?', 'نویسی?', 'وار', 'واره', 'کارانی?', 'پژوهی?', 'سنجی?', 'بانی?',\n 'کنان', 'پردازی?', 'رسانی?', 'یابی?', 'پیما', 'گری?', 'گیری?', 'مندی?', 'ساعته',\n 'ور', 'اندازی?', 'مندی?', 'مندانی?'],\n 'prefix_INFL': ['ن?می'],\n 'prefix_LEX': ['نا', 'بی', 'فرا', 'سوء', 'غیر'],\n 'circum_LEX': ['هم\\s\\S+?ی']}\n\n if affix_type != 'all':\n return {key: value for key, 
value in affixes.items() if key.startswith(affix_type)}\n else:\n return affixes\n\n def normalize_affixation(self, text: str, affix_type: str = 'all', p_sep: str = '', s_sep: str = '') -> str:\n affix_type = [affix_type]\n types = affix_type if affix_type[0] != 'all' else ['prefix', 'suffix']\n affixes = {}\n for t in types:\n affixes.update(self.get_affixes(t))\n\n for affix_type, affix_list in affixes.items():\n if affix_type.startswith('prefix'):\n for prefix in affix_list:\n pattern = r'(?:^|(?<=\\W))({})\\W+'\n pattern = pattern.format(prefix)\n text = re.sub(pattern, '\\g<1>' + p_sep, text)\n elif affix_type.startswith('suffix'):\n for suffix in affix_list:\n pattern = r'\\W+({})(?=\\W)'\n pattern = pattern.format(suffix)\n text = re.sub(pattern, s_sep + '\\g<1>', text)\n elif affix_type.startswith('circum'):\n for circum in affix_list:\n old_words = re.findall(circum, text)\n for word in old_words:\n text = text.replace(word, re.sub(r'\\s', '', word))\n\n return text\n\n def filter_zwnj(self, text: str, replace: str = '') -> str:\n return text.replace('\\u200c', replace)\n\n def filter_diacritics(self, text: str) -> str:\n diacritics = self.get_unicode_range('diacritics')\n return re.sub('[{}]'.format(diacritics[self.locale]), '', text)\n\n def filter_foreign(self, text: str) -> str:\n # TODO: use class internal methods and attributes\n arabic = '\\s'\n arabic += '\\u060C\\u061B\\u061F\\u06D4' # ARABIC COMMA SEMICOLON QUESTION FULLSTOP\n arabic += '\\u064B-\\u0652' # ARABIC DIACRITICS\n arabic += '\\u0621-\\u063A\\u0641-\\u064A' # ARABIC ALPHABET\n arabic += '\\u067E\\u0686\\u0698\\u06A9\\u06AF\\u06CC\\u0654'\n arabic += '\\u0660-\\u0669\\u06F0-\\u06F9' # ARABIC DIGITS\n arabic += '\\u0640' # ARABIC TATWEEL\n\n non_arabic = '[^' + arabic + ']'\n return re.sub(non_arabic, '', text)\n\n def trim_whitespace(self, text: str) -> str:\n return super(Persianizer, self).trim_whitespace(text)\n\n def filter_tatvil(self, text: str) -> str:\n tatweel = '\\u0640'\n return text.replace(tatweel, '')\n\n def filter_yah_ezafe(self, text: str) -> str:\n pass\n\n def normalize_hamza(self, text: str) -> str:\n text = re.sub(r'(?<=['+'آاوی'+'])'+'ء'+'(?=[\\s])', '', text)\n text = re.sub(r'(? 
str:\n mapping = str.maketrans('يكة', 'یکه')\n return text.translate(mapping)\n\n def filter_nonsense(self, text: str, preserve: str = '') -> str:\n pass\n","repo_name":"sadeghieh/PersianNLP","sub_path":"normalizer.py","file_name":"normalizer.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"975053016","text":"import os\n\nfrom markupsafe import Markup\n\nfrom bitcaster.app import app\n\n\n@app.template_filter()\ndef checkmark(on_off):\n \"\"\"Convert a string to all caps.\"\"\"\n if on_off:\n value = ''\n else:\n value = ''\n return Markup(value)\n\n\n@app.context_processor\ndef globals():\n return dict(GOOGLE_ANALYTICS_CODE=os.environ.get('GOOGLE_ANALYTICS_CODE', None),\n )\n","repo_name":"bitcaster-io/bitcaster-io","sub_path":"src/bitcaster/template_utils.py","file_name":"template_utils.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33895805631","text":"# 치킨 배달\n\nfrom itertools import combinations\n \n# n, m = map(int, input().split())\n# board = [list(map(int, input().split())) for _ in range(n)]\n\n# n, m=5, 3\n# board=[[0, 0, 1, 0, 0],\n# [0, 0, 2, 0, 1],\n# [0, 1, 2, 0, 0],\n# [0, 0, 1, 0, 0],\n# [0, 0, 0, 0, 2]]\n \nn, m=5, 2\nboard=[[0, 2, 0, 1, 0],\n[1, 0, 1, 0, 0],\n[0, 0, 0, 0, 0],\n[2, 0, 0, 1, 1],\n[2, 2, 0, 1, 2]]\n\nhouse = []\nchicken = []\nfor i in range(n):\n for j in range(n):\n if board[i][j] == 1: house.append((i, j))\n elif board[i][j] == 2: chicken.append((i, j))\n \nminv = float('inf') #양의 무한대\nfor ch in combinations(chicken, m):\n #print(ch)\n sumv = 0\n for home in house:\n # sumv += min([abs(home[0]-i[0])+abs(home[1]-i[1]) for i in ch])\n dist=[]\n for i in ch:\n dist.append(abs(home[0]-i[0])+abs(home[1]-i[1]))\n sumv+=min(dist)\n if minv <= sumv: break # sumv가 minv보다 커지는순간 더 더할 필요가 없으므로 break\n if sumv < minv: minv = sumv\n \nprint(minv)","repo_name":"Greek-and-Roman-God/Athena","sub_path":"codingtest/week12/delivery_chicken.py","file_name":"delivery_chicken.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33550587263","text":"# pylint: disable=no-member, arguments-differ, redefined-builtin, missing-docstring, line-too-long, invalid-name\nimport torch\n\nfrom e3nn import o3\nfrom e3nn import rs\nfrom e3nn.networks import (\n GatedConvParityNetwork,\n GatedConvNetwork,\n ImageS2Network,\n S2ConvNetwork,\n S2ParityNetwork,\n)\n\n\ndef test_parity_network():\n torch.set_default_dtype(torch.float64)\n\n lmax = 3\n Rs = [(1, l, 1) for l in range(lmax + 1)]\n model = GatedConvParityNetwork(Rs, 4, Rs, lmax, feature_product=True)\n\n features = rs.randn(1, 4, Rs)\n geometry = torch.randn(1, 4, 3)\n\n output = model(features, geometry)\n\n angles = o3.rand_angles()\n D = rs.rep(Rs, *angles, 1)\n R = -o3.rot(*angles)\n ein = torch.einsum\n output2 = ein('ij,zaj->zai', D.T, model(ein('ij,zaj->zai', D, features), ein('ij,zaj->zai', R, geometry)))\n\n assert (output - output2).abs().max() < 1e-10 * output.abs().max()\n\n\ndef test_network():\n torch.set_default_dtype(torch.float64)\n\n lmax = 3\n Rs = [(1, l) for l in range(lmax + 1)]\n model = GatedConvNetwork(Rs, 4 * Rs, Rs, lmax, feature_product=True)\n\n features = rs.randn(1, 4, Rs)\n geometry = torch.randn(1, 4, 3)\n\n output = model(features, geometry)\n\n angles = o3.rand_angles()\n D = rs.rep(Rs, 
*angles)\n R = o3.rot(*angles)\n ein = torch.einsum\n output2 = ein('ij,zaj->zai', D.T, model(ein('ij,zaj->zai', D, features), ein('ij,zaj->zai', R, geometry)))\n\n assert (output - output2).abs().max() < 1e-10 * output.abs().max()\n\n\ndef test_image_network():\n torch.set_default_dtype(torch.float64)\n\n Rs = [0, 0, 3]\n\n model = ImageS2Network(\n Rs_in=Rs,\n mul=4,\n lmax=6,\n Rs_out=Rs,\n size=5,\n layers=3\n )\n\n image = rs.randn(1, 16, 16, 16, Rs)\n model(image)\n\n\ndef test_s2conv_network():\n torch.set_default_dtype(torch.float64)\n\n lmax = 3\n Rs = [(1, l, 1) for l in range(lmax + 1)]\n model = S2ConvNetwork(Rs, 4, Rs, lmax)\n\n features = rs.randn(1, 4, Rs)\n geometry = torch.randn(1, 4, 3)\n\n output = model(features, geometry)\n\n angles = o3.rand_angles()\n D = rs.rep(Rs, *angles, 1)\n R = -o3.rot(*angles)\n ein = torch.einsum\n output2 = ein('ij,zaj->zai', D.T, model(ein('ij,zaj->zai', D, features), ein('ij,zaj->zai', R, geometry)))\n\n assert (output - output2).abs().max() < 1e-10 * output.abs().max()\n\n\ndef test_equivariance_s2parity_network():\n torch.set_default_dtype(torch.float64)\n mul = 3\n Rs_in = [(mul, l, -1) for l in range(3 + 1)]\n Rs_out = [(mul, l, 1) for l in range(3 + 1)]\n\n net = S2ParityNetwork(Rs_in, mul, lmax=3, Rs_out=Rs_out)\n\n abc = o3.rand_angles()\n D_in = rs.rep(Rs_in, *abc, 1)\n D_out = rs.rep(Rs_out, *abc, 1)\n\n fea = rs.randn(10, Rs_in)\n\n x1 = torch.einsum(\"ij,zj->zi\", D_out, net(fea))\n x2 = net(torch.einsum(\"ij,zj->zi\", D_in, fea))\n assert (x1 - x2).norm() < 1e-3 * x1.norm()\n","repo_name":"drorlab/gert","sub_path":"e3nn/tests/networks_test.py","file_name":"networks_test.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"33968241881","text":"# encoding=utf-8\n# Author: Yu-Lun Chiang\n# Description: Test preprocess function\n\nimport logging\nimport pytest\n\nlogger = logging.getLogger(__name__)\n\n\ndef test__preprocess_exception1(extractor):\n with pytest.raises(ValueError) as excinfo:\n extractor._preprocess(123)\n assert (\n str(excinfo.value)\n == \"Text must be tokenized ! Expected text to be List[str], but got .\"\n )\n\n\ndef test__preprocess_exception2(extractor):\n with pytest.raises(ValueError) as excinfo:\n extractor._preprocess([\"輸入\", 123])\n assert (\n str(excinfo.value)\n == \"Text must be tokenized ! Expected text to be List[str], but got .\"\n )","repo_name":"allenyummy/KeyExtractor","sub_path":"tests/test__preprocess/test__preprocess_exception.py","file_name":"test__preprocess_exception.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"} +{"seq_id":"13755487809","text":"import sys\n\nfrom splitter_fun import find_all, split_data\n\nfiles_ext = ['TS3', 'TS4', 'TS5']\n# files_ext = ['TS5']\nnew_files_ends = ['_Ex.bin', '_Ey.bin', '_Hx.bin', '_Hy.bin', '_Hz.bin']\nfile_name = sys.argv[1]\n\nfor ext in files_ext:\n file = open(file_name + '.' 
+ ext, 'rb')\n data = file.read()\n\n new_file_suffix = ext.lower()\n marker = data[8:12]\n headers = [x-8 for x in find_all(data, marker)]\n\n result = split_data(data, headers, 32)\n\n for index, file_end in enumerate(new_files_ends):\n new_file = open(file_name+new_file_suffix+file_end, 'wb')\n new_file.write(result[index])\n new_file.close()\n\n file.close()\n\n","repo_name":"AltmanEA/lavr","sub_path":"signal-file-splitter/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12923032335","text":"import numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom data import *\n'''\nDiscriminative models for the ARA approach to Adversarial Classification\n'''\n\ndef create_logistic_regression_model(\n penalty='l1',\n dual=False,\n tol=1e-4,\n C=1.0,\n fit_intercept=True,\n intercept_scaling=1,\n random_state=None\n):\n '''create a logistic regression model\n\n penalty -- norm used in the penalization (string)\n dual -- dual or primal formulation (boolean)\n tol -- tolerance for stopping criteria\n C -- inverse of regulation strength (float)\n fit_intercept -- if a constant should be added to decion function (boolean)\n intercept_scaliing -- if True, instance vector x becomes [x, self.intercept_scaling] (float)\n class_weight -- Over/undersamples the samples of each class given weights (dict, optional)\n random_state -- Seed (int)\n tol -- tolerance (float)\n\n return the logistic regression model\n '''\n\n clf_LR = LogisticRegression(\n penalty=penalty,\n dual=dual,\n C=C,\n fit_intercept=fit_intercept,\n intercept_scaling=intercept_scaling,\n random_state=random_state)\n return clf_LR\n\n\ndef create_random_forest_model(\n n_estimators=100,\n criterion=\"gini\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=1e-7,\n bootstrap=True,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None\n ):\n '''create a random forest model\n\n n_estimators -- number of trees (integer),\n criterion -- measures the quality of a split (\"gini\",\"entropy\"),\n max_depth -- maximum depth of the tree (integer),\n min_samples_split -- minimum samples to split (integer),\n min_samples_leaf -- minimum samples to be a leaf node (integer),\n min_weight_fraction_leaf -- minimum weight fraction of the sum total of weights to be a leaf node (float),\n max_features -- number of features (integer,float,string),\n max_leaf_nodes -- best nodes are defined as relative reduction in impurity (integer),\n min_impurity_decrease -- when a node is split, this is the value of impurity boundary (integer),\n min_impurity_split -- threshold of stopping in tree growth (float),\n bootstrap -- whether bootstrap samples are used to build a tree (boolean),\n oob_score -- whether to use out-of-bag samples to estimate the generalization accuracy (boolean),\n n_jobs -- number of jobs in parallel (integer),\n random_state -- randomness of the bootstrapping of the samples (integer),\n verbose -- controls the verbosity when fitting and predicting (integer),\n warm_start -- with True reuse the solution of the previous call to fit otherwise fit a whole new forest (boolean),\n class_weight -- weights associated with classes (dict, list of dicts)\n\n 
'''\n clfRF = RandomForestClassifier(\n n_estimators=n_estimators,\n criterion=criterion,\n max_depth=max_depth,\n min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf,\n min_weight_fraction_leaf=min_weight_fraction_leaf,\n max_features=max_features,\n max_leaf_nodes=max_leaf_nodes,\n min_impurity_decrease=min_impurity_decrease,\n min_impurity_split=min_impurity_split,\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n class_weight=class_weight)\n return clfRF\n\n\nif __name__ == '__main__':\n X, y = get_spam_data(\"data/uciData.csv\")\n X_train, X_test, y_train, y_test = generate_train_test(X, y, q=0.3)\n #\n \n","repo_name":"roinaveiro/ACRA_2","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25665758792","text":"import random\n\n\n#crear un diccionario\nmi_dicccionario = {}\n\n#llenar el diccionario\nmi_dicccionario[\"Bolívar\"] = \"Cartagena\"\nmi_dicccionario[\"Santander\"] = \"Bucaramanga\"\nmi_dicccionario[\"Nariño\"] = \"Pasto\"\nmi_dicccionario[\"Risaralda\"] = \"Pereira\"\nmi_dicccionario[\"Cesar\"] = \"Valledupar\"\nmi_dicccionario[\"Boyacá\"] = \"Tunja\"\nmi_dicccionario[\"Tolima\"] = \"Ibagué\"\nmi_dicccionario[\"Quindío\"] = \"Armenia\"\n\n#crear el ciclo\nwhile True:\n print('Departamentos de colombia')\n for departamento in mi_dicccionario:\n print(departamento)\n \n \n #apartamento random\n departamento_azar = random.choice(list(mi_dicccionario.keys()))\n \n #peticion\n intentos = 0\n while intentos < 3:\n respuesta_user = input(f\"Ingrese la capital de {departamento_azar} (o escriba 'salir' para terminar la ejecucion )\" ).strip()\n if respuesta_user.lower() == 'salir':\n exit() #salir del programa\n \n elif respuesta_user == mi_dicccionario[departamento_azar]:\n print(\"Correcto!\")\n break #salir del ciclo si a cierta\n else:\n print(\"Respuesta incorrecta. INTENTA OTRA VEZ\")\n print(\"La primera letra es con Mayuscula\")\n intentos +=1\n else:\n print(\"Has pertido tus tres oportunidades. 
\")\n # Eliminar el departamento adivinado para no repetirlo\n del mi_dicccionario[departamento_azar]\n \n \n \n ","repo_name":"Stanlysalazar/jueves","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43227181691","text":"'''\nGiven s1, s2, s3, find whether s3 is formed by the interleaving of s1 and s2.\n\nFor example,\nGiven:\ns1 = \"aabcc\",\ns2 = \"dbbca\",\n\nWhen s3 = \"aadbbcbcac\", return true.\nWhen s3 = \"aadbbbaccc\", return false.\n'''\n\nfrom functools import wraps\n\ndef memo(func):\n cache = {}\n @wraps(func)\n def wrapper(*args):\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n wrapper._cache = cache\n return wrapper\n\n\nclass Solution(object):\n def isInterleave(self, s1, s2, s3):\n \"\"\"\n :type s1: str\n :type s2: str\n :type s3: str\n :rtype: bool\n \"\"\"\n l1, l2, l3 = map(lambda x: len(x), (s1, s2, s3))\n\n @memo\n def rec(i, j, k):\n if i == l1:\n return s2[j:] == s3[k:]\n elif j == l2:\n return s1[i:] == s3[k:]\n if s1[i] == s3[k] and rec(i + 1, j, k + 1):\n return True\n if s2[j] == s3[k] and rec(i, j + 1, k + 1):\n return True\n return False\n\n return rec(0, 0, 0)\n\n\nif __name__ == '__main__':\n s1, s2, s3 = \"abcd\", \"efg\", \"aefgbcd\"\n Solution().isInterleave(s1, s2, s3)\n","repo_name":"wufangjie/leetcode","sub_path":"097. Interleaving String.py","file_name":"097. Interleaving String.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"17200839895","text":"N = 4\n\ndef printSolution(board):\n for i in range(N):\n for j in range(N):\n print(board[i][j], end=\" \")\n print()\n\n\n\"\"\" A Optimized function to check if\na queen can be placed on board[row][col] \"\"\"\n\n\ndef isSafe(row, col, slashCode, backslashCode,\n rowLookup, slashCodeLookup,\n backslashCodeLookup):\n if (slashCodeLookup[slashCode[row][col]] or\n backslashCodeLookup[backslashCode[row][col]] or\n rowLookup[row]):\n return False\n return True\n\n\n\"\"\" A recursive utility function\nto solve N Queen problem \"\"\"\n\n\ndef solveNQueensUtil(board, col, slashCode, backslashCode,\n rowLookup, slashCodeLookup,\n backslashCodeLookup):\n \"\"\" base case: If all queens are\n placed then return True \"\"\"\n if (col >= N):\n return True\n for i in range(N):\n if (isSafe(i, col, slashCode, backslashCode,\n rowLookup, slashCodeLookup,\n backslashCodeLookup)):\n\n \"\"\" Place this queen in board[i][col] \"\"\"\n board[i][col] = 1\n rowLookup[i] = True\n slashCodeLookup[slashCode[i][col]] = True\n backslashCodeLookup[backslashCode[i][col]] = True\n\n \"\"\" recur to place rest of the queens \"\"\"\n if (solveNQueensUtil(board, col + 1,\n slashCode, backslashCode,\n rowLookup, slashCodeLookup,\n backslashCodeLookup)):\n return True\n\n \"\"\" If placing queen in board[i][col]\n\t\t\tdoesn't lead to a solution,then backtrack \"\"\"\n\n \"\"\" Remove queen from board[i][col] \"\"\"\n board[i][col] = 0\n rowLookup[i] = False\n slashCodeLookup[slashCode[i][col]] = False\n backslashCodeLookup[backslashCode[i][col]] = False\n\n \"\"\" If queen can not be place in any row in\n\tthis column col then return False \"\"\"\n return False\n\n\n\"\"\" This function solves the N Queen problem using\nBranch or Bound. It mainly uses solveNQueensUtil()to\nsolve the problem. 
It returns False if queens\ncannot be placed,otherwise return True or\nprints placement of queens in the form of 1s.\nPlease note that there may be more than one\nsolutions,this function prints one of the\nfeasible solutions.\"\"\"\n\n\ndef solveNQueens():\n board = [[0 for i in range(N)]\n for j in range(N)]\n\n # helper matrices\n slashCode = [[0 for i in range(N)]\n for j in range(N)]\n backslashCode = [[0 for i in range(N)]\n for j in range(N)]\n\n # arrays to tell us which rows are occupied\n rowLookup = [False] * N\n\n # keep two arrays to tell us\n # which diagonals are occupied\n x = 2 * N - 1\n slashCodeLookup = [False] * x\n backslashCodeLookup = [False] * x\n\n # initialize helper matrices\n for rr in range(N):\n for cc in range(N):\n slashCode[rr][cc] = rr + cc\n backslashCode[rr][cc] = rr - cc + N - 1\n\n if (solveNQueensUtil(board, 0, slashCode, backslashCode,\n rowLookup, slashCodeLookup,\n backslashCodeLookup) == False):\n print(\"Solution does not exist\")\n return False\n\n # solution found\n printSolution(board)\n return True\n\n\n# Driver Code\nsolveNQueens()\n\n# This code is contributed by SHUBHAMSINGH10\n","repo_name":"Avanti1980/course-alg","sub_path":"codes/nqueen-bb.py","file_name":"nqueen-bb.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"33759204668","text":"import logging, time\nfrom threading import Thread\nimport threading\n\nclass Hammer(object):\n '''\n This is an abstract class for hammering, normally used for unit tests only.\n '''\n logger = logging.getLogger('Hammer')\n\n HAMMERING_DELAY = 0.1\n\n def __init__(self, nattempts, nparallel=10):\n self.nattempts = nattempts\n self.nparallel = nparallel\n\n self.next_round = 0\n self.lock = threading.Lock() # this lock has to be acquired before use of 'next_round' attribute\n\n self.should_stop = False\n\n # launch as many threads as needed to reach desired parallelism, but don't exceed the number of attempts\n self.hammer_threads = []\n nthreads = self.nparallel if (nattempts < 0) or (self.nparallel < nattempts) else nattempts\n for hammer_id in range(nthreads):\n thread_name = 'Hammer-%d' % hammer_id\n thread = Thread(target=self.run, name=thread_name)\n thread.daemon = True\n self.hammer_threads.append(thread)\n\n def start(self):\n for thread in self.hammer_threads:\n thread.start()\n self.logger.debug(\"spawned thread %s\", thread)\n\n def run(self):\n '''\n This method is a target of multiple threads running in parallel.\n '''\n self.logger.debug(\"running, thread %s\", threading.currentThread())\n\n while True:\n # get ourselves a round id\n with self.lock:\n if self.should_stop:\n # quitting this thread because we were requested to\n self.logger.debug(\"exiting (should_stop is True), thread %s\", threading.currentThread())\n return\n elif (self.nattempts > 0) and (self.next_round >= self.nattempts):\n # quitting this thread because too many rounds are made already\n self.logger.debug(\"exiting (next_round %d > nattempts %d), thread %s\",\n self.next_round, self.nattempts,\n threading.currentThread())\n return\n else:\n this_nround = self.next_round\n self.next_round += 1\n\n self.logger.debug(\"invoking hammer(), thread %s\", threading.currentThread())\n self.hammer(this_nround)\n\n # wait a little while before repeating\n time.sleep(self.HAMMERING_DELAY)\n\n def hammer(self, round):\n '''\n This method can be overridden by subclasses to do something useful. 
Round parameter contains a sequence\n number of the invocation of this method.\n '''\n raise NotImplemented('subclasses must override this method')\n\n def stop(self):\n self.logger.debug(\"stopping %s\", self)\n self.should_stop = True\n\n def spawn_hammer_thread(self, nround):\n # connect to the peer, do something, disconnect\n try:\n self.logger.debug(\"start hammering %s (round %i)\", self.peer, nround)\n self.hammer(nround)\n self.logger.debug(\"stopped hammering %s (round %d)\", self.peer, nround)\n except Exception as ex:\n self.logger.error('error hammering %s (round %d): %s', self.peer, nround, ex)\n","repo_name":"abbbe/sslcaudit","sub_path":"sslcaudit/test/Hammer.py","file_name":"Hammer.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"3"} +{"seq_id":"70249362323","text":"# coding: utf-8\n\nfrom hashlib import md5\nimport misaka\n\nfrom ..utils.markdown_utils import ArticleRenderer\n\n\ndef get_hashed_value(string):\n '''\n @rtype: int value with length of 16\n '''\n return int(md5(string).hexdigest(), 16) % (10 ** 16)\n\n\ndef _dump_para(text):\n md = misaka.Markdown(\n # skip-html - 跳过原文中的 HTML 代码\n # hard-wrap - 每个 \\n 都渲染为
<br>\n        renderer=ArticleRenderer(flags=('skip-html', 'hard-wrap')),\n        # space-headers - 只将 # Title 转为 <h1>
\n # #Title 会保持原样\n extensions=('disable-indented-code', 'autolink', 'space-headers'))\n\n return {\n 'type': 'text',\n 'text': text,\n 'html': md(text)\n }\n","repo_name":"duohedianshuihao/share_blog_backend","sub_path":"arsenal/article/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23522401763","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nthe macros can help you to merge cells with value in each ones\r\nand add merged value to merged cells\r\n\r\n@author: volom\r\n\"\"\"\r\n\r\nimport os\r\nimport re\r\nimport win32com.client as win32com\r\nwin32c = win32com.constants\r\n\r\n\r\n\r\nexcel = win32com.gencache.EnsureDispatch('Excel.Application')\r\nexcel.Visible = True # delete in case of .exe transformation\r\nexcel.Application.EnableEvents = False\r\nexcel.Application.ScreenUpdating = True\r\nexcel.Application.DisplayAlerts = False\r\n\r\n# create variable with value in selected cells\r\nvalue_selection = excel.Selection.Value\r\n\r\n\r\n# merge cells\r\nexcel.Selection.Merge()\r\n\r\n\r\n# Add merged value to merged cells\r\n\r\nexcel.Selection.Value = ' '.join([str(j) for i in value_selection for j in i if j != None])\r\nexcel.Application.DisplayAlerts = True\r\n\r\n","repo_name":"volom/Office_Toolkit","sub_path":"merge_cell_text.py","file_name":"merge_cell_text.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33362593831","text":"import os\r\nclear = lambda: os.system('clear')\r\nclear()\r\n\r\nlogo = \"\"\" \r\n ,adPPYba, ,adPPYYba, ,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba, \r\na8\" \"\" \"\" `Y8 a8P_____88 I8[ \"\" \"\" `Y8 88P' \"Y8 \r\n8b ,adPPPPP88 8PP\"\"\"\"\"\"\" `\"Y8ba, ,adPPPPP88 88 \r\n\"8a, ,aa 88, ,88 \"8b, ,aa aa ]8I 88, ,88 88 \r\n `\"Ybbd8\"' `\"8bbdP\"Y8 `\"Ybbd8\"' `\"YbbdP\"' `\"8bbdP\"Y8 88 \r\n 88 88 \r\n \"\" 88 \r\n 88 \r\n ,adPPYba, 88 8b,dPPYba, 88,dPPYba, ,adPPYba, 8b,dPPYba, \r\na8\" \"\" 88 88P' \"8a 88P' \"8a a8P_____88 88P' \"Y8 \r\n8b 88 88 d8 88 88 8PP\"\"\"\"\"\"\" 88 \r\n\"8a, ,aa 88 88b, ,a8\" 88 88 \"8b, ,aa 88 \r\n `\"Ybbd8\"' 88 88`YbbdP\"' 88 88 `\"Ybbd8\"' 88 \r\n 88 \r\n 88 \r\n\"\"\"\r\n\r\n# Initialise alphabet list\r\nalphabet = list(\"abcdefghijklmnopqrstuvwxyz\")\r\n\r\ndef encrypt(message, num):\r\n encrypted_message = \"\"\r\n for c in message:\r\n # If its a letter in the alphabet, change it (leaving spaces and punctuation)\r\n if c in alphabet:\r\n # Get the new index based on shift amount input\r\n shift_index = alphabet.index(c) + num\r\n if shift_index > (len(alphabet) - 1):\r\n shift_index -= (len(alphabet) - 1)\r\n elif shift_index < 0:\r\n shift_index += (len(alphabet) - 1)\r\n \r\n # Get the encrypted character and add it to the message \r\n encrypted_message += alphabet[shift_index]\r\n \r\n # Else leave it be \r\n else:\r\n encrypted_message += c\r\n \r\n return encrypted_message\r\n\r\n \r\nwhile True:\r\n print(logo)\r\n choice = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\r\n\r\n if choice == \"encode\":\r\n message = list(input(\"\\nEnter your message: \\n\").lower())\r\n shift_num = int(input(\"\\nEnter an encryption number: \\n\"))\r\n print(\"\\nHere is your encrypted message:\\n\")\r\n print(encrypt(message, (shift_num % len(alphabet))))\r\n elif choice == \"decode\":\r\n message = list(input(\"\\nEnter message to decrypt: \\n\").lower())\r\n shift_num = 
int(input(\"\\nEnter a decryption number: \\n\"))\r\n print(\"\\nHere is your decrypted message:\\n\")\r\n print(encrypt(message, -(shift_num % len(alphabet))))\r\n else:\r\n print(\"Sorry, command not recognised, please try again.\")\r\n \r\n exit = input(\"\\nType 'yes' if you want to go again. Otherwise type 'no'.\\n\")\r\n if exit == \"no\":\r\n print(\"Goodbye\")\r\n break\r\n else:\r\n clear()\r\n \r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"heySanj/100-PYTHON","sub_path":"exercises/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72204968401","text":"\"\"\"\n- OK\n - Instabilidades\n\nvalores_coletados = [0, 1, 1, 1, 0, 0, 1, 1]\nresultado = 3\n\nvalores_coletados = [1, 1, 1, 1, 0, 0, 1, 1]\nresultado = 4\n\"\"\"\n\n\ndef max_time_server(array): # Complexidade o(n)\n max_time = 0\n current_time = 0\n\n for value in array:\n if value == 1:\n current_time += 1\n else:\n current_time = 0\n if current_time >= max_time:\n max_time = current_time\n\n return max_time\n\n\nprint(max_time_server([1, 1, 1, 1, 0, 0, 1, 1]))\n","repo_name":"Fedolfo/trybe-exercises","sub_path":"Ciência de Computação/bloco 37/dia 2/exercise/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"25497612106","text":"#236\n\ndef lowestCommonAncestor(root, p, q):\n # basic case\n if root is None:\n return None\n if root.val == p.val or root.val == q.val:\n return root\n\n # recursive\n left = lowestCommonAncestor(root.left, p, q)\n right = lowestCommonAncestor(root.right, p, q)\n\n # check condition\n if (left is None and right is None):\n return None\n elif (left is not None and right is not None):\n return root\n elif left is None:\n return right\n else:\n return left\n\n\n'''\nTC is O(n) , search all nodes\nSC is O(n) , will recursive all nodes, each take one space\n'''","repo_name":"YiLiang0821/LeetcodePractice","sub_path":"tree/Medium/Lowest Common Ancestor of a Binary Tree.py","file_name":"Lowest Common Ancestor of a Binary Tree.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12732612543","text":"from collections import OrderedDict\n\nwordDict = OrderedDict()\n\nfor _ in range(int(input())):\n word = input()\n wordDict[word] = wordDict.get(word, 0) + 1\n\nprint(len(wordDict))\nprint(*[n for word, n in wordDict.items()])","repo_name":"sesn/hackerrank","sub_path":"practice/word_order.py","file_name":"word_order.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42555964737","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 4 15:17:01 2019\n\n@author: NEIL_YU\n\"\"\"\n\ndef OwO():\n a=3\n print(a)\n \n \ndef main():\n \n a=1\n b=2\n c=3\n OwO()\n\nif __name__==\"__main__\":\n main()","repo_name":"MilkLiver/PythonPratice01","sub_path":"PythonPratice01/AllPython/anaconda/localTest01.py","file_name":"localTest01.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25497383456","text":"# 76\n# sliding wiondows\n# hash\nfrom collections import defaultdict\ndef minWindow(s, t):\n # edge case\n if t == '':\n return ''\n \n # set up hash map t\n 
countT = defaultdict(int)\n for i in t:\n countT[i] += 1\n need = len(countT)\n \n # prepare windows\n windows = defaultdict(int)\n have = 0\n res = [-1, -1]\n length = float('inf')\n start = 0\n\n for end in range(len(s)):\n char = s[end]\n windows[char] += 1\n\n # update have if meet condition\n if char in countT and windows[char] == countT[char]:\n have += 1\n \n # check have == need -> find substring\n while have == need:\n # update res is shorter\n if (end - start + 1) < length:\n res = [start, end]\n length = end - start + 1\n # pop out from start\n windows[s[start]] -= 1\n if s[start] in countT and windows[s[start]] < countT[s[start]]:\n have -= 1\n start += 1\n start, end = res\n return s[start:end+1] if length != float('inf') else ''\n\ns = \"bbaa\"\nt = \"aba\"\nprint(minWindow(s, t))\n\n'''\nTC is O(n)\n'''","repo_name":"YiLiang0821/LeetcodePractice","sub_path":"Array and Strings/Hard/Minimum Window Substring.py","file_name":"Minimum Window Substring.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21441022167","text":"# -*- coding: utf-8 -*-\n\"\"\"Plugins for zest.releaser for Diazo themes.\"\"\"\n\n# python imports\nfrom ConfigParser import ConfigParser\nfrom zest.releaser import utils\nimport logging\nimport os\nimport pkg_resources\nimport shutil\nimport tempfile\nimport zest.releaser.choose\nimport zipfile\n\n\nlogger = logging.getLogger(__name__)\n\nSETUP_CONFIG_FILE = 'setup.cfg'\nSECTION = 'ps.releaser'\nOPTION_ENABLED = 'diazo_export.enabled'\nOPTION_DIAZO_PATH = 'diazo_export.path'\nOPTION_TITLE_UPDATE = 'diazo_export.adjust_title'\n\n\ndef release_diazo(data):\n \"\"\"Release a diazo theme from a folder.\"\"\"\n if not os.path.exists(SETUP_CONFIG_FILE):\n return\n\n config = ConfigParser()\n config.read(SETUP_CONFIG_FILE)\n\n if not config.has_option(SECTION, OPTION_ENABLED):\n return\n\n try:\n enabled = config.getboolean(SECTION, OPTION_ENABLED)\n except ValueError:\n pass\n\n if not enabled:\n return\n\n if not config.has_option(SECTION, OPTION_DIAZO_PATH):\n return\n\n path = config.get(SECTION, OPTION_DIAZO_PATH)\n if path is None:\n return\n\n if not os.path.exists(path):\n logger.warning(\n 'Diazo path does not exist. 
We can not create a zip file.'\n )\n return\n\n if not utils.ask('Create a zip file of the Diazo Theme?', default=True):\n return\n\n package_name = data.get('name')\n tmp_folder = tempfile.mkdtemp()\n diazo_folder = os.path.join(tmp_folder, package_name)\n shutil.copytree(path, diazo_folder)\n\n manifest_file = os.path.join(diazo_folder, 'manifest.cfg')\n has_manifest = os.path.exists(manifest_file)\n if has_manifest and config.has_option(SECTION, OPTION_TITLE_UPDATE):\n try:\n config.getboolean(SECTION, OPTION_TITLE_UPDATE)\n except ValueError:\n pass\n else:\n if utils.ask(\n 'Add version number to the theme title in exported zip file?',\n default=True,\n ):\n manifest = ConfigParser()\n manifest.read(manifest_file)\n version = pkg_resources.get_distribution(package_name).version\n title = manifest.get('theme', 'title')\n manifest.set('theme', 'title', ' '.join([title, version]))\n with open(manifest_file, 'wb') as configfile:\n manifest.write(configfile)\n create_zipfile(tmp_folder, data.get('workingdir'), package_name)\n shutil.rmtree(tmp_folder)\n\n\ndef create_zipfile(src, dist, package_name):\n \"\"\"Creates a ZIP file \"\"\"\n # Work on the source root dir.\n os.chdir(src)\n\n # Prepare the zip file name\n filename = package_name + '.zip'\n\n # We need the full path.\n filename = os.path.join(dist, filename)\n logger.info('Creating zip file at: {0}'.format(filename))\n\n zf = zipfile.ZipFile(filename, 'w')\n for dirpath, dirnames, filenames in os.walk('./'):\n for name in filenames:\n path = os.path.normpath(os.path.join(dirpath, name))\n if os.path.isfile(path):\n zf.write(path, path)\n # Close file to write to disk.\n zf.close()\n\n\ndef main():\n vcs = zest.releaser.choose.version_control()\n data = {\n 'name': vcs.name,\n 'workingdir': os.getcwd(),\n }\n release_diazo(data)\n","repo_name":"propertyshelf/ps.releaser","sub_path":"src/ps/releaser/diazo.py","file_name":"diazo.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13800655591","text":"\nimport pandas as pd\nimport pickle\nimport os\nimport datetime\nimport numpy as np\nimport ast\nfrom gensim.models import KeyedVectors\nfrom medinify.scrapers.webmd_scraper import WebMDScraper\nfrom medinify.scrapers.drugs_scraper import DrugsScraper\nfrom medinify.scrapers.drugratingz_scraper import DrugRatingzScraper\nfrom medinify.scrapers.everydayhealth_scraper import EverydayHealthScraper\nfrom medinify.datasets.process.process import Processor\nfrom medinify import config\n\n\nclass Dataset:\n \"\"\"\n The Dataset class stores and processes data\n If also wraps around the functionality of the review scrapers for collecting review data\n\n Attributes:\n data_used: list of review data to collect (i.e., comment, rating, drug, date, etc.)\n data: DataFrame containing review data\n scraper: Which scraper to use for scraping functionality\n processor: The dataset's processor, defines how data is transformed into numeric representation\n start_timestamp: If collecting reviews, time when collection started\n end_timestamp: If collecting reviews, time when collection ended\n use_rating: whether or not to store rating data\n use_dates: whether or not to store date data\n use_drugs: whether or not to store drug name data\n use_user_ids: whether or not to store user id data\n use_urls: whether or not to store drug url data\n \"\"\"\n scraper = None\n\n def __init__(self, scraper=None,\n use_rating=True, use_dates=True,\n use_drugs=True, 
use_user_ids=False,\n use_urls=False, w2v_file=None, pos=None,\n pos_threshold=4.0, neg_threshold=2.0,\n num_classes=2, rating_type='effectiveness',\n data_representation='count'):\n self.data_used = []\n if scraper == 'WebMD':\n self.scraper = WebMDScraper(collect_ratings=use_rating, collect_dates=use_dates,\n collect_drugs=use_drugs, collect_user_ids=use_user_ids,\n collect_urls=use_urls)\n elif scraper == 'Drugs':\n self.scraper = DrugsScraper(collect_ratings=use_rating, collect_dates=use_dates,\n collect_drugs=use_drugs, collect_user_ids=use_user_ids,\n collect_urls=use_urls)\n elif scraper == 'DrugRatingz':\n self.scraper = DrugRatingzScraper(collect_ratings=use_rating, collect_dates=use_dates,\n collect_drugs=use_drugs, collect_user_ids=use_user_ids,\n collect_urls=use_urls)\n elif scraper == 'EverydayHealth':\n self.scraper = EverydayHealthScraper(collect_ratings=use_rating, collect_dates=use_dates,\n collect_drugs=use_drugs, collect_user_ids=use_user_ids,\n collect_urls=use_urls)\n\n if self.scraper:\n self.data_used = self.scraper.data_collected\n else:\n self.data_used.append('comment')\n if use_rating:\n self.data_used.append('rating')\n if use_dates:\n self.data_used.append('date')\n if use_drugs:\n self.data_used.append('drug')\n if use_user_ids:\n self.data_used.append('user id')\n if use_urls:\n self.data_used.append('url')\n\n self.start_timestamp = None\n self.end_timestamp = None\n self.data = pd.DataFrame(columns=self.data_used)\n self.processor = Processor()\n\n if w2v_file:\n wv = KeyedVectors.load_word2vec_format(w2v_file)\n w2v = dict(zip(list(wv.vocab.keys()), wv.vectors))\n config.WORD_2_VEC = w2v\n if pos:\n config.POS = pos\n\n if not config.POS_THRESHOLD:\n config.POS_THRESHOLD = pos_threshold\n if not config.NUM_CLASSES:\n config.NUM_CLASSES = num_classes\n if not config.NEG_THRESHOLD:\n config.NEG_THRESHOLD = neg_threshold\n if not config.RATING_TYPE:\n config.RATING_TYPE = rating_type\n if not config.DATA_REPRESENTATION:\n config.DATA_REPRESENTATION = data_representation\n\n def collect(self, url):\n \"\"\"\n Given a url, collects drug review data into Dataset\n :param url: drug reviews url\n \"\"\"\n assert self.scraper, \"In order to collect reviews, a scraper must be specified\"\n\n if not self.start_timestamp:\n self.start_timestamp = str(datetime.datetime.now())\n\n self.scraper.scrape(url)\n self.data = self.data.append(self.scraper.dataset, ignore_index=True)\n\n self.end_timestamp = str(datetime.datetime.now())\n\n def collect_from_drug_names(self, drug_names_file, output_file, start=0):\n \"\"\"\n Given a text file listing drug names, collects a dataset of reviews for those drugs\n :param drug_names_file: path to urls file\n :param output_file: where to output data\n :param start: from where in the urls file to start scraping\n \"\"\"\n assert self.scraper, \"In order to collect reviews, a scraper must be specified\"\n\n if not self.start_timestamp:\n self.start_timestamp = str(datetime.datetime.now())\n\n print('\\nCollecting urls...')\n self.scraper.get_urls(drug_names_file, 'medinify/scrapers/temp_urls_file.txt')\n print('\\nScraping urls...')\n self.scraper.scrape_urls('medinify/scrapers/temp_urls_file.txt', output_file=output_file, start=start)\n self.data = self.data.append(self.scraper.dataset, ignore_index=True)\n\n os.remove('medinify/scrapers/temp_urls_file.txt')\n print('Collected reviews.')\n\n self.end_timestamp = str(datetime.datetime.now())\n\n def collect_from_urls(self, urls_file, output_file, start=0):\n \"\"\"\n Given a file 
listing drug urls, collects review data into Dataset\n :param urls_file: path to file listing drug urls\n :param output_file: where to output data\n :param start: from where in the urls file to start scraping\n \"\"\"\n assert self.scraper, \"In order to collect reviews, a scraper must be specified\"\n\n if not self.start_timestamp:\n self.start_timestamp = str(datetime.datetime.now())\n\n print('\\nScraping urls...')\n self.scraper.scrape_urls(urls_file, output_file=output_file, start=start)\n self.data = self.data.append(self.scraper.dataset, ignore_index=True)\n\n self.end_timestamp = str(datetime.datetime.now())\n\n def write_file(self, output_file, write_comments=True,\n write_ratings=True, write_date=True,\n write_drugs=True, write_user_ids=False,\n write_urls=False):\n \"\"\"\n Write csv file containing data\n :param output_file: csv output file path\n :param write_comments: whether or not to write comments to csv file\n :param write_ratings: whether or not to write ratings to csv file\n :param write_date: whether or not to write dates to csv file\n :param write_drugs: whether or not to write drug names to csv file\n :param write_urls: whether or not to write urls to csv file\n :param write_user_ids: whether or not to write urls to csv file\n \"\"\"\n columns = []\n if write_comments:\n columns.append('comment')\n if write_ratings:\n columns.append('rating')\n if write_date:\n columns.append('date')\n if write_drugs:\n columns.append('drug')\n if write_user_ids:\n columns.append('user id')\n if write_urls:\n columns.append('url')\n self.remove_empty_comments()\n self.remove_duplicate_comments()\n self.remove_float_comments()\n self.data.to_csv(output_file, columns=columns, index=False)\n\n def save_data(self, output_file):\n \"\"\"\n Saves Dataset in compressed pickle file\n :param output_file: path to output pickle file\n \"\"\"\n with open(output_file, 'wb') as pkl:\n pickle.dump(self.data, pkl, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.start_timestamp, pkl, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.end_timestamp, pkl, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.processor, pkl, protocol=pickle.HIGHEST_PROTOCOL)\n\n def load_data(self, pickle_file):\n \"\"\"\n Loads dataset from compressed pickle file\n :param pickle_file: path to saved pickle file\n \"\"\"\n with open(pickle_file, 'rb') as pkl:\n self.data = pickle.load(pkl)\n self.start_timestamp = pickle.load(pkl)\n self.end_timestamp = pickle.load(pkl)\n self.processor = pickle.load(pkl)\n self.remove_float_comments()\n\n def load_file(self, csv_file):\n \"\"\"\n Loads dataset from csv file\n :param csv_file: path to csv file to load\n \"\"\"\n self.data = pd.read_csv(csv_file)\n\n def remove_empty_comments(self):\n \"\"\"\n Removes empty comments from Dataset\n \"\"\"\n data_array = self.data.to_numpy()\n new_list = []\n num_empty = 0\n\n for review in data_array:\n if type(review[0]) != float:\n new_list.append(review)\n else:\n num_empty += 1\n\n new_array = np.asarray(new_list)\n self.data = pd.DataFrame(new_array, columns=self.data_used)\n print('Removed {} empty comment(s).'.format(num_empty))\n\n def remove_duplicate_comments(self):\n \"\"\"\n Removes duplicate comments from Dataset\n \"\"\"\n data_array = self.data.to_numpy()\n not_dupes = []\n not_dupes_reviews = []\n num_dupes = 0\n\n for review in data_array:\n if review[0] not in not_dupes_reviews:\n not_dupes_reviews.append(review[0])\n not_dupes.append(review)\n else:\n num_dupes += 1\n\n new_array = np.asarray(not_dupes)\n self.data 
= pd.DataFrame(new_array, columns=self.data_used)\n print('Removed {} duplicate comment(s).'.format(num_dupes))\n\n def remove_float_comments(self):\n \"\"\"\n Removes float comments from Dataset\n \"\"\"\n data_array = self.data.to_numpy()\n not_floats = []\n num_floats = 0\n\n for review in data_array:\n if type(review[0]) == str:\n not_floats.append(review)\n else:\n num_floats += 1\n\n new_array = np.asarray(not_floats)\n self.data = pd.DataFrame(new_array, columns=self.data_used)\n print('Removed {} float comment(s).'.format(num_floats))\n\n def print_stats(self):\n \"\"\"\n Calculates and prints data distribution statistics\n \"\"\"\n ratings = self.data['rating'].to_numpy()\n num_reviews = len(ratings)\n\n if type(ratings[0]) == str and ratings[0][0] == '{':\n ratings = [ast.literal_eval(x) for x in ratings]\n rating_types = list(ratings[0].keys())\n ratings_sets = {rating_type: [] for rating_type in rating_types}\n data = {rating_type: {} for rating_type in rating_types}\n for rating in ratings:\n for rating_type in rating_types:\n ratings_sets[rating_type].append(float(rating[rating_type]))\n for rating_type in rating_types:\n ratings_sets[rating_type] = np.asarray(ratings_sets[rating_type])\n data[rating_type]['range'] = (np.amin(ratings_sets[rating_type]),\n np.amax(ratings_sets[rating_type]))\n data[rating_type]['num_pos'] = len([x for x in ratings_sets[rating_type]\n if x >= config.POS_THRESHOLD])\n data[rating_type]['num_neg'] = len([x for x in ratings_sets[rating_type]\n if x <= config.NEG_THRESHOLD])\n data[rating_type]['num_neutral'] = len([x for x in ratings_sets[rating_type]\n if config.NEG_THRESHOLD < x < config.POS_THRESHOLD])\n\n print('\\nDataset Stats:\\n')\n print('Number of reviews with ratings: {}'.format(num_reviews))\n print('Types of ratings: {}'.format(rating_types))\n print('\\nRating type distributions:\\n')\n for rating_type in rating_types:\n print('{}:'.format(rating_type))\n print('\\tRating Range: {}'.format(data[rating_type]['range']))\n print('\\tPositive Reviews: {}'.format(data[rating_type]['num_pos']))\n print('\\tNegative Reviews: {}'.format(data[rating_type]['num_neg']))\n print('\\tNeutral Reviews: {}'.format(data[rating_type]['num_neutral']))\n print('\\tPos:Neg Ratio: {}\\n'.format(data[rating_type]['num_pos'] / data[rating_type]['num_neg']))\n\n elif type(ratings[0]) == np.float64:\n ratings = np.asarray([x for x in ratings if not np.isnan(x)])\n num_reviews = len(ratings)\n range_ = (np.amin(ratings), np.amax(ratings))\n num_pos = len([x for x in ratings if x >= config.POS_THRESHOLD])\n num_neg = len([x for x in ratings if x <= config.NEG_THRESHOLD])\n num_neutral = len([x for x in ratings if config.NEG_THRESHOLD < x < config.POS_THRESHOLD])\n\n print('\\nDataset Stats:\\n')\n print('Number of reviews with ratings: {}'.format(num_reviews))\n print('Rating Range: {}'.format(range_))\n print('Positive Reviews: {}'.format(num_pos))\n print('Negative Reviews: {}'.format(num_neg))\n print('Neutral Reviews: {}'.format(num_neutral))\n print('Pos:Neg Ratio: {}\\n'.format(num_pos / num_neg))\n\n else:\n raise ValueError('This type of rating ({}) is not supported.'.format(type(ratings[0])))\n\n def get_count_vectors(self, classifying=False):\n \"\"\"\n Wraps around Processor function\n :param classifying: if running classification on data\n :return: data, target\n \"\"\"\n reviews = self.processor.get_count_vectors(self.data['comment'], self.data['rating'])\n data, target, comments = [], [], []\n for review in reviews:\n if config.NUM_CLASSES == 2 
and review.target in [0.0, 1.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 3 and review.target in [0.0, 1.0, 2.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 5 and review.target in [0.0, 1.0, 2.0, 3.0, 4.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n\n if classifying:\n return data, target, comments\n else:\n return data, target\n\n def get_tfidf_vectors(self, classifying=False):\n \"\"\"\n Wraps around Processor function\n :param classifying: if running classification on data\n :return: data, target\n \"\"\"\n reviews = self.processor.get_tfidf_vectors(self.data['comment'], self.data['rating'])\n data, target, comments = [], [], []\n for review in reviews:\n if config.NUM_CLASSES == 2 and review.target in [0.0, 1.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 3 and review.target in [0.0, 1.0, 2.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 5 and review.target in [0.0, 1.0, 2.0, 3.0, 4.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n\n if classifying:\n return data, target, comments\n else:\n return data, target\n\n def get_average_embeddings(self, classifying=False):\n \"\"\"\n Wraps around Processor function\n :param classifying: if running classification on data\n :return: data, target\n \"\"\"\n reviews = self.processor.get_average_embeddings(self.data['comment'], self.data['rating'])\n data, target, comments = [], [], []\n for review in reviews:\n if not np.sum(review.data) == 0:\n if config.NUM_CLASSES == 2 and review.target in [0.0, 1.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 3 and review.target in [0.0, 1.0, 2.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 5 and review.target in [0.0, 1.0, 2.0, 3.0, 4.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n\n if classifying:\n return data, target, comments\n else:\n return data, target\n\n def get_pos_vectors(self, classifying=False):\n \"\"\"\n Wraps around Processor function\n :param classifying: if running classification on data\n :return: data, target\n \"\"\"\n reviews = self.processor.get_pos_vectors(self.data['comment'], self.data['rating'])\n data, target, comments = [], [], []\n for review in reviews:\n if not np.sum(review.data) == 0:\n if config.NUM_CLASSES == 2 and review.target in [0.0, 1.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 3 and review.target in [0.0, 1.0, 2.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n elif config.NUM_CLASSES == 5 and review.target in [0.0, 1.0, 2.0, 3.0, 4.0]:\n data.append(review.data)\n target.append(review.target)\n comments.append(review.comment)\n\n if classifying:\n return data, target, comments\n else:\n return data, 
target\n\n\n\n","repo_name":"appiahla/medinify","sub_path":"medinify/datasets/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":18922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"28067240065","text":"from inspect import getmembers, isroutine\nfrom typing import Optional\n\nfrom construct import Const, Int8ul, Int16ul, Int32ul, PaddedString, Struct, core\n\nfrom .exceptions import InconsistentFATAttributes, NotInitialized\nfrom .fatfs_state import BootSectorState\nfrom .utils import (ALLOWED_SECTOR_SIZES, ALLOWED_SECTORS_PER_CLUSTER, EMPTY_BYTE, FAT32, FULL_BYTE,\n SHORT_NAMES_ENCODING, FATDefaults, generate_4bytes_random, pad_string)\n\n\nclass BootSector:\n \"\"\"\n This class describes the first sector of the volume in the Reserved Region.\n It contains data from BPB (BIOS Parameter Block) and BS (Boot sector). The fields of the BPB and BS are mixed in\n the header of the physical boot sector. Fields with prefix BPB belongs to BPB block and with prefix BS\n belongs to the actual boot sector.\n\n Please beware, that the name of class BootSector refer to data both from the boot sector and BPB.\n ESP32 ignores fields with prefix \"BS_\"! Fields with prefix BPB_ are essential to read the filesystem.\n \"\"\"\n MAX_VOL_LAB_SIZE = 11\n MAX_OEM_NAME_SIZE = 8\n MAX_FS_TYPE_SIZE = 8\n\n # the FAT specification defines 512 bytes for the boot sector header\n BOOT_HEADER_SIZE = 512\n\n BOOT_SECTOR_HEADER = Struct(\n # this value reflects BS_jmpBoot used for ESP32 boot sector (any other accepted)\n 'BS_jmpBoot' / Const(b'\\xeb\\xfe\\x90'),\n 'BS_OEMName' / PaddedString(MAX_OEM_NAME_SIZE, SHORT_NAMES_ENCODING),\n 'BPB_BytsPerSec' / Int16ul,\n 'BPB_SecPerClus' / Int8ul,\n 'BPB_RsvdSecCnt' / Int16ul,\n 'BPB_NumFATs' / Int8ul,\n 'BPB_RootEntCnt' / Int16ul,\n 'BPB_TotSec16' / Int16ul, # zero if the FAT type is 32, otherwise number of sectors\n 'BPB_Media' / Int8ul,\n 'BPB_FATSz16' / Int16ul, # for FAT32 always zero, for FAT12/FAT16 number of sectors per FAT\n 'BPB_SecPerTrk' / Int16ul,\n 'BPB_NumHeads' / Int16ul,\n 'BPB_HiddSec' / Int32ul,\n 'BPB_TotSec32' / Int32ul, # zero if the FAT type is 12/16, otherwise number of sectors\n 'BS_DrvNum' / Const(b'\\x80'),\n 'BS_Reserved1' / Const(EMPTY_BYTE),\n 'BS_BootSig' / Const(b'\\x29'),\n 'BS_VolID' / Int32ul,\n 'BS_VolLab' / PaddedString(MAX_VOL_LAB_SIZE, SHORT_NAMES_ENCODING),\n 'BS_FilSysType' / PaddedString(MAX_FS_TYPE_SIZE, SHORT_NAMES_ENCODING),\n 'BS_EMPTY' / Const(448 * EMPTY_BYTE),\n 'Signature_word' / Const(FATDefaults.SIGNATURE_WORD)\n )\n assert BOOT_SECTOR_HEADER.sizeof() == BOOT_HEADER_SIZE\n\n def __init__(self, boot_sector_state: Optional[BootSectorState] = None) -> None:\n self._parsed_header: dict = {}\n self.boot_sector_state: BootSectorState = boot_sector_state\n\n def generate_boot_sector(self) -> None:\n boot_sector_state: BootSectorState = self.boot_sector_state\n if boot_sector_state is None:\n raise NotInitialized('The BootSectorState instance is not initialized!')\n volume_uuid = generate_4bytes_random()\n pad_header: bytes = (boot_sector_state.sector_size - BootSector.BOOT_HEADER_SIZE) * EMPTY_BYTE\n data_content: bytes = boot_sector_state.data_sectors * boot_sector_state.sector_size * FULL_BYTE\n root_dir_content: bytes = boot_sector_state.root_dir_sectors_cnt * boot_sector_state.sector_size * EMPTY_BYTE\n fat_tables_content: bytes = (boot_sector_state.sectors_per_fat_cnt\n * boot_sector_state.fat_tables_cnt\n * boot_sector_state.sector_size\n * 
EMPTY_BYTE)\n self.boot_sector_state.binary_image = (\n BootSector.BOOT_SECTOR_HEADER.build(\n dict(BS_OEMName=pad_string(boot_sector_state.oem_name, size=BootSector.MAX_OEM_NAME_SIZE),\n BPB_BytsPerSec=boot_sector_state.sector_size,\n BPB_SecPerClus=boot_sector_state.sectors_per_cluster,\n BPB_RsvdSecCnt=boot_sector_state.reserved_sectors_cnt,\n BPB_NumFATs=boot_sector_state.fat_tables_cnt,\n BPB_RootEntCnt=boot_sector_state.entries_root_count,\n # if fat type is 12 or 16 BPB_TotSec16 is filled and BPB_TotSec32 is 0x00 and vice versa\n BPB_TotSec16=0x00 if boot_sector_state.fatfs_type == FAT32 else boot_sector_state.sectors_count,\n BPB_Media=boot_sector_state.media_type,\n BPB_FATSz16=boot_sector_state.sectors_per_fat_cnt,\n BPB_SecPerTrk=boot_sector_state.sec_per_track,\n BPB_NumHeads=boot_sector_state.num_heads,\n BPB_HiddSec=boot_sector_state.hidden_sectors,\n BPB_TotSec32=boot_sector_state.sectors_count if boot_sector_state.fatfs_type == FAT32 else 0x00,\n BS_VolID=volume_uuid,\n BS_VolLab=pad_string(boot_sector_state.volume_label,\n size=BootSector.MAX_VOL_LAB_SIZE),\n BS_FilSysType=pad_string(boot_sector_state.file_sys_type,\n size=BootSector.MAX_FS_TYPE_SIZE)\n )\n ) + pad_header + fat_tables_content + root_dir_content + data_content\n )\n\n def parse_boot_sector(self, binary_data: bytes) -> None:\n \"\"\"\n Checks the validity of the boot sector and derives the metadata from boot sector to the structured shape.\n \"\"\"\n try:\n self._parsed_header = BootSector.BOOT_SECTOR_HEADER.parse(binary_data)\n except core.StreamError:\n raise NotInitialized('The boot sector header is not parsed successfully!')\n\n if self._parsed_header['BPB_TotSec16'] != 0x00:\n sectors_count_: int = self._parsed_header['BPB_TotSec16']\n elif self._parsed_header['BPB_TotSec32'] != 0x00:\n # uncomment for FAT32 implementation\n # sectors_count_ = self._parsed_header['BPB_TotSec32']\n # possible_fat_types = [FAT32]\n assert self._parsed_header['BPB_TotSec16'] == 0\n raise NotImplementedError('FAT32 not implemented!')\n else:\n raise InconsistentFATAttributes('The number of FS sectors cannot be zero!')\n\n if self._parsed_header['BPB_BytsPerSec'] not in ALLOWED_SECTOR_SIZES:\n raise InconsistentFATAttributes(f'The number of bytes '\n f\"per sector is {self._parsed_header['BPB_BytsPerSec']}! 
\"\n f'The accepted values are {ALLOWED_SECTOR_SIZES}')\n if self._parsed_header['BPB_SecPerClus'] not in ALLOWED_SECTORS_PER_CLUSTER:\n raise InconsistentFATAttributes(f'The number of sectors per cluster '\n f\"is {self._parsed_header['BPB_SecPerClus']}\"\n f'The accepted values are {ALLOWED_SECTORS_PER_CLUSTER}')\n\n total_root_bytes: int = self._parsed_header['BPB_RootEntCnt'] * FATDefaults.ENTRY_SIZE\n root_dir_sectors_cnt_: int = total_root_bytes // self._parsed_header['BPB_BytsPerSec']\n self.boot_sector_state = BootSectorState(oem_name=self._parsed_header['BS_OEMName'],\n sector_size=self._parsed_header['BPB_BytsPerSec'],\n sectors_per_cluster=self._parsed_header['BPB_SecPerClus'],\n reserved_sectors_cnt=self._parsed_header['BPB_RsvdSecCnt'],\n fat_tables_cnt=self._parsed_header['BPB_NumFATs'],\n root_dir_sectors_cnt=root_dir_sectors_cnt_,\n sectors_count=sectors_count_,\n media_type=self._parsed_header['BPB_Media'],\n sec_per_track=self._parsed_header['BPB_SecPerTrk'],\n num_heads=self._parsed_header['BPB_NumHeads'],\n hidden_sectors=self._parsed_header['BPB_HiddSec'],\n volume_label=self._parsed_header['BS_VolLab'],\n file_sys_type=self._parsed_header['BS_FilSysType'],\n volume_uuid=self._parsed_header['BS_VolID'])\n self.boot_sector_state.binary_image = binary_data\n assert self.boot_sector_state.file_sys_type in (f'FAT{self.boot_sector_state.fatfs_type} ', 'FAT ')\n\n def __str__(self) -> str:\n \"\"\"\n FATFS properties parser (internal helper tool for fatfsgen.py/fatfsparse.py)\n Provides all the properties of given FATFS instance by parsing its boot sector (returns formatted string)\n \"\"\"\n\n if self._parsed_header == {}:\n return 'Boot sector is not initialized!'\n res: str = 'FATFS properties:\\n'\n for member in getmembers(self.boot_sector_state, lambda a: not (isroutine(a))):\n prop_ = getattr(self.boot_sector_state, member[0])\n if isinstance(prop_, int) or isinstance(prop_, str) and not member[0].startswith('_'):\n res += f'{member[0]}: {prop_}\\n'\n return res\n\n @property\n def binary_image(self) -> bytes:\n # when BootSector is not instantiated, self.boot_sector_state might be None\n if self.boot_sector_state is None or len(self.boot_sector_state.binary_image) == 0:\n raise NotInitialized('Boot sector is not initialized!')\n bin_image_: bytes = self.boot_sector_state.binary_image\n return bin_image_\n","repo_name":"espressif/esp-idf","sub_path":"components/fatfs/fatfs_utils/boot_sector.py","file_name":"boot_sector.py","file_ext":"py","file_size_in_byte":9828,"program_lang":"python","lang":"en","doc_type":"code","stars":11541,"dataset":"github-code","pt":"3"} +{"seq_id":"5024751261","text":"#!/usr/bin/env python\n\"\"\"Utilities collection for Analysis\"\"\"\n\ndef sigsort(param):\n # mXX-1000_mA-0p25_lxy-0p3\n params_ = param.split('_')\n mxx = float(params_[0].split('-')[-1])\n ma = float(params_[1].split('-')[-1].replace('p', '.'))\n lxy = float(params_[2].split('-')[-1].replace('p', '.'))\n\n return lxy*1e6 + mxx*1e3 + ma\n","repo_name":"phylsix/FireHydrant","sub_path":"FireHydrant/Analysis/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34206602945","text":"#!/usr/bin/python3\n\ndef safe_print_list(my_list=[], x=0):\n check = 0\n for s in range(x):\n try:\n print(my_list[s], end=\"\")\n check += 1\n except IndexError:\n break\n print()\n return 
(check)\n","repo_name":"Mitche98/alx-higher_level_programming","sub_path":"0x05-python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42105327790","text":"def scan(sentence):\n\twords = sentence.split(' ')\n\treturn [token_word(word) for word in words]\n\t\ndef token_word(word):\n\ttoken = get_token(word)\n\tif token == 'number':\n\t\tword = int(word)\n\t\t\n\treturn (token, word)\n\t\ntokens_words = {\n\t'direction': ['north', 'east', 'south'],\n\t'verb': ['go', 'kill', 'eat'],\n\t'stop': ['the', 'in', 'of'],\n\t'noun': ['bear', 'princess']\n}\t\n\ndef get_token(word):\n\tfor token, words in tokens_words.items():\n\t\tif word in words:\n\t\t\treturn token\n\t\n\ttry:\n\t\tint(word)\n\t\treturn 'number'\n\texcept ValueError:\n\t\treturn 'error'","repo_name":"ejulio/coding","sub_path":"learn-python-the-hard-way/ex48/ex48/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"1255192227","text":"from DDPG.algo import DDPG\r\nfrom DDPG.model import MLPContiModel, MLPEvaluateModel\r\nfrom TOOLS.Logger import LoggerPrinter\r\nfrom ENVS.Envs import MountainCarContiEnv, PendulumEnv\r\nimport numpy as np\r\n\r\n\r\ndef main(game_index: int, mode: str) -> None:\r\n \"\"\"\r\n 该函数实现了在各个游戏下对DDPG算法进行测试\r\n :param game_index: int,\r\n 测试游戏环境的编号:\r\n 1. 连续控制下的倒立摆\r\n 2. 连续控制下的高山行车\r\n :param mode: str,\r\n 控制模式,[TRAIN, TEST]\r\n :return: None,\r\n \"\"\"\r\n logger = LoggerPrinter()\r\n if game_index == 1:\r\n exp_name = 'Pendulum'\r\n env = PendulumEnv(logger=logger)\r\n act_high = np.array([2., ])\r\n act_low = np.array([-2., ])\r\n policy_model = MLPContiModel(env.obs_dim, env.act_dim, (30, 15), 'Sigmoid', logger)\r\n evaluate_model = MLPEvaluateModel(env.obs_dim, env.act_dim, (30, 15), 'Sigmoid', logger)\r\n gamma = 0.95\r\n eva_lr = 0.005\r\n pol_lr = 0.005\r\n rho = 0.005\r\n learn_epochs = 50\r\n max_iter_per_epoch = 1500\r\n is_OU_noise = True\r\n\r\n elif game_index == 2:\r\n exp_name = 'MountainCarConti'\r\n env = MountainCarContiEnv(logger=logger)\r\n act_high = np.array([1., ])\r\n act_low = np.array([-1., ])\r\n policy_model = MLPContiModel(env.obs_dim, env.act_dim, (40, 25), 'Sigmoid', logger)\r\n evaluate_model = MLPEvaluateModel(env.obs_dim, env.act_dim, (30, 15), 'Sigmoid', logger)\r\n gamma = 0.99\r\n eva_lr = 0.005\r\n pol_lr = 0.005\r\n rho = 0.005\r\n learn_epochs = 100\r\n max_iter_per_epoch = 1500\r\n is_OU_noise = True\r\n\r\n ddpg = DDPG(env, policy_model, evaluate_model, 'MODEL_PARAMS', exp_name, logger, gamma=gamma,\r\n eva_lr=eva_lr, pol_lr=pol_lr, rho=rho, conti_act_high=act_high, conti_act_low=act_low,\r\n is_OU_noise=is_OU_noise)\r\n if mode == 'TRAIN':\r\n ddpg.train(buffer_size=1000000, retrain_label=False, learn_epochs=learn_epochs,\r\n max_iter_per_epoch=max_iter_per_epoch, sample_size=200, save_freq=100, noise_scale=0.1,\r\n start_steps=20000, update_after=10000, update_every=50, render=False)\r\n ddpg.test(test_epochs=10, max_iter_per_epoch=2000)\r\n elif mode == 'TEST':\r\n ddpg.test(test_epochs=10, max_iter_per_epoch=2000)\r\n\r\n\r\nif __name__ == '__main__':\r\n main(1, 
'TRAIN')\r\n\r\n\r\n","repo_name":"xiaojianyang820/StandardRL","sub_path":"Test_DDPG.py","file_name":"Test_DDPG.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34009234793","text":"# -*- coding:utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse,Http404\nfrom vulnerabilities.models import Vulnerabilities\nimport logging\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='myapp.log',\n filemode='w')\n#def hello(request):\n# return HttpResponse(\"Hello World\")\ndef hello(request):\n result = {'a':1}\n return render(request, 'vulnerabilities/show.html',{'dict':result})\n\ndef index(request):\n result = {'a':1}\n return render(request, 'vulnerabilities/index.html',{\"dict\":result})\n#一个总览,显示标题,时间等重要信息,提供showDetail的链接\ndef showAll(request):\n #render的后端用列表实现即可\n result = [] \n count = len(Vulnerabilities.objects.all())\n for i in range(count):\n result.append(Vulnerabilities.objects.all()[i].toDict())\n logging.info(result)\n return render(request, 'vulnerabilities/showAll.html',{'result':result,})\n\n#显示漏洞细节 \ndef showDetail(request,id):\n id = int(id)\n tmpValue = Vulnerabilities.objects.get(pk=id).toDict()\n return render(request,'vulnerabilities/showDetail.html',{'tmpValue':tmpValue})\n# return HttpResponse(str(tmpValue))\n\n#新增漏洞\ndef add(request):\n\n result = {}\n if request.POST:\n #for key in request.POST:\n logging.info(request.POST)\n logging.info(request.POST.get(\"event_type\"))\n event_title = request.POST.get(\"event_title\")\n find_time = request.POST.get(\"find_time\")\n start_time = request.POST.get(\"start_time\")\n finish_time = request.POST.get(\"finish_time\")\n event_level = request.POST.get(\"event_level\")\n\n event_type = request.POST.get(\"event_type\")\n solve_time = request.POST.get(\"solve_time\")\n resp_group = request.POST.get(\"resp_group\")\n operater = request.POST.get(\"operater\")\n\n finder = request.POST.get(\"finder\")\n current_state = request.POST.get(\"current_state\")\n event_rate = request.POST.get(\"current_state\")\n phenomena = request.POST.get(\"phenomena\") \n \n# try:\n if True:\n tmpValue = Vulnerabilities(event_title=event_title,find_time=find_time,\n start_time=start_time,finish_time=finish_time,event_level=event_level, \\\n event_type = event_type, solve_time=solve_time,resp_group=resp_group, \\\n operater=operater,phenomena=phenomena, finder=finder, \\\n current_state=current_state,event_rate=event_rate) \n\n tmpValue.save()\n# except Exception as e:\n \n #logging.info(e)\n #return HttpResponse(\"error when saving!\")\n return render(request, 'vulnerabilities/add_vulnerabilities.html',{\"dict\":result})\n else:\n return render(request, 'vulnerabilities/add_vulnerabilities.html',{\"dict\":result})\n\n# Create your views here.\n\n","repo_name":"927-wy/vulnerability_management_system","sub_path":"vms/vulnerabilities/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41008577295","text":"import base64\nimport contextlib\nimport os\nimport tempfile\nimport unittest\n\nfrom pathlib import Path\nfrom random import choices\n\nimport yaml\n\nimport packpath\n\n\n# 1x1 transparent PNG from https://png-pixel.com\nTEST_IMG = 
\"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==\"\n\n\nclass TestPackPath(unittest.TestCase):\n pack_title = \"Test Sticker Pack 😎\"\n pack_author = \"Test Author Name ✍️\"\n pack_emojis = [\n \"🤡\",\n \"🥑\",\n \"🙈\",\n \"🚢\",\n \"🐶\",\n ]\n pack_n_stickers = 10\n\n @contextlib.contextmanager\n def pack_config(self, filename: os.PathLike):\n \"\"\"Make `filename` the temporary config.yaml file for a path.\"\"\"\n os.rename(filename, self.pack_dir / \"config.yaml\")\n yield\n os.rename(self.pack_dir / \"config.yaml\", filename)\n\n def setUp(self):\n self.pack_dir = Path(tempfile.mkdtemp())\n self.pack_stickers = []\n\n _, self.yaml_right = tempfile.mkstemp(dir=self.pack_dir)\n _, self.yaml_wrong_cover = tempfile.mkstemp(dir=self.pack_dir)\n _, self.yaml_wrong_sticker = tempfile.mkstemp(dir=self.pack_dir)\n\n self.test_img = base64.b64decode(TEST_IMG)\n\n self.yaml_right_content = {\n \"pack\": {\n \"title\": self.pack_title,\n \"author\": self.pack_author,\n \"cover\": \"\",\n },\n \"stickers\": {},\n }\n\n for i in range(0, self.pack_n_stickers):\n fd, filename = tempfile.mkstemp(dir=self.pack_dir, prefix=f\"{i:03}_\")\n basename = Path(filename).name\n emoji = choices(self.pack_emojis)[0]\n\n self.yaml_right_content[\"stickers\"][basename] = emoji\n # Register the order of the emojis\n self.pack_stickers.append(emoji)\n\n with os.fdopen(fd, \"wb\") as image:\n image.write(self.test_img)\n\n # A fully valid YAML\n self.yaml_right_content[\"pack\"][\"cover\"] = choices([*self.yaml_right_content[\"stickers\"].keys()])[0]\n with open(self.yaml_right, \"wt\") as yaml_right:\n yaml.dump(self.yaml_right_content, yaml_right, allow_unicode=True)\n\n # A valid YAML with wrong pack.cover\n yaml_wrong_cover = {**self.yaml_right_content}\n yaml_wrong_cover[\"pack\"][\"cover\"] = \"non-existant-file\"\n with open(self.yaml_wrong_cover, \"wt\") as yaml_wrong:\n yaml.dump(yaml_wrong_cover, yaml_wrong, allow_unicode=True)\n\n # A valid YAML with bad filenames\n yaml_wrong_sticker = {**self.yaml_right_content}\n yaml_wrong_sticker[\"stickers\"][\"non-existant-file\"] = \"❌\"\n with open(self.yaml_wrong_sticker, \"wt\") as yaml_wrong:\n yaml.dump(yaml_wrong_sticker, yaml_wrong, allow_unicode=True)\n\n def test_load_yaml(self):\n \"\"\"Test that a valid YAML can be loaded into a pack.\"\"\"\n pp = packpath.PackPath()\n\n with self.pack_config(self.yaml_right):\n pp.load_path(self.pack_dir)\n\n self.assertEqual(self.pack_title, pp.title)\n self.assertEqual(self.pack_author, pp.author)\n\n self.assertEqual(self.pack_n_stickers, len(pp.stickers))\n\n for i, s in enumerate(pp.stickers):\n self.assertEqual(self.pack_stickers[i], s.emoji)\n self.assertEqual(self.test_img, s.image_data)\n\n self.assertEqual(self.test_img, pp.cover.image_data)\n self.assertEqual(self.pack_n_stickers, pp.nb_stickers)\n\n def test_bad_yaml_cover(self):\n \"\"\"A valid YAML with a cover that does not exist.\"\"\"\n pp = packpath.PackPath()\n\n with self.pack_config(self.yaml_wrong_cover):\n with self.assertRaises(FileNotFoundError):\n pp.load_path(self.pack_dir)\n\n def test_bad_stickers(self):\n \"\"\"A valid YAML with stickers that do not exist.\"\"\"\n pp = packpath.PackPath()\n\n with self.pack_config(self.yaml_wrong_sticker):\n with self.assertRaises(FileNotFoundError):\n 
pp.load_path(self.pack_dir)\n","repo_name":"diegoe/packpath","sub_path":"packpath/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12579196460","text":"from unittest import TestCase\n\nfrom zope.interface.common.mapping import IMapping\n\nfrom tubes.routing import Router, Routed, to\nfrom tubes.tube import series, tube, receiver\nfrom tubes.fan import Out, In\n\n@tube\nclass Participant(object):\n \"\"\"\n A single participant in a chat system.\n \"\"\"\n outputType = Routed(IMapping)\n\n def __init__(self, hub, requestsFount, responsesDrain):\n \"\"\"\n Create a L{Participant}.\n \"\"\"\n self._hub = hub\n self._in = In()\n self._in.fount.flowTo(responsesDrain)\n\n self._router = Router()\n self._participating = {}\n\n # `self._in' is both commands from our own client and also messages\n # from other clients.\n requestsFount.flowTo(series(self, self._router.drain))\n\n self.client = self._router.newRoute(\"client\")\n self.client.flowTo(self._in.newDrain())\n\n\n def received(self, item):\n \"\"\"\n An item was received.\n\n @param item: A dictionary featuring a 'type' indicating which command\n it is.\n\n @return: a response routed to the router.\n \"\"\"\n kwargs = item.copy()\n return getattr(self, \"do_\" + kwargs.pop(\"type\"))(**kwargs)\n\n\n def do_name(self, name):\n \"\"\"\n From client; set the name of this client.\n\n @param name: The nickname for this client.\n \"\"\"\n self.name = name\n yield to(self.client, dict(named=name))\n\n\n def do_join(self, channel):\n \"\"\"\n From client; instruct this client to join a channel with the given\n name.\n\n @param channel: the name of the channel to join.\n \"\"\"\n fountFromChannel, drainToChannel = (\n self._hub.channelNamed(channel).participate(self)\n )\n fountFromChannel.flowTo(self._in.newDrain())\n fountToChannel = self._router.newRoute(\"->{}\".format(channel))\n fountToChannel.flowTo(drainToChannel)\n\n self._participating[channel] = fountToChannel\n yield to(self._participating[channel],\n dict(type=\"joined\"))\n\n\n def do_speak(self, channel, message, id):\n \"\"\"\n From client; say something on the given channel.\n\n @param channel: the name of the channel\n\n @param message: the text of the message to relay\n\n @param id: a unique identifier for this message\n \"\"\"\n yield to(self._participating[channel],\n dict(type=\"spoke\", message=message, id=id))\n\n\n\nclass Channel(object):\n \"\"\"\n A chat room.\n \"\"\"\n def __init__(self, name):\n self._name = name\n self._out = Out()\n self._in = In()\n self._in.fount.flowTo(self._out.drain)\n\n\n def participate(self, participant):\n \"\"\"\n Create a new drain of messages going to this channel and a new fount of\n messages coming from this channel, for the given participant.\n\n @param participant: the name of the participant joining.\n\n @return: a 2-tuple of (new fount, new drain)\n \"\"\"\n @receiver(IMapping, IMapping,\n name=\"->addSender({}, {})\".format(participant.name,\n self._name))\n def addSender(item):\n yield dict(item, sender=participant.name, channel=self._name)\n\n return (self._out.newFount(),\n series(addSender, self._in.newDrain()))\n\n\n\n@tube\nclass OnStop(object):\n \"\"\"\n Utility class to hook 'stopped' with a callable.\n \"\"\"\n\n def __init__(self, callback):\n \"\"\"\n Create an L{OnStop} with a callback.\n \"\"\"\n self.callback = callback\n\n\n def received(self, item):\n \"\"\"\n We received a message; 
relay it on unmodified since we only care about\n L{OnStop}.\n\n @param item: anything\n \"\"\"\n yield item\n\n\n def stopped(self, reason):\n \"\"\"\n The flow stopped; invoke the given callback.\n\n @param reason: ignored.\n\n @return: no results (empty iterable)\n \"\"\"\n self.callback()\n return ()\n\n\n\nclass Hub(object):\n \"\"\"\n A chat hub; the nexus object for a whole channel namespace (i.e.: server).\n \"\"\"\n def __init__(self):\n self.participants = []\n self.channels = {}\n\n\n def newParticipantFlow(self, flow):\n \"\"\"\n Create a flow for a new participant.\n\n @param flow: a L{Flow} with a drain and a fount for receiving commands;\n JSON-style dictionaries with a 'type' key indicating which verb to\n invoke on L{Participant}.\n \"\"\"\n commandFount = flow.fount.flowTo(\n series(OnStop(lambda: self.participants.remove(participant)))\n )\n commandDrain = flow.drain\n participant = Participant(self, commandFount, commandDrain)\n self.participants.append(participant)\n\n\n def channelNamed(self, name):\n \"\"\"\n Retrieve a L{Channel} with the given name.\n\n @param name: the name of the channel.\n\n @return: a L{Channel}.\n \"\"\"\n if name not in self.channels:\n self.channels[name] = Channel(name)\n return self.channels[name]\n\n\n\nclass ChatTests(TestCase):\n \"\"\"\n Integration test cases for putting together fan.In and fan.Out in a useful\n configuration for pubsub or multi-user chat.\n \"\"\"\n\n def test_joining(self):\n \"\"\"\n Test that we receive a response from joining.\n \"\"\"\n from ..listening import Flow\n from .util import FakeFount, FakeDrain\n h = Hub()\n ff = FakeFount()\n fd = FakeDrain()\n h.newParticipantFlow(Flow(ff, fd))\n ff.drain.receive({\"type\": \"name\", \"name\": \"bob\"})\n self.assertEqual(fd.received.pop(0), {\"named\": \"bob\"})\n ff.drain.receive({\"type\": \"join\", \"channel\": \"bobs\"})\n self.assertEqual(fd.received, [{\"type\": \"joined\",\n \"sender\": \"bob\",\n \"channel\": \"bobs\"}])\n","repo_name":"twisted/tubes","sub_path":"tubes/test/test_chatter.py","file_name":"test_chatter.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"3"} +{"seq_id":"46306437978","text":"import RPi.GPIO as GPIO\nimport time\n\n#핀 설정\npriPin = 2\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(priPin, GPIO.IN, GPIO.PUD_UP)\n\ntry:\n\twhile(True):\n\t\tif GPIO.input(priPin) == GPIO.LOW:\n\t\t\tprint(\"Detected\")\n\t\telse:\n\t\t\tprint(\"a\")\n\t\ttime.sleep(0.3)\nexcept KeyboardInterrupt:\n\tGPIO.cleanup()\n","repo_name":"youngjea-Choi/RaspberryPi-YJ-","sub_path":"RaspberryPi-main/RaspberryPi-main/RaspberryPi_Source/pirTest.py","file_name":"pirTest.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35492471273","text":"import logging\n\nfrom pandas import DataFrame\n\nfrom exceptions.custom_exceptions import NullDfException\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef normalize_column(df: DataFrame, column: str) -> DataFrame:\n \"\"\"\n Normalizes columns in a DataFrame\n\n Args:\n DataFrame: The input DataFrame\n str: Name of the column to normalize\n\n Returns:\n DataFrame: A new DataFrame with the specified column normalized between 0 and 1\n\n Raises:\n NullDfException: If the input DataFrame is None\n ValueError: If the specified column does not exist in the DataFrame\n\n \"\"\"\n if df is None:\n LOGGER.warning(\"DataFrame is None\")\n raise 
NullDfException(\"DF is None\")\n elif column not in df.columns:\n LOGGER.warning(\n \"DataFrame empty or column does not exist in the given DataFrame\"\n )\n else:\n df[column] = (df[column] - df[column].min()) / (\n df[column].max() - df[column].min()\n )\n return df\n","repo_name":"brainiac-ns/ml-basics","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43343082946","text":"# ------------------------------------------------- #\r\n# Title: Lab7-1\r\n# Description: A simple example of storing data in a binary file\r\n# ChangeLog: (Who, When, What)\r\n# Deborah Yenubari, 3.4.21, Created Script\r\n# ------------------------------------------------- #\r\nimport pickle # This imports code from another code file!\r\n\r\n# Data -------------------------------------------- #\r\nlstTable = []\r\nfile = \"AppData.txt\"\r\nstrID = \"\"\r\nstrName = \"\"\r\nobjFile = \"AppData.data\"\r\nobjFileData = []\r\n\r\n\r\n# Processing -------------------------------------- #\r\nlstHeader = [\"ID\", \"NAME\"]\r\nprint(lstHeader[0], lstHeader[1], sep=\" \")\r\nlstRow = [\"1\", \"James Madison\"]\r\nprint(lstRow[0], lstRow[1], sep=\",\")\r\n\r\n\r\ndef display_current_data(lstTable):\r\n lstTable.append(lstHeader)\r\n lstTable.append(lstRow)\r\n print(lstTable)\r\n\r\n\r\ndef save_data_to_file(lstTable, file):\r\n file = open(\"AppData.txt\", \"a\")\r\n for lstRow in lstTable:\r\n file.write(lstRow[0] + \",\" + lstRow[1] + \"\\n\")\r\n file.close()\r\n return lstTable\r\n # except IOError:\r\n # print(\"Error: Can't find file!\")\r\n # else:\r\n # print(\"Data saved in text file, 'AppData.txt'!\")\r\n # file = open(\"AppData.txt\", \"a\")\r\n # for lstRow in lstTable:\r\n # file.write(lstRow[0] + \",\" + lstRow[1] + \"\\n\")\r\n # file.close()\r\n # return lstTable\r\n\r\n\r\ndef read_data_from_file(file, lstTable):\r\n file = open(\"AppData.txt\", \"r\")\r\n fileData = file.read().splitlines()\r\n for line in file:\r\n print(fileData[0].strip() + \",\" + fileData[1].strip() + \"\\n\")\r\n print(fileData)\r\n file.close()\r\n\r\n\r\n# Get ID and NAME From user, then store it in a list object\r\n\r\ndef add_new_data(strID, strName):\r\n try:\r\n strID = input(\"Enter an ID: \")\r\n if strID <= \"0\":\r\n raise ValueError\r\n except ValueError:\r\n print(\"Please enter a number above zero\")\r\n finally:\r\n strID = input(\"Enter an ID: \")\r\n try:\r\n strName = input(\"Enter a Name: \")\r\n for char in strName:\r\n if char in strName == \"!\":\r\n raise ValueError\r\n elif char == \"#\":\r\n raise ValueError\r\n elif char == \"@\":\r\n raise ValueError\r\n elif char == \"%\":\r\n raise ValueError\r\n elif char == \"$\":\r\n raise ValueError\r\n elif char == \"%\":\r\n raise ValueError\r\n elif char == \"&\":\r\n raise ValueError\r\n elif char == \"*\":\r\n raise ValueError\r\n except ValueError:\r\n print(\"Please note symbols are not allowed\")\r\n finally:\r\n strName = input(\"Enter a Name: \")\r\n lstRow = [strID, strName]\r\n lstTable.append(lstRow)\r\n print(\"\\n\")\r\n print(lstTable)\r\n\r\n # # print(\"Type in your ID and Name: \")\r\n # strID = input(\"Enter an ID: \")\r\n # strName = input(\"Enter a Name: \")\r\n # lstRow = [strID, strName]\r\n # lstTable.append(lstRow)\r\n # print(\"\\n\")\r\n # print(lstTable)\r\n #\r\n\r\ndef save_data_to_binary_file(lstTable, objFile):\r\n objFile = open(\"AppData.data\", \"ab\")\r\n pickle.dump(lstTable, 
objFile)\r\n objFile.close()\r\n\r\n\r\ndef read_data_from_binary_file(objFileData, lstTable):\r\n objFile = open(\"AppData.data\", \"rb\")\r\n objFileData = pickle.load(objFile) # read all data from the file at once\r\n print(objFileData)\r\n objFile.close()\r\n\r\n\r\n# Presentation ------------------------------------ #\r\n\r\n# Main Script\r\n\r\n\r\nintChoice = True\r\n\r\nwhile True:\r\n print(\"\"\"\r\n Please choose an option:\r\n 1. Display Current Data\r\n 2. Save Data to File\r\n 3. Read Data from File \r\n 4. Add New Data\r\n 5. Save List to Binary File\r\n 6. Read from Binary File to List\r\n \"\"\")\r\n # Step 2\r\n # Add a new item to the List(Table) each time the user makes that choice\r\n intChoice = input(\"Enter 1, 2, 3, 4, 5, 6 or exit: \")\r\n\r\n if intChoice == \"1\":\r\n print(\"Your Current Data is: \")\r\n display_current_data(lstTable)\r\n elif intChoice == \"2\":\r\n answer = input(\"Would you like to Save your Data to file? Yes or No:\")\r\n if answer.lower() == \"yes\":\r\n print(\"Data saved to file, 'AppData.txt'!\")\r\n save_data_to_file(lstTable, file)\r\n elif intChoice == \"3\":\r\n print(\"Here is the current Data from the file, 'AppData.txt': \")\r\n read_data_from_file(file,lstTable)\r\n elif intChoice == \"4\":\r\n print(\"Type in your ID and Name: \")\r\n add_new_data(strID, strName)\r\n elif intChoice == \"5\":\r\n print(\"Data saved to Binary File, 'AppData.data!\")\r\n save_data_to_binary_file(lstTable, objFile)\r\n elif intChoice == \"6\":\r\n print(\"Showing pickled Data from Binary File, 'AppData.data': \")\r\n read_data_from_binary_file(objFileData, lstTable)\r\n else:\r\n break","repo_name":"DY202/IntroToProg-Python-Mod07","sub_path":"Lab 7.1.py","file_name":"Lab 7.1.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26512064907","text":"path_to_file = 'C:\\tur\\re.txt'\na = str(input('Введите слово:'))\n#with open(r'C:\\tur\\re.txt', encoding='utf-8') as file:\n #for line in file:\n #print(line)\nfile = open(r'C:\\tur\\re.txt', encoding='utf-8')\ntext=file.read()\nl = [line.strip() for line in file]\nif a in text:\n print (a)\n print(l[a])","repo_name":"DanilaZV/Python","sub_path":"Python/algoritmies/fjgfj.py","file_name":"fjgfj.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29041447777","text":"# -*- coding: utf-8 -*-\nimport pytest\nimport os\nimport time\nimport re\nimport resource\nimport subprocess\nfrom datetime import timedelta\nfrom shlex import split\n\nfrom configs import InstanceConfig, BadIpAddrConfig, CoroutineStackConfig, LogQueueConfig\n\nfrom balancer.test.util import asserts\nfrom balancer.test.util import process\nfrom balancer.test.util.stdlib.multirun import Multirun\nfrom balancer.test.util.predef import http\n\nfrom balancer.test.util.process import BalancerStartError\n\n\ndef check_workers(ctx):\n workers = ctx.balancer.get_workers()\n\n for run in Multirun():\n with run:\n log_content = ctx.manager.fs.read_file(ctx.balancer.config.instance_log)\n\n for child in workers:\n spawned = 'Spawned child with pid {pid} and its admin addr'.format(pid=child)\n assert spawned in log_content, 'Spawned child is not in instance_log'\n\n\nTHREAD_WORKER_ID_REGEXP = re.compile(r'Spawned thread child with workerId (\\d+) and type worker')\n\n\ndef test_workers_spawn(ctx):\n workers = 15\n\n 
ctx.start_balancer(InstanceConfig(workers=15))\n\n assert workers == ctx.get_worker_count()\n\n for i in range(30):\n log_content = ctx.manager.fs.read_file(ctx.balancer.config.instance_log)\n matched = THREAD_WORKER_ID_REGEXP.findall(log_content)\n if len(matched) == workers:\n break\n time.sleep(1)\n else:\n raise Exception('Timed out')\n\n worker_ids = []\n for worker_id in matched:\n worker_ids.append(int(worker_id))\n\n assert set(worker_ids) == set(map(lambda x: x + 1, xrange(workers)))\n\n\ndef test_worker_pids_in_log(ctx):\n \"\"\"\n Запускаемся с instance_log, видим список воркеров в логе\n \"\"\"\n ctx.start_balancer(InstanceConfig(workers=4))\n\n check_workers(ctx)\n\n\ndef test_enable_reuse_port(ctx):\n \"\"\"\n BALANCER-536\n Проверяем что балансер запускается с опцией enable_reuse_port\n \"\"\"\n ctx.start_balancer(InstanceConfig(enable_reuse_port=True))\n response = ctx.perform_request(http.request.post(data='data'))\n asserts.status(response, 200)\n\n\ndef test_dns_timeout_tduration_format(ctx):\n \"\"\"\n BALANCER-124\n Параметр dns_timeout должен понимать значение в формате TDuration\n \"\"\"\n ctx.start_balancer(InstanceConfig(dns_timeout='10s'))\n\n assert ctx.balancer.is_alive()\n\n\ndef test_dns_timeout_milliseconds_format(ctx):\n \"\"\"\n BALANCER-124\n Параметр dns_timeout должен понимать значение, заданное в миллисекундах\n \"\"\"\n ctx.start_balancer(InstanceConfig(dns_timeout=10))\n\n assert ctx.balancer.is_alive()\n\n\ndef test_so_keepalive_enabled(ctx):\n \"\"\"\n BALANCER-31\n Если в instance включена опция tcp_keepalive,\n то при отутствии клиентских пакетов в течение tcp_keep_idle\n клиенту должен отправиться keepalive probe пакет\n \"\"\"\n delay = 1\n count = 5\n min_delta = timedelta(seconds=0.9 * delay)\n max_delta = timedelta(seconds=1.1 * delay)\n\n ctx.start_balancer(InstanceConfig(tcp_keep_idle=delay, tcp_keep_intvl=1, tcp_keep_cnt=1))\n tcpdump = ctx.manager.tcpdump.start(ctx.balancer.config.port)\n\n with ctx.create_http_connection():\n time.sleep((count + 0.1) * delay)\n for run in Multirun(sum_delay=3):\n with run:\n tcpdump.read_all()\n\n sessions = tcpdump.get_sessions()\n assert len(sessions) > 0\n sess = sessions[0]\n server_packets = sess.other_server_packets\n deltas = zip(server_packets[:-1], server_packets[1:])\n check_delta = lambda t1_t2: min_delta < t1_t2[1] - t1_t2[0] < max_delta\n assert len(server_packets) >= count\n assert all(map(check_delta, deltas))\n\n\ndef base_test_coroutine_stack_size(ctx, coro_stack_size):\n pytest.skip(\"TODO(velavokr): BALANCER-2359 - implement a working stack monitoring\")\n\n\ndef test_coroutine_stack_size(ctx):\n \"\"\"\n BALANCER-450\n Размер стека корутин указывается при помощи параметра -C при запуске бинарника балансера\n \"\"\"\n coro_stack_size = 1000 * 1024\n ctx.start_balancer(CoroutineStackConfig(coro_stack_size=coro_stack_size))\n base_test_coroutine_stack_size(ctx, coro_stack_size)\n\n\ndef test_default_coroutine_stack_size(ctx):\n \"\"\"\n BALANCER-450\n По умолчанию размер стека корутин равен 100 Кб\n \"\"\"\n default_coro_stack_size = 100 * 1024\n ctx.start_balancer(CoroutineStackConfig())\n base_test_coroutine_stack_size(ctx, default_coro_stack_size)\n\n\n@pytest.mark.parametrize('value', [0, -1])\ndef test_log_queue_invalid_max_size_parse_error(ctx, value):\n \"\"\"\n BALANCER-573\n Балансер не должен запускаться если указано нулевое или отрицательное значение для опции log_queue_max_size\n \"\"\"\n with pytest.raises(BalancerStartError):\n 
ctx.start_balancer(LogQueueConfig(log_queue_max_size=value))\n\n\n@pytest.mark.parametrize('value', [0, -1])\ndef test_log_queue_invalid_submit_attempts_parse_error(ctx, value):\n \"\"\"\n BALANCER-573\n Балансер не должен запускаться если указано нулевое или отрицательное значение для опции log_queue_submit_attempts_count\n \"\"\"\n with pytest.raises(BalancerStartError):\n ctx.start_balancer(LogQueueConfig(log_queue_submit_attempts_count=value))\n\n\ndef test_log_queue_flush_interval(ctx):\n \"\"\"\n BALANCER-573\n Логи должны записываються по истечению flush interval\n \"\"\"\n balancer = ctx.start_balancer(LogQueueConfig(log_queue_max_size=1000, log_queue_submit_attempts_count=1, log_queue_flush_interval='3s'))\n\n response = ctx.perform_request(http.request.get())\n asserts.status(response, 200)\n asserts.content(response, 'OK')\n\n log_content = ctx.manager.fs.read_file(balancer.config.instance_log)\n log_size = len(log_content.splitlines())\n time.sleep(3)\n for run in Multirun(plan=[0.1] * 20):\n with run:\n log_content = ctx.manager.fs.read_file(balancer.config.instance_log)\n assert len(log_content.splitlines()) > log_size, \"Log flush interval has already passed, there should be records in the log\"\n\n\n@pytest.mark.parametrize('workers', [1, 3, 20])\ndef test_childs_stats(ctx, workers):\n ctx.start_balancer(InstanceConfig(workers=workers))\n\n for run in Multirun():\n with run:\n alive = ctx.get_unistat()['childs-alive_ammv']\n assert alive == workers\n\n\ndef test_workers_stats(ctx):\n ctx.start_balancer(InstanceConfig(workers=1))\n\n unistat = ctx.get_unistat()\n assert 'worker-tcp_conns_ammv' in unistat\n assert 'worker-tcp_max_conns_ammv' in unistat\n assert 'worker-conts_ready_ammv' in unistat\n assert 'worker-conts_waiting_ammv' in unistat\n assert 'worker-cpu_usage_time_desync_summ' in unistat\n assert 'worker-processed_log_items_summ' in unistat\n assert 'worker-lost_log_items_summ' in unistat\n\n\ndef test_maxconn(ctx):\n req = 'GET / HTTP/1.1\\r\\n\\r\\n'\n ctx.start_balancer(InstanceConfig(maxconn=2, sosndbuf=1, buffer=1))\n time.sleep(1)\n\n # conns = [conn1]\n conn1 = ctx.create_http_connection().create_stream()\n time.sleep(0.1)\n\n # conns = [conn1, conn2]\n conn2 = ctx.create_http_connection().create_stream()\n time.sleep(0.1)\n\n # conns = [conn2, conn1]\n conn1.write(req[0])\n time.sleep(0.1)\n\n # conns = [conn1, conn3], conn2 is evicted\n conn3 = ctx.create_http_connection().create_stream()\n time.sleep(0.1)\n\n # chosen by the LRU connection cancel policy\n with pytest.raises(Exception):\n conn2.write(req[0])\n\n # conns = [conn3, conn1]\n conn1.write(req[1:])\n time.sleep(0.1)\n\n # conns = [conn1, conn3]\n conn3.write(req[1:])\n time.sleep(0.1)\n\n # conns = [conn1, conn3]\n assert conn1.read_response().status == 200\n time.sleep(0.1)\n\n # conns = [conn4, conn1], conn3 is evicted\n conn4 = ctx.create_http_connection().create_stream()\n time.sleep(0.1)\n\n # Chosen by the LRU connection cancel policy. 
Have to perform an additional request to defeat tcp read buffers\n with pytest.raises(Exception):\n conn3.read_response()\n conn3.write(req)\n conn3.read_response()\n\n conn1.write(req)\n assert conn1.read_response().status == 200\n conn4.write(req)\n assert conn4.read_response().status == 200\n\n\ndef test_open_and_close_connection_counters(ctx):\n ctx.start_balancer(InstanceConfig())\n time.sleep(1)\n\n with ctx.create_http_connection() as conn:\n response = conn.perform_request(http.request.get())\n asserts.status(response, 200)\n\n asserts.is_not_closed(conn.sock)\n\n unistat = ctx.get_unistat()\n assert unistat['worker-tcp_conns_ammv'] == 1\n assert unistat['worker-tcp_conns_open_summ'] == 1\n assert unistat['worker-tcp_conns_close_summ'] == 0\n\n unistat = ctx.get_unistat()\n assert unistat['worker-tcp_conns_ammv'] == 0\n assert unistat['worker-tcp_conns_open_summ'] == 1\n assert unistat['worker-tcp_conns_close_summ'] == 1\n\n\ndef test_fd_stats(ctx):\n ctx.start_balancer(InstanceConfig(workers=1))\n\n soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)\n\n fdsize = 0\n master_pid = process.get_children(ctx.balancer.pid, ctx.logger, recursive=False)[0]\n p2 = subprocess.Popen(split(\"cat /proc/\" + str(master_pid) + \"/status\"), stdout=subprocess.PIPE)\n second, _ = p2.communicate()\n lines = second.splitlines()\n for line in lines:\n tmp = line.split('\\t')\n if tmp[0] == \"FDSize:\":\n fdsize = int(tmp[1])\n break\n\n unistat = ctx.get_unistat()\n\n assert unistat['no_file_limit_ammv'] == soft\n assert unistat['fd_size_ammv'] == fdsize\n\n\ndef test_brackets_in_ip_addrs_parsing(ctx):\n with pytest.raises(BalancerStartError):\n ctx.start_balancer(BadIpAddrConfig())\n\n\ndef test_fail_while_not_enough_fd(ctx):\n resource.setrlimit(resource.RLIMIT_NOFILE, (4096, 4096))\n with pytest.raises(BalancerStartError):\n ctx.start_balancer(InstanceConfig(workers=10, set_no_file=True))\n\n\n@pytest.mark.parametrize('workers', [1, 2, 3])\ndef test_pass_when_enough_fd(ctx, workers):\n _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)\n resource.setrlimit(resource.RLIMIT_NOFILE, (20000 * workers, hard))\n ctx.start_balancer(InstanceConfig(workers=workers, set_no_file=True))\n\n\n@pytest.mark.parametrize('workers', [1, 2, 3])\ndef test_set_fd_limit(ctx, workers):\n _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)\n resource.setrlimit(resource.RLIMIT_NOFILE, (10000 * workers, hard))\n ctx.start_balancer(InstanceConfig(workers=workers, set_no_file=True))\n\n unistat = ctx.get_unistat()\n assert unistat['no_file_limit_ammv'] >= 20000 * workers\n\n\ndef test_bad_admin_port(ctx):\n with pytest.raises(BalancerStartError):\n ctx.start_balancer(InstanceConfig(bad_admin_port=True))\n\n path = os.getcwd()\n path += \"/testing_out_stuff/__tests__.test_instance/test_bad_admin_port/balancer_stderr.txt\"\n stderr = ctx.manager.fs.read_file(path)\n assert stderr.find('bind failed for [::1]:1') != -1\n\n\ndef test_bad_stats_port(ctx):\n with pytest.raises(BalancerStartError):\n ctx.start_balancer(InstanceConfig(bad_stats_port=True))\n\n path = os.getcwd()\n path += \"/testing_out_stuff/__tests__.test_instance/test_bad_stats_port/balancer_stderr.txt\"\n stderr = ctx.manager.fs.read_file(path)\n assert stderr.find('bind failed for [::1]:1') != 
-1\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"balancer/test/functional/instance/test_instance.py","file_name":"test_instance.py","file_ext":"py","file_size_in_byte":11917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26106087493","text":"import pdb\nimport time\nimport decimal\nimport datetime\nimport logging\n\nimport pandas as pd\nimport numpy as np\n\nfrom utils.unit import Unit\nfrom utils.charts import BulletGraph\n\nfrom db.database import Database\n\nfrom ibapi.contract import Contract\n\nfrom ib.ibexceptions import GetAccountDataException, \\\n TransformEODDataException, \\\n InsertNewUnitException, \\\n UpdateStopOrderException, \\\n AddStopOrdersException, \\\n GetDataFromMarketDataException, \\\n ResolveContractDetailsException, \\\n IBTimeoutException, \\\n UnresolvedContractException, \\\n MultipleContractException\n\n\nclass Driver(object):\n \"\"\"Class that drives 'The Process'.\n \"\"\"\n\n def __init__(self):\n self.logger = logging.getLogger(__name__)\n\n def spot_trading_opportunities(self, instrument_eod_data, tfs_settings,\n account_size, port_config):\n \"\"\"Checks the EOD data and checks if there are trading\n opportunities.\n\n :param instrument_eod_data: dataframe row containing\n the EOD data for an instrument\n :param tfs_settings: tfs section of the settings file\n :param account_size: equity value of IB account\n :param port_config: portfolio data from config file\n \"\"\"\n\n db = Database()\n\n eod_data = self._transform_eod_data(instrument_eod_data) # dict\n\n ticker = eod_data['ticker']\n eod_data['instrument_id'] = db.get_instrument_id(ticker).instr_id[0]\n eod_data['position_info'] = db.get_position_size(ticker)\n\n stop, target = self._get_stop_and_target(eod_data)\n\n if not pd.isnull(stop):\n instrument_eod_data.loc['stop_price'] = stop\n instrument_eod_data.loc['next_price_target'] = target\n\n new_positions = self._check_positions(\n eod_data,\n tfs_settings,\n account_size,\n port_config)\n\n return new_positions, instrument_eod_data\n\n def draw_bulletgraph(self, instr_data):\n bg = BulletGraph()\n\n chart = bg.draw_graph(\n instr_data,\n size=(8, 5),\n axis_label=\"EOD\", label_color=\"black\",\n bar_color=\"#252525\", target_color='#f7f7f7',\n title=\"My title\")\n\n return chart\n\n def add_columns(self, df, column_names=None):\n \"\"\"Add columns to a dataframe\n\n :param df: dataframe to which columns have to be added\n :param column_names: list of strings with column names\n \"\"\"\n\n if column_names is not None:\n for c in column_names:\n df[c] = np.nan\n\n return df\n\n def get_account_data(self, ib, sleep_time=3):\n \"\"\"Retrieves some data about the IB account.\n\n :param ib: The IB application\n :return: buying power and account size\n\n \"\"\"\n\n try:\n account_info = ib.get_account_summary(9001)\n account_number = account_info[0][1]\n buying_power = float([a[3] for a in account_info\n if a[2] == 'BuyingPower'][0])\n account_size = float([a[3] for a in account_info\n if a[2] == 'NetLiquidation'][0])\n time.sleep(sleep_time)\n return buying_power, account_size, account_number\n except Exception as e:\n raise GetAccountDataException(e)\n\n def get_historical_data(self, ib, instrument, duration=\"60 D\"):\n \"\"\"Get daily forex market data.\n\n :param ib: the ib application\n :param isntrument: .\n\n :return: returns dictionary with forex data.\n \"\"\"\n\n f_metadata = self._get_instrument_metadata(instrument)\n\n try:\n contract = 
self._get_ib_contract(\n ib,\n f_metadata[\"sec_type\"],\n f_metadata[\"ticker\"],\n f_metadata[\"exchange\"],\n f_metadata[\"currency\"])\n historic_data = ib.get_IB_historical_data(\n contract,\n duration=duration)\n except (ResolveContractDetailsException,\n IBTimeoutException,\n UnresolvedContractException,\n MultipleContractException) as e:\n self.logger.error(\"Error getting contract data for %s: %s\" %\n (f_metadata[\"ticker\"], e), exc_info=True)\n return\n except Exception as e:\n self.logger.error(\"Error retrieving historical data for %s: %s\"\n % (f_metadata[\"ticker\"], e), exc_info=True)\n return\n\n return historic_data\n\n def _get_stop_orders(self, open_orders):\n \"\"\"Get stop orders from open order information obect.\n\n :param open_orders: set of open orders\n\n :return: dataframe with stop orders\n \"\"\"\n\n data_dict = {}\n data_dict['stop_price'] = []\n data_dict['quantity'] = []\n data_dict['close_action'] = []\n data_dict['order_id'] = []\n df_index = []\n\n for order_id in open_orders.keys():\n order = open_orders[order_id].order\n if order.orderType == \"STP\":\n contract = open_orders[order_id].contract\n data_dict['stop_price'].append(order.auxPrice)\n data_dict['quantity'].append(int(order.totalQuantity))\n data_dict['close_action'].append(order.action)\n data_dict['order_id'].append(order_id)\n if contract.secType == \"CASH\":\n df_index.append(contract.symbol + contract.currency)\n else:\n df_index.append(contract.symbol)\n\n stop_orders = pd.DataFrame(data_dict, index=df_index)\n\n return stop_orders\n\n def _get_orders(self, open_orders, order_type=None):\n \"\"\"Retrieves specific order types from the open orders information\n object.\n\n :param open_orders: set of open orders to choose from.\n :param order_type: specific order type to look for (None=every type)\n\n :return: dataframe of orders\n \"\"\"\n\n \"\"\"\n print(tempOrderId, order.orderType, order.action, contract.symbol,\n \"(\" + contract.secType + \")\",\n order.totalQuantity, \"@\", order.auxPrice, order.tif)\n \"\"\"\n stop_orders = None\n if order_type == \"STP\":\n stop_orders = self._get_stop_orders(open_orders)\n\n return stop_orders\n\n def add_stop_orders(self, add_to_df, ib):\n \"\"\"Retrieves current stop orders and adds them\n to a specified datasetself.\n\n :param add_to_df: dataset to which orders have to added\n :param to_add: data to add\n :param ib: the ib api\n\n :return: dataframe and contracts\n \"\"\"\n\n try:\n open_orders = ib.get_open_orders()\n stop_orders = self._get_orders(\n open_orders, order_type=\"STP\")\n except Exception as e:\n raise AddStopOrdersException(e)\n\n new_eod_data = add_to_df.join(stop_orders, how='left')\n\n return new_eod_data, open_orders\n\n def update_stop_orders(self, dataset):\n \"\"\"Evaluates a dataframe to check if stop orders\n have to be updated or not.\n\n :param dataset: 2D tuple,\n (0) dataframe with eod_data\n (1) dictionaary with contracts, dict key = order id\n\n :return: ???\n \"\"\"\n\n instruments = dataset[0]\n contracts = dataset[1]\n try:\n open_positions = instruments.loc[instruments['stop_price'] > 0]\n for index, row in open_positions.iterrows():\n if row['close_action'] == \"BUY\": # we're short\n if row['20DayHigh'] < row['stop_price']:\n # update/modify stop order\n print(\"update stop order:\\n\",\n \"\\tSTP {0} {1} {2}@{3}\".format(\n row['close_action'],\n row['ticker'],\n int(row['quantity']),\n row['20DayHigh']))\n elif row['close_action'] == \"SELL\":\n if row['20DayLow'] > row['stop_price']:\n # update/modify 
stop order\n print(\"update stop order:\\n\",\n \"\\tSTP {0} {1} {2}@{3}\".format(\n row['close_action'],\n row['ticker'],\n int(row['quantity']),\n row['20DayLow']))\n except Exception as e:\n raise UpdateStopOrderException(e)\n\n # eod_data.loc[eod_data['close'] > eod_data['55DayHigh']]\n\n def _get_ib_contract(self, ib, security_type, symbol, exchange_name,\n currency):\n \"\"\"Gets the contract from interactive brokers.\n\n :param ib: the ib application\n :param security_type: type of instrument to search\n :param symbol: the symbol or ticker of the instrument\n :param exchange_name: the name of the exchange to search\n :param currency: currency in which instrument is denominated\n\n :return: the IB contract\n \"\"\"\n\n ibcontract = Contract()\n ibcontract.secType = security_type\n ibcontract.symbol = symbol\n ibcontract.exchange = exchange_name\n ibcontract.currency = currency\n\n try:\n return ib.resolve_ib_contract(ibcontract)\n except Exception as e:\n self.logger.error(\"Error getting contract data for %s: %s\"\n % (symbol, e))\n raise\n\n def _get_instrument_metadata(self, instrument):\n \"\"\"Retrieves the metadata for each instrument in the\n instrument list.\n\n :param instrument: instrument as read from the config file\n\n :return: dictionary of instrument properties\n \"\"\"\n\n instrument_props = {}\n instrument_props[\"identifier\"] = instrument[0].upper()\n instrument_props[\"exchange\"] = instrument[1].split(',')[1].lstrip()\n instrument_props[\"sec_type\"] = instrument[1].split(',')[2].lstrip()\n instrument_props[\"currency\"] = instrument[1].split(', ')[3].lstrip().upper()\n instrument_props[\"ticker\"] = instrument[1].split(',')[4].lstrip().upper()\n\n return instrument_props\n\n def get_tfs_settings(self, tfs_settings=None):\n \"\"\"Gets the settings for the trend following strategy\n from the settings.cfg file.\n\n :param tfs_settings: the 'tfs' section of the settings file\n\n :return: returns a dictionary with the tfs settings.\n \"\"\"\n\n tfs_settings_dict = {}\n tfs_settings_dict['atr_horizon'] = int(tfs_settings['atr_horizon'])\n\n tfs_settings_dict['entry_breakout_periods'] = int(tfs_settings['entry_breakout_periods'])\n tfs_settings_dict['exit_breakout_periods'] = int(tfs_settings['exit_breakout_periods'])\n tfs_settings_dict['account_risk'] = decimal.Decimal(tfs_settings['account_risk'])\n tfs_settings_dict['unit_stop'] = int(tfs_settings['unit_stop'])\n tfs_settings_dict['first_unit_stop'] = int(tfs_settings['first_unit_stop'])\n tfs_settings_dict['nr_equities'] = int(tfs_settings['nr_equities'])\n tfs_settings_dict['nr_units'] = int(tfs_settings['nr_units'])\n tfs_settings_dict['max_units'] = int(tfs_settings['max_units'])\n\n return tfs_settings_dict\n\n def _transform_eod_data(self, data_row):\n \"\"\"Transforms eod data in a dataframe row\n into a dictionary.\n\n :param data_row: dataframe row containing the data\n\n :return: dict containing EOD data.\n \"\"\"\n\n eod_transform = {}\n\n try:\n eod_transform['close_price'] = data_row['close']\n eod_transform['lt_day_high'] = data_row['55DayHigh']\n eod_transform['lt_day_low'] = data_row['55DayLow']\n eod_transform['st_day_high'] = data_row['20DayHigh']\n eod_transform['st_day_low'] = data_row['20DayLow']\n eod_transform['ticker'] = data_row['ticker']\n eod_transform['atr'] = data_row['atr']\n eod_transform['pos_size_1'] = data_row['pos_size (1st)']\n eod_transform['pos_size_2'] = data_row['pos_size (other)']\n except Exception as e:\n raise TransformEODDataException(e)\n\n return eod_transform\n\n def 
_insert_new_unit(\n self,\n instrument_data,\n tfs_settings,\n account_size,\n position_type,\n create_new_position=False,\n first_unit_bool=True,\n position_id=0):\n \"\"\"Creates a unit, adds it to a position and update\n position data.\n\n \"\"\"\n db = Database()\n\n max_unit_id = instrument_data['position_info'].unit_id.max()\n pos_id = instrument_data['position_info'].pos_id.min()\n\n if first_unit_bool:\n unit_id = 1\n else:\n unit_id = max_unit_id + 1\n\n try:\n unit = Unit(\n account_size=account_size,\n atr=instrument_data['atr'],\n account_risk=tfs_settings['account_risk'],\n unit_stop=tfs_settings['unit_stop'],\n first_unit_stop=tfs_settings['first_unit_stop'],\n nr_equities=tfs_settings['nr_equities'],\n nr_units=tfs_settings['nr_units'],\n ticker=instrument_data['ticker'],\n price=instrument_data['close_price'],\n pos_type=position_type,\n first_unit=first_unit_bool)\n new_unit = db.create_unit(\n unit,\n unit_id,\n position_id,\n position_type)\n position_info = db.get_position_size(instrument_data['ticker'])\n updated_pos = db.update_position(\n position_info=position_info)\n except Exception as e:\n raise InsertNewUnitException(\n \"Could not add new unit to the \"\n \"database. Error: \\n\", e)\n\n def _create_first_unit(\n self,\n instrument_data,\n tfs_settings,\n account_size,\n portfolio_config):\n \"\"\"Checks if a brand new position has to be opened. If not,\n no action is taken.\n\n :param instrument_data: row with instrument specific data:\n 'close_price'\n 'lt_day_high'\n 'lt_day_low'\n 'st_day_high'\n 'st_day_low'\n 'ticker'\n 'atr'\n 'pos_size_1'\n 'pos_size_2'\n :param tfs_settings: tfs settings from config file\n :param account_size: IB account size\n :param portfolio_config: portfolio data from config file\n\n :return:\n\n \"\"\"\n db = Database()\n ticker = instrument_data['ticker']\n\n potential_new_unit = self._potential_new_unit(\n instrument_data['close_price'],\n instrument_data['lt_day_high'],\n instrument_data['lt_day_low'])\n\n if potential_new_unit is not None:\n create_new_unit = potential_new_unit[0] # True/False\n pos_type = potential_new_unit[1] # long/short\n action = \"BUY\" if pos_type == \"long\" else \"SELL\"\n if create_new_unit:\n instrument_id = db.get_instrument_id(ticker).instr_id[0]\n self.logger.info(\"Ready to create %s position for %s.\" %\n (pos_type.upper(), ticker))\n # position_info = db.get_position_size(ticker)\n prepared_order = self._prepare_order(\n instrument_data,\n portfolio_config,\n action)\n\n \"\"\"\n new_position = db.create_position(\n instrument_id,\n date_today_iso)\n self._insert_new_unit(\n instrument_data,\n tfs_settings,\n account_size,\n pos_type,\n first_unit_bool=True,\n position_id=new_position)\n position_info = db.get_position_size(ticker)\n updated_pos = db.update_position(\n position_info=position_info)\n \"\"\"\n\n def _add_new_unit(\n self,\n instrument_data,\n tfs_settings,\n account_size,\n portfolio_config):\n \"\"\"Checks if we have to add a new unit to an existing\n position.\n\n :param portfolio_config: portfolio data from config file\n\n \"\"\"\n\n db = Database()\n\n ticker = instrument_data['ticker']\n pos_id = instrument_data['position_info'].pos_id.min()\n pos_size = instrument_data['position_info'].pos_size.min()\n risk_exposure = instrument_data['position_info'].risk_exposure.min()\n max_unit_id = instrument_data['position_info'].unit_id.max()\n\n if pos_size < 0:\n pos_type = \"short\"\n price_target = instrument_data['position_info'].next_price_target.min()\n stop_price = 
instrument_data['position_info'].stop_price.min()\n elif pos_size > 0:\n pos_type = \"long\"\n price_target = instrument_data['position_info'].next_price_target.max()\n stop_price = instrument_data['position_info'].stop_price.min()\n\n action = \"BUY\" if pos_type == \"long\" else \"SELL\"\n\n # check if we have to add units or move up stops\n if ((instrument_data['close_price'] > price_target\n and pos_type == \"long\")\n or (instrument_data['close_price'] < price_target\n and pos_type == \"short\")):\n if max_unit_id < tfs_settings['max_units']:\n print('add new unit for {0}'.format(ticker))\n self.logger.info(\"Add new unit for %s.\" % ticker)\n prepared_order = self._prepare_order(\n instrument_data,\n portfolio_config,\n action,\n unit_nr=max_unit_id + 1)\n \"\"\"\n self._insert_new_unit(\n instrument_data,\n tfs_settings,\n account_size,\n pos_type,\n first_unit_bool=False,\n position_id=pos_id)\n position_info = db.get_position_size(ticker)\n updated_pos = db.update_position(\n position_info=position_info)\n \"\"\"\n elif max_unit_id == tfs_settings['max_units']:\n if risk_exposure > 0:\n print(\"move up stop for {0} and \"\n \"set new stop level.\".format(ticker))\n self.logger.info(\n \"Move up stop for %s and \"\n \"set new stop level\" % ticker.upper())\n # calculate new stop, move it up only 1 ATR\n updated_pos = db.update_position(\n position_info=position_info,\n break_even=True)\n\n def _check_positions(\n self,\n instrument_data,\n tfs_settings,\n account_size,\n portfolio_config):\n \"\"\"Checks if we can open new positions\n or scale in on existing ones.\n\n :param instrument_data: dict containing essential\n information about the instrument:\n 'close_price'\n 'lt_day_high'\n 'lt_day_low'\n 'st_day_high'\n 'st_day_low'\n 'ticker'\n 'atr'\n 'pos_size_1'\n 'pos_size_2'\n :param tfs_data: contains data in tfs section of config file\n :param account_size: value of the trading account\n :param portfolio_config: portfolio data from config file\n\n :return: ???\n \"\"\"\n tfs_settings = self.get_tfs_settings(tfs_settings) # dict\n\n if instrument_data['position_info'].shape[0] == 0: # no positions\n self._create_first_unit(\n instrument_data,\n tfs_settings,\n account_size,\n portfolio_config)\n elif instrument_data['position_info'].shape[0] > 0: # >=1 positions\n new_unit = self._add_new_unit(\n instrument_data,\n tfs_settings,\n account_size,\n portfolio_config)\n\n def _potential_new_unit(self, close_price, lt_day_high, lt_day_low):\n \"\"\"Checks if a new unit has to be created.\n\n :param close_price: close price of instrument\n :param lt_day_high: the long term (55 day) high\n :param lt_day_low: the long term (55 day) low\n\n :return: (True/False, long/short)\n \"\"\"\n\n pos_type = \"long\"\n create_new_unit = False\n\n if close_price > lt_day_high:\n create_new_unit = True\n elif close_price < lt_day_low:\n pos_type = \"short\"\n create_new_unit = True\n else:\n return\n\n return (create_new_unit, pos_type)\n\n def _get_stop_and_target(self, position_info):\n \"\"\"\n Retrieves stop price and next target price\n for a given position.\n\n :param position_info: information about the positions\n :return: 2 dimensional tuple with stop price and\n next target price, np.nan if empty (so no open positions)\n \"\"\"\n\n pos_size = position_info['position_info'].pos_size.min()\n if pos_size < 0:\n price_target = position_info['position_info'].next_price_target.min()\n stop_price = position_info['position_info'].stop_price.min()\n elif pos_size > 0:\n price_target = 
position_info['position_info'].next_price_target.max()\n stop_price = position_info['position_info'].stop_price.max()\n\n if not pd.isnull(pos_size):\n return stop_price, price_target\n else:\n return np.nan, np.nan\n\n def prepare_orders(self, eod_data, instr_list):\n \"\"\"Checks if orders have to be prepared based on\n the EOD data.\n\n :param eod_data: dataset with all relevant EOD data\n :param instr_list: portfolio data from config file\n\n :return: ???\n \"\"\"\n\n prepared_orders = []\n db = Database()\n\n candidates = \\\n eod_data.loc[(eod_data['close'] < eod_data['55DayLow'])\n | (eod_data['close'] > eod_data['55DayHigh'])]\n\n if not candidates.empty:\n for index, row in candidates.iterrows():\n ticker = row['ticker']\n quantity = row['pos_size (1st)']\n if row['close'] > row['55DayHigh']:\n action = \"BUY\"\n elif row['close'] < row['55DayLow']:\n action = \"SELL\"\n\n meta = [ins for ins in instr_list\n if ins[0].upper() == ticker][0]\n meta_dict = self._get_instrument_metadata(meta)\n db.add_order_to_queue(quantity, action, \"LMTADP\",\n ticker=ticker,\n sectype=meta_dict['sec_type'],\n exchange=meta_dict['exchange'],\n currency=meta_dict['currency'])\n\n prepared_orders.append(ticker)\n\n return prepared_orders\n\n def _prepare_order(self, eod_data, portfolio_config, action,\n unit_nr=1):\n \"\"\"Checks if orders have to be prepared based on\n the EOD data.\n\n :param eod_data: dataset with all relevant EOD data\n 'close_price'\n 'lt_day_high'\n 'lt_day_low'\n 'st_day_high'\n 'st_day_low'\n 'ticker'\n 'atr'\n 'pos_size_1'\n 'pos_size_2'\n :param portfolio_config: portfolio data from config file\n\n :return: ???\n \"\"\"\n\n db = Database()\n\n ticker = eod_data['ticker']\n if unit_nr == 1:\n quantity = eod_data['pos_size_1']\n else:\n quantity = eod_data['pos_size_2']\n\n meta = [ins for ins in portfolio_config\n if ins[0].upper() == ticker][0]\n meta_dict = self._get_instrument_metadata(meta)\n\n db.add_order_to_queue(quantity, action, \"LMTADP\",\n ticker=ticker,\n sectype=meta_dict['sec_type'],\n exchange=meta_dict['exchange'],\n currency=meta_dict['currency'],\n unit_nr=unit_nr)\n\n return ticker\n\n def trace_order_status(self, orderId, order_details):\n \"\"\"Monitor the status of an order and\n takes action to update the pending orders\n table.\n\n :param order_details: object with order info\n orderInformation(\n orderId, status=status, filled=filled,\n avgFillPrice=avgFillPrice, permid=permid,\n parentId=parentId, lastFillPrice=lastFillPrice,\n clientId=clientId, whyHeld=whyHeld)\n :return:\n \"\"\"\n\n self.logger.info(\n \"ORDER DETAILS UPDATE: \"\n \"Order status: %s, \"\n \"filled: %s, \"\n \"avgFillPrice: %s, \"\n \"lastFillPrice: %s, \"\n \"order id: %s.\" %\n (order_details.status, order_details.filled,\n order_details.avgFillPrice, order_details.lastFillPrice,\n orderId))\n\n if order_details.status.lower() == \"filled\":\n self.logger.info(\"ORDER %s FILLED!\" % (orderId))\n","repo_name":"zwocram/TFS","sub_path":"tfs/utils/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":25809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19709670924","text":"# -*- coding:utf-8 -*-\n'''\nCreated on 2018.3.18\n\n@author: Yu Qu\n'''\nimport networkx as nx\nfrom math import *\n\n##########################################################\ndef static_analysis():\n G = nx.DiGraph()\n findFile = open(subject+'/classgraph.dot','r')\n each_lines= findFile.readlines()\n for each_line in 
each_lines:\n if each_line.__contains__('>'):\n edge=each_line.split('>')\n edge[0]=edge[0][edge[0].index('\\\"')+1:edge[0].rindex('\\\"')]\n edge[1]=edge[1][edge[1].index('\\\"')+1:edge[1].rindex('\\\"')]\n if(G.has_edge(edge[0],edge[1])==False):\n G.add_edge(edge[0],edge[1])\n else:\n if each_line.count('\\\"')==2:\n node=each_line[each_line.index('\\\"')+1:each_line.rindex('\\\"')]\n if(G.has_node(node)==False):\n G.add_node(node) \n findFile.close()\n return G\n##########################################################\nconf_file=open('Subject.dict')\nlines=conf_file.readlines()\nfor each_line in lines:\n records=each_line.strip().split(',')\n subject=records[0]\n bug_file=records[1]\n edge_file=open(subject+'/edgelist','w')\n mapping_file=open(subject+'/node_mapping','w')\n \n G=static_analysis()\n print('Number of Nodes:'+str(G.number_of_nodes()))\n print('Number of Edges:'+str(G.size())) \n nx.write_gexf(G,subject+\"/GraphForTraditionalNetworkMetrics.gexf\")\n \n node_list=list(G.nodes())\n for e in G.edges():\n source=e[0]\n dist=e[1]\n s=node_list.index(source)\n d=node_list.index(dist)\n edge_file.write(str(s)+' '+str(d)+'\\n')\n index=0\n for each_node in node_list:\n mapping_file.write(each_node+','+str(index)+'\\n')\n index=index+1\n \n","repo_name":"quyutest/node2defect","sub_path":"Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"14885914664","text":"import logging\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import clone, metrics\nfrom sklearn.model_selection import cross_validate\n\nfrom util.estimator.tests.base import EstimatorTestMixin\n\n\nclass RegressorTestMixin(EstimatorTestMixin):\n PRECISION = 2\n\n def test_coverage_and_rmse(self, *nargs):\n logging.info(\"Loading %s model goldset\", self.MODEL_CLS.__name__)\n started_at = pd.Timestamp.now(tz=\"US/Pacific\")\n model, X, y = self._load_model_goldset(*nargs)\n scorers = self._make_scorers(model)\n\n logging.info(\"Starting %s validation\", self.MODEL_CLS.__name__)\n if hasattr(model, \"_goldset_weights\"):\n # NOTE: Add `return_train_score = True` to check for overfitting\n scores = cross_val_scores_weighted(model, X, y, model._goldset_weights(),\n cv=model._cv_splitter(), scorers=scorers)\n else:\n scores = cross_validate(model, X, y, cv=model._cv_splitter(),\n scoring=scorers)\n\n self._test_best_score(scores[\"test_coverage\"].mean(), method=\"test_coverage\", dt=started_at)\n\n # NOTE: make_scorer() negates RMSEs to make greater better. 
Negating back for readability.\n self._test_best_score(-scores[\"test_rmse\"].mean(), method=\"test_rmse\", dt=started_at, greater_is_better=False)\n self._test_best_score(scores[\"test_equality\"].mean(), method=\"test_equality\", dt=started_at)\n\n @classmethod\n def _make_scorers(cls, model):\n # NOTE: All our metrics ignore NaN values\n # except for coverage which explicitly checks for those\n def _coverage(y_true, y_pred, sample_weight=None, **kwargs):\n if sample_weight is None:\n sample_weight = pd.Series(np.ones(y_true.shape), index=y_true.index)\n numerator = float((pd.notna(y_pred) * sample_weight).sum())\n denominator = sample_weight.sum()\n return numerator / denominator\n\n def _equality(y_true, y_pred, sample_weight=None, **kwargs):\n nan_mask = pd.notna(y_pred)\n if sample_weight is None:\n sample_weight = pd.Series(np.ones(y_true.shape), index=y_true.index)\n numerator = ((y_true[nan_mask].round(cls.PRECISION) == y_pred[nan_mask].round(cls.PRECISION)) * sample_weight).sum()\n denominator = sample_weight[nan_mask].sum()\n return numerator / denominator\n\n scorers = {\n \"coverage\": metrics.make_scorer(_coverage),\n \"rmse\": metrics.make_scorer(model._rmse, greater_is_better=False),\n \"equality\": metrics.make_scorer(_equality)\n }\n return scorers\n\n# Adapted from https://github.com/scikit-learn/scikit-learn/issues/4632#issuecomment-393945555\ndef cross_val_scores_weighted(model, X, y, weights, cv, scorers):\n scores = defaultdict(list)\n\n for train_ix, test_ix in cv.split(X, y):\n model_clone = clone(model)\n X_train, X_test = X.iloc[train_ix], X.iloc[test_ix]\n y_train, y_test = y.iloc[train_ix], y.iloc[test_ix]\n\n weights_train, weights_test = weights.iloc[train_ix], weights.iloc[test_ix]\n model_clone.fit(X_train, y_train, sample_weight=weights_train)\n\n y_pred = model_clone.predict(X_test)\n for name, scorer in scorers.items():\n score = scorer._sign * scorer._score_func(y_test, y_pred, sample_weight=weights_test)\n scores[\"test_%s\" % name].append(score)\n\n scores = { name: np.array(results) for name, results in scores.items() }\n return scores\n","repo_name":"kushalc/coreutils","sub_path":"util/estimator/tests/regressor.py","file_name":"regressor.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27084792807","text":"#!/usr/bin/env python\nimport sys\nimport ROOT\nimport numpy as np\n\ntry:\n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n import plotly.io as pio\n import chart_studio.plotly as charts\n\n pio.renderers.default = \"browser\"\n pio.templates.default = \"plotly_white\"\n\nexcept ImportError:\n print(\"Sorry, but to make the nice plots, you really need a computer with 'plotly' installed.\")\n sys.exit(1)\n\nlib_found = ROOT.gSystem.FindDynamicLibrary(\"libRasterLib\")\nif len(lib_found):\n ROOT.gSystem.Load(\"libRasterLib\")\nelse:\n print(\"Please make sure the libRasterLib (.so, .dylib) is in your LD_LIBRARY_PATH.\")\n sys.exit(1)\n\nif len(sys.argv) < 2:\n print(f\"Please specify input file. 
{sys.argv[0]}\")\n sys.exit(1)\n\nras = ROOT.RasterEvioTool(sys.argv[1])\nras.Next()\nras.Next()\nprint(f\"Analyzing the Raster data for run: {ras.GetRunNumber()} (this is zero for special runs.)\")\n#\nif len(sys.argv) > 2:\n N = int(sys.argv[2])\nelse:\n N = 70000\n\nif len(sys.argv) > 3:\n skip = int(sys.argv[3])\nelse:\n skip = 0\n\nfor i in range(skip):\n ras.Next()\n\ntimes = np.zeros(N)\nevtnum = np.zeros(N)\nadc0 = np.zeros(N)\nadc1 = np.zeros(N)\nhover = [\"\"]*N\ni = 0\nfirst_time = ras.GetTimeRasterCrate()\nwhile i < N:\n ras.Next()\n evtnum[i] = ras.GetEventNumberRasterCrate()\n times[i] = 5*(ras.GetTimeRasterCrate() - first_time)/1E9\n hover[i] = f\"time: {times[i]:6.3f}s
event: {i+skip:10,d}\"\n adc0[i] = ras.GetRaster(0)\n adc1[i] = ras.GetRaster(1)\n i += 1\n\nfig = make_subplots(rows=2, cols=3, specs=[[{}, {}, {}], [{\"colspan\": 3}, None, None]],\n subplot_titles=[\"adc1 vs adc0, first 1/2 of cycle.\", \"adc1 vs adc0, first 1/2 of cycle.\",\n \"adc1 vs adc0 histogram\", \"adc values versus time stamp.\"])\n# fig.add_trace(go.Scatter(y=evtnum, name=\"event number\", line=dict(color=\"green\")), row=1, col=1, secondary_y=True)\n# fig.add_trace(go.Scatter(y=times, name=\"time\", line=dict(color=\"blue\")), row=1, col=1)\nfig.add_trace(go.Scatter(x=adc0[0:N//2], y=adc1[0:N//2], name=\"adc1 vs adc0\",\n mode='markers',\n marker_size=1.5,\n hovertext=hover[0:N//2],\n marker_color=times[0:N//2]), row=1, col=1)\nfig.add_trace(go.Scatter(x=adc0[N//2:N], y=adc1[N//2:N], name=\"adc1 vs adc0\",\n mode='markers',\n marker_size=1.5,\n hovertext=hover[N//2:N],\n marker_color=times[N//2:N]), row=1, col=2)\n\n# fig.add_trace(go.Scatter(x=adc0, y=adc1, name=\"adc1 vs adc0\",\n# mode='markers',\n# hovertext=hover,\n# marker_size=1.5,\n# marker_color=times), row=1, col=3)\n\nfig.add_trace(go.Histogram2d(x=adc0, y=adc1,\n autobinx=False,\n # histnorm='probability',\n xbins=dict(start=0., end=4096, size=20.),\n autobiny=False,\n ybins=dict(start=0., end=4096., size=20.),\n # colorscale=[[0, 'rgb(12,51,131)'], [0.25, 'rgb(10,136,186)'], [0.5, 'rgb(242,211,56)'], [0.75, 'rgb(242,143,56)'], [1, 'rgb(217,30,30)']]\n ),\n row=1, col=3)\n\n\nfig.add_trace(go.Scatter(x=times, y=adc0, name=\"adc0\", hovertext=hover, line=dict(color=\"red\")), row=2, col=1)\nfig.add_trace(go.Scatter(x=times, y=adc1, name=\"adc0\", hovertext=hover, line=dict(color=\"green\")), row=2, col=1)\nfig.update_layout(\n title=go.layout.Title(\n text=\"Plots Raster ADC Bank 59\",\n yanchor=\"top\",\n y=0.98,\n xanchor=\"center\",\n x=0.5,\n font=dict(size=24)))\nfig.update_xaxes(title_text=\"ADC 0\", row=1, col=1)\nfig.update_yaxes(title_text=\"ADC 1\", row=1, col=1)\nfig.update_xaxes(title_text=\"ADC 0\", row=1, col=2)\nfig.update_yaxes(title_text=\"ADC 1\", row=1, col=2)\nfig.update_xaxes(title_text=\"ADC 0\", row=1, col=3)\nfig.update_yaxes(title_text=\"ADC 1\", row=1, col=3)\n\nfig.update_xaxes(title_text=\"5* time stamp/1e9 [s]\", row=2, col=1)\nfig.update_yaxes(title_text=\"ADC raw value\", row=2, col=1)\n\nfig.write_html(\"raster_adc.html\")\nfig.write_image(\"raster_adc.pdf\", width=4000, height=2000)\nfig.show()\n","repo_name":"JeffersonLab/RasterMon","sub_path":"Plot_WaveForm.py","file_name":"Plot_WaveForm.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7778245857","text":"#!/usr/bin/Python\nimport hmac\nimport hashlib\nimport logging\nimport requests\nimport time\ntry:\n\tfrom urllib import urlencode\n\n# for python3\nexcept ImportError:\n\tfrom urllib.parse import urlencode\n\n\nENDPOINT = \"https://www.exchbtc.com\"\n\nBUY = \"BUY\"\nSELL = \"SELL\"\n\nLIMIT = \"LIMIT\"\nMARKET = \"MARKET\"\n\nGTC = \"GTC\"\nIOC = \"IOC\"\n\noptions = {}\n\n\ndef set(apiKey, secret):\n\t\"\"\"Set API key and secret.\n\tMust be called before any making any signed API calls.\n\t\"\"\"\n\toptions[\"apiKey\"] = apiKey\n\toptions[\"secret\"] = secret\n\t\ndef AllOrders(status, **kwargs):\n\tparams = {\"status\":status}\n\tparams.update(kwargs)\n\tdata = signedRequest(\"GET\", \"/api/v7/allOrders\", params)\n\treturn data\n\t\ndef order(symbol, side, type, qty, price, newClientOrderID, **kwargs):\n\tparams = 
{\n\t\t\"symbol\": symbol, \n\t\t\"side\":side,\n\t\t\"type\": type,\n\t\t\"quantity\":qty,\n\t\t\"price\":price,\n\t\t\"newClientOrderID\":newClientOrderID\n\t\t}\n\tparams.update(kwargs)\n\tdata = signedRequest(\"GET\", \"/api/v7/order\", params)\n\treturn data\n\t\ndef exchangeInfo():\n\tparams = {}\n\tdata = signedRequest(\"GET\", \"/api/v7/exchangeInfo\", params)\n\treturn data\n\t\ndef balance(**kwargs):\n\tparams = {}\n\tparams.update(kwargs)\n\tdata = signedRequest(\"GET\", \"/api/v7/balance\", params)\n\treturn data\n\ndef request(method, path, params=None):\n\tresp = requests.request(method, ENDPOINT + path, params=params)\n\tdata = resp.json()\n\tif \"msg\" in data:\n\t\tlogging.error(data['msg'])\n\treturn data\n\n\ndef signedRequest(method, path, params):\n\tif \"apiKey\" not in options or \"secret\" not in options:\n\t\traise ValueError(\"Api key and secret must be set\")\n\n\tquery = urlencode(sorted(params.items()))\n\tquery += \"×tamp={}\".format(int(time.time() * 1000)-500)\n\tprint(query)\n\tsecret = bytes(options[\"secret\"].encode(\"utf-8\"))\n\tsignature = hmac.new(secret, query.encode(\"utf-8\"),\n\t\t\t\t\t\t hashlib.sha256).hexdigest()\n\tquery += \"&signature={}\".format(signature)\n\tresp = requests.request(method,\n\t\t\t\t\t\t\tENDPOINT + path + \"?\" + query,\n\t\t\t\t\t\t\theaders={\"X-MBX-APIKEY\": options[\"apiKey\"]})\n\tprint(resp.url)\t\n\tdata = resp.json()\n\tif \"msg\" in data:\n\t\tlogging.error(data['msg'])\n\treturn data\n\ndef formatNumber(x):\n\tif isinstance(x, float):\n\t\treturn \"{:.8f}\".format(x)\n\telse:\n\t\treturn str(x)\n\nif __name__ == '__main__':\n\tapikey = 'b3f8cddfbff0f8c2d27b41cff6403fec33c87fd4f85379259955e25be7a6a6a1'\n\tsecretkey = '4fcf58f6e4471124049f608c69acbb9b729f168ff341335d9e5af7ff495f59d6'\n\tset(apikey, secretkey)\n\tprint(exchangeInfo())\n\t\n\t\n\t","repo_name":"exchbt/example","sub_path":"exchbtc.py","file_name":"exchbtc.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32275225819","text":"from fastapi import FastAPI, Header, Depends, HTTPException\nimport auth\nimport item\nfrom post_parameter import PostCredential, PostRefreshToken, PostItem\nimport datetime\n\ntags_metadata = [\n {\n \"name\": \"認証\",\n \"description\": \"ログインやトークンの管理など認証に関する操作を扱います。\",\n },\n {\n \"name\": \"アイテム\",\n \"description\": \"項目の CRUD 操作を扱います。\",\n }\n]\n\nitem_repository = item.ItemRepository()\nauth_repository = auth.AuthRepository()\n\napp = FastAPI(\n title=\"高速サンプルサーバー\",\n description=\"開発環境の確認などサンプルのサーバーが必要になった時に使い���す。\",\n version=\"0.1.0\",\n openapi_tags=tags_metadata\n)\n\n\ndef get_auth_token(Authorization: str = Header(None)):\n if Authorization is None:\n raise HTTPException(\n 401, headers={\"WWW-Authenticate\": 'Bearer realm=\"token_required\"'})\n if Authorization[:7] != \"Bearer \":\n raise HTTPException(\n 400, headers={\"WWW-Authenticate\": 'Bearer realm=\"invalid_request\"'})\n return Authorization[7:]\n\n\ndef is_authorized(Authorization: str = Header(None)):\n token = get_auth_token(Authorization)\n return auth_repository.isInclude(token, datetime.datetime.now())\n\n# ------------------------------------------------------------------------------\n\n\n@app.post(\"/auth/token\", tags=[\"認証\"], description=\"トークンを作成します ※ ログイン\")\ndef create_token(credential: PostCredential):\n return auth_repository.authenticate(credential)\n\n\n@app.post(\"/auth/refresh\", tags=[\"認証\"], description=\"トークンを更新します\")\ndef 
refresh_token(id: int, refresh_token: PostRefreshToken):\n return auth_repository.refresh(id, refresh_token)\n\n\n@app.delete(\"/auth/token\", tags=[\"認証\"], description=\"トークンを削除します ※ログアウト\")\ndef drop_token(token: str = Depends(get_auth_token)):\n return auth_repository.remove(token)\n\n# ------------------------------------------------------------------------------\n\n\n@app.get(\"/items\", tags=[\"アイテム\"], description=\"一覧を取得します\")\ndef get_items():\n return item_repository.list()\n\n\n@app.post(\"/item\", tags=[\"アイテム\"], description=\"新規登録します\")\ndef create_item(post_item: PostItem, isAuthorized: str = Depends(is_authorized)):\n if not isAuthorized:\n raise HTTPException(401)\n return item_repository.append(post_item)\n\n\n@app.put(\"/item/{id}\", tags=[\"アイテム\"], description=\"更新します\")\ndef update_item(id: int, post_item: PostItem, isAuthorized: str = Depends(is_authorized)):\n if not isAuthorized:\n raise HTTPException(401)\n return item_repository.update(id, post_item)\n\n\n@app.delete(\"/item/{id}\", tags=[\"アイテム\"], description=\"削除します\")\ndef remove_item(id: int, isAuthorized: str = Depends(is_authorized)):\n if not isAuthorized:\n raise HTTPException(401)\n return item_repository.remove(id)\n","repo_name":"MasayukiFukada/FastSampleServer","sub_path":"main_app.py","file_name":"main_app.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71788343441","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport re\nimport os\nfrom subprocess import Popen\nimport socket\nfrom threading import Thread\nfrom typing import Dict, List\nfrom src.senders import Sender\nfrom os.path import join\n\n\nRECEIVER_FILE = \"run_receiver.py\"\nAVERAGE_SEGMENT_SIZE = 80\nQUEUE_LOG_FILE = \"downlink_queue.log\"\nQUEUE_LOG_TMP_FILE = \"downlink_queue_tmp.log\"\n\nDROP_LOG = \"debug_log.log\"\nDROP_LOG_TMP_FILE = \"debug_log_tmp.log\"\n\ndef generate_mahimahi_command(mahimahi_settings: Dict) -> str:\n if mahimahi_settings.get('loss'):\n loss_directive = \"mm-loss downlink %f\" % mahimahi_settings.get('loss')\n else:\n loss_directive = \"\"\n\n queue_type = mahimahi_settings.get('queue_type', 'droptail')\n\n if mahimahi_settings.get('downlink_queue_options'):\n downlink_queue_options = \"--downlink-queue-args=\" + \",\".join(\n [\"%s=%s\" % (key, value)\n for key, value in mahimahi_settings.get('downlink_queue_options').items()]\n )\n else:\n downlink_queue_options = \"\"\n\n if mahimahi_settings.get('uplink_queue_options'):\n uplink_queue_options = \" \".join(\n [\"--downlink-queue-args=%s=%s\" % (key, value)\n for key, value in mahimahi_settings.get('uplink_queue_options').items()]\n )\n else:\n uplink_queue_options = \"\"\n\n return \"mm-delay {delay} {loss_directive} mm-link traces/{trace_file} traces/{trace_file} --downlink-queue={queue_type} {downlink_queue_options} {uplink_queue_options} --downlink-queue-log={queue_log_file}\".format(\n delay=mahimahi_settings['delay'],\n downlink_queue_options=downlink_queue_options,\n uplink_queue_options=uplink_queue_options,\n loss_directive=loss_directive,\n trace_file=mahimahi_settings['trace_file'],\n queue_type=queue_type,\n queue_log_file=QUEUE_LOG_FILE\n )\n\ndef get_open_udp_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n s.bind(('', 0))\n port = s.getsockname()[1]\n s.close()\n return port\n\nSENDER_COLORS = [\"blue\", \"red\", \"green\", \"cyan\", \"magenta\", 
\"yellow\", \"black\"]\n\ndef print_performance(\n senders: List[Sender],\n num_seconds: int,\n episode_num : int,\n write_to_disk : bool ,\n output_dir : str,\n experiment_dir : str\n ):\n\n if write_to_disk:\n with open(join(experiment_dir, \"episode_\" + str(episode_num) + \"_stats.txt\" ), 'w') as out_stats:\n for sender in senders:\n out_stats.write(\"Results for sender %d, with strategy: %s\" % (sender.port, sender.strategy.__class__.__name__) + \"\\n\")\n out_stats.write(\"**Throughput:** %f bytes/s\" % (AVERAGE_SEGMENT_SIZE * (sender.strategy.ack_count/num_seconds)) + \"\\n\")\n out_stats.write(\"**Average RTT:** %f ms\" % ((float(sum(sender.strategy.rtts))/len(sender.strategy.rtts)) * 1000) + \"\\n\")\n out_stats.write(\"\\n\")\n else:\n for sender in senders:\n print(\"Results for sender %d, with strategy: %s\" % (sender.port, sender.strategy.__class__.__name__))\n print(\"**Throughput:** %f bytes/s\" % (AVERAGE_SEGMENT_SIZE * (sender.strategy.ack_count/num_seconds)))\n print(\"**Average RTT:** %f ms\" % ((float(sum(sender.strategy.rtts))/len(sender.strategy.rtts)) * 1000))\n print(\"\")\n\n\n # Compute the queue log stuff\n queue_log_lines = open(QUEUE_LOG_TMP_FILE).read().split(\"\\n\")[1:]\n regex = re.compile(\"\\d+ # (\\d+)\")\n\n plt.plot([int(regex.match(line).group(1)) for line in queue_log_lines if regex.match(line) is not None])\n\n plt.xlabel(\"Time\")\n plt.ylabel(\"Link Queue Size\")\n\n if write_to_disk:\n plt.savefig(join(experiment_dir, \"episode_\" + str(episode_num) + \"_link-queue-size.png\" ))\n plt.close()\n else:\n plt.show()\n\n handles = []\n for idx, sender in enumerate(senders):\n plt.plot(*zip(*sender.strategy.cwnds), c=SENDER_COLORS[idx], label=sender.strategy.__class__.__name__)\n plt.legend()\n plt.xlabel(\"Time\")\n plt.ylabel(\"Congestion Window Size\")\n\n if write_to_disk:\n plt.savefig(join(experiment_dir, \"episode_\" + str(episode_num) + \"_cwnd.png\" ))\n plt.close()\n else:\n plt.show()\n print(\"\")\n\n for idx, sender in enumerate(senders):\n plt.plot(*zip(*sender.strategy.rtt_recordings), c=SENDER_COLORS[idx], label=sender.strategy.__class__.__name__)\n plt.legend()\n plt.xlabel(\"Time\")\n plt.ylabel(\"Current RTT\")\n if write_to_disk:\n plt.savefig(join(experiment_dir, \"episode_\" + str(episode_num) +\"_rtt.png\" ))\n plt.close()\n else:\n plt.show()\n\ndef run_with_mahi_settings(\n mahimahi_settings: Dict, \n seconds_to_run: int, \n senders: List, \n should_print_performance: bool , \n episode_num : int,\n write_to_disk : bool ,\n output_dir : str,\n experiment_dir : str\n ):\n mahimahi_cmd = generate_mahimahi_command(mahimahi_settings)\n\n sender_ports = \" \".join([\"$MAHIMAHI_BASE %s\" % sender.port for sender in senders])\n\n cmd = \"%s -- sh -c 'python3 %s %d %s' > out.out\" % (mahimahi_cmd, RECEIVER_FILE, seconds_to_run, sender_ports)\n Popen(cmd, shell=True)\n for sender in senders:\n sender.handshake()\n threads = [Thread(target=sender.run, args=[seconds_to_run]) for sender in senders]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n os.rename(QUEUE_LOG_FILE, QUEUE_LOG_TMP_FILE)\n #os.rename(DROP_LOG, DROP_LOG_TMP_FILE)\n\n if should_print_performance:\n print_performance(senders, seconds_to_run, episode_num, write_to_disk, output_dir, experiment_dir)\n Popen(\"pkill -f mm-link\", shell=True).wait()\n Popen(\"pkill -f run_receiver\", 
shell=True).wait()\n","repo_name":"squidarth/network-performance-jupyter","sub_path":"src/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"35362396327","text":"\"\"\"train rwkv using long-range arena benchmark dataset\"\"\"\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nimport logging\n\nimport jax\nfrom jax import jit, numpy as np\nfrom jax.nn.initializers import zeros, glorot_normal\nimport optax\nimport wandb\nimport os.path as op\n\nfrom rwkv_basic import rwkv_net\nfrom rwkv_batch import rwkv_net_batch\nfrom rwkv_utils import get_tokenizer, rnn_generate\nfrom rwkv_train_utils import init_weight_info, init_weights, init_uniform, KeyGen, get_loss_fn, get_acc_fn\nfrom data_utils import load_npy_as_dataloader\n\nuse_wandb = True\nadam_params = {\n 'learning_rate': 1e-4,\n 'b1': 0.9,\n 'b2': 0.999,\n 'eps': 1e-8,\n}\nlion_params = {\n 'learning_rate': 1e-4,\n 'b1': 0.95,\n 'b2': 0.98,\n 'weight_decay': 0.01\n}\nrun_config = {\n 'name': 'rwkv-shakespeare',\n 'data': 'data/shakespeare.npy',\n 'n_epoch': 100,\n 'batch_size': 4,\n 'eval_freq': 1000,\n 'n_channel': 768,\n 'n_layer': 12,\n 'n_ffn': 3072,\n # 'opt': 'adam',\n # 'opt_params': adam_params,\n 'opt': 'lion',\n 'opt_params': lion_params,\n 'block_size': 256,\n 'save_step': 10000,\n}\n\nif use_wandb:\n wandb_run = wandb.init(\n project=\"inside-transformer\",\n config=run_config,\n )\n\ntokenizer = get_tokenizer()\n\n# initialize weights\nlogging.info(\"initializing weights...\")\nwinfo = init_weight_info(\n tokenizer.get_vocab_size(),\n run_config['n_channel'],\n run_config['n_layer'],\n run_config['n_ffn'],\n)\n\nkeygen = KeyGen()\n\n# option 1:\n# all zero init but head and embedding\nweights = init_weights(winfo, keygen, zeros) # key is not required for zeros init\nweights['emb']['weight'] = init_uniform(keygen(), winfo['emb']['weight'], a=-1e-4, b=1e-4)\nweights['head']['weight'] = init_uniform(keygen(), winfo['head']['weight'], a=-1e-4, b=1e-4)\n\n# option 2:\n# glorot_normal for all 2d matrices and zero for all 1d vectors\n# w_init_fn = lambda key, shape: glorot_normal()(key, shape) if len(shape) == 2 else zeros(key, shape)\n# weights = init_weights(winfo, keygen, w_init_fn)\nlogging.info(\"weights initialized\")\n\n# initialize optimizers\nlogging.info(\"initializing optimizer...\")\noptimizer = {'lion': optax.lion, 'adam': optax.adam}[run_config['opt']](**run_config['opt_params'])\nopt_state = optimizer.init(weights)\nlogging.info(\"optimizer initialized\")\n\n#%%\n# setup loss, its grad, accuracy and validation\nloss_fn = get_loss_fn(rwkv_net_batch)\nloss_fn_grad = jax.value_and_grad(loss_fn)\nacc_fn = get_acc_fn(rwkv_net_batch)\n\ndef get_validation_results(weights):\n prompt = \"The quick brown fox jumps over the lazy\"\n output = rnn_generate(rwkv_net, weights, prompt, 50, tokenizer)\n res = {'output': output}\n return res\n\n@jit\ndef make_step(weights, opt_state, batch):\n loss_val, grads = loss_fn_grad(weights, batch)\n updates, opt_state = optimizer.update(grads, opt_state, weights)\n weights = optax.apply_updates(weights, updates)\n return weights, opt_state, loss_val\n\ni_step = 0\nlogging.info(\"start training...\")\nfor _ in range(run_config['n_epoch']):\n trainloader = load_npy_as_dataloader(run_config['data'], batch_size=run_config['batch_size'], block_size=run_config['block_size'])\n for batch in trainloader:\n weights, opt_state, loss_val = make_step(weights, 
opt_state, batch)\n if i_step % 10 == 0:\n print(f\"step: {i_step}, batch loss: {loss_val}\")\n if i_step % run_config['eval_freq'] == 0:\n print(f\"step: {i_step}, batch loss: {loss_val}\")\n res = get_validation_results(weights)\n if use_wandb:\n wandb.log({\n \"batch_loss\": loss_val,\n \"n_tokens_trained\": i_step * run_config['batch_size'] * run_config['block_size'],\n \"generated\": wandb.Html(res['output'])\n })\n if \"n_train_step\" in run_config and i_step >= run_config['n_train_step']:\n break\n if i_step % run_config['save_step'] == 0:\n ofile = op.join(wandb_run.dir, f\"rwkv_weights_{i_step}.npy\") if use_wandb else f\"rwkv_weights_{i_step}.npy\"\n np.save(ofile, weights)\n i_step += 1\n\nofile = op.join(wandb_run.dir, \"rwkv_weights.npy\") if use_wandb else \"rwkv_weights.npy\"\nnp.save(ofile, weights)\n\nif use_wandb: wandb.finish()\n\n# example loading saved weights with np\n# res = np.load(\"rwkv_weights.npy\", allow_pickle=True).item()\n","repo_name":"guanyilun/scratch","sub_path":"nano-llm/attempt6/rwkv_train.py","file_name":"rwkv_train.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7715426366","text":"\"\"\"\"Models\"\"\"\n\nfrom abc import ABC, abstractmethod\nimport logging\nfrom typing import AsyncIterable, TypedDict\n\nfrom async_reolink.api.connection import Connection\nfrom async_reolink.api.commands import CommandRequest, CommandResponse\n\n\nclass MockConnectionValues(TypedDict):\n\n is_connected: bool\n connection_id: int\n hostname: str\n\n\n_MOCK_DEFAULTS: MockConnectionValues = {\n \"connection_id\": 1,\n \"hostname\": \"Mock Host\",\n \"is_connected\": True,\n}\n\n\nclass MockConnection(Connection, ABC):\n \"\"\"Mocked Connection\"\"\"\n\n def __init__(self, *args, logger: logging.Logger = None, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._mocked: MockConnectionValues = {}.update(_MOCK_DEFAULTS)\n self._logger = logger\n\n @property\n def is_connected(self) -> bool:\n return self._mocked[\"is_connected\"]\n\n @property\n def connection_id(self) -> int:\n return self._mocked[\"connection_id\"]\n\n @property\n def hostname(self):\n return self._mocked[\"hostname\"]\n\n async def connect(self, hostname: str, port: int = None, timeout: float = ...):\n if self._logger is not None:\n self._logger.info(\"connect fired\")\n return\n\n async def disconnect(self):\n if self._logger is not None:\n self._logger.info(\"disconnect fired\")\n return\n\n\nclass MockConnection_SingleExecute(MockConnection, ABC):\n \"\"\"Mocked Single execute response\"\"\"\n\n @abstractmethod\n async def _mocked_execute(self, request: CommandRequest) -> CommandResponse | bytes:\n ...\n\n def _execute(self, *args: CommandRequest) -> AsyncIterable[CommandResponse | bytes]:\n if self._logger is not None:\n self._logger.info(\"_execute fired\")\n\n async def _mock_iterable():\n for request in args:\n yield await self._mocked_execute(request)\n\n return _mock_iterable()\n","repo_name":"xannor/py_reolinkrestapi","sub_path":"tests/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42260955050","text":"import ttkbootstrap as ttk\n\n\nclass MatchCard(ttk.Frame):\n def __init__(self, parent, match_name, team_1, team_2, country, city, discipline, date):\n super().__init__(parent, relief=ttk.SOLID)\n\n # Nazwa meczu\n match_name_label = 
ttk.Label(self, text=match_name, font=(\"Arial\", 14, \"bold\"))\n match_name_label.pack(anchor=ttk.W)\n\n # Nazwy drużyn\n team_names_label = ttk.Label(self, text=f\"{team_1} - {team_2}\", font=(\"Arial\", 12, \"bold\"))\n team_names_label.pack(anchor=ttk.W)\n\n # Lokalizacja meczu\n location_label = ttk.Label(self, text=f\"{city}, {country}\", font=(\"Arial\", 10))\n location_label.pack(anchor=ttk.W)\n\n # Informacje o dyscyplinie\n discipline_label = ttk.Label(self, text=discipline, font=(\"Arial\", 10))\n discipline_label.pack(anchor=ttk.W)\n\n # Data\n date_label = ttk.Label(self, text=date, font=(\"Arial\", 8))\n date_label.pack(anchor=ttk.W)\n","repo_name":"K-Ptak/bookmakerDB","sub_path":"model/matchcard.py","file_name":"matchcard.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18731764621","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\" This is the expected File Format for Feature_pipeline.py script \"\"\"\r\n\r\n## import required libraries\r\n\r\nfrom multiprocessing import Pool\r\nfrom feature_generators import calc_recency,calc_freq,calc_norm\r\nimport time \r\nfrom itertools import repeat\r\nimport numpy as np # linear algebra\r\nimport time\r\nimport pandas as pd\r\nimport os\r\n\r\n\r\ndef fitness_calculation(data):\r\n if ((data['sd_0'] == 0 ) and (data['sd_1'] == 0)) and (((data['avg_0'] == 0) and (data['avg_1'] != 0)) or ((data['avg_0'] != 0) and (data['avg_1'] == 0))):\r\n return 9999999999\r\n elif (((data['sd_0'] == 0 ) and (data['sd_1'] != 0)) or ((data['sd_0'] != 0) and (data['sd_1'] == 0))) and (data['avg_0'] == data['avg_1']):\r\n return 1\r\n elif ((data['sd_0'] != 0 ) and (data['sd_1'] != 0)) and (data['avg_0'] != 0):\r\n return ((data['avg_1']/data['sd_1'])/(data['avg_0']/data['sd_0']))\r\n elif ((data['sd_0'] != 0 ) and (data['sd_1'] != 0)) and ((data['avg_0'] == 0) and (data['avg_1'] != 0)):\r\n return 9999999999\r\n else:\r\n return 1\r\n\r\n# define functions for recency calculation\r\ndef create_recency_features(pool):\r\n column = ['event_name','plan_type','specialty']\r\n res_recency = pool.apply_async(calc_recency,(column,train_data)).get()\r\n res_recency['fitness_value'] = res_recency.apply(fitness_calculation, axis=1)\r\n return res_recency\r\n\r\n# define functions for frequency calculation.\r\ndef create_frequency_features(pool):\r\n column = ['event_name','plan_type','specialty']\r\n res_freq = pool.apply_async(calc_freq,(column,train_data)).get()\r\n res_freq['fitness_value'] = res_freq.apply(fitness_calculation, axis=1)\r\n return res_freq\r\n\r\n\r\ndef create_normchange_features(pool):\r\n column = ['event_name','plan_type','specialty']\r\n res_normChange = pool.apply_async(calc_norm,(column,train_data)).get()\r\n res_normChange['fitness_value'] = res_normChange.apply(fitness_calculation, axis=1)\r\n return res_normChange\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n start=time.time()\r\n # load the training set - Do not edit this line\r\n train_transaction_df = pd.read_csv(\"train_data.csv\",nrows=1000,usecols=['patient_id','event_name','specialty','plan_type','event_time']) #The train dataset name an path should remail unchanged train_data.csv- Do not edit this line\r\n # append the train label.\r\n\r\n #load the test set- Do not edit this line\r\n # test_transaction_df = pd.read_csv(\"test_data.csv\") #The test dataset name an path should remail unchanged train_data.csv- Do not edit this line\r\n\r\n time_var = 'event_time'\r\n id_var 
= 'patient_id'\r\n y_var = 'outcome_flag'\r\n labels = pd.read_csv('train_labels.csv')\r\n\r\n train_data = pd.merge(train_transaction_df, labels, on='patient_id', how='left')\r\n print('DataFrame Loaded...')\r\n pool = Pool(processes=4)\r\n res=pd.DataFrame()\r\n rec = create_recency_features(pool)\r\n freq = create_frequency_features(pool)\r\n normCh = create_normchange_features(pool)\r\n res=rec.append(freq,ignore_index=True)\r\n res=res.append(normCh,ignore_index=True)\r\n # print(res.shape)\r\n res.to_csv('final_fit__pool_v5.csv',index=False)\r\n print('Done',time.time()-start)\r\n except:\r\n print('Except : Exception Occurred',time.time()-start)\r\n pool.close()\r\n finally:\r\n print('Finally : Done',time.time()-start)\r\n pool.close()","repo_name":"rajat5ranjan/Machine-Hack","sub_path":"MH - ZS Patient Discovery Hackathon/Fitness Calculation/Feature Generator using Multiprocessing/Feature_Pipeline- Multiprocessing.py","file_name":"Feature_Pipeline- Multiprocessing.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"3"} +{"seq_id":"19677337192","text":"import pygame\nfrom player import Player\n\ndef menu():\n pygame.init()\n screen = pygame.display.set_mode((750,750))\n menu_image = pygame.image.load('images/menu_background.png')\n menu_image = pygame.transform.scale(menu_image, (750,750))\n\n font = pygame.font.Font('font/Elfboyclassic.ttf', 125)\n font2 = pygame.font.Font('font/zorque.ttf', 125)\n title = font2.render(\"You Died :(\", True, (244, 44, 4))\n again = font2.render(\"Again?\", True, (244, 44, 4))\n font3 = pygame.font.Font('font/zorque.ttf', 86)\n mainmenu = font3.render(\"Main Menu\", True, (244, 44, 4))\n\n clock = pygame.time.Clock()\n\n exit = False\n while not exit:\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n exit=True\n if event.type == pygame.MOUSEBUTTONDOWN and event.button ==1:\n pos = pygame.mouse.get_pos()\n if pygame.Rect((130, 210, 500, 100)).collidepoint(pos):\n Player.reset_player()\n return 1\n if event.type == pygame.MOUSEBUTTONDOWN and event.button ==1:\n pos = pygame.mouse.get_pos()\n if pygame.Rect((130, 350, 500, 100)).collidepoint(pos):\n Player.reset_player()\n return 0\n if event.type == pygame.QUIT:\n return -1\n \n\n screen.fill((23, 96, 135))\n pygame.draw.rect(screen, (8, 61, 119), pygame.Rect(115, 135 , 530, 130))\n pygame.draw.rect(screen, (8, 61, 119), pygame.Rect(115, 335 , 530, 130))\n pygame.draw.rect(screen, (23, 96, 135), pygame.Rect(130, 150, 500, 100))\n pygame.draw.rect(screen, (23, 96, 135), pygame.Rect(130, 350, 500, 100))\n # pygame.draw.rect(screen, (0,0,0), pygame.Rect(115, 15, 530, 130))\n # pygame.draw.rect(screen, (101, 184, 145), pygame.Rect(130, 30, 500, 100))\n screen.blit(mainmenu, (126, 345))\n screen.blit(title, (30, 5))\n screen.blit(again, (140, 130))\n pygame.display.flip()\n clock.tick(60)","repo_name":"SSZZCODER/Crash-","sub_path":"deathscreen.py","file_name":"deathscreen.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17230098935","text":"\"\"\"\r\nThe user is prompted to select a 'F' or 'M' on screen\r\nThis module allows the user to manually enter the tickers on screen\r\nwhen selected 'M'\r\nor upload the file containing tickers\r\nwhen selevted 'F'\r\n@author: lipi\r\n\"\"\"\r\nimport os\r\nimport pandas as pd\r\n\r\n\r\ndef stock_entry_option():\r\n print ('Please note that the program 
supports Stocks listed in S&P500 as of now')\r\n r = raw_input ('Select F to choose a file of tickers\\OR\\Select M for manual input of tickers: ')\r\n StockTickerArray = []\r\n \r\n if r != 'F' and r != 'M':\r\n raise ValueError(\"\\nOops! wrong entry\\Enter only F or M\")\r\n \r\n if r == 'F':#for upload through file \r\n user_input = raw_input(\"Enter your ticker file: \")\r\n assert os.path.exists(user_input), \"I did not find the file at, \"+str(user_input)\r\n #open the file\r\n xlsx = pd.ExcelFile(user_input)\r\n S1 = xlsx.parse(0)\r\n StockTickerArray = (S1['TICKERS'].values.tolist())\r\n \r\n else: #to upload manually \r\n StockCount = input ('Input the number of stocks in the portfolio: ')\r\n S1 = list()\r\n for i in range(1,StockCount+1):\r\n StockTicker = raw_input ('Enter Stock Ticker '+str(i)+': ')\r\n StockTickerArray.append(str(StockTicker)) \r\n \r\n #Broad Market Index is entered \r\n StockTickerArray.append('^GSPC') \r\n return StockTickerArray","repo_name":"Lipishree/WQU-Capstone","sub_path":"CAP_STCK_ENTRY.py","file_name":"CAP_STCK_ENTRY.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"} +{"seq_id":"8258004837","text":"# 100DaysOfCode Day 7 - Hangman\n\nfrom random import choice\nfrom day_007_Hangman.hangman_words import word_list\nfrom day_007_Hangman.hangman_art import stages, logo\n\nchosen_word = choice(word_list)\nblank_cases = ['_'] * len(chosen_word)\n\nlives = 6\nend_of_game = False\nprint(logo)\nwhile not end_of_game:\n letter = input('Guess a letter: ').lower()\n if letter in chosen_word:\n for i, v in enumerate(chosen_word):\n if letter == chosen_word[i]:\n blank_cases[i] = letter\n else:\n lives -= 1\n if lives == 0:\n print('You Lose!')\n end_of_game = True\n if '_' not in blank_cases:\n print('You Win!')\n end_of_game = True\n print(' '.join(blank_cases))\n print(stages[lives])\n","repo_name":"leandropinheiroalves/course-100-days-of-code","sub_path":"day_007_Hangman/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7505641332","text":"import os\n\nfrom django.db.models import Q\nfrom django.http.response import Http404\nfrom django.views.generic import DetailView, ListView\nfrom utils.pagination import make_pagination\n\nfrom recipes.models import Recipe\n\nPER_PAGE = int(os.environ.get('PER_PAGE', 6))\n\n\nclass RecipeListViewBase(ListView):\n model = Recipe\n context_object_name = 'recipes'\n paginate_by = None\n ordering = ['-id']\n template_name = 'recipes/pages/home.html'\n\n def get_queryset(self, *args, **kwargs):\n qs = super().get_queryset(*args, **kwargs)\n qs = qs.filter(\n is_published=True,\n )\n return qs\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n page_obj, pagination_range = make_pagination(\n self.request,\n context['recipes'],\n PER_PAGE\n )\n context.update({\n 'recipes': page_obj, 'pagination_range': pagination_range\n })\n return context\n\n\nclass RecipeListViewHome(RecipeListViewBase):\n template_name = 'recipes/pages/home.html'\n\n\nclass RecipeListViewCategory(RecipeListViewBase):\n template_name = 'recipes/pages/category.html'\n\n def get_queryset(self, *args, **kwargs):\n queryset = super().get_queryset(*args, **kwargs)\n new_queryset = queryset.filter(\n category__id=self.kwargs.get('category_id')\n )\n\n if not new_queryset:\n raise Http404()\n\n return 
new_queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update({\n 'title': f'{context.get(\"recipes\")[0].category.name} - Category | '\n })\n return context\n\n\nclass RecipeListViewSearch(RecipeListViewBase):\n template_name = 'recipes/pages/search.html'\n\n def get_queryset(self, *args, **kwargs):\n search_term = self.request.GET.get('q', '').strip()\n\n if not search_term:\n raise Http404()\n\n queryset = super().get_queryset(*args, **kwargs)\n\n new_queryset = queryset.filter(\n Q(\n Q(title__icontains=search_term) |\n Q(description__icontains=search_term),\n )\n )\n return new_queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n search_term = self.request.GET.get('q', '').strip()\n\n context.update({\n 'page_title': f'Search for \"{search_term}\" |',\n 'search_term': search_term,\n 'additional_url_query': f'&q={search_term}',\n })\n return context\n\n\nclass RecipeDetailView(DetailView):\n model = Recipe\n context_object_name = 'recipe'\n template_name = 'recipes/pages/recipe-view.html'\n\n def get_queryset(self, *args, **kwargs):\n queryset = super().get_queryset(*args, **kwargs)\n new_queryset = queryset.filter(\n is_published=True\n )\n return new_queryset\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context.update({\n 'is_detail_page': True\n })\n return context\n","repo_name":"mignnoni/django-recipes-project","sub_path":"recipes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12278193531","text":"\"\"\"\nCopy from allennlp https://github.com/allenai/allennlp/blob/master/allennlp/common/file_utils.py\nUtilities for working with the local dataset cache.\n\"\"\"\n\nimport os\nimport logging\nimport shutil\nimport tempfile\nimport json\nfrom urllib.parse import urlparse\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union, IO, Callable, Set\nfrom hashlib import sha256\nfrom functools import wraps\n\nimport boto3\nimport botocore\nfrom botocore.exceptions import ClientError\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry # pylint: disable=import-error\n\n\"\"\"\n:class:`~allennlp.common.tqdm.Tqdm` wraps tqdm so we can add configurable\nglobal defaults for certain tqdm parameters.\n\"\"\"\n\nfrom tqdm import tqdm as _tqdm\n# This is neccesary to stop tqdm from hanging\n# when exceptions are raised inside iterators.\n# It should have been fixed in 4.2.1, but it still\n# occurs.\n# TODO(Mark): Remove this once tqdm cleans up after itself properly.\n# https://github.com/tqdm/tqdm/issues/469\n_tqdm.monitor_interval = 0\n\nclass Tqdm:\n # These defaults are the same as the argument defaults in tqdm.\n default_mininterval: float = 0.1\n\n @staticmethod\n def set_default_mininterval(value: float) -> None:\n Tqdm.default_mininterval = value\n\n @staticmethod\n def set_slower_interval(use_slower_interval: bool) -> None:\n \"\"\"\n If ``use_slower_interval`` is ``True``, we will dramatically slow down ``tqdm's`` default\n output rate. ``tqdm's`` default output rate is great for interactively watching progress,\n but it is not great for log files. 
You might want to set this if you are primarily going\n to be looking at output through log files, not the terminal.\n \"\"\"\n if use_slower_interval:\n Tqdm.default_mininterval = 10.0\n else:\n Tqdm.default_mininterval = 0.1\n\n @staticmethod\n def tqdm(*args, **kwargs):\n new_kwargs = {\n 'mininterval': Tqdm.default_mininterval,\n **kwargs\n }\n\n return _tqdm(*args, **new_kwargs)\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nCACHE_ROOT = Path(os.getenv('ALLENNLP_CACHE_ROOT', Path.home() / '.allennlp'))\nCACHE_DIRECTORY = str(CACHE_ROOT / \"cache\")\nDEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / \"datasets\")\n\n# This variable was deprecated in 0.7.2 since we use a single folder for caching\n# all types of files (datasets, models, etc.)\nDATASET_CACHE = CACHE_DIRECTORY\n\n# Warn if the user is still using the deprecated cache directory.\nif os.path.exists(DEPRECATED_CACHE_DIRECTORY):\n logger = logging.getLogger(__name__) # pylint: disable=invalid-name\n logger.warning(f\"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). \"\n f\"Please remove this directory from your system to free up space.\")\n\n\ndef url_to_filename(url: str, etag: str = None) -> str:\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n \"\"\"\n url_bytes = url.encode('utf-8')\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode('utf-8')\n etag_hash = sha256(etag_bytes)\n filename += '.' + etag_hash.hexdigest()\n\n return filename\n\n\ndef filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:\n \"\"\"\n Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = CACHE_DIRECTORY\n\n cache_path = os.path.join(cache_dir, filename)\n if not os.path.exists(cache_path):\n raise FileNotFoundError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + '.json'\n if not os.path.exists(meta_path):\n raise FileNotFoundError(\"file {} not found\".format(meta_path))\n\n with open(meta_path) as meta_file:\n metadata = json.load(meta_file)\n url = metadata['url']\n etag = metadata['etag']\n\n return url, etag\n\ndef cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. 
If it's already a local path,\n make sure the file exists and then return the path.\n \"\"\"\n if cache_dir is None:\n cache_dir = CACHE_DIRECTORY\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n\n url_or_filename = os.path.expanduser(url_or_filename)\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise FileNotFoundError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))\n\ndef is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine check if it's url or an existing file path.\n \"\"\"\n if url_or_filename is None:\n return False\n url_or_filename = os.path.expanduser(str(url_or_filename))\n parsed = urlparse(url_or_filename)\n return parsed.scheme in ('http', 'https', 's3') or os.path.exists(url_or_filename)\n\ndef split_s3_path(url: str) -> Tuple[str, str]:\n \"\"\"Split a full s3 path into the bucket name and path.\"\"\"\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path\n\n\ndef s3_request(func: Callable):\n \"\"\"\n Wrapper function for s3 requests in order to create more helpful error\n messages.\n \"\"\"\n\n @wraps(func)\n def wrapper(url: str, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise FileNotFoundError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper\n\n\ndef get_s3_resource():\n session = boto3.session.Session()\n if session.get_credentials() is None:\n # Use unsigned requests.\n s3_resource = session.resource(\"s3\", config=botocore.client.Config(signature_version=botocore.UNSIGNED))\n else:\n s3_resource = session.resource(\"s3\")\n return s3_resource\n\n\n@s3_request\ndef s3_etag(url: str) -> Optional[str]:\n \"\"\"Check ETag on S3 object.\"\"\"\n s3_resource = get_s3_resource()\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag\n\n\n@s3_request\ndef s3_get(url: str, temp_file: IO) -> None:\n \"\"\"Pull a file directly from S3.\"\"\"\n s3_resource = get_s3_resource()\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)\n\ndef session_with_backoff() -> requests.Session:\n \"\"\"\n We ran into an issue where http requests to s3 were timing out,\n possibly because we were making too many requests too quickly.\n This helper function returns a requests session that has retry-with-backoff\n built in.\n\n see https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library\n \"\"\"\n session = requests.Session()\n retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])\n session.mount('http://', HTTPAdapter(max_retries=retries))\n session.mount('https://', 
HTTPAdapter(max_retries=retries))\n\n return session\n\ndef http_get(url: str, temp_file: IO) -> None:\n with session_with_backoff() as session:\n req = session.get(url, stream=True)\n content_length = req.headers.get('Content-Length')\n total = int(content_length) if content_length is not None else None\n progress = Tqdm.tqdm(unit=\"B\", total=total)\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\n\n# TODO(joelgrus): do we want to do checksums or anything like that?\ndef get_from_cache(url: str, cache_dir: str = None) -> str:\n \"\"\"\n Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. Then return the path to the cached file.\n \"\"\"\n if cache_dir is None:\n cache_dir = CACHE_DIRECTORY\n\n os.makedirs(cache_dir, exist_ok=True)\n\n # Get eTag to add to filename, if it exists.\n if url.startswith(\"s3://\"):\n etag = s3_etag(url)\n else:\n with session_with_backoff() as session:\n response = session.head(url, allow_redirects=True)\n if response.status_code != 200:\n raise IOError(\"HEAD request failed for url {} with status code {}\"\n .format(url, response.status_code))\n etag = response.headers.get(\"ETag\")\n\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n if not os.path.exists(cache_path):\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with tempfile.NamedTemporaryFile() as temp_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"s3://\"):\n s3_get(url, temp_file)\n else:\n http_get(url, temp_file)\n\n # we are copying the file before closing it, so flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at the current position, so go to the start\n temp_file.seek(0)\n\n logger.info(\"copying %s to cache at %s\", temp_file.name, cache_path)\n with open(cache_path, 'wb') as cache_file:\n shutil.copyfileobj(temp_file, cache_file)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {'url': url, 'etag': etag}\n meta_path = cache_path + '.json'\n with open(meta_path, 'w') as meta_file:\n json.dump(meta, meta_file)\n\n logger.info(\"removing temp file %s\", temp_file.name)\n\n return cache_path\n\n\ndef read_set_from_file(filename: str) -> Set[str]:\n \"\"\"\n Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.\n \"\"\"\n collection = set()\n with open(filename, 'r') as file_:\n for line in file_:\n collection.add(line.rstrip())\n return collection\n\n\ndef get_file_extension(path: str, dot=True, lower: bool = True):\n ext = os.path.splitext(path)[1]\n ext = ext if dot else ext[1:]\n return ext.lower() if lower else ext\n","repo_name":"thu-coai/CrossWOZ","sub_path":"convlab2/util/allennlp_file_utils.py","file_name":"allennlp_file_utils.py","file_ext":"py","file_size_in_byte":11553,"program_lang":"python","lang":"en","doc_type":"code","stars":539,"dataset":"github-code","pt":"3"} +{"seq_id":"38053582618","text":"import pygame\n\n\n\n\ndef foo(): \n upper_limit= 20\n iters= {\n 1:0\n }\n start= 2\n max_so_far=0\n for n in range(start, upper_limit+1):\n i=n\n count= 0\n while i != 1:\n if i % 2 == 0:\n i = i // 2\n count += 1\n else:\n i = i * 3 + 1\n count += 1 \n if 
count > max_so_far:\n max_so_far=count\n iters.update({n : count})\n\n\n print(iters)\n print(\"Max num of iterations: \", max_so_far)\n \n\n scale=10\n for j in iters:\n iters[j]= iters[j]*scale\n j=j*scale\n iters[n]= count\n coords= list(iters.items())\n pygame.init()\n display = pygame.display.set_mode()\n display.set_colorkey(\"white\")\n new_display = pygame.transform.flip(display, False, True)\n display.blit(new_display, (0,0))\n pygame.draw.lines(display, \"white\", False, coords)\n font_name= None\n pygame.font.init()\n font= pygame.font.Font(font_name, 20)\n msg = font.render(f\"Max # of iterations: {max_so_far}\", False, \"white\")\n display.blit(msg, (0,200))\n pygame.display.flip()\n pygame.time.wait(2000)\n \n\n\n\nfoo()\n","repo_name":"bucs110FALL22/portfolio-lthorne3","sub_path":"ch05/lab/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34070228016","text":"from licenseware.common.constants import envs\nfrom licenseware.notifications.uploader_status import save_uploader_status\nfrom licenseware.registry_service import register_upload_status\nfrom licenseware.utils.logger import log\n\n\ndef notify_upload_status(event: dict, status: str):\n \"\"\"\n Notify registry about uploader processing status\n \"\"\"\n\n upload_status = {\n \"tenant_id\": event[\"tenant_id\"],\n \"uploader_id\": event[\"uploader_id\"],\n \"status\": status,\n \"app_id\": envs.APP_ID,\n }\n\n log.info(\n f\"APP PROCESSING EVENT: {envs.APP_ID} in status: {upload_status}\\n for uploader {event['uploader_id']} for tenant {event['tenant_id']}\"\n )\n\n response, status_code = register_upload_status(**upload_status)\n save_uploader_status(**upload_status)\n\n if status_code != 200:\n return False\n return True\n","repo_name":"licenseware/licenseware-sdk-v2","sub_path":"licenseware/notifications/notify_upload_status.py","file_name":"notify_upload_status.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"25458846515","text":"import heapq\nimport time\nimport math\nfrom heapq import heappop, heappush\nfrom typing import Mapping\n\n# Initializing our Nodes(which make up the tree). 
The nodes contain left child,right child and symbol and the freq of the character.\nclass Node:\n\n def __init__(self,ch,freq,left,right):\n self.sym = ch\n self.freq = freq\n self.left = left\n self.right = right\n \n def __eq__(self, other):\n return self.freq == other.freq\n\n def __gt__(self, other):\n return self.freq > other.freq\n \n def __lt__(self, other):\n return self.freq < other.freq\n \n\n# Counting the frequencies of each character.\ndef CountFreq(Str):\n Freq = {}\n for sym in Str:\n if sym in Freq:\n Freq[sym] += 1\n else:\n Freq[sym] = 1\n return Freq\n# Humman treemaker\n# We made a priority queue based on the frequencies of the given characters and making huffman using that PQ.\ndef Huffmantreemaker(Str,Freq):\n nodePQ = [] \n for key,val in Freq.items():\n nodePQ.append(Node(key,val,None,None))\n heapq.heapify(nodePQ)\n while len(nodePQ) > 1:\n Lchild = heappop(nodePQ)\n Rchild = heappop(nodePQ)\n Freq = Lchild.freq + Rchild.freq\n newnode = Node(None,Freq,Lchild,Rchild)\n heappush(nodePQ,newnode)\n Root = nodePQ[0]\n return Root\n\n# we are mapping every character here to a binary string, as we move towards the left child we add '0' and when move\n# towards the right child we add '1' in the respective binary strings.\ndef Coding(Root,Str,Codemap):\n \n if Root is None:\n return\n\n if (Root.left is None) and (Root.right is None):\n Codemap[Root.sym] = Str\n \n Coding(Root.left,Str + '0',Codemap)\n Coding(Root.right,Str + '1',Codemap)\n\n# we are forming our encoded string by replacing every character by its respective binary string. \ndef encoding(Str,Codemap):\n start_time = time.time()\n encoded_Str = ''\n\n for i in Str:\n encoded_Str = encoded_Str + Codemap.get(i)\n end_time = time.time()\n print(f\"Execution time for compressing: {(end_time - start_time)*1000} ms\")\n return encoded_Str\n\n# The helper functions helps us to find the leaf node with the given binary string that we get as we iterate through\n# our encoded string\ndef Helper(Root,index,Encoded_Str,decoded_Str):\n if (Root is None):\n return index,decoded_Str\n if (Root.left is None) and (Root.right is None):\n decoded_Str = decoded_Str + Root.sym\n return index,decoded_Str\n\n index = index + 1\n if Encoded_Str[index] == '0':\n Root = Root.left\n elif Encoded_Str[index] == '1' :\n Root = Root.right\n return Helper(Root,index,Encoded_Str,decoded_Str)\n\n \n# We are decoding our encoded string here, with the help of the codemap we created. 
We keep moving through the loop \n# until we hit a leaf node, once we hit the leaf node we replace the given binary string with its character that we had mapped to it\ndef decoding(Encoded_Str,Root,Codemap):\n start_time = time.time()\n decoded_Str = ''\n index = -1\n n = len(Encoded_str)\n while index < n-1:\n (index,decoded_Str) = Helper(Root,index,Encoded_Str,decoded_Str)\n\n end_time = time.time()\n print(f\"Execution time for decompressing: {(end_time - start_time)*1000} ms\")\n return decoded_Str\n\n\n# Driver Function \n# File \nwith open('file3input.txt','r') as file:\n Str = file.read()\nF = CountFreq(Str)\nRoot = Huffmantreemaker(Str,F)\nCodemap = {}\nCoding(Root,'',Codemap)\nprint(Codemap)\nprint(\"\\n\\n\\n\")\n\nEncoded_str = encoding(Str,Codemap)\nwith open(\"encode_file3.txt\",\"w\") as file:\n file.write(Encoded_str)\n file.close\n\nwith open(\"encode_file3.txt\",\"r\") as file:\n Encoded_source = file.read()\nDecoded_Str = decoding(Encoded_source,Root,Codemap)\n\nwith open(\"decode_file3.txt\",\"w\") as file:\n file.write(Decoded_Str)\n file.close\n\n# Comparing sizes of encoded and decoded files\nwith open(\"file3input.txt\",\"r\") as file:\n print(f\"Size of the encoded file: {math.floor((len(file.read())))} bytes\")\n\nwith open(\"encode_file3.txt\",\"r\") as file:\n print(f\"Size of the decoded file: {math.floor((len(file.read())/8))} bytes\")\n","repo_name":"DhruvHirpara555/Huffman","sub_path":"main_1.py","file_name":"main_1.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41863061080","text":"import pandas as pd\nimport numpy as np\n\ntrain_events = pd.read_csv(\"child-mind-institute-detect-sleep-states/train_events.csv\")\n\n# finding the nan's \nseries_has_NaN = train_events.groupby('series_id')['step'].apply(lambda x: x.isnull().any())\nprint(series_has_NaN.value_counts())\nno_NaN_series = series_has_NaN[~series_has_NaN].index.tolist()\nprint(no_NaN_series)\n\n# also drop these two truncated events series (EDA):\nno_NaN_series.remove('31011ade7c0a') # incomplete events data\nno_NaN_series.remove('a596ad0b82aa') # incomplete events data\n\ndef get_train_series(series):\n train_series = pd.read_parquet(\"child-mind-institute-detect-sleep-states/train_series.parquet\", filters=[('series_id','=',series)])\n train_events = pd.read_csv(\"child-mind-institute-detect-sleep-states/train_events.csv\").query('series_id == @series')\n \n # cleaning etc.\n train_events = train_events.dropna()\n train_events[\"step\"] = train_events[\"step\"].astype(\"int\")\n train_events[\"awake\"] = train_events[\"event\"].replace({\"onset\":1,\"wakeup\":0})\n\n train = pd.merge(train_series, train_events[['step','awake']], on='step', how='left')\n train[\"awake\"] = train[\"awake\"].bfill(axis ='rows')\n\n # final section:\n # train_events.groupby('series_id').tail(1)[\"event\"].unique()\n # Result: the last event is always a \"wakeup\"\n train['awake'] = train['awake'].fillna(1) # awake\n train[\"awake\"] = train[\"awake\"].astype(\"int\")\n return(train)\n\nclean_train_data = []\n\nfor series_id in no_NaN_series:\n train = get_train_series(series_id)\n clean_train_data.append(train)\n\n\nZzzs_train = 
pd.concat(clean_train_data).reset_index(drop=True)\nZzzs_train[\"series_id\"].nunique()\n\nZzzs_train.to_parquet('child-mind-institute-detect-sleep-states/Zzzs_train.parquet')\n","repo_name":"teja00/DetectSleepState","sub_path":"dataConversion.py","file_name":"dataConversion.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24219953642","text":"dict = []\r\nhame = []\r\n\r\nfrom media import meedia\r\nfrom clip import clipp\r\nfrom film import filmm\r\nfrom serial import seriall\r\nfrom mostanad import mos\r\nfrom pyfiglet import Figlet\r\n\r\nwelcam = Figlet(font='standard')\r\nprint (welcam.renderText('Paniz store'))\r\n\r\nclass actor:\r\n pass\r\n\r\ndata = open(\"hamechi.txt\",\"r\")\r\ndata = data.read().split(\"\\n\")\r\nfor i in range (len(data)):\r\n hame = data[i].split(\",\")\r\n if hame[0]==\"film\":\r\n dict.append(filmm(hame[0],hame[1],hame[2],hame[3],hame[4],hame[5],hame[6],hame[7]))\r\n elif hame[0]==\"series\":\r\n dict.append(seriall(hame[0],hame[1],hame[2],hame[3],hame[4],hame[5],hame[6],hame[7]))\r\n elif hame[0]==\"clip\":\r\n dict.append(clipp(hame[0],hame[1],hame[2],hame[3],hame[4],hame[5],hame[6],hame[7]))\r\n elif hame[0]==\"doc\":\r\n dict.append(mos(hame[0],hame[1],hame[2],hame[3],hame[4],hame[5],hame[6],hame[7]))\r\n\r\n\r\ndef ad():\r\n typ = input(\"film? serial? mostanad? clip? \")\r\n i_D = int(input(\"code? \"))\r\n name = input(\"name? \")\r\n directo = float(input(\"director? \"))\r\n IMDBscor = float(input(\"IMDBscore? \"))\r\n urll = input(\"url? \")\r\n duratio = int(input(\"duration? \"))\r\n cast = input(\"casts? \")\r\n dict.append({\"type\": typ, \"id\": i_D, \"name\" : name, \"director\": directo, \"IMDBsco\" : IMDBscor,\"url\":urll, \"duration\": duratio, \"casts\": cast})\r\n print(\"shod\")\r\n\r\ndef edit():\r\n idd = input(\"shomare chando mikhai edit koni? \")\r\n for i in range(len(hame)):\r\n if hame[i][\"id\"] == idd:\r\n while True:\r\n hame[i]['typ'] = input('name type: ')\r\n hame[i]['name'] = input('name jadid: ')\r\n hame[i]['IMDBscore'] = input('IMDBscore jadid: ')\r\n hame[i]['url'] = input('link jadid: ')\r\n hame[i]['duration'] = input('duration jadid: ')\r\n print(\"shod\")\r\ndef lete():\r\n idd = int(input(\"kodumo mikhai vardari? \"))\r\n for i in range(len(hame)):\r\n if hame[i][\"id\"] == idd:\r\n hame.pop(i)\r\n print(\"shod\")\r\n\r\ndef serch():\r\n s = input(\"donbal chi hasti? \" )\r\n for i in range (len(hame)):\r\n if s == hame[i][\"name\"]:\r\n print(hame[i])\r\n\r\ndef danlod():\r\n idd = int(input(\"kodum? 
\"))\r\n for i in range(len(hame)):\r\n if idd == hame[i][\"id\"]:\r\n hame[i].download()\r\n\r\n \r\ndef sho_menu():\r\n print(\"1-add film\")\r\n print(\"2- eddit film\")\r\n print(\"3- del film\")\r\n print(\"4- search\")\r\n print(\"5- show list\")\r\n print(\"6- download\")\r\n\r\nwhile True:\r\n sho_menu()\r\n choi = int(input('please entekhab komim:'))\r\n \r\n if choi == 1:\r\n ad()\r\n elif choi == 2:\r\n edit()\r\n elif choi ==3:\r\n lete()\r\n elif choi == 4:\r\n serch()\r\n elif choi == 5:\r\n print(hame)\r\n elif choi == 6:\r\n danlod()","repo_name":"paniz-fatemi/new","sub_path":"10/10-1.py","file_name":"10-1.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14443995924","text":"# **************************************************************************\n# *\n# * Authors: Grigory Sharov (gsharov@mrc-lmb.cam.ac.uk)\n# *\n# * MRC Laboratory of Molecular Biology (MRC-LMB)\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 3 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'scipion@cnb.csic.es'\n# *\n# **************************************************************************\n\nfrom pyworkflow import BETA\nimport pyworkflow.protocol.params as params\nfrom tomo.objects import SetOfCoordinates3D\n\nfrom .. import Plugin\nfrom ..constants import TOMOTWIN_MODEL\nfrom .protocol_base import ProtTomoTwinBase\n\n\nclass ProtTomoTwinRefPicking(ProtTomoTwinBase):\n \"\"\" Reference-based picking with TomoTwin. 
\"\"\"\n\n _label = 'reference-based picking'\n _devStatus = BETA\n _possibleOutputs = {'output3DCoordinates': SetOfCoordinates3D}\n _requiresRefs = True\n\n def __init__(self, **kwargs):\n ProtTomoTwinBase.__init__(self, **kwargs)\n self.stepsExecutionMode = params.STEPS_PARALLEL\n\n # --------------------------- DEFINE param functions ----------------------\n def _defineParams(self, form):\n self._defineInputParams(form)\n self._defineEmbedParams(form)\n self._definePickingParams(form)\n\n form.addParallelSection(threads=1)\n\n # --------------------------- INSERT steps functions ----------------------\n def _insertAllSteps(self):\n self._createFilenameTemplates()\n convertStepId = self._insertFunctionStep(self.convertInputStep)\n deps = []\n embedRefStepId = self._insertFunctionStep(self.embedRefsStep,\n prerequisites=convertStepId)\n deps.append(embedRefStepId)\n\n tomoIds = self._getInputTomos().aggregate([\"COUNT\"], \"_tsId\", [\"_tsId\"])\n tomoIds = set([d['_tsId'] for d in tomoIds])\n\n for tomoId in tomoIds:\n embedTomoStepId = self._insertFunctionStep(self.embedTomoStep,\n tomoId,\n prerequisites=convertStepId)\n deps.append(embedTomoStepId)\n self._insertFunctionStep(self.pickingStep, tomoId,\n prerequisites=deps)\n\n self._insertFunctionStep(self.createOutputStep)\n\n # --------------------------- STEPS functions -----------------------------\n def embedRefsStep(self):\n \"\"\" Embed the references. \"\"\"\n self.runProgram(self.getProgram(\"tomotwin_embed.py\"),\n self._getEmbedRefsArgs())\n\n # --------------------------- INFO functions ------------------------------\n def _validate(self):\n errors = []\n\n refs = self.inputRefs.get()\n scale = refs.getSamplingRate() / self._getInputTomos().getSamplingRate()\n doScale = abs(scale - 1.0) > 0.001\n if doScale:\n errors.append(\"Tomograms and references must have the same pixel size!\")\n\n return errors\n\n def _warningsExtra(self):\n warnings = []\n\n refs = self.inputRefs.get()\n if refs.getXDim() != 37:\n warnings.append(\"Because TomoTwin was trained on many proteins at \"\n \"once, we needed to find a box size that worked \"\n \"for all proteins. Therefore, all proteins were \"\n \"used with a pixel size of 10Å and a box size of \"\n \"37 pixels. Because of this, you must extract your \"\n \"reference with a box size of 37 pixels. If your \"\n \"protein is too large for this box at 10Å/pix (much \"\n \"larger than a ribosome) then you should scale the \"\n \"pixel size of your tomogram until it fits rather \"\n \"than changing the box size. 
Likewise if your \"\n \"protein is so small that at 10Å/pix it only fills \"\n \"one to two pixels of the box, you should scale \"\n \"your tomogram pixel size until the particle is \"\n \"bigger, however we’ve found that for proteins down \"\n \"to 100 kDa, 10Å/pix is sufficient for the 37 box.\")\n\n return warnings\n\n # --------------------------- UTILS functions ------------------------------\n def _getEmbedRefsArgs(self):\n return [\n f\"subvolumes -m {Plugin.getVar(TOMOTWIN_MODEL)}\",\n \"-v ../tmp/input_refs/*.mrc\",\n f\"-b {self.batchRefs.get()}\",\n \"-o embed/refs\"\n ]\n\n def _getMapArgs(self, tomoId):\n return [\n \"distance -r embed/refs/embeddings.temb\",\n f\"-v embed/tomos/{tomoId}_embeddings.temb\",\n f\"-o {tomoId}/\"\n ]\n","repo_name":"scipion-em/scipion-em-tomotwin","sub_path":"tomotwin/protocols/protocol_picking_ref.py","file_name":"protocol_picking_ref.py","file_ext":"py","file_size_in_byte":5593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30411292611","text":"import json\nimport random\nimport argparse\n\nfrom src.slurm import init_signal_handler, init_distributed_mode\nfrom src.data.loader import check_data_params, load_data\nfrom src.utils import bool_flag, initialize_exp, set_sampling_probs, shuf_order\nfrom src.model import check_model_params, build_model\nfrom src.trainer import SingleTrainer, EncDecTrainer\nfrom src.evaluation.evaluator import SingleEvaluator, EncDecEvaluator\n\n\ndef get_parser():\n \"\"\"\n Generate a parameters parser.\n \"\"\"\n # parse parameters\n parser = argparse.ArgumentParser(description=\"Language transfer\")\n\n # main parameters\n parser.add_argument(\"--dump_path\", type=str, default=\"./dumped/\",\n help=\"Experiment dump path\")\n parser.add_argument(\"--exp_name\", type=str, default=\"\",\n help=\"Experiment name\")\n parser.add_argument(\"--save_periodic\", type=int, default=0,\n help=\"Save the model periodically (0 to disable)\")\n parser.add_argument(\"--exp_id\", type=str, default=\"\",\n help=\"Experiment ID\")\n\n # float16 / AMP API\n parser.add_argument(\"--fp16\", type=bool_flag, default=False,\n help=\"Run model with float16\")\n parser.add_argument(\"--amp\", type=int, default=-1,\n help=\"Use AMP wrapper for float16 / distributed / gradient accumulation. Level of optimization. 
-1 to disable.\")\n\n # only use an encoder (use a specific decoder for machine translation)\n parser.add_argument(\"--encoder_only\", type=bool_flag, default=True,\n help=\"Only use an encoder\")\n\n # model parameters\n parser.add_argument(\"--emb_dim\", type=int, default=512,\n help=\"Embedding layer size\")\n parser.add_argument(\"--emb_dim_encoder\", type=int, default=0,\n help=\"Embedding layer size\")\n parser.add_argument(\"--emb_dim_decoder\", type=int, default=0,\n help=\"Embedding layer size\")\n parser.add_argument(\"--n_layers\", type=int, default=4,\n help=\"Number of Transformer layers\")\n parser.add_argument(\"--n_layers_encoder\", type=int, default=0,\n help=\"Number of Transformer layers for the encoder\")\n parser.add_argument(\"--n_layers_decoder\", type=int, default=0,\n help=\"Number of Transformer layers for the decoder\")\n parser.add_argument(\"--n_heads\", type=int, default=8,\n help=\"Number of Transformer heads\")\n parser.add_argument(\"--dropout\", type=float, default=0,\n help=\"Dropout\")\n parser.add_argument(\"--attention_dropout\", type=float, default=0,\n help=\"Dropout in the attention layer\")\n parser.add_argument(\"--gelu_activation\", type=bool_flag, default=False,\n help=\"Use a GELU activation instead of ReLU\")\n parser.add_argument(\"--share_inout_emb\", type=bool_flag, default=True,\n help=\"Share input and output embeddings\")\n parser.add_argument(\"--sinusoidal_embeddings\", type=bool_flag, default=False,\n help=\"Use sinusoidal embeddings\")\n parser.add_argument(\"--use_lang_emb\", type=bool_flag, default=True,\n help=\"Use language embedding\")\n\n # causal language modeling task parameters\n parser.add_argument(\"--context_size\", type=int, default=0,\n help=\"Context size (0 means that the first elements in sequences won't have any context)\")\n\n # masked language modeling task parameters\n parser.add_argument(\"--word_pred\", type=float, default=0.15,\n help=\"Fraction of words for which we need to make a prediction\")\n parser.add_argument(\"--sample_alpha\", type=float, default=0,\n help=\"Exponent for transforming word counts to probabilities (~word2vec sampling)\")\n parser.add_argument(\"--word_mask_keep_rand\", type=str, default=\"0.8,0.1,0.1\",\n help=\"Fraction of words to mask out / keep / randomize, among the words to predict\")\n\n # input sentence noise\n parser.add_argument(\"--word_shuffle\", type=float, default=0,\n help=\"Randomly shuffle input words (0 to disable)\")\n parser.add_argument(\"--word_dropout\", type=float, default=0,\n help=\"Randomly dropout input words (0 to disable)\")\n parser.add_argument(\"--word_blank\", type=float, default=0,\n help=\"Randomly blank input words (0 to disable)\")\n\n # data\n parser.add_argument(\"--data_path\", type=str, default=\"\",\n help=\"Data path\")\n parser.add_argument(\"--lgs\", type=str, default=\"\",\n help=\"Languages (lg1-lg2-lg3 .. 
ex: en-fr-es-de)\")\n parser.add_argument(\"--max_vocab\", type=int, default=-1,\n help=\"Maximum vocabulary size (-1 to disable)\")\n parser.add_argument(\"--min_count\", type=int, default=0,\n help=\"Minimum vocabulary count\")\n parser.add_argument(\"--lg_sampling_factor\", type=float, default=-1,\n help=\"Language sampling factor\")\n parser.add_argument(\"--has_sentences_ids\", type=bool_flag, default=False,\n help=\"Parallel sentences has an id or not in parallel datasets.\")\n\n # batch parameters\n parser.add_argument(\"--bptt\", type=int, default=256,\n help=\"Sequence length\")\n parser.add_argument(\"--max_len\", type=int, default=100,\n help=\"Maximum length of sentences (after BPE)\")\n parser.add_argument(\"--group_by_size\", type=bool_flag, default=True,\n help=\"Sort sentences by size during the training\")\n parser.add_argument(\"--batch_size\", type=int, default=32,\n help=\"Number of sentences per batch\")\n parser.add_argument(\"--max_batch_size\", type=int, default=0,\n help=\"Maximum number of sentences per batch (used in combination with tokens_per_batch, 0 to disable)\")\n parser.add_argument(\"--tokens_per_batch\", type=int, default=-1,\n help=\"Number of tokens per batch\")\n\n parser.add_argument(\"--gen_tpb_multiplier\", type=int, default=1,\n help=\"Multiplier of token per batch during generation when doing back translation. Typically 4\")\n\n # training parameters\n parser.add_argument(\"--split_data\", type=bool_flag, default=False,\n help=\"Split data across workers of a same node\")\n parser.add_argument(\"--split_data_accross_gpu\", type=str, default='local',\n help=\"Split data across GPU locally or globally. Set 'local' or 'global'\")\n parser.add_argument(\"--optimizer\", type=str, default=\"adam,lr=0.0001\",\n help=\"Optimizer (SGD / RMSprop / Adam, etc.)\")\n parser.add_argument(\"--clip_grad_norm\", type=float, default=5,\n help=\"Clip gradients norm (0 to disable)\")\n parser.add_argument(\"--epoch_size\", type=int, default=100000,\n help=\"Epoch size / evaluation frequency (-1 for parallel data size)\")\n parser.add_argument(\"--max_epoch\", type=int, default=100000,\n help=\"Maximum epoch size\")\n parser.add_argument(\"--stopping_criterion\", type=str, default=\"\",\n help=\"Stopping criterion, and number of non-increase before stopping the experiment\")\n parser.add_argument(\"--validation_metrics\", type=str, default=\"\",\n help=\"Validation metrics\")\n parser.add_argument(\"--accumulate_gradients\", type=int, default=1,\n help=\"Accumulate model gradients over N iterations (N times larger batch sizes)\")\n\n # training coefficients\n parser.add_argument(\"--lambda_mlm\", type=str, default=\"1\",\n help=\"Prediction coefficient (MLM)\")\n parser.add_argument(\"--lambda_clm\", type=str, default=\"1\",\n help=\"Causal coefficient (LM)\")\n parser.add_argument(\"--lambda_ae\", type=str, default=\"1\",\n help=\"AE coefficient\")\n parser.add_argument(\"--lambda_mt\", type=str, default=\"1\",\n help=\"MT coefficient\")\n parser.add_argument(\"--lambda_bt\", type=str, default=\"1\",\n help=\"BT coefficient\")\n\n # training steps\n parser.add_argument(\"--clm_steps\", type=str, default=\"\",\n help=\"Causal prediction steps (CLM)\")\n parser.add_argument(\"--mlm_steps\", type=str, default=\"\",\n help=\"Masked prediction steps (MLM / TLM)\")\n parser.add_argument(\"--mt_steps\", type=str, default=\"\",\n help=\"Machine translation steps\")\n parser.add_argument(\"--ae_steps\", type=str, default=\"\",\n help=\"Denoising auto-encoder steps\")\n 
parser.add_argument(\"--bt_steps\", type=str, default=\"\",\n help=\"Back-translation steps\")\n\n # reload pretrained embeddings / pretrained model / checkpoint\n parser.add_argument(\"--reload_emb\", type=str, default=\"\",\n help=\"Reload pretrained word embeddings\")\n parser.add_argument(\"--reload_model\", type=str, default=\"\",\n help=\"Reload a pretrained model\")\n parser.add_argument(\"--reload_checkpoint\", type=str, default=\"\",\n help=\"Reload a checkpoint\")\n\n # beam search (for MT only)\n parser.add_argument(\"--beam_size\", type=int, default=1,\n help=\"Beam size, default = 1 (greedy decoding)\")\n\n parser.add_argument(\"--length_penalty\", type=float, default=1,\n help=\"Length penalty, values < 1.0 favor shorter sentences, while values > 1.0 favor longer ones.\")\n parser.add_argument(\"--early_stopping\", type=bool_flag, default=False,\n help=\"Early stopping, stop as soon as we have `beam_size` hypotheses, although longer ones may have better scores.\")\n # sampling at eval time\n parser.add_argument(\"--number_samples\", type=int, default=1,\n help=\"Number of examples to sample (default = 1)\")\n parser.add_argument(\"--eval_temperature\", type=float, default=None,\n help=\"Evaluation temperature when using several samples\")\n\n # BT parameters\n parser.add_argument(\"--bt_sample_temperature\", type=str, default='0',\n help=\"At BT training, sample temperature for generation\")\n\n # evaluation\n parser.add_argument(\"--eval_bleu\", type=bool_flag, default=False,\n help=\"Evaluate BLEU score during MT training\")\n parser.add_argument(\"--eval_bleu_test_only\", type=bool_flag, default=False,\n help=\"Evaluate BLEU score during MT training\")\n parser.add_argument(\"--eval_computation\", type=bool_flag, default=False,\n help=\"Check if the generated function is compilable, and if it returns the same output as ground truth.\")\n parser.add_argument(\"--generate_hypothesis\", type=bool_flag, default=False,\n help=\"generate hypothesis for test/valid mono dataset\")\n parser.add_argument(\"--eval_only\", type=bool_flag, default=False,\n help=\"Only run evaluations\")\n parser.add_argument(\"--retry_mistmatching_types\", type=bool_flag, default=False,\n help=\"Retry with wrapper at eval time when the types do not match\")\n\n # debug\n parser.add_argument(\"--debug_train\", type=bool_flag, default=False,\n help=\"Use valid sets for train sets (faster loading)\")\n parser.add_argument(\"--debug_slurm\", type=bool_flag, default=False,\n help=\"Debug multi-GPU / multi-node within a SLURM job\")\n parser.add_argument(\"--debug\", help=\"Enable all debug flags\",\n action=\"store_true\")\n\n # multi-gpu / multi-node\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"Multi-GPU - Local rank\")\n parser.add_argument(\"--master_port\", type=int, default=-1,\n help=\"Master port (for multi-node SLURM jobs)\")\n parser.add_argument(\"--separate_decoders\", type=bool_flag, default=False,\n help=\"Use a separate decoder for each language\")\n\n parser.add_argument(\"--n_share_dec\", type=int, default=0,\n help=\"Number of decoder layers to share\")\n\n return parser\n\n\ndef main(params):\n\n # initialize the multi-GPU / multi-node training\n init_distributed_mode(params)\n\n # initialize the experiment\n logger = initialize_exp(params)\n\n # initialize SLURM signal handler for time limit / pre-emption\n init_signal_handler()\n\n # load data\n data = load_data(params)\n\n # build model\n if params.encoder_only:\n model = build_model(params, data['dico'])\n 
else:\n encoder, decoder = build_model(params, data['dico'])\n\n # build trainer, reload potential checkpoints / build evaluator\n if params.encoder_only:\n trainer = SingleTrainer(model, data, params)\n evaluator = SingleEvaluator(trainer, data, params)\n else:\n trainer = EncDecTrainer(encoder, decoder, data, params)\n evaluator = EncDecEvaluator(trainer, data, params)\n\n # evaluation\n if params.eval_only:\n scores = evaluator.run_all_evals(trainer)\n for k, v in scores.items():\n logger.info(\"%s -> %.6f\" % (k, v))\n logger.info(\"__log__:%s\" % json.dumps(scores))\n exit()\n\n # set sampling probabilities for training\n set_sampling_probs(data, params)\n\n # language model training\n for _ in range(params.max_epoch):\n\n logger.info(\"============ Starting epoch %i ... ============\" %\n trainer.epoch)\n\n trainer.n_sentences = 0\n\n while trainer.n_sentences < trainer.epoch_size:\n\n # CLM steps\n for lang1, lang2 in shuf_order(params.clm_steps, params):\n trainer.clm_step(lang1, lang2, params.lambda_clm)\n\n # MLM steps (also includes TLM if lang2 is not None)\n for lang1, lang2 in shuf_order(params.mlm_steps, params):\n trainer.mlm_step(lang1, lang2, params.lambda_mlm)\n\n # denoising auto-encoder steps\n for lang in shuf_order(params.ae_steps):\n trainer.mt_step(lang, lang, params.lambda_ae)\n\n # machine translation steps\n for lang1, lang2 in shuf_order(params.mt_steps, params):\n trainer.mt_step(lang1, lang2, params.lambda_mt)\n\n # back-translation steps\n for lang1, lang2, lang3 in shuf_order(params.bt_steps):\n trainer.bt_step(lang1, lang2, lang3,\n params.lambda_bt, params.bt_sample_temperature)\n\n trainer.iter()\n\n logger.info(\"============ End of epoch %i ============\" %\n trainer.epoch)\n\n # evaluate perplexity\n scores = evaluator.run_all_evals(trainer)\n\n # print / JSON log\n for k, v in scores.items():\n logger.info(\"%s -> %.6f\" % (k, v))\n if params.is_master:\n logger.info(\"__log__:%s\" % json.dumps(scores))\n\n # end of epoch\n if params.validation_metrics != '':\n trainer.save_best_model(scores)\n trainer.save_periodic()\n trainer.end_epoch(scores)\n\n\nif __name__ == '__main__':\n\n # generate parser / parse parameters\n parser = get_parser()\n params = parser.parse_args()\n\n # debug mode\n if params.debug:\n params.exp_name = 'debug'\n params.exp_id = 'debug_%08i' % random.randint(0, 100000000)\n params.debug_slurm = True\n params.debug_train = True\n\n # check parameters\n check_data_params(params)\n check_model_params(params)\n\n # run experiment\n main(params)\n","repo_name":"facebookresearch/TransCoder","sub_path":"XLM/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15899,"program_lang":"python","lang":"en","doc_type":"code","stars":1646,"dataset":"github-code","pt":"3"} +{"seq_id":"4245108403","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\n# Created: 2th January 2017\n# authors: Guillaume Perez\nimport random\nfrom math import log\nfrom Tkinter import * # python 3 tkinter lowercase \nimport turtle\nfrom bandit import *\nfrom histo import *\nfrom csv_machine import *\n\nclass BanditAddFrame(Frame):\n \"\"\"docstring for BanditAdd\"\"\"\n def __init__(self, master , main_window, **kwargs):\n Frame.__init__(self,master,**kwargs)\n self.master = master\n self.main_window = main_window\n self.type_var = IntVar()\n self.type_var.set(0)\n self.entry_1_var = StringVar()\n self.entry_1_var.set(0)\n self.entry_2_var = StringVar()\n self.entry_2_var.set(0)\n possible_machines = [\"Gaussian\", \"Pseudo Random\",'CSV']\n 
self.radios = []\n for i, m in enumerate(possible_machines):\n self.radios.append(Radiobutton(self, text=m, value=i, variable=self.type_var, command=self.set_config))\n self.entry_1 = Entry(self,textvariable = self.entry_1_var ,width=10)\n self.entry_2 = Entry(self,textvariable = self.entry_2_var ,width=10)\n self.label_entry_1 = Label(self, text=\"\")\n self.label_entry_2 = Label(self, text=\"\")\n self.valid_button = Button(self,text=\"Add\", command=self.add)\n #Placement\n for i,br in enumerate(self.radios):\n br.grid(row=i,column=0)\n self.label_entry_1.grid(row=0,column=1)\n self.label_entry_2.grid(row=1,column=1)\n self.entry_1.grid(row=0,column=2)\n self.entry_2.grid(row=1,column=2)\n self.valid_button.grid(row=2,column=1)\n # \n self.set_config()\n\n def set_config(self):\n if self.type_var.get() == 0: ## Gaussian\n self.label_entry_1['text'] = \"Mu\"\n self.label_entry_2['text'] = \"Sigma\"\n elif self.type_var.get() == 1: ## Pseudo Random\n self.label_entry_1['text'] = \"Min\"\n self.label_entry_2['text'] = \"Max\"\n elif self.type_var.get() == 2: ## CSV file\n self.label_entry_1['text'] = \"filename\"\n self.label_entry_2['text'] = \"delimiter\"\n self.entry_2_var.set(\";\")\n\n\n def add(self):\n if self.type_var.get() == 0: ## Gaussian\n mu = float(self.entry_1_var.get())\n sigma = float(self.entry_2_var.get())\n self.main_window.list_machines.append(MachineGaussian(mu,sigma))\n elif self.type_var.get() == 1: ## Pseudo Random\n min_value = float(self.entry_1_var.get())\n max_value = float(self.entry_2_var.get())\n self.main_window.list_machines.append(MachinePseudoRandom(min_value,max_value))\n elif self.type_var.get() == 2: ## Pseudo Random\n filename = self.entry_1_var.get()\n delim = self.entry_2_var.get()\n machines = get_csv_machine(filename,delim)\n for m in machines:\n self.main_window.list_machines.append(m)\n self.main_window.update_machines()\n self.master.destroy()\n\n\nclass BanditSel(Frame):\n \"\"\"docstring for BanditSel\"\"\"\n def __init__(self, master, **kwargs):\n Frame.__init__(self, master, **kwargs)\n self.master = master\n self.add_button = Button(self,text='add', command = self.add_bandit)\n self.list_machines = []\n self.list_label = []\n self.number_of_machines = 0\n self.add_button.pack(side=BOTTOM)\n\n def update_machines(self):\n for i in xrange(self.number_of_machines,len(self.list_machines)):\n self.list_label.append(Label(self,text=self.list_machines[i]))\n self.list_label[i].pack()\n self.master.minmaxrun.run_button['state'] = 'normal'\n\n def add_bandit(self):\n \"\"\"\n Create an Graphic Interface for adding a Bandit\n \"\"\"\n new_window = Toplevel()\n BAF = BanditAddFrame(new_window,self)\n BAF.pack()\n\nclass MinMaxRunFrame(Frame):\n \"\"\"docstring for MinMaxRunFrame\"\"\"\n def __init__(self, master,**kwargs):\n Frame.__init__(self, master, **kwargs)\n self.run_button = Button(self, text='run', command=master.LanceBandit)\n self.run_button['state'] = DISABLED\n self.var_optim = IntVar()\n self.var_optim.set(0)\n r_min = Radiobutton(self, text=\"min\", value=0, variable=self.var_optim)\n r_max = Radiobutton(self, text=\"max\", value=1, variable=self.var_optim)\n #pack\n self.run_button.pack(side=RIGHT) \n r_min.pack(side=RIGHT)\n r_max.pack(side=RIGHT) \n\n\nclass BanditMainFrame(Frame):\n \"\"\"docstring for BanditMainFrame\"\"\"\n def __init__(self, master,**kwargs):\n Frame.__init__(self, master, **kwargs)\n self.master = master\n self.label = Label(self, text = \"Number of iterations : \")\n self.value = StringVar()\n 
self.value.set('256')\n self.label_resultat = Label(self, text = \"\")\n self.label_resultat.grid(row = 1, column = 1)\n self.nb_iter = Entry(self, textvariable=self.value, width=30)\n self.BDS = BanditSel(self)\n self.histo = HistogramTurtleFrame(self)\n self.minmaxrun = MinMaxRunFrame(self)\n #placement\n self.label.grid(row = 0, column = 0)\n self.nb_iter.grid(row = 0, column = 1)\n self.BDS.grid(row=2, column = 1)\n self.histo.grid(row=2,column=0)\n self.minmaxrun.grid(row = 1, column = 0)\n\n def LanceBandit(self):\n lm = self.BDS.list_machines\n if self.minmaxrun.var_optim.get()==0: #minimization\n bandit1 = BanditMin(lm, int(self.value.get()))\n else : #Maximization\n bandit1 = BanditMax(lm, int(self.value.get()))\n bandit1.run()\n self.label_resultat['text'] = \"Total : \" + str(bandit1.value) + \", average = {0:.2f}\".format(bandit1.value / float(self.value.get()))\n stats = [sum([1 for m in bandit1.choice_m if m == i]) for i in range(len(lm))]\n self.histo.drawHistogram(stats,lm)\n HistoGen(stats,lm)\n\n\n\n\n\n\n\n\n","repo_name":"memo-p/Bandit","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39409001069","text":"import numpy as np\nimport glob\n\ndef read_freq(fname):\n f=open(fname,'r')\n ll=f.readlines()\n nl=len(ll)\n tags=ll[2].strip().split(',')\n #nc=len(tags)-1\n nc=len(tags)\n \n beam=np.zeros([nl-2,nc])\n for i in range(nl-2):\n line=ll[i+2]\n line=line.strip()\n #line=line[:-1]\n beam[i,:]=np.fromstring(line,sep=',')\n phi=beam[:,0].copy()\n beam=beam[:,1:].copy()\n line=ll[1].strip()[:-1]\n line=line[line.find(',')+1:]\n #print(line)\n th=np.fromstring(line,sep=',')\n return beam,th,phi\n\n\ndef read_dir(dirname):\n fnames=glob.glob(dirname+\"/*.csv\")\n #fnames.sort()\n nfreq=len(fnames)\n\n freqs=np.zeros(nfreq)\n for i in range(nfreq):\n tags=fnames[i].split('/')\n tags=tags[-1].split('M')\n freqs[i]=np.int(tags[0])\n inds=freqs.argsort()\n\n\n\n beam,th,phi=read_freq(fnames[inds[0]])\n beam_mat=np.zeros([nfreq,beam.shape[0],beam.shape[1]])\n beam_mat[0,:,:]=beam\n freqs[0]=freqs[inds[0]]\n for i in range(1,nfreq):\n fname=fnames[inds[i]]\n beam_mat[i,:,:],tt,pp=read_freq(fname)\n tags=fname.split('/')\n tags=tags[-1].split('M')\n freqs[i]=np.int(tags[0])\n\n return beam_mat,freqs,th,phi\n","repo_name":"sievers/beam_covariance","sub_path":"read_highz.py","file_name":"read_highz.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29255001497","text":"from nile.api.v1 import (\n clusters,\n local,\n)\n\nfrom maps.geoq.hypotheses.flats.lib import exact_entrance\n\nfrom data_exact_entrance import (\n ENTRANCE_FLAT_RANGE,\n FT_ADDR,\n FT_CENTER,\n FT_NM,\n HYPOTHESIS_DATA,\n NODE,\n)\nfrom utils import (\n prepare_source,\n to_records,\n)\n\n\ndef test_prepare_exact_entrance_hypothesis():\n cluster = clusters.MockCluster()\n job = cluster.job()\n exact_entrance.prepare(job, '')\n\n hypothesis = []\n job.local_run(\n sources={\n 'entrance_flat_range': prepare_source(ENTRANCE_FLAT_RANGE),\n 'ft_addr': prepare_source(FT_ADDR),\n 'ft_center': prepare_source(FT_CENTER),\n 'ft_nm': prepare_source(FT_NM),\n 'node': prepare_source(NODE),\n },\n sinks={\n 'output_table': local.ListSink(hypothesis),\n }\n )\n\n assert hypothesis == 
to_records(HYPOTHESIS_DATA)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_exact_entrance.py","file_name":"test_exact_entrance.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36576611135","text":"import osCheckers.utils as utils\nfrom pathlib import Path\nimport shutil\nimport subprocess\n\nPROCESS_SUCCESS = 0\nBASIC_FUNC_FAILED_RET_CODE = 242\nSRC_PATH = '/home/user/work/OS_autoGrader/osCheckers/pt_hw12020a/'\n\ndef build_students_directories():\n assignments_path = Path(\"/home/user/work/OS_autoGrader/assignments/\")\n\n # Create a directory for each student.\n for student_file in assignments_path.iterdir(): # iterate on all students folders!\n student_name, student_id = utils.get_student_name_id_from_file_or_dir(str(student_file))\n student_dir_path = assignments_path / f\"{student_name}_{student_id}\"\n try:\n student_dir_path.mkdir()\n except FileNotFoundError as e:\n print(e)\n continue\n \n shutil.copy2(SRC_PATH + 'os.c', student_dir_path)\n shutil.copy2(SRC_PATH + 'os.h', student_dir_path)\n # Check if copying succeeded:\n if not (student_dir_path / 'os.c').exists() or not (student_dir_path / 'os.h').exists():\n print(\"copying files to student dir failed.\")\n continue\n # Rename files to pt.c\n pt_new_path = student_file.rename('pt.c')\n shutil.copy2(str(pt_new_path), student_dir_path)\n if not (student_dir_path / 'pt.c').exists():\n print(\"copying files to student dir failed.\")\n continue\n\n\ndef compile_students_files():\n assignments_path = Path(\"/home/user/work/OS_autoGrader/assignments/\")\n utils.open_names_csv() # Create the grades Excel\n\n # Create a directory for each student.\n for student_dir in assignments_path.iterdir(): # Example: Yuval Helman_315581819\n student_name, student_id = utils.get_student_name_id_from_file_or_dir(str(student_dir), is_dir=True)\n student_dir_path = assignments_path / f\"{student_name}_{student_id}\"\n\n with utils.currentWorkingDir(str(student_dir_path)):\n sp = subprocess.run(['gcc', '-o3', '-w', '-Wall', '-std=c11', 'os.c', 'pt.c', \"-o\", \"tester\"])\n if sp.returncode != PROCESS_SUCCESS:\n print(\"compilation failed for user \", student_name, \"_\", student_id)\n utils.write_to_grades_csv(student_name, student_id, 0, \"compilation error\")\n else:\n run_test_for_user(student_dir_path, student_name, student_id)\n\n\ndef run_test_for_user(student_dir_path: str, student_name, student_id):\n with utils.currentWorkingDir(str(student_dir_path)):\n sp = subprocess.run(['./tester'], capture_output=True, shell=True)\n if isinstance(sp, subprocess.CompletedProcess) and sp.returncode == PROCESS_SUCCESS:\n tester_output = sp.stdout.decode(\"utf-8\")\n student_comments = utils.remove_two_last_lines_from_string(tester_output)\n student_grade = tester_output.split('\\n')[-2]\n utils.write_to_grades_csv(student_name, student_id, student_grade, student_comments)\n if student_grade != '100':\n print(student_grade, '-', tester_output) # DEBUG\n else: # Run a basic-sanity check..\n sp_sanity = subprocess.run(['./tester', '--sanity_check'], capture_output=True, shell=True)\n tester_output = sp.stdout.decode(\"utf-8\")\n if sp_sanity.returncode != BASIC_FUNC_FAILED_RET_CODE:\n utils.write_to_grades_csv(student_name, student_id, 75,\n f\"program raised sig_fault while tested. only basic functionality passed. 
\")\n print('75 -', tester_output) # DEBUG\n else:\n utils.write_to_grades_csv(student_name, student_id, 60, f\"basic functionality fails. \")\n print('60 -', tester_output) # DEBUG\n\n\nif __name__ == '__main__':\n ''' Run build_Students_directories() on a directory where all the student's c-files are present.\n THen run compile_students_files() to compile and generate results csv. '''\n # build_students_directories()\n compile_students_files()\n","repo_name":"YuvalHelman/OS_autoGrader","sub_path":"osCheckers/pt_hw12020a/compileGrades.py","file_name":"compileGrades.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8553845562","text":"import random\r\n\r\nimport numpy as np\r\nfrom collections import defaultdict\r\nimport os\r\nimport timeit\r\nimport argparse\r\n\r\ndef readtrainfile(file):\r\n triples = []\r\n hr_t = defaultdict(set)\r\n ht_r = defaultdict(set)\r\n h_t = defaultdict(set)\r\n t_h = defaultdict(set)\r\n r_hrt = defaultdict(list)\r\n f = open(file, 'r', encoding='utf-8')\r\n for line in f.readlines():\r\n h, r, t = line.strip().split('\\t')\r\n triples.append((h, r, t))\r\n hr_t[(h, r)].add(t)\r\n ht_r[(h, t)].add(r)\r\n h_t[h].add(t)\r\n t_h[t].add(h)\r\n r_hrt[r].append((h, r, t))\r\n return triples, hr_t, ht_r, h_t, t_h, r_hrt\r\n\r\n\r\ndef generateAxioms(triple_data, p, g, dataset_dir):\r\n triples, hr_t, ht_r, h_t, t_h, r_hrt = triple_data\r\n num_axiom_types = 10\r\n reflexive, symmetric, transitive, \\\r\n equivalent, inverse, subProperty, \\\r\n inferenceChain1, inferenceChain2, \\\r\n inferenceChain3, inferenceChain4 = [set() for i in range(num_axiom_types)]\r\n count = 0\r\n\r\n # 关系组成字典\r\n realtionCos = {}\r\n\r\n for rel in r_hrt.keys():\r\n N = len(r_hrt[rel])\r\n pN = p * N\r\n num_samples = round(N - N * pow(1 - g, 1 / pN))\r\n np.random.shuffle(r_hrt[rel])\r\n num_triples = min(num_samples, len(r_hrt[rel]))\r\n print(\"num_triples\", num_triples)\r\n hrts = r_hrt[rel][:num_triples]\r\n # 每轮打印\r\n if count % 1 == 0:\r\n print('num:%d / reflexive:%d / symmetric:%d '\r\n '/ transitive:%d / inverse:%d / equivalent: %d '\r\n '/ subProperty: %d/ inferenceChain1: %d '\r\n '/ inferenceChain2: %d / inferenceChain3: %d '\r\n '/ inferenceChain4: %d'\r\n % (count, len(reflexive), len(symmetric),\r\n len(transitive), len(inverse), len(equivalent),\r\n len(subProperty), len(inferenceChain1),\r\n len(inferenceChain2), len(inferenceChain3),\r\n len(inferenceChain4)))\r\n\r\n count_triples = 0\r\n for h, r, t in hrts:\r\n print(count_triples, end='\\r')\r\n count_triples += 1\r\n\r\n # 1 relexive\r\n if h == t:\r\n reflexive.add((r,))\r\n # reflexive.add((h, r, t, h, r, t))\r\n\r\n key = str(r) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n line_new = str(2) + \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n\r\n with open(\"two.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(line_new)\r\n\r\n # 2 symmetric\r\n if (t, r, h) in r_hrt[r]:\r\n symmetric.add((r,))\r\n # symmetric.add((t, r, h, h, r, t))\r\n key = str(r) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n line_new = str(2) + \"\\t\" + \"(\" + t + \"\\t\" 
+ r + \"\\t\" + h + \")\" \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n\r\n with open(\"two.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(line_new)\r\n\r\n # 3 transitive\r\n for t_tmp in hr_t[(h, r)]:\r\n if t_tmp != t and (t_tmp, r, t) in r_hrt[r]:\r\n transitive.add((r,))\r\n # transitive.add((t_tmp, r, t, h, r, t))\r\n\r\n key = str(r) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n line_new = str(2) + \"\\t\" + \"(\" + t_tmp + \"\\t\" + r + \"\\t\" + t + \")\" \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n with open(\"two.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(line_new)\r\n\r\n # 4 equivalent and 6 subProperty\r\n for r_tmp in ht_r[(h, t)]:\r\n if r_tmp != r:\r\n equivalent.add((r, r_tmp))\r\n subProperty.add((r, r_tmp))\r\n # equivalent.add((h, r_tmp, t, h, r, t))\r\n # subProperty.add((h, r_tmp, t, h, r, t))\r\n\r\n key = str(r_tmp) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n line_new = str(2) + \"\\t\" + \"(\" + h + \"\\t\" + r_tmp + \"\\t\" + t + \")\" \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n with open(\"two.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(line_new)\r\n\r\n # 5 inverse\r\n if (t, h) in ht_r.keys():\r\n for r_tmp in ht_r[(t, h)]:\r\n inverse.add((r, r_tmp))\r\n # inverse.add((t, r_tmp, h, h, r, t))\r\n\r\n key = str(r_tmp) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n line_new = str(2) + \"\\t\" + \"(\" + t + \"\\t\" + r_tmp + \"\\t\" + h + \")\" \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n with open(\"two.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(line_new)\r\n\r\n # 7 inferenceChain\r\n # h --> e --> t\r\n h_e = h_t[h]\r\n t_e = t_h[t]\r\n e_common = h_e.intersection(t_e)\r\n # for e in e_common:\r\n # for r1 in ht_r[(h, e)]:\r\n # for r2 in ht_r[(e, t)]:\r\n # inferenceChain.add((r, r1, r2))\r\n for e in e_common:\r\n # h -> e -> t\r\n for r1 in ht_r[(h, e)]:\r\n for r2 in ht_r[(e, t)]:\r\n inferenceChain3.add((r, r1, r2))\r\n # inferenceChain3.add((h, r1, e, e, r2, t, h, r, t))\r\n\r\n key = str(r1) + \"+\" + str(r2) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n\r\n s = str(3) + \"\\t\" + \"(\" + h + \"\\t\" + r1 + \"\\t\" + e + \")\" \\\r\n \"\\t\" + \"(\" + e + \"\\t\" + r2 + \"\\t\" + t + \")\" + \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n with open(\"groundings.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(s)\r\n\r\n # h <- e -> t\r\n for r1 in ht_r[(e, h)]:\r\n for r2 in ht_r[(e, t)]:\r\n inferenceChain1.add((r, r1, r2))\r\n # inferenceChain1.add((e, r1, h, e, r2, t, h, r, t))\r\n\r\n key = str(r1) + \"+\" + str(r2) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n s = str(3) + \"\\t\" + \"(\" + e + \"\\t\" + r1 + \"\\t\" + h + 
\")\" \\\r\n \"\\t\" + \"(\" + e + \"\\t\" + r2 + \"\\t\" + t + \")\" + \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n with open(\"groundings.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(s)\r\n\r\n # h <- e <- t\r\n for r1 in ht_r[(e, h)]:\r\n for r2 in ht_r[(t, e)]:\r\n inferenceChain2.add((r, r1, r2))\r\n # inferenceChain2.add((e, r1, h, t, r2, e, h, r, t))\r\n\r\n key = str(r1) + \"+\" + str(r2) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n s = str(3) + \"\\t\" + \"(\" + h + \"\\t\" + r1 + \"\\t\" + e + \")\" \\\r\n \"\\t\" + \"(\" + t + \"\\t\" + r2 + \"\\t\" + e + \")\" + \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n with open(\"groundings.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(s)\r\n\r\n # h -> e <- t\r\n for r1 in ht_r[(h, e)]:\r\n for r2 in ht_r[(t, e)]:\r\n inferenceChain4.add((r, r1, r2))\r\n # inferenceChain4.add((h, r1, e, t, r2, e, h, r, t))\r\n\r\n key = str(r1) + \"+\" + str(r2) + \"+\" + str(r)\r\n score = 0\r\n if key in realtionCos:\r\n score = realtionCos[key]\r\n else:\r\n score = str(random.uniform(0.9, 1.0))\r\n realtionCos[key] = score\r\n\r\n s = str(3) + \"\\t\" + \"(\" + h + \"\\t\" + r1 + \"\\t\" + e + \")\" \\\r\n \"\\t\" + \"(\" + t + \"\\t\" + r2 + \"\\t\" + e + \")\" + \\\r\n \"\\t\" + \"(\" + h + \"\\t\" + r + \"\\t\" + t + \")\" + \"\\t\" + score + \"\\n\"\r\n with open(\"groundings.txt\", \"a\", encoding=\"utf-8\") as fa:\r\n fa.write(s)\r\n\r\n count += 1\r\n if count > 3:\r\n break\r\n\r\n print('finish processing')\r\n print('write reflexive file')\r\n writefile(reflexive, os.path.join(dataset_dir, 'axiom_pool/axiom_aeflexive.txt'), 1)\r\n print('write symmetric file')\r\n writefile(symmetric, os.path.join(dataset_dir, 'axiom_pool/axiom_symmetric.txt'), 1)\r\n print('write transitive file')\r\n writefile(transitive, os.path.join(dataset_dir, 'axiom_pool/axiom_transitive.txt'), 1)\r\n print('write inverse file')\r\n writefile(inverse, os.path.join(dataset_dir, 'axiom_pool/axiom_inverse.txt'), 2)\r\n print('write equivalent file')\r\n writefile(equivalent, os.path.join(dataset_dir, 'axiom_pool/axiom_equivalent.txt'), 2)\r\n print('write subProperty file')\r\n writefile(subProperty, os.path.join(dataset_dir, 'axiom_pool/axiom_subProperty.txt'), 2)\r\n print('write inferenceChain1 file')\r\n writefile(inferenceChain1, os.path.join(dataset_dir, 'axiom_pool/axiom_inferenceChain1.txt'), 3)\r\n print('write inferenceChain2 file')\r\n writefile(inferenceChain2, os.path.join(dataset_dir, 'axiom_pool/axiom_inferenceChain2.txt'), 3)\r\n print('write inferenceChain3 file')\r\n writefile(inferenceChain3, os.path.join(dataset_dir, 'axiom_pool/axiom_inferenceChain3.txt'), 3)\r\n print('write inferenceChain4 file')\r\n writefile(inferenceChain4, os.path.join(dataset_dir, 'axiom_pool/axiom_inferenceChain4.txt'), 3)\r\n\r\ndef writefile(axioms, file, num_element):\r\n with open(file, 'w', encoding='utf-8') as f:\r\n for obj in axioms:\r\n for i in range(num_element):\r\n f.write(obj[i])\r\n if i == num_element - 1:\r\n f.write('\\n')\r\n else:\r\n f.write('\\t')\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description='Experiment setup')\r\n parser.add_argument('--dataset_dir', dest='dataset_dir', type=str, default='ROANicews05-15\\\\ROANicews05-15\\\\')\r\n parser.add_argument('--train_file', 
dest='train_file', type=str, default='rule-train.txt')\r\n parser.add_argument('--axiom_probability', dest='axiom_probability', type=float, default=0.5)\r\n parser.add_argument('--axiom_proportion', dest='axiom_proportion', type=float, default=0.95)\r\n option = parser.parse_args()\r\n file_train = os.path.join(option.dataset_dir, option.train_file)\r\n start = timeit.default_timer()\r\n p = option.axiom_probability\r\n g = option.axiom_proportion\r\n triple_data = readtrainfile(file_train)\r\n generateAxioms(triple_data, p, g, option.dataset_dir)\r\n end = timeit.default_timer()\r\n print('cost time:', end - start)\r\n","repo_name":"DMKE-Lab/TPRG","sub_path":"axiomPoolsNew.py","file_name":"axiomPoolsNew.py","file_ext":"py","file_size_in_byte":13227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29059180257","text":"import os\n\nimport pytest\nimport requests\n\nTEST_HOST = os.environ.get('TEST_HOST', '')\n\ntry:\n import library.python\n import pkgutil\n\n ARCADIA_RUN = True\n\nexcept ImportError:\n ARCADIA_RUN = False\n\n\n@pytest.fixture\ndef refs():\n\n test_host = 'https://refs-test.paysys.yandex.net'\n\n if TEST_HOST and TEST_HOST != 't':\n # t для обратной совместимости со временами,\n # когда по умолчанию тесты ходили на localhost.\n test_host = 'http://localhost:8080' if TEST_HOST == 'l' else TEST_HOST\n\n return test_host\n\n\n@pytest.fixture\ndef refs_get(refs):\n\n def refs_get_(url, *, fields=None):\n query_url = f'{refs}{url}'\n\n if fields:\n query_url = query_url % {'fields': ' '.join(fields)}\n\n result = requests.get(url=query_url, verify=False).json()\n return result\n\n return refs_get_","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/regression/conftest (3).py","file_name":"conftest (3).py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4734357534","text":"from flask import Flask, render_template, request, redirect\r\napp = Flask(__name__)\r\n@app.route('/')\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n@app.route('/ninjas')\r\ndef allninjas():\r\n return render_template('ninjas.html')\r\n\r\n@app.route('/ninjas/')\r\ndef ninjaPick(color):\r\n selected=''\r\n if color==\"purple\":\r\n selected=\"donatello.jpg\"\r\n elif color==\"blue\": \r\n selected=\"leonardo.jpg\"\r\n elif color==\"orange\": \r\n selected=\"michelangelo.jpg\"\r\n elif color==\"red\": \r\n selected=\"raphael.jpg\"\r\n else:\r\n selected=\"notapril.jpg\"\r\n return render_template('allninjas.html', selected=selected)\r\n\r\napp.run(debug=True)","repo_name":"ElenaMBaudrit/flask_fundamentals_exercises","sub_path":"Ninja/ninja-server.py","file_name":"ninja-server.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9816787114","text":"# from google.cloud import pubsub_v1\n\n# project_id = \"thelab-240901\"\n# topic_name = \"subscriptions/test\"\n\n# publisher = pubsub_v1.PublisherClient()\n# topic_path = publisher.topic_path(project_id, topic_name)\n\n# topic = publisher.create_topic(topic_path)\n\n# print('Topic created: {}'.format(topic))\n\nimport os\nfrom google.cloud import pubsub_v1\nfrom google.api_core.exceptions import AlreadyExists\nfrom serviceclient import TaxiCounts\n\ntopic_name = 'projects/{project_id}/topics/{topic}'.format(\n project_id = \"pubsub-public-data\",\n 
topic='taxirides-realtime', # Set this to something appropriate.\n )\nproject_id = \"thelab-240901\"\n\nsubscription_name = 'projects/{project_id}/subscriptions/{sub}'.format(\n project_id=project_id,\n sub='taxirides_test', # Set this to something appropriate.\n)\n\ndef setup(topic_name, subscribeription_name):\n subscriber = pubsub_v1.SubscriberClient()\n print(\"subscriber: \", subscriber)\n print(\"topic_name: \", topic_name)\n print(\"subscription_name: \", subscription_name)\n try:\n subscriber.create_subscription(\n name=subscription_name, topic=topic_name)\n except AlreadyExists as e:\n print(\"The subscription topic already exists. Moving ahead to subscribing.\",)\n except Exception as e:\n print(e)\n return subscriber\n\n# def callback(message):\n# print(\"Message: \")\n# print(message.data)\n# # print(\".\", end = '')\n# message.ack()\n\ndef save_and_ack(t,subscriber, received_messages):\n if t.add_counts(len(received_messages)):\n ack_ids = [msg.ack_id for msg in received_messages]\n subscriber.acknowledge(subscription_name, ack_ids)\n \nif __name__ == '__main__':\n import sys\n #if len(sys.argv) != 2:\n # print(\"%s \" % (sys.argv[0]))\n # exit(1)\n subscriber = setup(topic_name, subscription_name)\n\n t = TaxiCounts(\"http://taxiservice-main:80\")\n # future = subscriber.subscribe(subscription_name, callback)\n # future.result()\n while True:\n # subscription_path = subscriber.subscription_path(project_id, subscription_name)\n response = subscriber.pull(subscription_name, max_messages=10)\n save_and_ack(t, subscriber, response.received_messages)\n","repo_name":"swenggmanjeet/taxi-stream-processing","sub_path":"feedconsumers/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40322777771","text":"from django.contrib import messages\nfrom django.shortcuts import redirect, render\n\nfrom users.forms import UserRegisterForm\n\n\ndef registration(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f'Your account has been created successfully. 
You can now login 😊')\n return redirect('users:login')\n\n else:\n form = UserRegisterForm()\n\n return render(request, 'users/registration.html', {'form': form})\n","repo_name":"theyashshahs/Tune","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"41498956134","text":"import numpy as np\n\n# 随机生产[2,4]的数组 ,均值1,7 方差0.1\narr = np.random.normal(1.7, 0.1, (2, 4))\n# 数组重排列\narr = arr.reshape(4, 2)\n# 定义一个权重数组\nq = [0.1, 0.9]\n# 使用权重方法\narr_dot = np.dot(arr, q)\n# 定义一个全部为0的数组\narr_zero = np.zeros([4, 3])\n# 定义一个全部为1的数组\narr_one = np.ones([4, 3])\n# 将数组中所有的数都加5\narr_add_five = arr_one + 5\n# 垂直拼接\narr_vstack = np.vstack((arr_one, arr_zero))\n# 水平拼接\narr_hstack = np.hstack((arr_one, arr_zero))\n# 均值\narr_mean = np.mean(arr)\n# 轴最大值\narr_max = np.amax(arr, 1)\n# 轴最小值\narr_min = np.amin(arr, 0)\n# 条件判断\narr_big = arr > 1.7\n# 三目运算\narr_3_big = np.where(arr > 1.7, 1, 0)\n\n# print(arr)\n# print(arr_dot)\n# print(arr_zero)\n# print(arr_one)\n# print(arr_add_five)\n# print(arr_vstack)\n# print(arr_hstack)\n# print(arr_mean)\n# print(arr_max)\n# print(arr_min)\n# print(arr_big)\n# print(arr_3_big)\n","repo_name":"hjmbt/python","sub_path":"exercise_numpy/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5354101324","text":"# tuple - rappresenting unchangeable sequence of objects, different eachother\n\nt = 'abc', 123, 45.67 # we have here string, integer, float # comma creates tuple\nt # ('abc', 123, 45.67)\ntype(t) # \ntp = ('abc', 123, 45.67) # it is always better to use brackets ()\nt == tp #True\nlen((1, 'a', 2.3)) #3 double brackets\n\nlen(1, 'a', 2.3) # one brackets, error\n#Traceback (most recent call last):\n# File \"\", line 1, in \n#TypeError: len() takes exactly one argument (3 given)\n\nt = 'abc'\nt #'abc'\ntv = () # empty tuple\ntv #()\ntype(tv) #\nlen(tv) #0\n\n\n\n#indexing, slicing, contenimento, concatenamento, ripetizione\n\nt = ('abc', 123, 45.67)\nt[0] #'abc'\nt[:2] #('abc', 123)\n123 in t #True\nt + ('xyz', 890) #('abc', 123, 45.67, 'xyz', 890)\nt*2 #('abc', 123, 45.67, 'abc', 123, 45.67)\n\n# tuple are unchangeable, it will be not possible to add or remove or modify objects\nt[0] = 'xyz'\n#Traceback (most recent call last):\n# File \"\", line 1, in \n#TypeError: 'tuple' object does not support item assignment\n\nlen(('abc', 123, 45.67, 'xyz', 890)) #5\nmin((4, 1, 7, 5)) #1\nmax((4, 1, 7, 5)) #7\nt = ('a', 'b', 'c', 'b', 'a')\nt.index('c') #2 # index starts from 0\nt.count('c') #1\nt.count('b') #2 # number of times we have 'b'\n\n","repo_name":"danilonastasi/python_tutorial","sub_path":"Py.code_tuple.py","file_name":"Py.code_tuple.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20092993305","text":"import pickle, time, glob, argparse\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom scipy.io import loadmat\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--datadir', type=str, default='Data/UnRel')\nparser.add_argument('--humanlabels_to_onehot', type=str, default='COCOStuff/humanlabels_to_onehot.pkl')\nparser.add_argument('--labels_unrel', type=str, default='UnRel/labels_unrel.pkl')\narg = vars(parser.parse_args())\nprint('\\n', arg, '\\n')\n\n\n# Load COCOStuff 
labels\nhumanlabels_to_onehot = pickle.load(open(arg['humanlabels_to_onehot'], 'rb'))\n\n# Shared classes between COCOStuff biased and UnRel\nshared_classes = ['car', 'bus', 'skateboard']\nshared_classes.append('road') # context category for 'car' and 'bus'\nshared_classes.append('person') # context category for 'skateboard'\n\n# Create a list of image file names (test)\nstart_time = time.time()\nannotations = loadmat('{}/annotations.mat'.format(arg['datadir']))['annotations']\nobject_list = []\nif True:\n count = 0\n labels = {}\n\n # Process images\n for i in range(annotations.shape[0]):\n filename = '{}/images/{}'.format(arg['datadir'], annotations[i][0][0][0][0][0])\n annotation = annotations[i][0][0][0][3] # loadmat creates a deeply nested array\n label = []\n for obj in annotation:\n s = obj[0][0][0][0][0] # loadmat creates a deeply nested array\n object_list.append(s)\n\n if s in shared_classes and humanlabels_to_onehot[s] not in label:\n label.append(humanlabels_to_onehot[s])\n\n label_onehot = torch.nn.functional.one_hot(torch.LongTensor(label), num_classes=171)\n label_onehot = label_onehot.sum(dim=0).float()\n labels[filename] = label_onehot\n\n count += 1\n\n print('Finished processing {} UnRel labels'.format(len(labels)))\n with open(arg['labels_unrel'], 'wb+') as handle:\n pickle.dump(labels, handle)\n\n print('Objects in UnRel:', set(object_list))\n","repo_name":"princetonvisualai/ContextualBias","sub_path":"UnRel/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"14835038697","text":"dados = dict()\nqgols = list()\n\nsomagols = 0\n\ndados['nome'] = str(input('Digite seu nome: ')).strip().upper()\n\nqpart = int(input(f'Quantas partidas {dados[\"nome\"]} jogou? 
'))\n\nfor i in range(0, qpart):\n qgols.append(int(input(f'Quantidade de gols no {i+1}º jogo: ')))\n somagols += qgols[i]\n\ndados['gols'] = qgols[:]\ndados['total gols'] = somagols\n\nprint('-='*20)\nprint(dados)\nprint('-='*20)\nfor k, v in dados.items():\n print(f'O campo {k} tem o valor {v}')\nprint('-='*20)\nprint(f'O jogador {dados[\"nome\"]} jogou {qpart} partidas.')\n\nfor k, v in enumerate(dados['gols']):\n print(f' =>Na {k+1}º partida ele fez {v} gols')\nprint('-='*20)","repo_name":"Thalisson01/Python","sub_path":"Exercício Python #093 - Cadastro de Jogador de Futebol.py","file_name":"Exercício Python #093 - Cadastro de Jogador de Futebol.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23019067700","text":"## LIST##\n\nbob = ['Bob Smith', 42, 30000, 'software']\nsue = ['Sue Jones',45, 40000, 'hardware']\npeople = [bob, sue]\n\nfor person in people:\n print(person[0].split()[-1])\n person[2]*= 1.2\n\n# Iteration tools: list comprehension, maps, generator expression\npays = [person[2] for person in people]\nprint(pays)\n\npays = map(lambda x: x[2], people)\nprint(list(pays))\n\nsum(person[2] for person in people)\n\npeople.append(['Tom', 50, 0, None])\nprint(people[-1][0])\n\n# Field Labels\n\nNAME, AGE, PAY = range(3)\nprint(bob[NAME])\nprint((PAY, bob[PAY]))\n\n# Fetcher function for positional indexing\nbob= [['name','Bob Simith'],['age',42],['pay',10000]]\nsue = [['name','Sue Jones'],['age',45],['pay',20000]]\npeople = [bob, sue]\n\ndef field (record, label):\n for (fname,fvalue) in record:\n if fname == label:\n return fvalue\n\nprint(field(bob,'age'))\n\n## DICTIONARIES ##\n\nbob ={'name':'Bob Smith', 'age':42, 'pay':30000,'job':'dev'}\nsue = {'name':'Sue Jones', 'age':45, 'pay':40000,'job':'hdw'}\n\n# Other ways to create dictionaries\nbob = dict(name= 'Bob Smith', age =42, pay= 30000, job ='dev')\n\nsue = {}\nsue['name']= 'Sue Jones'\nsue['age']= 45\nsue['pay']= 40000\nsue['job']= 'hdw'\n\n# make dict from key values with optional starting value for all keys\nfields = ('name','age','job','pay')\nrecord = dict.fromkeys(fields, None)\nprint(record)\n\n# Dict iteration tools\npeople =[ bob, sue] # create list consisting dict\n\nnames =[person['name'] for person in people] # collect the names\nprint(names)\n\nnames = list(map(lambda x: x['name'], people))\nprint(names)\n\nprint(sum( person['pay'] for person in people))\n\n# using SQL queries\nG= [rec['name'] for rec in people if rec['age'] >= 45]\nprint(G)\nF = (rec['name'] for rec in people if rec['age'] >= 45)\nprint(next(F)) # run the iteration generator one by one\n\nG =[ rec['age']**2 if rec['age'] >= 45 else rec['age'] for rec in people]\nprint(G)\nF= ( (rec['age']**2 if rec['age'] >= 45 else rec['age']) for rec in people)\nprint(F.__next__())\n\n# Nested structures\nbob2 = {\n 'name':{'first':'Bob','last':'Smith'},\n 'age': 42,\n 'job':['software','writing'],\n 'pay': (40000, 50000)\n}\n\nprint(bob2['name'])\nprint(bob2['name']['last'])\n\nfor job in bob2['job']: print(job)\n\nbob2['job'].append('janitor') # bob get new job\nprint(bob2,\"\\n\",\"\\n\")\n\n# creating dict consisting dicts\ndb = {}\ndb['bob'] = bob\ndb['sue'] = sue\n# print(db)\n\n# import pprint\n# pprint.pprint(db) # nice print of dict object\n\nfor key in db:\n print(db[key],db[key]['name'].split()[-1],\"\\n\")\n\nfor record in db.values(): print(record['pay'],\"\\n\")\n\nx = [ db[key]['name'] for key in db]\nprint(x,'\\n')\nx= [ rec['name'] for 
rec in db.values()]\nprint(x,'\\n')\n\n# Add new record\ndb['Tom'] = dict(name =\"Tom\", age = 50, job = None,pay = 0)\nprint(list(db.keys()),'\\n')\nsenior_name = [rec['name'] for rec in db.values() if rec['age'] >= 45]\nprint(senior_name,'\\n')\n","repo_name":"ningningliu/Programming-with-Python","sub_path":"Preview/RepresentRecords.py","file_name":"RepresentRecords.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2492108034","text":"import os\nfrom exception import InvalidMessageException\n\nSANDBOX_FOLDER = '/etc/pinguino/sandbox'\n\ndef create_sandbox_folder(id):\n\n full_path = os.path.join(SANDBOX_FOLDER, id)\n\n if not os.path.exists(full_path):\n os.makedirs(full_path)\n\n return full_path\n\ndef create_file(filename, contents):\n\n with open(filename,\"w+\") as f:\n f.write(contents)\n\ndef extract_attachments(attachments_required, message):\n\n sandbox_folder = create_sandbox_folder(message.id)\n\n for attachment in attachments_required:\n\n attachment_data = message.attachments[attachment]\n attachment_path = os.path.join(sandbox_folder, attachment)\n create_file(attachment_path, attachment_data)\n\n return sandbox_folder\n\ndef validate_attachments(attachments_required, message_attachments):\n\n if not message_attachments or len(message_attachments) != len(attachments_required):\n raise InvalidMessageException('Invalid message attachments')\n\n missing_attachments = []\n\n for attachment in attachments_required:\n if not attachment in message_attachments:\n missing_attachments.append(attachment)\n\n if missing_attachments:\n joined = \", \".join(missing_attachments)\n raise InvalidMessageException('Missing attachment(s) {}'.format(joined))\n\ndef compile(sandbox_folder):\n\n pass","repo_name":"mjgarcia/pinguino","sub_path":"pinguino/tasks/task_common.py","file_name":"task_common.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37683357840","text":"from aiogram import types\nfrom aiogram.dispatcher.handler import CancelHandler\nfrom aiogram.dispatcher.middlewares import BaseMiddleware\nfrom loguru import logger\n\nfrom tgbot.interfaces.connector_bitrix import \\\n EmployeeDoesNotExist, DatabaseError\nfrom tgbot.services.account_promoter import Promoter\nfrom tgbot.services.account_manager import Manager\nfrom tgbot.utils.broadcast import send_messages\nfrom tgbot.config import TG_ADMINS_ID\nfrom tgbot.handlers.start import request_phone\n\n\nclass AccessMiddleware(BaseMiddleware):\n\n async def on_pre_process_message(\n self, message: types.Message, data: dict, *arg, **kwargs):\n user_from_tg = types.User.get_current()\n tg_id = user_from_tg.id\n logger.info(f'user_from_tg: {tg_id}')\n load_answer = await message.answer('Загружаю...')\n try:\n promoter = await Promoter.get(str(tg_id))\n except EmployeeDoesNotExist:\n await send_messages(\n TG_ADMINS_ID,\n f'Promoter EmployeeDoesNotExist {tg_id}')\n promoter = None\n except DatabaseError:\n await send_messages(\n TG_ADMINS_ID,\n f'Promoter DatabaseError {tg_id}')\n promoter = None\n\n if isinstance(promoter, Promoter):\n logger.info(f'promoter: {await promoter.get_vr_code()}')\n data['promoter'] = promoter\n else:\n await load_answer.delete()\n data['promoter'] = None\n data['manager'] = None\n return\n\n try:\n manager = await Manager.get(str(tg_id))\n except EmployeeDoesNotExist:\n # await send_messages(\n # 
TG_ADMINS_ID,\n # f'Manager EmployeeDoesNotExist {tg_id}')\n manager = None\n except DatabaseError:\n await send_messages(\n TG_ADMINS_ID,\n f'Manager DatabaseError {tg_id}')\n manager = None\n\n if isinstance(manager, Manager):\n logger.info(f'manager: {await manager.get_vr_code()}')\n data['manager'] = manager\n\n await load_answer.delete()\n","repo_name":"lermanMax/account_bot","sub_path":"tgbot/middlewares/authentification.py","file_name":"authentification.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35078482887","text":"import sys\nimport types\nfrom functools import (\n WRAPPER_UPDATES,\n WRAPPER_ASSIGNMENTS as FUNCTOOLS_ASSIGNMENTS\n)\nfrom inspect import isclass, isfunction\nfrom .compat import NoneType, signature\nfrom .markers import missing, not_specified\n\n\ndef name_or_repr(obj):\n return getattr(obj, '__name__', None) or repr(obj)\n\n\nclass requires(object):\n \"\"\"\n Represents requirements for a particular callable.\n\n The passed in `args` and `kw` should map to the types, including\n any required :class:`~.declarations.how`, for the matching\n arguments or keyword parameters the callable requires.\n\n String names for resources must be used instead of types where the callable\n returning those resources is configured to return the named resource.\n \"\"\"\n\n def __init__(self, *args, **kw):\n check_type(*args)\n check_type(*kw.values())\n self.args = args\n self.kw = kw\n\n def __iter__(self):\n \"\"\"\n When iterated over, yields tuples representing individual\n types required by arguments or keyword parameters in the form\n ``(keyword_name, decorated_type)``.\n\n If the keyword name is ``None``, then the type is for\n a positional argument.\n \"\"\"\n for arg in self.args:\n yield None, arg\n for k, v in self.kw.items():\n yield k, v\n\n def __repr__(self):\n bits = []\n for arg in self.args:\n bits.append(name_or_repr(arg))\n for k, v in sorted(self.kw.items()):\n bits.append('%s=%s' % (k, name_or_repr(v)))\n txt = 'requires(%s)' % ', '.join(bits)\n return txt\n\n def __call__(self, obj):\n obj.__mush_requires__ = self\n return obj\n\n\nclass ReturnsType(object):\n\n def __call__(self, obj):\n obj.__mush_returns__ = self\n return obj\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass returns_result_type(ReturnsType):\n \"\"\"\n Default declaration that indicates a callable's return value\n should be used as a resource based on the type of the object returned.\n\n ``None`` is ignored as a return value.\n \"\"\"\n\n def process(self, obj):\n if obj is not None:\n yield obj.__class__, obj\n\n\nclass returns_mapping(ReturnsType):\n \"\"\"\n Declaration that indicates a callable returns a mapping of type or name\n to resource.\n \"\"\"\n\n def process(self, mapping):\n return mapping.items()\n\n\nclass returns_sequence(returns_result_type):\n \"\"\"\n Declaration that indicates a callable's returns a sequence of values\n that should be used as a resources based on the type of the object returned.\n\n Any ``None`` values in the sequence are ignored.\n \"\"\"\n\n def process(self, sequence):\n super_process = super(returns_sequence, self).process\n for obj in sequence:\n for pair in super_process(obj):\n yield pair\n\n\nclass returns(returns_result_type):\n \"\"\"\n Declaration that specifies names for returned resources or overrides\n the type of a returned resource.\n\n This declaration can be used to indicate the type or name of a 
single\n returned resource or, if multiple arguments are passed, that the callable\n will return a sequence of values where each one should be named or have its\n type overridden.\n \"\"\"\n\n def __init__(self, *args):\n check_type(*args)\n self.args = args\n\n def process(self, obj):\n if len(self.args) == 1:\n yield self.args[0], obj\n else:\n for t, o in zip(self.args, obj):\n yield t, o\n\n def __repr__(self):\n args_repr = ', '.join(name_or_repr(arg) for arg in self.args)\n return self.__class__.__name__ + '(' + args_repr + ')'\n\n\ndef lazy(obj):\n \"\"\"\n Declaration that specifies the callable should only be called the first time\n it is required.\n \"\"\"\n obj.__mush_lazy__ = True\n return obj\n\n\nclass how(object):\n \"\"\"\n The base class for type decorators that indicate which part of a\n resource is required by a particular callable.\n\n :param type: The resource type to be decorated.\n :param names: Used to identify the part of the resource to extract.\n \"\"\"\n type_pattern = '%(type)s'\n name_pattern = ''\n\n def __init__(self, type, *names):\n check_type(type)\n self.type = type\n self.names = names\n\n def __repr__(self):\n txt = self.type_pattern % dict(type=name_or_repr(self.type))\n for name in self.names:\n txt += self.name_pattern % dict(name=name)\n return txt\n\n def process(self, o):\n \"\"\"\n Extract the required part of the object passed in.\n :obj:`missing` should be returned if the required part\n cannot be extracted.\n :obj:`missing` may be passed in and is usually be handled\n by returning :obj:`missing` immediately.\n \"\"\"\n return missing\n\nclass optional(how):\n \"\"\"\n A :class:`~.declarations.how` that indicates the callable requires the\n wrapped requirement only if it's present in the :class:`~.context.Context`.\n \"\"\"\n type_pattern = 'optional(%(type)s)'\n\n def process(self, o):\n if o is missing:\n return nothing\n return o\n\n\nclass attr(how):\n \"\"\"\n A :class:`~.declarations.how` that indicates the callable requires the named\n attribute from the decorated type.\n \"\"\"\n name_pattern = '.%(name)s'\n\n def process(self, o):\n if o is missing:\n return o\n try:\n for name in self.names:\n o = getattr(o, name)\n except AttributeError:\n return missing\n else:\n return o\n\n\nclass item(how):\n \"\"\"\n A :class:`~.declarations.how` that indicates the callable requires the named\n item from the decorated type.\n \"\"\"\n name_pattern = '[%(name)r]'\n\n def process(self, o):\n if o is missing:\n return o\n try:\n for name in self.names:\n o = o[name]\n except KeyError:\n return missing\n else:\n return o\n\n\nif sys.version_info[0] == 2:\n ok_types = (type, types.ClassType, str, how)\nelse:\n ok_types = (type, str, how)\n\n\ndef check_type(*objs):\n for obj in objs:\n if not isinstance(obj, ok_types):\n raise TypeError(\n repr(obj)+\" is not a type or label\"\n )\n\n\nclass Nothing(requires, returns):\n\n def process(self, result):\n return ()\n\n#: A singleton that be used as a :class:`~mush.requires` to indicate that a\n#: callable has no required arguments or as a :class:`~mush.returns` to indicate\n#: that anything returned from a callable should be ignored.\nnothing = Nothing()\n\n#: A singleton indicating that a callable's return value should be\n#: stored based on the type of that return value.\nresult_type = returns_result_type()\n\n\ndef maybe_optional(p):\n value = p.name\n if p.default is not p.empty:\n value = optional(value)\n return value\n\n\ndef guess_requirements(obj):\n args = []\n kw = {}\n for name, p in 
signature(obj).parameters.items():\n if p.kind in {p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD}:\n args.append(maybe_optional(p))\n elif p.kind is p.KEYWORD_ONLY:\n kw[name] = maybe_optional(p)\n if args or kw:\n return requires(*args, **kw)\n\n\ndef extract_declarations(obj, explicit_requires, explicit_returns, guess=True):\n mush_requires = getattr(obj, '__mush_requires__', None)\n mush_returns = getattr(obj, '__mush_returns__', None)\n annotations = getattr(obj, '__annotations__', None)\n annotations = {} if annotations is None else annotations.copy()\n annotation_returns = annotations.pop('return', None)\n annotation_requires = annotations or None\n\n requires_ = explicit_requires or mush_requires or annotation_requires\n returns_ = explicit_returns or mush_returns or annotation_returns\n\n if isinstance(requires_, requires):\n pass\n elif isinstance(requires_, NoneType):\n if guess:\n requires_ = guess_requirements(obj)\n elif isinstance(requires_, (list, tuple)):\n requires_ = requires(*requires_)\n elif isinstance(requires_, dict):\n requires_ = requires(**requires_)\n else:\n requires_ = requires(requires_)\n\n if isinstance(returns_, (ReturnsType, NoneType)):\n pass\n elif isinstance(returns_, (list, tuple)):\n returns_ = returns(*returns_)\n else:\n returns_ = returns(returns_)\n\n return requires_, returns_\n\n\nWRAPPER_ASSIGNMENTS = FUNCTOOLS_ASSIGNMENTS + (\n '__mush__requires__', '__mush_returns__'\n)\n\n\ndef update_wrapper(wrapper,\n wrapped,\n assigned=WRAPPER_ASSIGNMENTS,\n updated=WRAPPER_UPDATES):\n \"\"\"\n An extended version of :func:`functools.update_wrapper` that\n also preserves Mush's annotations.\n \"\"\"\n # copied here to backport bugfix from Python 3.\n for attr in assigned:\n try:\n value = getattr(wrapped, attr)\n except AttributeError:\n pass\n else:\n setattr(wrapper, attr, value)\n for attr in updated:\n getattr(wrapper, attr).update(getattr(wrapped, attr, {}))\n # Issue #17482: set __wrapped__ last so we don't inadvertently copy it\n # from the wrapped function when updating __dict__\n wrapper.__wrapped__ = wrapped\n # Return the wrapper so this can be used as a decorator via partial()\n return wrapper\n","repo_name":"simplistix/mush","sub_path":"mush/declarations.py","file_name":"declarations.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"17910225172","text":"def remove_string(string,toRemove):\n list = []\n index = 0\n while index < len(string):\n if string[index: index + len(toRemove)] == toRemove:\n index += len(toRemove)\n else:\n list.append(string[index])\n index += 1\n return \"\".join(list)\n\nprint(remove_string(\"SPAM!HelloSPAM! 
worldSPAM!!\",\"SPAM!\"))\n","repo_name":"PitrsxD/Python-learning","sub_path":"while_remove_string.py","file_name":"while_remove_string.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22950924553","text":"import datetime\nimport logging\nfrom typing import Optional, Any\n\nfrom fastapi import FastAPI\nfrom redis import Redis\nfrom starlette import status\nfrom starlette.responses import JSONResponse\n\nfrom app.core.cache import CacheManager\nfrom app.core.logging import configure_logging\n\nlogger = logging.getLogger(__name__)\n\n# init loggers at the beginning\nconfigure_logging()\n\napp = FastAPI()\n\ncacher: Optional[CacheManager] = None\n\n\n@app.on_event(\"startup\")\nasync def startup_event():\n global cacher\n if cacher is None:\n cacher = CacheManager()\n\n\n@app.get(\"/api/get\")\nasync def get_cache(name: str):\n \"\"\" Get from cache \"\"\"\n global cacher\n return cacher.get(name=name)\n\n\n@app.post(\"/api/set\")\nasync def set_cache(name: str, value: Any, ex=None, px=None, nx=False, xx=False, keepttl=False):\n \"\"\" Set to cache \"\"\"\n return cacher.set(name, value, ex, px, nx, xx, keepttl)\n\n\n@app.get(\"/health\")\nasync def health() -> JSONResponse:\n \"\"\"Internal use only - Do not use with a client API\"\"\"\n\n return JSONResponse(\n {\n \"Status\": status.HTTP_200_OK,\n \"Timestamp\": datetime.datetime.now().ctime(),\n }\n )\n","repo_name":"nirooma/eMicroservices","sub_path":"workers/tommy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38025100897","text":"from django.urls import include, path\n\nfrom htmx.views.contact_message import htmx_contact_message\nfrom htmx.views.webinar_mailings import htmx_webinar_mailing_modal\n\nfrom .application_urls import urlpatterns as application_urlpatterns\nfrom .crm_urls import urlpatterns as crm_urlpatterns\n\napp_name = \"htmx\"\n\nurlpatterns = [\n path(\n \"webinar-mailing-modal//\",\n htmx_webinar_mailing_modal,\n name=\"htmx_webinar_mailing_modal\",\n ),\n path(\n \"contact_message\",\n htmx_contact_message,\n name=\"htmx_contact_message\",\n ),\n path(\"application/\", include(application_urlpatterns)),\n path(\"crm/\", include(crm_urlpatterns)),\n]\n","repo_name":"rolzwy7/wykladowcav2","sub_path":"src/htmx/urls/base_urls.py","file_name":"base_urls.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71079719443","text":"from django.urls import path, include\nfrom . import views\nfrom rest_framework import routers\n\nrouter = routers.DefaultRouter()\nrouter.register('polls', views.PollView)\nrouter.register('answers', views.AnswerView)\nrouter.register('questions', views.QuestionView)\n\nurlpatterns = [\n path('', include(router.urls)),\n]\n","repo_name":"arsenitem/heart-web-app","sub_path":"backend/backend-web/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39245833582","text":"import requests\nimport hashlib\nimport re\n\nif(__name__==\"__main__\"):\n\n grab_message = '----- BEGIN MESSAGE -----
\\r\\n\\t\\t(\\w+)'\n flag_search = 'FLAG-\\w+'\n url = 'https://www.ringzer0team.com/challenges/13'\n cookie = {'PHPSESSID': '9to621vstcdm5h26mk2c7i58c0'}\n r = requests.get(url=url, cookies=cookie)\n # Extracting text from the reponse\n text = re.search(grab_message, r.text)\n hash_text = text.group(1)\n print(r.text)\n print(hash_text)\n final_hash = hashlib.sha512(hash_text.encode('utf-8')).hexdigest()\n print(final_hash)\n\n final_url = 'https://www.ringzer0team.com/challenges/13/' + final_hash\n data = requests.get(final_url, cookies=cookie).content\n data = data.decode().split('
')\n\n flag = re.findall(r\"FLAG-\\w+\", data[1])\n print(flag)","repo_name":"xrr-233/CityUF2021","sub_path":"practice from chall/Hash me please.py","file_name":"Hash me please.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29032091957","text":"from ..base import BaseWorkerTestCase\n\nfrom intranet.magiclinks.src.links.dto import List, String, Image\nfrom intranet.magiclinks.src.links.workers.intranet.yandex_internal import Worker as YandexInternalWorker\n\n\nclass YandexInternalTestCase(BaseWorkerTestCase):\n worker_class = YandexInternalWorker\n worker_class_file = 'yandex_internal'\n\n def test_yandex_internal_parse_url(self):\n urls_data = (\n ('https://test.yandex.ru/test',\n 'test.yandex.ru',\n ),\n ('https://www.test.ya.ru/test',\n 'www.test.ya.ru',\n ),\n ('https://yandex.test.ru/some',\n 'yandex.test.ru',\n ),\n ('https://smth.yandex/some',\n 'smth.yandex',\n ),\n ('https://auto.ru/some',\n 'auto.ru',\n ),\n ('https://avto.ru/some',\n 'avto.ru',\n ),\n )\n for url, hostname_match in urls_data:\n self.parse_url(url, hostname_match)\n\n def test_yandex_internal_not_parse_url(self):\n urls_data = (\n 'https://something.yandex-team.ru/test',\n 'https://yandex-team.ru/test',\n 'https://myyandex-team.ru/test',\n 'https://wilya.ru/some',\n 'https://testyatest.ru/some',\n 'https://autotest.ru/some',\n )\n for url in urls_data:\n self.parse_url(url, should_parse=False)\n\n def test_yandex_internal_yandex_host_success(self):\n url = 'https://something.yandex.ru/test'\n expected_data = {\n url: List(\n ttl=86400,\n value=[\n List(\n ttl=86400,\n value=[\n Image(\n src='https://favicon.yandex.net/favicon/something.yandex.ru',\n text='YandexInternal'\n\n )],\n action={\n \"event\": \"click\",\n \"type\": \"halfscreenpreview\",\n \"url\": 'https://something.yandex.ru/test',\n },\n\n ),\n String(\n value=\"https://something.yandex.ru/test\", ),\n ]\n )\n }\n self.loop.run_until_complete(self.response_check(url, expected_data=expected_data, ))\n\n def test_yandex_internal_kinopoisk_host_success(self):\n url = 'https://kinopoisk.test.ru/test'\n expected_data = {\n url: List(\n ttl=86400,\n value=[\n List(\n ttl=86400,\n value=[\n Image(\n src='https://favicon.yandex.net/favicon/kinopoisk.test.ru',\n text='YandexInternal'\n\n )],\n action={\n \"event\": \"click\",\n \"type\": \"halfscreenpreview\",\n \"url\": 'https://kinopoisk.test.ru/test',\n },\n\n ),\n String(\n value=\"https://kinopoisk.test.ru/test\", ),\n ]\n )\n }\n self.loop.run_until_complete(self.response_check(url, expected_data=expected_data, ))\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/workers_tests/intranet/test_yandex_internal.py","file_name":"test_yandex_internal.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22994712440","text":"n = int(input())\npredios = list(map(int,input().split()))\n\ndist = 0\nk = -1\nfor i in range(n):\n d = predios[0] + i + predios[i]\n if d > dist:\n dist = d\n k = i\n \nmax_dist = 0\nfor j in range(n):\n if j != k:\n max_dist = max(max_dist, predios[k] + abs(k-j) + predios[j])\nprint(max_dist)","repo_name":"luisastellet/Beecrowd-extraclasse","sub_path":"Ex3050 - Distância entre amigos.py","file_name":"Ex3050 - Distância entre 
amigos.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22437903962","text":"import logging\n\nfrom network.arbitrum.constants import ArbitrumConstants\nfrom network.network import EVMNetwork\nfrom stargate import StargateConstants\nfrom utility import Stablecoin\n\nlogger = logging.getLogger(__name__)\n\n\nclass Arbitrum(EVMNetwork):\n\n def __init__(self):\n supported_stablecoins = {\n 'USDT': Stablecoin('USDT', ArbitrumConstants.USDT_CONTRACT_ADDRESS, ArbitrumConstants.USDT_DECIMALS,\n ArbitrumConstants.LAYERZERO_CHAIN_ID, StargateConstants.POOLS['USDT']),\n 'USDC': Stablecoin('USDC', ArbitrumConstants.USDC_CONTRACT_ADDRESS, ArbitrumConstants.USDC_DECIMALS,\n ArbitrumConstants.LAYERZERO_CHAIN_ID, StargateConstants.POOLS['USDC'])\n }\n\n super().__init__(ArbitrumConstants.NAME, ArbitrumConstants.NATIVE_TOKEN, ArbitrumConstants.RPC,\n ArbitrumConstants.LAYERZERO_CHAIN_ID, ArbitrumConstants.STARGATE_ROUTER_CONTRACT_ADDRESS,\n supported_stablecoins)\n\n def get_approve_gas_limit(self) -> int:\n return ArbitrumConstants.APPROVE_GAS_LIMIT\n\n def get_max_fee_per_gas(self) -> int:\n # Fixed value\n return 135000000\n\n def get_transaction_gas_params(self) -> dict:\n gas_params = {\n 'maxFeePerGas': self.get_max_fee_per_gas(),\n 'maxPriorityFeePerGas': 0\n }\n\n logger.debug(f\"{self.name} gas params fetched. Params: {gas_params}\")\n\n return gas_params\n","repo_name":"cppmyk/layerzero-bridger","sub_path":"network/arbitrum/arbitrum.py","file_name":"arbitrum.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"3"} +{"seq_id":"29060926067","text":"import itertools\nimport uuid\n\nimport pytest\n\nfrom sendr_utils import utcnow\n\nfrom hamcrest import assert_that, equal_to\n\nfrom billing.yandex_pay.yandex_pay.base.entities.enums import PaymentMethodType\nfrom billing.yandex_pay.yandex_pay.core.actions.ready_to_pay import ReadyToPayAction\nfrom billing.yandex_pay.yandex_pay.core.entities.card import Card\nfrom billing.yandex_pay.yandex_pay.core.entities.enums import AuthMethod, CardNetwork, TSPType\nfrom billing.yandex_pay.yandex_pay.core.entities.merchant import Merchant\nfrom billing.yandex_pay.yandex_pay.core.entities.merchant_origin import MerchantOrigin\nfrom billing.yandex_pay.yandex_pay.core.entities.payment_sheet import PaymentMethod\nfrom billing.yandex_pay.yandex_pay.core.entities.user import User\nfrom billing.yandex_pay.yandex_pay.core.exceptions import (\n CoreInsecureMerchantOriginSchemaError, CoreMerchantOriginNotFound\n)\nfrom billing.yandex_pay.yandex_pay.interactions import TrustPaymentsClient\nfrom billing.yandex_pay.yandex_pay.interactions.trust_payments import TrustPaymentMethod\nfrom billing.yandex_pay.yandex_pay.utils.stats import forbidden_user_agent, forbidden_user_agent_os_family\nfrom billing.yandex_pay.yandex_pay.utils.user_agent import UserAgentInfo\n\nOWNER_UID = 5555\nPREDEFINED_CARD_ID = 'aaf024bb-2f0e-4cad-9010-40328ffcae9a'\nMERCHANT_ID = uuid.UUID('789b29e6-d8f2-4e14-8c3f-33679ca590e3')\nMERCHANT_ORIGIN = 'https://market.yandex.ru'\nMERCHANT_ORIGIN_CANONICAL = 'https://market.yandex.ru:443'\nTRUST_CARD_ID = 'card-x1a1234567a12abcd12345a1a'\n\n\n@pytest.fixture\ndef user():\n return User(OWNER_UID)\n\n\n@pytest.fixture(autouse=True)\nasync def merchant(storage):\n return await storage.merchant.create(\n Merchant(\n merchant_id=MERCHANT_ID,\n name='the-name',\n )\n 
)\n\n\n@pytest.fixture(autouse=True)\nasync def merchant_origin(storage, merchant: Merchant):\n return await storage.merchant_origin.create(MerchantOrigin(\n merchant_id=merchant.merchant_id,\n origin=MERCHANT_ORIGIN_CANONICAL,\n ))\n\n\n@pytest.fixture(autouse=True)\nasync def card(storage):\n return await storage.card.create(\n Card(\n trust_card_id=TRUST_CARD_ID,\n owner_uid=OWNER_UID,\n tsp=TSPType.MASTERCARD,\n expire=utcnow(),\n last4='0000',\n card_id=uuid.UUID(PREDEFINED_CARD_ID),\n )\n )\n\n\n@pytest.fixture(autouse=True)\ndef payment_method():\n return PaymentMethod(\n method_type=PaymentMethodType.CARD,\n gateway='some_gateway',\n gateway_merchant_id='some_gateway_merchant_id',\n allowed_card_networks=[CardNetwork.MASTERCARD],\n allowed_auth_methods=[AuthMethod.PAN_ONLY, AuthMethod.CLOUD_TOKEN],\n )\n\n\n@pytest.fixture\ndef trust_payment_method():\n return TrustPaymentMethod(\n id=TRUST_CARD_ID,\n card_id=TRUST_CARD_ID,\n binding_systems=['trust'],\n orig_uid=str(OWNER_UID),\n payment_method='card',\n system='MasterCard',\n payment_system='MasterCard',\n expiration_month='9',\n expiration_year='2099',\n card_bank='SBERBANK OF RUSSIA',\n expired=False,\n account='1111****1234',\n last_paid_ts=utcnow(),\n binding_ts=utcnow(),\n )\n\n\n@pytest.fixture(autouse=True)\ndef mock_trust_gateway_lpm(mocker, trust_payment_method):\n return mocker.patch.object(\n TrustPaymentsClient,\n 'get_payment_methods',\n mocker.AsyncMock(\n return_value=[trust_payment_method],\n ),\n )\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n 'settings_to_overwrite',\n [{'MERCHANT_ORIGIN_VALIDATION_IS_MANDATORY': True}],\n indirect=True,\n)\nasync def test_ready_to_pay__when_user_has_suitable_payment_method(\n mocker, trust_payment_method, payment_method, user\n):\n mocker.patch.object(\n TrustPaymentsClient,\n 'get_payment_methods',\n mocker.AsyncMock(\n return_value=[trust_payment_method],\n ),\n )\n\n result = await ReadyToPayAction(\n user=user,\n merchant_id=MERCHANT_ID,\n merchant_origin=MERCHANT_ORIGIN,\n payment_methods=[payment_method],\n existing_payment_method_required=True,\n ).run()\n\n assert result['is_ready_to_pay']\n\n\n@pytest.mark.parametrize('user_agent_is_allowed', [True, False])\n@pytest.mark.asyncio\nasync def test_ready_to_pay_checks_user_agent_info(\n mocker,\n trust_payment_method,\n user_agent_is_allowed,\n user,\n):\n \"\"\"\n Action использует check_user_agent_is_allowed и ориентируется на его вердикт.\n \"\"\"\n action_user_agent_check_mock = mocker.patch.object(\n ReadyToPayAction,\n 'check_user_agent_is_allowed',\n mocker.Mock(\n return_value=user_agent_is_allowed,\n ),\n )\n\n result = await ReadyToPayAction(\n user=user,\n merchant_id=MERCHANT_ID,\n merchant_origin=MERCHANT_ORIGIN,\n payment_methods=[],\n existing_payment_method_required=False,\n ).run()\n\n action_user_agent_check_mock.assert_called_once()\n assert result['is_ready_to_pay'] == user_agent_is_allowed\n\n\nclass TestCheckUserAgent:\n def test_allowed(self):\n browsers = [\n 'Chrome', 'ChromeMobile', 'Chromium', 'YandexBrowser', 'YandexBrowserLite',\n 'Safari', 'MobileSafari', 'Firefox', 'YandexSearch',\n ]\n os_families = [\n 'iOS', 'Android', 'Bada', 'BlackBerry', 'ChromeOS', 'FirefoxOS', 'FreeBSD', 'Java', 'Linux', 'MacOS',\n 'MeeGo', 'NetBSD', 'OpenBSD', 'Orbis', 'RIMTabletOS', 'SunOS', 'Symbian', 'Tizen', 'Unknown',\n 'UnknownNix', 'WebOS', 'Windows', 'WindowsMobile', 'WindowsPhone', 'WindowsRT',\n ]\n\n def allowed():\n yield None\n yield UserAgentInfo(user_agent='', data={})\n for browser_name, 
os_family in itertools.product(browsers, os_families):\n yield UserAgentInfo(\n 'user_agent',\n {\n 'BrowserName': browser_name,\n 'OSFamily': os_family,\n }\n )\n\n for user_agent_info in allowed():\n assert ReadyToPayAction.check_user_agent_is_allowed(user_agent_info)\n\n @pytest.mark.parametrize('browsers, os_families', (\n pytest.param(\n ['YandexLauncher'],\n [\n 'iOS', 'Android', 'Bada', 'BlackBerry', 'ChromeOS', 'FirefoxOS', 'FreeBSD', 'Java', 'Linux', 'MacOS',\n 'MeeGo', 'NetBSD', 'OpenBSD', 'Orbis', 'RIMTabletOS', 'SunOS', 'Symbian', 'Tizen', 'Unknown',\n 'UnknownNix', 'WebOS', 'Windows', 'WindowsMobile', 'WindowsPhone', 'WindowsRT',\n ],\n id='test-forbidden-browsers',\n ),\n ))\n def test_not_allowed(self, browsers, os_families):\n\n def not_allowed():\n for browser_name, os_family in itertools.product(browsers, os_families):\n yield UserAgentInfo(\n 'user_agent',\n {\n 'BrowserName': browser_name,\n 'OSFamily': os_family,\n },\n )\n\n for user_agent_info in not_allowed():\n assert not ReadyToPayAction.check_user_agent_is_allowed(user_agent_info)\n\n opera = {'Opera', 'Opera Touch', 'OperaMobile', 'OperaMini'}\n for o in opera:\n assert not ReadyToPayAction.check_user_agent_is_allowed(\n UserAgentInfo('user_agent', {\n 'BrowserName': o,\n 'OSFamily': 'iOS',\n })\n )\n\n\n@pytest.mark.asyncio\nasync def test_now_allowed_should_increment_forbidden_browser_metrics(mocker, user):\n os = 'MyOsFamily'\n\n prev_forbidden = forbidden_user_agent.get()[0][1]\n prev_forbidden_os_faimly = forbidden_user_agent_os_family.labels(os).get()[0][1]\n\n mocker.patch.object(\n ReadyToPayAction,\n 'check_user_agent_is_allowed',\n mocker.Mock(\n return_value=False,\n ),\n )\n\n await ReadyToPayAction(\n user=user,\n merchant_id=MERCHANT_ID,\n merchant_origin=MERCHANT_ORIGIN,\n payment_methods=[],\n existing_payment_method_required=False,\n user_agent_info=UserAgentInfo('ua', {'BrowserName': 'ForbiddenBrowser', 'OSFamily': os}),\n ).run()\n\n assert forbidden_user_agent.get()[0][1] - prev_forbidden == 1\n assert forbidden_user_agent_os_family.labels(os).get()[0][1] - prev_forbidden_os_faimly == 1\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n 'settings_to_overwrite',\n [{'MERCHANT_ORIGIN_VALIDATION_IS_MANDATORY': True}],\n indirect=True,\n)\nasync def test_not_ready_to_pay__when_no_trust_payment_methods(\n mocker, payment_method, user\n):\n \"\"\"\n Временный тест на промежуточное поведение:\n пока просто проверяем, что список методов оплаты юзера не пуст.\n \"\"\"\n\n mocker.patch.object(\n TrustPaymentsClient,\n 'get_payment_methods',\n mocker.AsyncMock(\n return_value=[],\n ),\n )\n\n result = await ReadyToPayAction(\n user=user,\n merchant_id=MERCHANT_ID,\n merchant_origin=MERCHANT_ORIGIN,\n payment_methods=[payment_method],\n existing_payment_method_required=True,\n ).run()\n\n assert not result['is_ready_to_pay']\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('settings_to_overwrite', [{'MERCHANT_ORIGIN_VALIDATION_IS_MANDATORY': True}], indirect=True)\n@pytest.mark.parametrize('existing_method_required, expected_result', (\n (True, False),\n (False, True),\n))\nasync def test_ready_to_pay__when_no_user(\n mocker,\n payment_method,\n existing_method_required,\n expected_result,\n):\n trust_mock = mocker.patch.object(TrustPaymentsClient, 'get_payment_methods', mocker.AsyncMock())\n\n result = await ReadyToPayAction(\n user=None,\n merchant_id=MERCHANT_ID,\n merchant_origin=MERCHANT_ORIGIN,\n payment_methods=[payment_method],\n existing_payment_method_required=existing_method_required,\n 
).run()\n\n assert_that(result['is_ready_to_pay'], equal_to(expected_result))\n trust_mock.assert_not_called()\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('settings_to_overwrite', [{'MERCHANT_ORIGIN_VALIDATION_IS_MANDATORY': False}], indirect=True)\n@pytest.mark.parametrize('merchant_origin', [\n None,\n 'http://someshop.market.yandex.ru',\n 'https://someshop.market.yandex.ru',\n 'https://someshop.market.yandex.ru:443',\n])\nasync def test_merchant_origin_ignored(merchant_origin, user):\n result = await ReadyToPayAction(\n user=user,\n merchant_id=MERCHANT_ID,\n merchant_origin=merchant_origin,\n payment_methods=[],\n existing_payment_method_required=False,\n ).run()\n\n assert result['is_ready_to_pay']\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('settings_to_overwrite', [\n {'MERCHANT_ORIGIN_VALIDATION_IS_MANDATORY': True}\n], indirect=True)\n@pytest.mark.parametrize('merchant_origin', [\n None,\n 'https://someshop.market.yandex.ru',\n 'https://someshop.market.yandex.ru:443',\n])\nasync def test_merchant_origin_not_found_error(merchant_origin, user):\n \"\"\"\n Если передан какой-то ориджин, то валидация происходит всегда.\n \"\"\"\n with pytest.raises(CoreMerchantOriginNotFound):\n await ReadyToPayAction(\n user=user,\n merchant_id=MERCHANT_ID,\n merchant_origin=merchant_origin,\n payment_methods=[],\n existing_payment_method_required=False,\n ).run()\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('settings_to_overwrite', [\n {'MERCHANT_ORIGIN_VALIDATION_IS_MANDATORY': True},\n], indirect=True)\n@pytest.mark.parametrize('merchant_origin', [\n 'http://market.yandex.ru',\n 'ftp://market.yandex.ru',\n 'market.yandex.ru',\n 'market.yandex.ru:9000',\n])\nasync def test_merchant_origin_insecure_schema_error(merchant_origin, user):\n \"\"\"\n Если передан какой-то ориджин, но схема не HTTPS,\n то будет ошибка схемы в ориджине.\n \"\"\"\n with pytest.raises(CoreInsecureMerchantOriginSchemaError):\n await ReadyToPayAction(\n user=user,\n merchant_id=MERCHANT_ID,\n merchant_origin=merchant_origin,\n payment_methods=[],\n existing_payment_method_required=False,\n ).run()\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/unit/core/actions/test_ready_to_pay.py","file_name":"test_ready_to_pay.py","file_ext":"py","file_size_in_byte":12594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40065664883","text":"# author: David Gessner \n\nimport pytest\nfrom ft4fttsim.networking import Message\nfrom ft4fttsim.exceptions import FT4FTTSimException\nfrom unittest.mock import sentinel\n\n\nMINIMUM_ETHERNET_FRAME_SIZE = 64\nMAXIMUM_ETHERNET_FRAME_SIZE = 1518\n\n\n@pytest.mark.parametrize(\n \"size_in_bytes\",\n [\n # 64 is the minimum valid size in bytes\n MINIMUM_ETHERNET_FRAME_SIZE,\n # 1518 is the maximum valid size in bytes\n MAXIMUM_ETHERNET_FRAME_SIZE\n ] +\n # also test with a couple of values between the minimum and the\n # maximum size in bytes\n list(range(65, 1519, 404))\n)\ndef test_message_constructor_does_not_raise_exception(\n env, size_in_bytes):\n # Creating a message should not raise any exception or cause errors.\n Message(env, sentinel.dummy_source, sentinel.dummy_destination,\n size_in_bytes, sentinel.dummy_type)\n\n\ndef test_message_constructor_with_data_does_not_raise_exception(env):\n # Creating a message should not raise any exception or cause errors.\n Message(env, sentinel.dummy_source, sentinel.dummy_destination,\n 1111, sentinel.dummy_type, 
sentinel.dummy_data)\n\n\n@pytest.mark.parametrize(\n \"size_in_bytes\",\n [\n -1000, -1, -0.9, 0, 0.5,\n MINIMUM_ETHERNET_FRAME_SIZE - 1,\n MINIMUM_ETHERNET_FRAME_SIZE + 0.5,\n MAXIMUM_ETHERNET_FRAME_SIZE - 9.1,\n MAXIMUM_ETHERNET_FRAME_SIZE + 1,\n 10000\n ]\n)\ndef test_message_constructor_raises_exception(env, size_in_bytes):\n \"\"\"\n Test that creating a message with invalid size raises an exception.\n\n \"\"\"\n with pytest.raises(FT4FTTSimException):\n Message(env, sentinel.dummy_source, sentinel.dummy_destination,\n size_in_bytes, sentinel.dummy_type)\n\n\ndef test_message_created__returns_expected_destination(env):\n message = Message(env, sentinel.source, sentinel.destinations,\n 1234, sentinel.message_type)\n assert message.destination == sentinel.destinations\n\n\ndef test_message_created__returns_expected_source(env):\n message = Message(env, sentinel.source, sentinel.destinations,\n 1234, sentinel.message_type)\n assert message.source == sentinel.source\n\n\ndef test_messages_with_same_data_are_equal(env):\n message1 = Message(env, sentinel.source, sentinel.destinations,\n 1234, sentinel.message_type,\n sentinel.dummy_data)\n message2 = Message(env, sentinel.source, sentinel.destinations,\n 1234, sentinel.message_type,\n sentinel.dummy_data)\n assert message1 == message2\n\n\ndef test_messages_with_different_data_are_not_equal(env):\n message1 = Message(env, sentinel.source, sentinel.destinations,\n 1234, sentinel.message_type,\n sentinel.dummy_data)\n message2 = Message(env, sentinel.source, sentinel.destinations,\n 1234, sentinel.message_type,\n sentinel.different_dummy_data)\n assert message1 != message2\n\n\ndef test_creating_message_from_template_creates_equal_message(env):\n template_message = Message(\n env, sentinel.source, sentinel.destinations,\n 1234, sentinel.message_type,\n sentinel.dummy_data)\n new_message = Message.from_message(template_message)\n assert template_message == new_message\n","repo_name":"davitenio/ft4fttsim","sub_path":"ft4fttsim/tests/networking/unit/test_message.py","file_name":"test_message.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42749975604","text":"from sqlalchemy import Table, Column, Integer, String, DateTime\n\nfrom src.database.database_persistency import DBPersistency\n\nDB_PERSISTENCY = DBPersistency.get_instance()\n\nUSER_TABLE_NAME = \"users\"\nWAITING_NOTIFICATIONS_TABLE_NAME = \"waitingnotifications\"\n\nwaitingNotificationsTable = Table(\n WAITING_NOTIFICATIONS_TABLE_NAME,\n DB_PERSISTENCY.meta,\n Column(\"text\", String),\n Column(\"user_id\", Integer),\n)\n\nuserTable = Table(\n USER_TABLE_NAME,\n DB_PERSISTENCY.meta,\n Column(\"id\", Integer, primary_key=True),\n Column(\"alias\", String),\n Column(\"first_name\", String),\n Column(\"last_name\", String),\n Column(\"birthday\", DateTime),\n Column(\"messenger_id\", Integer),\n Column(\"song_name\", String),\n)\n","repo_name":"ukaserge/Jarvis","sub_path":"speechassistant/src/database/tables/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17273859852","text":"import sqlite3\nfrom sqlite3 import Error\n\n\n\n__doc__ = \"\"\"\nThis module contains the database and tables of the project.\n\"\"\"\nclass Database(object):\n def __init__(self):\n \n try:\n self.conn = sqlite3.connect('e-library.db')\n self.conn.row_factory = sqlite3.Row\n 
self.cursor = self.conn.cursor()\n self.cursor.execute('PRAGMA foreign_keys = ON')\n self.conn.commit()\n \n except Error as err:\n print(err)\n print('Failed to connect to the database')\n \n\n\n# table --> authors\nclass Authors(Database):\n def __init__(self):\n Database.__init__(self)\n self.cursor.execute('''CREATE TABLE IF NOT EXISTS authors (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT NOT NULL\n )''')\n self.conn.commit()\n \n\n# table --> genres\nclass Genres(Database):\n def __init__(self):\n Database.__init__(self)\n self.cursor.execute('''CREATE TABLE IF NOT EXISTS genres (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT NOT NULL \n )''')\n self.conn.commit()\n\n# table --> books\nclass Books(Database):\n def __init__(self):\n Database.__init__(self)\n self.cursor.execute('''CREATE TABLE IF NOT EXISTS books (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n title TEXT NOT NULL,\n year INTEGER,\n description TEXT,\n publisher TEXT,\n link TEXT NOT NULL,\n author_id INTEGER NOT NULL,\n genre_id INTEGER NOT NULL,\n FOREIGN KEY(author_id) REFERENCES authors(id) ON DELETE CASCADE,\n FOREIGN KEY(genre_id) REFERENCES genres(id) ON DELETE CASCADE\n )''')\n self.conn.commit()\n \n\n# table --> favorite_shelf\nclass Favorite_shelf(Database):\n def __init__(self):\n Database.__init__(self)\n self.cursor.execute('''CREATE TABLE IF NOT EXISTS favorite_shelf (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n book_id INTEGER,\n FOREIGN KEY(book_id) REFERENCES books(id) ON DELETE CASCADE)''')\n \n self.conn.commit() \n \n\nbooks = Books()\nauthors = Authors()\ngenres = Genres()\nfavorite = Favorite_shelf()","repo_name":"ViolinaS/MyLibrarian","sub_path":"ebook_sql_db.py","file_name":"ebook_sql_db.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43742983815","text":"from datetime import datetime\n\n\ndef p19():\n res = 0\n for y in range(1901, 2001):\n for m in range(1, 13):\n if datetime(y, m, 1).weekday() == 6:\n res += 1\n return res\n\n\nprint(p19())","repo_name":"halseyhutch/project_euler","sub_path":"p19.py","file_name":"p19.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5932846759","text":"#导入模块\nfrom logging import exception\nimport os\nimport time\nimport random\nimport string\nimport tkinter\nimport tkinter as tk\nimport requests\nimport math\nimport tkinter.messagebox\nimport sys\n#定义退出\ndef exit():\n\tprint(\"已退出\")\n\tos.system(\"cls\")\n\tmenu()\n#定义菜单\ndef menu():\n\tos.system(\"cls\")\n\tprint(\"你好,欢迎使用eb工具箱\")\n\ttime.sleep(1)\n\tgongneng = input(\"你好,欢迎使用eb工具箱,项目地址:https://github.com/hithereleon/easybox,好用的话点个star哦~\")\n\ttime.sleep(1)\n\tif gongneng == '1':\n\t\teb()\n\telif gongneng == '2':\n\t\tquestion()\n\telif gongneng == '3':\n\t\tsys.exit(0)\n\telse:\n\t\tmenu()\n#定义工具箱界面\ndef eb():\n\tos.system(\"cls\")\n\tprint(\"欢迎来到工具箱,请选择功能\")\n\ttool = input(\"1.两数相除求余数\\n2.强密码生成器\\n3.99乘法表\\n4.计算器\\n5.翻译器\\n6.退出\\n\")\n\tif tool == '1':\n\t\ttime.sleep(1)\n\t\tyushu()\n\telif tool == '2':\n\t\tpsw1()\n\telif tool == '3':\n\t\tnn()\n\telif tool == '4':\n\t\tcalculat()\n\telif tool == '5':\n\t\ttrans()\n\telif tool == '6':\n\t\tmenu()\n\telse:\n\t\teb()\n#定义题库\ndef question():\n\tos.system(\"cls\")\n\tprint(\"欢迎来到题库训练,请选择你要刷的题\")\n\ttopic = input(\"1.乘法练习\\n2.除法练习\\n3.乘方练习\\n4.退出\\n\")\n\tif topic == '1':\n\t\tmulti()\n\telif topic == '2':\n\t\tdivi()\n\telif topic == 
'3':\n\t\tpower()\n\telif topic == '4':\n\t\tmenu()\n\telse:\n\t\tquestion()\n#定义余数工具\ndef yushu():\n\tprint(\"答案显示格式(商,余数)\")\n\tfirst = int(input(\"请输入被除数\"))\n\tsecond = int(input(\"请输入除数\"))\n\tprint(divmod(first,second))\n\tos.system(\"pause\")\n\teb()\n#定义强密码\ndef password():\n\t\tpassword=string.ascii_letters+string.digits\n\t\tpasswdask = random.randint(8,16)\n\t\tkey = random.sample(password,passwdask)\n\t\tkeys = ''.join(key)\n\t\treturn keys\ndef psw1():\n\tnum = random.randint(4,10)\n\tpassword()\n\tprint(\"为您提供了\"+ str(num) +\"个密码,请查收\\n\")\n\tfor x in range(int(num)):\n\t\tprint(password())\n\tos.system(\"pause\")\n\teb()\n#99乘法表\ndef nn():\n\tfor i in range(1,10):\n\t\tfor j in range(1,i + 1):\n\t\t\t\t\tmul = i * j\n\t\t\t\t\tprint(\"%s * %s = %s\" %(i,j,mul),end=\" \")\n\t\tprint()\n\ttime.sleep(10)\n\teb()\n#定义乘法练习\ndef multi():\n\thard = input(\"请选择难度:\\na.简单\\nb.难\\n\")\n\tif hard == 'a':\n\t\ta = random.randint(10,40)\n\t\tb = random.randint(10,40)\n\telif hard == 'b':\n\t\ta = random.randint(100,400)\n\t\tb = random.randint(100,400)\n\telse:\n\t\tmulti()\n\tanswer = input(str(a)+\"x\"+str(b)+\"=?\")\n\tif answer == str(a*b):\n\t\tprint(\"你做对了\")\n\t\ttime.sleep(2)\n\telse:\n\t\tprint(\"你做错了\")\n\t\ttime.sleep(2)\n\tquestion()\n#定义除法练习\ndef divi():\n\thard = input(\"请选择难度:\\na.简单\\nb.难(除不尽的求余数)\")\n\tif hard == 'a':\n\t\ta = random.randint(10,40)\n\t\tb = random.randint(10,40)\n\telif hard == 'b':\n\t\ta = random.randint(100,400)\n\t\tb = random.randint(100,400)\n\telse:\n\t\tdivi()\n\tprint(\"题目为:\" + str(a) + \"÷\" + str(b))\n\tx = input(\"请输入商\")\n\ty = input(\"请输入余数,没有填0\")\n\tif x == str(a//b) and y == str(a%b):\n\t\tprint(\"你做对了\")\n\t\ttime.sleep(2)\n\telse:\n\t\tprint(\"你做错了\")\n\t\ttime.sleep(2)\n\tquestion()\n#定义乘方练习\ndef power():\n\thard = input(\"请选择难度:a.简单b.难\")\n\tif hard == 'a':\n\t\ta = random.randint(1,20)\n\t\tb = random.randint(2,10)\n\telif hard == 'b':\n\t\ta = random.randint(20,50)\n\t\tb = random.randint(10,30)\n\telse:\n\t\tpower()\n\tanswer = input(str(a)+\"^\"+str(b)+\"=?\")\n\tif answer == str(a**b):\n\t\tprint(\"你做对了\")\n\t\ttime.sleep(2)\n\telse:\n\t\tprint(\"你做错了\")\n\t\ttime.sleep(2)\n\tquestion()\n#定义计算器\ndef calculat():\n\tclass calculator:\n\t\t#界面布局方法\n\t\tdef __init__(self):\n\t\t\t#创建主界面,并且保存到成员属性中\n\t\t\tself.root = tkinter.Tk()\n\t\t\tself.root.minsize(280, 450)\n\t\t\tself.root.maxsize(280, 470)\n\t\t\tself.root.title('计算器1.0')\n\t\t\t# 设置显式面板的变量\n\t\t\tself.result = tkinter.StringVar()\n\t\t\tself.result.set(0)\n\t\t\t# 设置一个全局变量 运算数字和f符号的列表\n\t\t\tself.lists = []\n\t\t\t# 添加一个用于判断是否按下运算符号的标志\n\t\t\tself.ispresssign = False\n\t\t\t# 界面布局\n\t\t\tself.layout()\n\t\t\tself.root.mainloop()\n\n\n\t\t\n\n\t\t#计算器主界面摆放\n\t\tdef layout(self):\n\t\t\t# 显示屏\n\t\t\tresult = tkinter.StringVar()\n\t\t\tresult.set(0)\n\t\t\tshow_label = tkinter.Label(self.root, bd=3, bg='white', font=('宋体', 30), anchor='e', textvariable=self.result)\n\t\t\tshow_label.place(x=5, y=20, width=270, height=70)\n\t\t\t# 功能��钮MC\n\t\t\tbutton_mc = tkinter.Button(self.root, text='MC', command=self.wait)\n\t\t\tbutton_mc.place(x=5, y=95, width=50, height=50)\n\t\t\t# 功能按钮MR\n\t\t\tbutton_mr = tkinter.Button(self.root, text='MR', command=self.wait)\n\t\t\tbutton_mr.place(x=60, y=95, width=50, height=50)\n\t\t\t# 功能按钮MS\n\t\t\tbutton_ms = tkinter.Button(self.root, text='MS', command=self.wait)\n\t\t\tbutton_ms.place(x=115, y=95, width=50, height=50)\n\t\t\t# 功能按钮M+\n\t\t\tbutton_mjia = tkinter.Button(self.root, text='M+', 
command=self.wait)\n\t\t\tbutton_mjia.place(x=170, y=95, width=50, height=50)\n\t\t\t# 功能按钮M-\n\t\t\tbutton_mjian = tkinter.Button(self.root, text='M-', command=self.wait)\n\t\t\tbutton_mjian.place(x=225, y=95, width=50, height=50)\n\t\t\t# 功能按钮←\n\t\t\tbutton_zuo = tkinter.Button(self.root, text='←', command=self.dele_one)\n\t\t\tbutton_zuo.place(x=5, y=150, width=50, height=50)\n\t\t\t# 功能按钮CE\n\t\t\tbutton_ce = tkinter.Button(self.root, text='CE', command=lambda: self.result.set(0))\n\t\t\tbutton_ce.place(x=60, y=150, width=50, height=50)\n\t\t\t# 功能按钮C\n\t\t\tbutton_c = tkinter.Button(self.root, text='C', command=self.sweeppress)\n\t\t\tbutton_c.place(x=115, y=150, width=50, height=50)\n\t\t\t# 功能按钮±\n\t\t\tbutton_zf = tkinter.Button(self.root, text='±', command=self.zf)\n\t\t\tbutton_zf.place(x=170, y=150, width=50, height=50)\n\t\t\t# 功能按钮√\n\t\t\tbutton_kpf = tkinter.Button(self.root, text='√', command=self.kpf)\n\t\t\tbutton_kpf.place(x=225, y=150, width=50, height=50)\n\t\t\t# 数字按钮7\n\t\t\tbutton_7 = tkinter.Button(self.root, text='7', command=lambda: self.pressnum('7'))\n\t\t\tbutton_7.place(x=5, y=205, width=50, height=50)\n\t\t\t# 数字按钮8\n\t\t\tbutton_8 = tkinter.Button(self.root, text='8', command=lambda: self.pressnum('8'))\n\t\t\tbutton_8.place(x=60, y=205, width=50, height=50)\n\t\t\t# 数字按钮9\n\t\t\tbutton_9 = tkinter.Button(self.root, text='9', command=lambda: self.pressnum('9'))\n\t\t\tbutton_9.place(x=115, y=205, width=50, height=50)\n\t\t\t# 功能按钮/\n\t\t\tbutton_division = tkinter.Button(self.root, text='/', command=lambda: self.presscalculate('/'))\n\t\t\tbutton_division.place(x=170, y=205, width=50, height=50)\n\t\t\t# 功能按钮%\n\t\t\tbutton_remainder = tkinter.Button(self.root, text='//', command=lambda:self.presscalculate('//'))\n\t\t\tbutton_remainder.place(x=225, y=205, width=50, height=50)\n\t\t\t# 数字按钮4\n\t\t\tbutton_4 = tkinter.Button(self.root, text='4', command=lambda: self.pressnum('4'))\n\t\t\tbutton_4.place(x=5, y=260, width=50, height=50)\n\t\t\t# 数字按钮5\n\t\t\tbutton_5 = tkinter.Button(self.root, text='5', command=lambda: self.pressnum('5'))\n\t\t\tbutton_5.place(x=60, y=260, width=50, height=50)\n\t\t\t# 数字按钮6\n\t\t\tbutton_6 = tkinter.Button(self.root, text='6', command=lambda: self.pressnum('6'))\n\t\t\tbutton_6.place(x=115, y=260, width=50, height=50)\n\t\t\t# 功能按钮*\n\t\t\tbutton_multiplication = tkinter.Button(self.root, text='*', command=lambda: self.presscalculate('*'))\n\t\t\tbutton_multiplication.place(x=170, y=260, width=50, height=50)\n\t\t\t# 功能按钮1/x\n\t\t\tbutton_reciprocal = tkinter.Button(self.root, text='1/x', command=self.ds)\n\t\t\tbutton_reciprocal.place(x=225, y=260, width=50, height=50)\n\t\t\t# 数字按钮1\n\t\t\tbutton_1 = tkinter.Button(self.root, text='1', command=lambda: self.pressnum('1'))\n\t\t\tbutton_1.place(x=5, y=315, width=50, height=50)\n\t\t\t# 数字按钮2\n\t\t\tbutton_2 = tkinter.Button(self.root, text='2', command=lambda: self.pressnum('2'))\n\t\t\tbutton_2.place(x=60, y=315, width=50, height=50)\n\t\t\t# 数字按钮3\n\t\t\tbutton_3 = tkinter.Button(self.root, text='3', command=lambda: self.pressnum('3'))\n\t\t\tbutton_3.place(x=115, y=315, width=50, height=50)\n\t\t\t# 功能按钮-\n\t\t\tbutton_subtraction = tkinter.Button(self.root, text='-', command=lambda: self.presscalculate('-'))\n\t\t\tbutton_subtraction.place(x=170, y=315, width=50, height=50)\n\t\t\t# 功能按钮=\n\t\t\tbutton_equal = tkinter.Button(self.root, text='=', command=lambda: self.pressequal())\n\t\t\tbutton_equal.place(x=225, y=315, width=50, height=105)\n\t\t\t# 
数字按钮0\n\t\t\tbutton_0 = tkinter.Button(self.root, text='0', command=lambda: self.pressnum('0'))\n\t\t\tbutton_0.place(x=5, y=370, width=105, height=50)\n\t\t\t# 功能按钮.\n\t\t\tbutton_point = tkinter.Button(self.root, text='.', command=lambda: self.pressnum('.'))\n\t\t\tbutton_point.place(x=115, y=370, width=50, height=50)\n\t\t\t# 功能按钮+\n\t\t\tbutton_plus = tkinter.Button(self.root, text='+', command=lambda: self.presscalculate('+'))\n\t\t\tbutton_plus.place(x=170, y=370, width=50, height=50)\n\n\n\t\t#计算器菜单功能\n\t\tdef myfunc(self):\n\t\t\ttkinter.messagebox.showinfo('','请期待2.0')\n\n\n\t\t#数字方法\n\t\tdef pressnum(self,num):\n\t\t\t# 全局化变量\n\t\t\t# 判断是否按下了运算符号\n\t\t\tif self.ispresssign == False:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tself.result.set(0)\n\t\t\t\t# 重置运算符号的状��\n\t\t\t\tself.ispresssign = False\n\t\t\tif num == '.':\n\t\t\t\tnum = '0.'\n\t\t\t# 获取面板中的原有数字\n\t\t\toldnum = self.result.get()\n\t\t\t# 判断界面数字是否为0\n\t\t\tif oldnum == '0':\n\t\t\t\tself.result.set(num)\n\t\t\telse:\n\t\t\t\t# 连接上新按下的数字\n\t\t\t\tnewnum = oldnum + num\n\n\t\t\t\t# 将按下的数字写到面板中\n\t\t\t\tself.result.set(newnum)\n\n\n\t\t#运算函数\n\t\tdef presscalculate(self,sign):\n\t\t\t# 保存已经按下的数字和运算符号\n\t\t\t# 获取界面数字\n\t\t\tnum = self.result.get()\n\t\t\tself.lists.append(num)\n\t\t\t# 保存按下的操作符号\n\t\t\tself.lists.append(sign)\n\t\t\t# 设置运算符号为按下状态\n\t\t\tself.ispresssign = True\n\n\n\t\t#获取运算结果\n\t\tdef pressequal(self):\n\t\t\t# 获取所有的列表中的内容(之前的数字和操作)\n\t\t\t# 获取当前界面上的数字\n\t\t\tcurnum = self.result.get()\n\t\t\t# 将当前界面的数字存入列表\n\t\t\tself.lists.append(curnum)\n\t\t\t# 将列表转化为字符串\n\t\t\tcalculatestr = ''.join(self.lists)\n\t\t\t# 使用eval执行字符串中的运算即可\n\t\t\tendnum = eval(calculatestr)\n\t\t\t# 将运算结果显示在界面中\n\t\t\tself.result.set(str(endnum)[:10])\n\t\t\tif self.lists != 0:\n\t\t\t\tself.ispresssign = True\n\t\t\t# 清空运算列表\n\t\t\tself.lists.clear()\n\n\n\t\t#暂未开发说明\n\t\tdef wait(self):\n\t\t\ttkinter.messagebox.showinfo('','功能在努力的实现,请期待2.0版本的更新')\n\n\n\t\t#←按键功能\n\t\tdef dele_one(self):\n\t\t\tif self.result.get() == '' or self.result.get() == '0':\n\t\t\t\tself.result.set('0')\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tnum = len(self.result.get())\n\t\t\t\tif num > 1:\n\t\t\t\t\tstrnum = self.result.get()\n\t\t\t\t\tstrnum = strnum[0:num - 1]\n\t\t\t\t\tself.result.set(strnum)\n\t\t\t\telse:\n\t\t\t\t\tself.result.set('0')\n\n\n\t\t#±按键功能\n\t\tdef zf(self):\n\t\t\tstrnum = self.result.get()\n\t\t\tif strnum[0] == '-':\n\t\t\t\tself.result.set(strnum[1:])\n\t\t\telif strnum[0] != '-' and strnum != '0':\n\t\t\t\tself.result.set('-' + strnum)\n\n\n\t\t#1/x按键功能\n\t\tdef ds(self):\n\t\t\tdsnum = 1 / float(self.result.get())\n\t\t\tself.result.set(str(dsnum)[:10])\n\t\t\tif self.lists != 0:\n\t\t\t\tself.ispresssign = True\n\t\t\t# 清空运算列表\n\t\t\tself.lists.clear()\n\n\n\t\t#C按键功能\n\t\tdef sweeppress(self):\n\t\t\tself.lists.clear()\n\t\t\tself.result.set(0)\n\n\n\t\t#√按键功能\n\t\tdef kpf(self):\n\t\t\tstrnum = float(self.result.get())\n\t\t\tendnum = math.sqrt(strnum)\n\t\t\tif str(endnum)[-1] == '0':\n\t\t\t\tself.result.set(str(endnum)[:-2])\n\t\t\telse:\n\t\t\t\tself.result.set(str(endnum)[:10])\n\t\t\tif self.lists != 0:\n\t\t\t\tself.ispresssign = True\n\t\t\t# 清空运算列表\n\t\t\tself.lists.clear()\n\n\n\t#实例化对象\n\tmycalculator = calculator()\n\teb()\n#定义翻译器\ndef trans():\n\twindow=tk.Tk()\n\twindow.title(\"翻译器(如遇报错请忽视,不会影响程序正常运行)\")\n\twindow.geometry(\"497x150+500+500\")\n\n\tl=tk.Label(window,text=\"请输入要翻译的内容:\",font=\"微软雅黑 11\",height=2)\n\tl.grid()\n\tl1=tk.Label(window,text=\"这就是为你翻译的啦:\",font=\"微软雅黑 
11\",height=2)\n\tl1.grid()\n\n\tvar=tk.StringVar()\n\n\te=tk.Entry(window,width=32)\n\te.grid(row=0,column=1)\n\te1=tk.Entry(window,textvariable=var,width=32)\n\te1.grid(row=1,column=1)\n\n\n\n\tdef click():\n\t\tcontent=e.get()\n\t\tdata={\n\t\t\t\"i\": content,\n\t\t\t\"from\": \"AUTO\",\n\t\t\t\"to\": \"AUTO\",\n\t\t\t\"smartresult\": \"dict\",\n\t\t\t\"client\": \"fanyideskweb\",\n\t\t\t\"doctype\": \"json\",\n\t\t\t\"version\": \"2.1\",\n\t\t\t\"keyfrom\": \"fanyi.web\",\n\t\t\t\"action\": \"FY_BY_REALTIME\",\n\t\t\t\"typoResult\": \"false\"\n\t\t}\n\t\ttry:\n\t\t\tresponse=requests.post(\"http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule\",data=data).json()\n\t\texcept Exception:\n\t\t\treturn\n\t\t\n\t\t# print(response)\n\t\t# print(type(response))\n\t\tbb=response[\"translateResult\"][0][0][\"tgt\"]\n\t\t# print(bb)\n\t\t# print(type(bb))\n\t\tvar.set(bb)\n\ttry:\n\t\tb=tk.Button(window,text=\"点击翻译\",command=click,width=10,font=\"微软雅黑 12\")\n\texcept exception:\n\t\treturn\n\tb.grid(columnspan=2)\n\n\twindow.mainloop()\n\teb()\n\n\n#正式编写\nmenu()\n","repo_name":"hithereleon/easybox","sub_path":"easybox.py","file_name":"easybox.py","file_ext":"py","file_size_in_byte":12966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71270945682","text":"import os\nfrom netCDF4 import Dataset\nimport numpy as np\n\ndef find_long_term_average_temperature(data_path,maxDepth):\n\t'''\n\tFinds long-term average of sea temperature. \n\n\tInputs:\n\tmaxDepth = maximum depth to average over\n\tdata_path = path to folder where data is stored\n\n\tOutputs:\n\tmean_temp = long-term average of temperature\n\n\t'''\n\n\t#Change directory\n\tos.chdir(data_path)\n\n\t#Save file names into list\n\tfile_dir = os.listdir()\n\tfile_dir = [i for i in file_dir if 'compressed_' in i]\n\n\t#Calculate average temperature and salinity\n\tmean_temp = []\n\tfor i,file in enumerate(file_dir):\n\t\tdata_file = Dataset(file)\n\t\tdepth_file = data_file.variables['st_ocean'][:]\n\t\tboolDepth = depth_file <= maxDepth\n\t\ttemp_file = data_file.variables['temp'][:]\n\t\ttemp_file = temp_file[:,boolDepth,:,:].mean(axis = 1)\n\t\tmean_temp.append(temp_file)\n\tmean_temp = np.array(mean_temp)\n\tmean_temp = mean_temp.mean(axis = 0)\n\tmean_temp = np.ma.masked_values(mean_temp, mean_temp.min())\n\n\treturn mean_temp\n\ndef find_long_term_average_salinity(data_path,maxDepth):\n\n\t'''\n\tFinds long-term average of sea salinity. 
\n\n\tInputs:\n\tmaxDepth = maximum depth to average over\n\tdata_path = path to folder where data is stored\n\n\tOutputs:\n\tmean_temp = long-term average of salinity \n\n\t'''\n\n\t#Change directory\n\tos.chdir(data_path)\n\n\t#Save file names into list\n\tfile_dir = os.listdir()\n\tfile_dir = [i for i in file_dir if 'compressed_' in i]\n\n\t#Calculate average temperature and salinity\n\tmean_salt = []\n\tfor i,file in enumerate(file_dir):\n\t\tdata_file = Dataset(file)\n\t\tdepth_file = data_file.variables['st_ocean'][:]\n\t\tboolDepth = depth_file <= maxDepth\n\t\tsalt_file = data_file.variables['salt'][:]\n\t\tsalt_file = salt_file[:,boolDepth,:,:].mean(axis = 1)\n\t\tmean_salt.append(salt_file)\n\tmean_salt = np.array(mean_salt)\n\tmean_salt = mean_salt.mean(axis = 0)\n\tmean_salt = np.ma.masked_values(mean_salt, mean_salt.min())\n\n\treturn mean_salt\n\n\n\n","repo_name":"AKannad/BRIDGEUP_ClimateCoders","sub_path":"scripts/sst_anomaly_functions.py","file_name":"sst_anomaly_functions.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8115561104","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 3 December 2018\n@author: Charlie Lewis\n\"\"\"\nimport hashlib\nimport json\nimport time\n\nfrom transitions import Machine\n\nMACHINE_IP_FIELDS = {\n 'ipv4': ('ipv4_rdns', 'ipv4_subnet'),\n 'ipv6': ('ipv6_rdns', 'ipv6_subnet')}\nMACHINE_IP_PREFIXES = {\n 'ipv4': 24, 'ipv6': 64}\n\n\nclass HistoryTypes():\n STATE_CHANGE = 'State Change'\n ACL_CHANGE = 'ACL Change'\n PROPERTY_CHANGE = 'Property Change'\n COPRO_CHANGE = 'Coprocessor Change'\n\n\nclass Endpoint:\n\n states = ['known', 'unknown', 'mirroring', 'inactive', 'abnormal',\n 'shutdown', 'reinvestigating', 'queued']\n\n transitions = [\n {'trigger': 'mirror', 'source': 'unknown',\n 'dest': 'mirroring', 'before': 'update_state_history'},\n {'trigger': 'queue', 'source': 'unknown',\n 'dest': 'queued', 'before': 'update_state_history'},\n {'trigger': 'reinvestigate', 'source': 'known',\n 'dest': 'reinvestigating', 'before': 'update_state_history'},\n {'trigger': 'queue', 'source': 'known',\n 'dest': 'queued', 'before': 'update_state_history'},\n {'trigger': 'shutdown', 'source': 'abnormal',\n 'dest': 'shutdown', 'before': 'update_state_history'},\n {'trigger': 'reinvestigate', 'source': 'abnormal',\n 'dest': 'reinvestigating', 'before': 'update_state_history'},\n {'trigger': 'queue', 'source': 'abnormal',\n 'dest': 'queued', 'before': 'update_state_history'},\n {'trigger': 'mirror', 'source': 'queued',\n 'dest': 'mirroring', 'before': 'update_state_history'},\n {'trigger': 'reinvestigate', 'source': 'queued',\n 'dest': 'reinvestigating', 'before': 'update_state_history'},\n # check all states and put into known/unknown/abnormal/inactive to account for external updates\n {'trigger': 'known', 'source': 'known',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'known',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'known',\n 'dest': 'abnormal', 'before': 'update_state_history'},\n {'trigger': 'inactive', 'source': 'known',\n 'dest': 'inactive', 'before': 'update_state_history'},\n {'trigger': 'known', 'source': 'unknown',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'unknown',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'unknown',\n 'dest': 'abnormal', 'before': 
'update_state_history'},\n {'trigger': 'inactive', 'source': 'unknown',\n 'dest': 'inactive', 'before': 'update_state_history'},\n {'trigger': 'known', 'source': 'mirroring',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'mirroring',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'mirroring',\n 'dest': 'abnormal', 'before': 'update_state_history'},\n {'trigger': 'inactive', 'source': 'mirroring',\n 'dest': 'inactive', 'before': 'update_state_history'},\n {'trigger': 'known', 'source': 'inactive',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'inactive',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'inactive',\n 'dest': 'abnormal', 'before': 'update_state_history'},\n {'trigger': 'inactive', 'source': 'inactive',\n 'dest': 'inactive', 'before': 'update_state_history'},\n {'trigger': 'known', 'source': 'abnormal',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'abnormal',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'abnormal',\n 'dest': 'abnormal', 'before': 'update_state_history'},\n {'trigger': 'inactive', 'source': 'abnormal',\n 'dest': 'inactive', 'before': 'update_state_history'},\n {'trigger': 'known', 'source': 'shutdown',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'shutdown',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'shutdown',\n 'dest': 'abnormal', 'before': 'update_state_history'},\n {'trigger': 'inactive', 'source': 'shutdown',\n 'dest': 'inactive', 'before': 'update_state_history'},\n {'trigger': 'known', 'source': 'reinvestigating',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'reinvestigating',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'reinvestigating',\n 'dest': 'abnormal', 'before': 'update_state_history'},\n {'trigger': 'inactive', 'source': 'reinvestigating',\n 'dest': 'inactive', 'before': 'update_state_history'},\n {'trigger': 'known', 'source': 'queued',\n 'dest': 'known', 'before': 'update_state_history'},\n {'trigger': 'unknown', 'source': 'queued',\n 'dest': 'unknown', 'before': 'update_state_history'},\n {'trigger': 'abnormal', 'source': 'queued',\n 'dest': 'abnormal', 'before': 'update_state_history'},\n {'trigger': 'inactive', 'source': 'queued',\n 'dest': 'inactive', 'before': 'update_state_history'}\n ]\n\n copro_states = ['copro_unknown', 'copro_coprocessing',\n 'copro_nominal', 'copro_suspicious', 'copro_queued']\n\n copro_transitions = [\n {'trigger': 'copro_coprocess', 'source': 'copro_unknown',\n 'dest': 'copro_coprocessing', 'before': 'update_copro_history'},\n {'trigger': 'copro_queue', 'source': 'copro_unknown',\n 'dest': 'copro_queued', 'before': 'update_copro_history'},\n {'trigger': 'copro_coprocess', 'source': 'copro_queued',\n 'dest': 'copro_coprocessing', 'before': 'update_copro_history'},\n {'trigger': 'copro_nominal', 'source': 'copro_coprocessing',\n 'dest': 'copro_nominal', 'before': 'update_copro_history'},\n {'trigger': 'copro_suspicious', 'source': 'copro_coprocessing',\n 'dest': 'copro_suspicious', 'before': 'update_copro_history'},\n {'trigger': 'copro_queue', 'source': 'copro_nominal',\n 'dest': 'copro_queued', 'before': 'update_copro_history'},\n {'trigger': 'copro_coprocess', 'source': 
'copro_nominal',\n 'dest': 'copro_coprocessing', 'before': 'update_copro_history'},\n {'trigger': 'copro_queue', 'source': 'copro_suspicious',\n 'dest': 'copro_queued', 'before': 'update_copro_history'},\n {'trigger': 'copro_coprocess', 'source': 'copro_suspicious',\n 'dest': 'copro_coprocessing', 'before': 'update_copro_history'},\n\n ]\n\n def __init__(self, hashed_val):\n self.name = hashed_val.strip()\n self.ignore = False\n self.copro_ignores = False\n self.endpoint_data = None\n self.p_next_state = None\n self.p_prev_states = []\n self.p_next_copro_state = None\n self.p_prev_copross_states = []\n self.acl_data = []\n self.metadata = {}\n self.history = []\n self.state = None\n self.copro_state = None\n\n def encode(self):\n endpoint_d = {\n 'name': self.name,\n 'state': self.state,\n 'copro_state': self.copro_state,\n 'ignore': self.ignore,\n 'endpoint_data': self.endpoint_data,\n 'p_next_state': self.p_next_state,\n 'p_prev_states': self.p_prev_states,\n 'acl_data': self.acl_data,\n 'metadata': self.metadata,\n 'history': self.history,\n }\n return str(json.dumps(endpoint_d))\n\n def _add_history_entry(self, entry_type, timestamp, message):\n self.history.append(\n {'type': entry_type, 'timestamp': timestamp, 'message': message})\n\n def update_copro_history(self, event_data):\n self._add_history_entry(\n HistoryTypes.COPRO_CHANGE, time.time(),\n 'Coprocessing state changed from {0} to {1}'.format(event_data.transition.source, event_data.transition.dest))\n\n def update_acl_history(self, event_data, added_acls, removed_acls):\n message = ''\n if added_acls and len(added_acls) > 0:\n message += 'Added the following ACLs: ' + \\\n ', '.join(added_acls) + '\\r\\n'\n if len(message) > 0:\n message += 'and r'\n if removed_acls and len(removed_acls) > 0:\n message += 'R' if len(message) == 0 else ''\n message += 'emoved the following ACLs:' + ', '.join(removed_acls)\n\n self._add_history_entry(HistoryTypes.ACL_CHANGE, time.time(),\n 'State changed from {0} to {1}'.format(event_data.transition.source, event_data.transition.dest))\n\n def update_property_history(self, entry_type, timestamp, field_name, old_value, new_value):\n self._add_history_entry(entry_type, timestamp,\n 'Property {0} changed from {1} to {2}'.format(field_name, old_value, new_value))\n\n def update_state_history(self, event_data):\n self._add_history_entry(\n HistoryTypes.STATE_CHANGE, time.time(),\n 'State changed from {0} to {1}'.format(event_data.transition.source, event_data.transition.dest))\n\n @staticmethod\n def make_hash(machine, trunk=False):\n ''' hash the unique metadata parts of an endpoint '''\n h = hashlib.new('ripemd160')\n words = ['tenant', 'mac', 'segment']\n if trunk:\n words.append('ipv4')\n words.append('ipv6')\n pre_h = ''.join([str(machine.get(word, 'missing')) for word in words])\n h.update(pre_h.encode('utf-8'))\n post_h = h.hexdigest()\n return post_h\n\n\ndef endpoint_factory(hashed_val):\n endpoint = Endpoint(hashed_val)\n machine = Machine(\n model=endpoint,\n states=Endpoint.states,\n transitions=Endpoint.transitions,\n initial='unknown',\n send_event=True)\n machine.name = endpoint.name[:8]+' '\n endpoint.machine = machine\n copro_endpoint = Endpoint(hashed_val)\n copro_machine = Machine(\n model=copro_endpoint,\n states=Endpoint.copro_states,\n transitions=Endpoint.copro_transitions,\n initial='copro_unknown',\n send_event=True)\n copro_machine.name = endpoint.name[:8]+'_copro'\n endpoint.copro_machine = copro_machine\n return endpoint\n\n\nclass EndpointDecoder:\n\n def __init__(self, 
endpoint):\n e = json.loads(endpoint)\n self.endpoint = endpoint_factory(e['name'])\n self.endpoint.state = e['state']\n self.endpoint.copro_state = e['copro_state']\n if 'ignore' in e:\n if e['ignore']:\n self.endpoint.ignore = True\n else:\n self.endpoint.ignore = False\n else:\n self.endpoint.ignore = False\n if 'metadata' in e:\n self.endpoint.metadata = e['metadata']\n else:\n self.endpoint.metadata = {}\n if 'history' in e:\n self.endpoint.history = e['history']\n else:\n self.endpoint.history = []\n if 'acl_data' in e:\n self.endpoint.acl_data = e['acl_data']\n else:\n self.endpoint.acl_data = []\n self.endpoint.endpoint_data = e['endpoint_data']\n self.endpoint.p_next_state = e['p_next_state']\n self.endpoint.p_prev_states = e['p_prev_states']\n\n def get_endpoint(self):\n return self.endpoint\n","repo_name":"swipswaps/poseidon","sub_path":"poseidon/helpers/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":11901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"4442320441","text":"from web3 import Web3\n\n\nganache_url = \"http://127.0.0.1:7545\"\nweb3 = Web3(Web3.HTTPProvider(ganache_url))\n\nprint(web3.isConnected())\nprint(web3.eth.blockNumber)\n\naccount_1 = \"0xc93E1E4F77Eb503BDFA75a2E4dE24840E8e8089E\"\naccount_2 = \"0x103568c836F8F4c3272D1fB2b99Afc8e507F9503\"\n\n# signing transactions / authorizing transactions\nprivate_key = \"1b928ed134a04f5f0b2b3308b621f43033702b891973ac18255886d6f79686cf\"\n\n# get the nonce, numbed added to a hashed block, number miners are solvng for\n\nnonce = web3.eth.getTransactionCount(account_1)\n\n# build transaction\ntx = {\n 'nonce': nonce, # prevents you from sending transaction twice on etherium\n 'to': account_2,\n 'value': web3.toWei(1, 'ether'),\n 'gas': 200000, # units of gas, the limit, not in etherium, think of gallons of gas\n 'gasPrice': web3.toWei('50', 'gwei')\n}\n\n# sign transaction\n\nsigned_tx = web3.eth.account.signTransaction(tx, private_key)\ntx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction)\nprint(web3.toHex(tx_hash))\n\n# send transaction\n\n# get transaction hash\n","repo_name":"Mike-Rossi/web3_examples","sub_path":"Ex's/3_sending_transactions.py","file_name":"3_sending_transactions.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29255375332","text":"#write function to perform binary search of number from a list\n\ndef binary_search(new_list,number):\n\n #base case to terminate recursion\n if len(new_list) == 0:\n return False\n\n else:\n midpoint = (len(new_list))//2\n\n #base case to terminate recursion\n if new_list[midpoint] == number:\n return True\n\n #recursive case 1\n elif new_list[midpoint] DFT sample mapping not converging\n# (test9b/9c).\n#\n# This is another attempt using regular DSP to extract n0, then\n# fitting the dispersive model after the n0 component of phase has\n# been removed.\n\n# Combine test8 and and test9c:\n# + excite a 2nd order system with a impulse train\n# + pitch (Wo), pulse onset time (n0), 2nd order system parameters\n# (alpha and gamma) random\n# + estimate and extract n0 component of phase using regular DSP\n# + Estimate dispersive part using amplitude spectra\n# + Add n0 component back again\n\nimport numpy as np\nimport sys\nfrom keras.layers import Input, Dense, Concatenate\nfrom keras import models,layers\nfrom keras import initializers\nimport matplotlib.pyplot as plt\nfrom scipy import 
signal\nfrom keras import backend as K\n# less verbose tensorflow ....\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# custom loss function\ndef sparse_loss(y_true, y_pred):\n mask = K.cast( K.not_equal(y_pred, 0), dtype='float32')\n n = K.sum(mask)\n return K.sum(K.square((y_pred - y_true)*mask))/n\n\n# testing custom loss function\nx = Input(shape=(None,))\ny = Input(shape=(None,))\nloss_func = K.Function([x, y], [sparse_loss(x, y)])\nassert loss_func([[[1,1,1]], [[0,2,0]]]) == np.array([1])\nassert loss_func([[[0,1,0]], [[0,2,0]]]) == np.array([1])\n\n# constants\n\nN = 80 # number of time domain samples in frame\nnb_samples = 100000\nnb_batch = 32\nnb_epochs = 100\nwidth = 256\npairs = 2*width\nfo_min = 50\nfo_max = 400\nFs = 8000\n\n# Generate training data.\n\nprint(\"Generate training data\")\n\n# amplitude and phase at rate L\namp = np.zeros((nb_samples, width))\nphase = np.zeros((nb_samples, width))\nphase_disp = np.zeros((nb_samples, width))\n\n# rate \"width\" phase encoded as cos,sin pairs:\nphase_rect = np.zeros((nb_samples, pairs))\namp_ = np.zeros((nb_samples, width))\n\n# side information\nWo = np.zeros(nb_samples)\nL = np.zeros(nb_samples, dtype=int)\nn0 = np.zeros(nb_samples, dtype=int)\nvoiced = np.zeros(nb_samples, dtype=int)\n\nfor i in range(nb_samples):\n\n # distribute fo randomly on a log scale, gives us more training\n # data with low freq frames which have more harmonics and are\n # harder to match\n r = np.random.rand(1)\n log_fo = np.log10(fo_min) + (np.log10(fo_max)-np.log10(fo_min))*r[0]\n fo = fo_min\n fo = 10 ** log_fo\n Wo[i] = fo*2*np.pi/Fs\n L[i] = int(np.floor(np.pi/Wo[i]))\n # pitch period in samples\n P = 2*L[i]\n \n r = np.random.rand(1)\n voiced[i] = r[0] > 0.5\n \n # sample 2nd order IIR filter with random peak freq\n\n r1 = np.random.rand(2)\n r2 = np.random.rand(2)\n if voiced[i]:\n # choose alpha and gamma to get something like voiced speech\n alpha1 = 0.05*np.pi + 0.25*np.pi*r1[0]\n gamma1 = 0.9 + 0.09*r1[1]\n alpha2 = alpha1 + 0.4*np.pi*r2[0]\n gamma2 = 0.9 + 0.05*r2[1]\n else:\n alpha1 = 0.5*np.pi + 0.4*np.pi*r1[0]\n gamma1 = 0.8 + 0.1*r1[1]\n alpha2 = 0.5*np.pi + 0.4*np.pi*r2[0]\n gamma2 = 0.8 + 0.1*r2[1]\n \n w1,h1 = signal.freqz(1, [1, -2*gamma1*np.cos(alpha1), gamma1*gamma1], range(1,L[i]+1)*Wo[i])\n w2,h2 = signal.freqz(1, [1, -2*gamma2*np.cos(alpha2), gamma2*gamma2], range(1,L[i]+1)*Wo[i])\n \n # select n0 between 0...P-1 (it's periodic)\n r = np.random.rand(1) \n n0[i] = r[0]*P\n e = np.exp(-1j*n0[i]*range(1,L[i]+1)*Wo[i])\n\n for m in range(1,L[i]+1):\n amp[i,m] = np.log10(np.abs(h1[m-1]*h2[m-1]))\n if voiced[i]:\n phase[i,m] = np.angle(h1[m-1]*h2[m-1]*e[m-1])\n phase_disp[i,m] = np.angle(h1[m-1]*h2[m-1])\n else:\n r = np.random.rand(1) \n phase[i,m] = r[0]*2*np.pi\n phase_disp[i,m] = phase[i,m]\n \n\n# use regular DSP to estimate n0, and remove effect of linear phase\n\nprint(\"estimate and remove n0\")\nn0_est = np.zeros((nb_samples))\nphase_n0_removed = np.zeros((nb_samples, width))\nfor i in range(nb_samples):\n err_min = 1E32\n P = 2*L[i]\n '''\n for test_n0 in np.arange(0,P,0.25):\n e = np.exp(-1j*test_n0*np.arange(1,L[i]+1)*Wo[i])\n err = np.dot(10**amp[i,1:L[i]+1], np.abs(np.exp(1j*phase[i,1:L[i]+1]) - e)**2)\n if err < err_min:\n err_min = err\n n0_est[i] = test_n0\n print(n0[i], n0_est[i])\n '''\n r = np.random.rand(1)\n n0_est[i] = n0[i] + 2*r - 1\n \n # remove n0_est and set up rect training data \n for m in range(1,L[i]+1):\n phase_n0_removed[i,m] = np.angle(np.exp(1j*phase[i,m]) * 
np.conj(np.exp(-1j*n0_est[i]*Wo[i]*m)))\n bin = int(np.round(m*Wo[i]*width/np.pi)); bin = min(width-1, bin)\n phase_rect[i,2*bin] = np.cos(phase_n0_removed[i,m])\n phase_rect[i,2*bin+1] = np.sin(phase_n0_removed[i,m])\n amp_[i,bin] = amp[i,m]\n \nmodel = models.Sequential()\nmodel.add(layers.Dense(pairs, activation='relu', input_dim=width))\nmodel.add(layers.Dense(4*pairs, activation='relu'))\nmodel.add(layers.Dense(pairs))\nmodel.summary()\n\nfrom keras import optimizers\nsgd = optimizers.SGD(lr=0.8, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss=sparse_loss, optimizer=sgd)\nhistory = model.fit(amp_, phase_rect, batch_size=nb_batch, epochs=nb_epochs)\n\n# measure error in angle over all samples\n\nphase_rect_est = model.predict(amp_)\nphase_est = np.zeros((nb_samples, width))\nused_bins = np.zeros((nb_samples, width), dtype=int)\nfor i in range(nb_samples):\n for m in range(1,L[i]+1):\n bin = int(np.round(m*Wo[i]*width/np.pi)); bin = min(width-1, bin)\n phase_est[i,m] = np.angle(phase_rect_est[i,2*bin] + 1j*phase_rect_est[i,2*bin+1])\n used_bins[i,m] = 1\n \nind = np.nonzero(used_bins)\nc1 = np.exp(1j*phase_disp[ind]); c2 = np.exp(1j*phase_est[ind]);\nerr_angle = np.angle(c1 * np.conj(c2)) \nvar = np.var(err_angle)\nstd = np.std(err_angle)\nprint(\"angle var: %4.2f std: %4.2f rads\" % (var,std))\nprint(\"angle var: %4.2f std: %4.2f degs\" % ((std*180/np.pi)**2,std*180/np.pi))\n\nplot_en = 1;\nif plot_en:\n plt.figure(1)\n plt.plot(history.history['loss'])\n plt.title('model loss')\n plt.xlabel('epoch')\n plt.show(block=False)\n \n plt.figure(2)\n plt.subplot(211)\n plt.hist(err_angle*180/np.pi, bins=20)\n plt.title('phase angle error (deg) and fo (Hz)')\n plt.subplot(212)\n plt.hist(Wo*(Fs/2)/np.pi, bins=20)\n plt.show(block=False)\n\n plt.figure(3)\n plt.title('filter amplitudes')\n for r in range(12):\n plt.subplot(3,4,r+1)\n plt.plot(amp[r,:L[r]],'g')\n plt.show(block=False)\n\n plt.figure(4)\n plt.title('sample vectors and error')\n for r in range(12):\n plt.subplot(3,4,r+1)\n plt.plot(phase_disp[r,:L[r]]*180/np.pi,'g')\n if voiced[r]:\n plt.plot(phase_n0_removed[r,:L[r]]*180/np.pi,'r')\n plt.plot(phase_est[r,:L[r]]*180/np.pi,'b')\n plt.ylim(-180,180)\n plt.show(block=False)\n \n # click on last figure to close all and finish\n plt.waitforbuttonpress(0)\n plt.close()\n","repo_name":"drowe67/phasenn","sub_path":"phasenn_test11.py","file_name":"phasenn_test11.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"3534795146","text":"from utils import run, ParsingConfig\n\nexample_answer = 1588\n\nexample_data = \"\"\"\nNNCB\n\nCH -> B\nHH -> N\nCB -> H\nNH -> C\nHB -> C\nHC -> B\nHN -> C\nNN -> C\nBH -> H\nNC -> B\nNB -> B\nBN -> B\nBB -> N\nBC -> B\nCC -> N\nCN -> C\n\"\"\"\n\n\nparsing_config = ParsingConfig(\n field_separator=\" -> \",\n)\n\n\ndef solve(data):\n state = data[0][0]\n mappings = {pair: insert for [pair, insert] in data[1:]}\n\n for _ in range(10):\n new_state = \"\"\n for i in range(len(state) - 1):\n insert = mappings[state[i:i+2]]\n new_state += state[i] + insert\n state = new_state + state[-1]\n\n counts = [state.count(x) for x in set(state)]\n return max(counts) - min(counts)\n\n\nreal_answer = 2768\n\n\nif __name__ == \"__main__\":\n run(example_data, example_answer, parsing_config, solve, 
real_answer)\n","repo_name":"csymeonides/advent-of-code","sub_path":"src/solutions/2021/d14p1.py","file_name":"d14p1.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31901974947","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 08:11:34 2020\n\n@author: Alexander\n\"\"\"\n\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup \nimport re\n\ndef getScoreFrame(url,Year):\n#传入形参url,Year(录取年份)\n#使用pandas中的read_html()方法,将网站中的数据存入到DataFrame中\n#并对DataFrame做一些删改,保留或添加上需要的信息\n r=requests.get(url)\n r.encoding='utf-8'\n df=pd.read_html(r.text)\n df_temp=pd.DataFrame(df[6].values[1:,:-2])\n if(Year=='2017'):\n if(url=='http://zs.dhu.edu.cn/f0/df/c14086a192735/page.htm'): #2017年浙江,无文理科一列\n df_temp.insert(3,\"Category\",\"无\")\n \n df_temp.columns=[\"Province\",\"unuseful\",\"Major\",\"Category\",\"Score\"]\n del df_temp[\"unuseful\"]\n df_major=df_temp['Major']\n df_temp.drop('Major',axis=1,inplace=True)\n df_temp.insert(2,'Major',df_major)\n #df_temp[['Major','Category']]=df_temp[['Category','Major']]\n else:\n df_temp.columns=[\"Province\",\"Category\",\"unuseful\",\"Major\",\"Score\"] \n del df_temp[\"unuseful\"]\n df_temp.insert(0,\"College\",\"东华大学\")\n df_temp.insert(1,\"Year\",Year)\n df_temp[\"Contributor\"]=\"09118136高成睿\"\n df_temp.replace(\"总计\",\"all\",inplace=True) #把总计改成all\n return df_temp\n\n#获取所有子域名\npage=['1','2','3']\nYear=[\"2017\",\"2018\",\"2019\"]\nhrefBegin={\"2017\":\"f0\",\"2018\":\"38\",\"2019\":\"a3\"}\nhtmls=[[],[],[]]\nfor i,year in enumerate(Year):\n for p in page:\n r=requests.get(\"http://zs.dhu.edu.cn/\"+year+\"ngslqfscx/list\"+p+\".htm\")\n pagesoup=BeautifulSoup(r.text,'lxml')\n for link in pagesoup.find_all(attrs={\"href\":re.compile(r'^/'+hrefBegin.get(year))}):\n htmls[i].append(link.get('href'))\n\n#将所有html中的录取信息表格存入到df_cont(DataFrame)中\nflag=0\nfor i in range(3):\n for h in htmls[i]:\n url=\"http://zs.dhu.edu.cn\"+h\n df_temp=getScoreFrame(url,Year[i])\n if(flag==0):\n df_cont=df_temp\n flag=1\n else:\n df_cont=pd.concat([df_cont,df_temp],axis=0)\ndf_cont.replace('\\u200b',\"all\",inplace=True) #全校分数可能在Major列为零长度字符串'\\u200b',替换为all\ndf_cont.fillna(\"all\",inplace=True) #全校分数可能在Major列为空值,填充为all\n#将df_cont存入.csv文件\ndf_cont.to_csv(\"./score of dhu.csv\",index=False,header=True,encoding='gbk')","repo_name":"eshoyuan/GaokaoRecommend","sub_path":"crawler/17-19录取分数爬虫/09118136东华大学.py","file_name":"09118136东华大学.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"73172041041","text":"import math\n\nfrom algebra import Vector\n\n\nclass Object(object):\n '''Object in the scene'''\n def __init__(self, surface):\n self.surface = surface\n\n def intersectionParameter(self, ray):\n return None\n\n def normalAt(self, p):\n return self.normal\n\n def baseColorAt(self, point):\n return self.surface.baseColorAt(point)\n\n def calcColor(self, light, lightray, ray, point):\n '''Calculates the color with Phong-Model'''\n \n ka = self.surface.ka\n kd = self.surface.kd\n ks = self.surface.ks\n baseColor = self.surface.baseColorAt(point.coords)\n small_n = self.surface.small_n\n\n n = self.normalAt(point)\n d = ray.direction\n l = lightray.direction\n lr = l.reflected(n) * -1\n \n cin = light.cin\n ca = light.ca\n \n ambient = baseColor * ca * ka \n diffuse = Vector(0,0,0)\n specular = Vector(0,0,0)\n \n specdot = lr.dot(d.scale(-1))\n diffdot = 
l.dot(n)\n \n if diffdot > 0:\n diffuse = baseColor * cin * kd * diffdot\n if specdot > 0:\n specular = baseColor * cin * ks * specdot**small_n\n \n return ambient + diffuse + specular\n\n\n\nclass Sphere(Object):\n def __init__(self, center, radius, surface):\n Object.__init__(self, surface)\n self.center = center # point\n self.radius = radius # scalar\n\n def __repr__(self):\n return 'Sphere(%s,%s)' % (repr(self.center), self.radius)\n\n def intersectionParameter(self, ray):\n co = self.center - ray.origin\n v = co.dot(ray.direction)\n discriminant = v*v - co.dot(co) + self.radius*self.radius\n if discriminant < 0:\n return None\n else:\n return v - math.sqrt(discriminant)\n\n def normalAt(self, p):\n return (p - self.center).normalized()\n\n \nclass Plane(Object):\n def __init__(self, point, normal, surface):\n Object.__init__(self, surface)\n self.point = point # point\n self.normal = normal.normalized() # vector\n \n def __repr__(self):\n return 'Plane(%s,%s)' % (repr(self.point), repr(self.normal))\n\n def intersectionParameter(self, ray):\n op = ray.origin - self.point\n a = op.dot(self.normal)\n b = ray.direction.dot(self.normal)\n if b:\n return -a/b\n else:\n return None\n\n\nclass Triangle(Object):\n def __init__(self, a, b, c, surface):\n Object.__init__(self, surface)\n\n self.a = a # point\n self.b = b # point\n self.c = c # point\n self.u = self.b - self.a # direction vector\n self.v = self.c - self.a # direction vector\n\n def __repr__(self):\n return 'Triangle(%s,%s,%s)' % (repr(self.a), repr(self.b), repr(self.c))\n\n def intersectionParameter(self, ray):\n w = ray.origin - self.a\n dv = ray.direction.cross(self.v)\n dvu = dv.dot(self.u)\n if dvu == 0.0:\n return None\n wu = w.cross(self.u)\n r = dv.dot(w) / dvu\n s = wu.dot(ray.direction) / dvu\n if 0<=r and r<=1 and 0<=s and s<=1 and r+s <=1:\n return wu.dot(self.v) / dvu\n else:\n return None\n \n def normalAt(self, p):\n return self.u.cross(self.v).normalized()\n","repo_name":"eyesfocus/mi-hsrm-cg","sub_path":"blatt02/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14251088111","text":"if __name__ == \"__main__\":\n s = input()\n arr = [-1]*26\n pivot = ord('a')\n\n for i in range(len(s)):\n i_where = ord(s[i])-pivot\n if arr[i_where] == -1:\n arr[i_where] = i\n\n for i in arr:\n print(i)\n","repo_name":"nordap/BOJ","sub_path":"10809.py","file_name":"10809.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22538770985","text":"import pygame\nimport random\nimport sys\n# el codigo trabaja con imganes de 32x32\nancho = 1300\nalto = 1000\ndef crearMatriz(imagen):\n mt = []\n ls = []\n info = imagen.get_rect()\n i = 0\n while (i < info[3]/32):\n ls = obtenerFila(imagen,i)\n mt.append(ls)\n i += 1\n\n return mt\n\n\ndef obtenerFila(imagen,fila):\n ls = []\n info = imagen.get_rect()\n a = info[2]\n x = 0\n while ( x < a):\n cuadro = recortar(imagen,[x,fila*32])\n ls.append(cuadro)\n x += 32\n return ls\n\ndef recortar(imagen,pos):\n ls = []\n cuadro = imagen.subsurface(pos[0],pos[1],32,32)#la funcion subsurface() permite tomar una parte de una imagen, recibe (pos x(inicio del recorte), posy(inicio del recorte), recorte en ancho, recorte en alto)\n ls.append(cuadro)#agregamos la imagen a la lista\n\n return cuadro\n\n\nif __name__ == '__main__':\n\n#SECCION DE VARIABLES\n 
pygame.init()\n pantalla = pygame.display.set_mode([600,400])#cambia resolucion de ventana y define una ventana llamada pantalla\n imagen = pygame.image.load('animals.png')#cargamos la imagen en la variable imagen\n #pantalla.blit(imagen,[0,0])#ponemos la imagen en la pocision 0,0\n info = imagen.get_rect()#informacion de la imagen, pos 2 es ancho, pos 3 es alto\n ls = obtenerFila(imagen,6)\n\n\n\n pygame.display.flip()\n fin = False\n\n while (not fin):\n for event in pygame.event.get():#lista de eventos, tambien event=pygame.event.get(), cambiando el for asi: for e(cualquier variable) in event: siendo event la lista de eventos anterior\n if event.type == pygame.QUIT:\n fin = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n pantalla.blit(ls[10],[0,0])\n pantalla.blit(ls[11],[32,0])\n pantalla.blit(ls[9],[64,0])\n pantalla.blit(ls[10],[96,0])\n pantalla.blit(ls[11],[128,0])\n pygame.display.flip()\n","repo_name":"diegogomez9900/COMPUTACIONGRAFICA","sub_path":"TRABAJOS-CLASE/mapeo.py","file_name":"mapeo.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12982976292","text":"from parser import parameter_parser\nimport torch\nfrom utils import *\nfrom data import Dataset\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport random\nfrom model import GraLSP\nimport time\nfrom tqdm import trange\nparser = parameter_parser()\ndevice = get_device()\n\nif torch.cuda.is_available():\n torch.cuda.manual_seed(777)\nelse:\n torch.manual_seed(777)\nnp.random.seed(777)\n\n\nprint(\"Reading dataset\")\nedge_list, num_nodes = get_edge_list(parser)\nprint(\"Generating nx graphs...\")\ng = edge_list2nx(edge_list, num_nodes)\nnode_features = torch.tensor(np.load(\"data/\" + parser.dataset_name + \"/features.npy\"), device = device)\nparser.feature_dims = len(node_features[0])\nnode2label = get_node2label(num_nodes)\n\n\nprint(\"Generating RWs...\")\n\nalias_nodes, alias_edges = preprocess_transition_prob(g,num_nodes)\nnode_walks, random_walks = generate_node2vec_walks(g, num_nodes, alias_nodes, alias_edges)\n\n\nnode_anonymous_walks, node_walk_radius = generate_node_walks_and_radius(num_nodes, node_walks)\n\n\nnode_normalized_walk_distr, node_anonym_walktypes = process_anonym_distr(num_nodes, parser.anonym_walk_len, node_anonymous_walks)\n\ntypes_and_nodes = generate_types_and_nodes(num_nodes, node_anonym_walktypes, node_walks, node_walk_radius) #(2708,500,2)\n\ntypes_and_nodes = torch.tensor(types_and_nodes,device = device)\n\nprint(\"Generating Dataloader...\")\ndataset = Dataset(parser,\n num_nodes,\n g,\n random_walks,\n node_normalized_walk_distr)\n\ndef negative_sampling(keys, labels, neg_size):\n negs = np.zeros((neg_size)) \n for j in range(neg_size):\n neg_ = random.choice(dataset.neg_sampling_seq)\n while (neg_ in labels or neg_ in keys):\n neg_ = random.choice(dataset.neg_sampling_seq)\n negs[j] = neg_\n return negs\n\ndef ns_collate(batch):\n keys, labels,walk_key, walk_label, walk_neg = zip(*batch)\n negs = negative_sampling(keys, labels, parser.neg_size)\n return torch.LongTensor(keys), torch.LongTensor(labels), torch.LongTensor(negs), torch.LongTensor(walk_key), torch.LongTensor(walk_label), torch.LongTensor(walk_neg)\n\ndataloader = DataLoader(dataset, shuffle=True, batch_size=parser.batch_size, num_workers=6, collate_fn=ns_collate)\n\ndef train():\n save_path = parser.save_path + \"/\" + parser.dataset_name\n start_time = time.time()\n model = 
GraLSP(parser,\n node_walks,\n random_walks,\n node_anonymous_walks,\n node_walk_radius,\n node_normalized_walk_distr,\n node_anonym_walktypes,\n types_and_nodes,\n node_features,\n node2label,\n save_path)\n \n model = model.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=parser.learning_rate)\n \n print(\"Start Training...\")\n epochs = trange(parser.epochs, leave=True, desc=\"Epoch\")\n for epoch in epochs:\n losses = 0\n batch_link_loss = []\n batch_walk_loss = []\n batch_losses = []\n for i, data in enumerate(dataloader):\n model.train()\n optimizer.zero_grad()\n keys, labels, negs, walk_key, walk_label, walk_neg = data\n keys = keys.to(device)\n labels = labels.to(device)\n negs = negs.to(device)\n walk_key = walk_key.to(device)\n walk_label = walk_label.to(device)\n walk_neg = walk_neg.to(device)\n\n output_keys, output_labels, output_negs = model(keys, labels, negs)\n pos_aff = torch.sum(torch.multiply(output_keys, output_labels), axis = 1)\n neg_aff = torch.matmul(output_keys, output_negs.t())\n\n # pos_aff, neg_aff = model(keys, labels, negs)\n likelihood = torch.log(torch.sigmoid(pos_aff) + 1e-6) + torch.sum(torch.log(1-torch.sigmoid(neg_aff) + 1e-6), axis =1)\n link_loss = -torch.mean(likelihood)\n walk_loss = parser.walk_loss_lambda * model.criterion(walk_key,walk_label,walk_neg)\n # walk_loss = model.criterion(walk_key,walk_label,walk_neg)\n losses = link_loss + walk_loss\n\n losses.backward()\n optimizer.step()\n \n batch_link_loss.append(link_loss.item())\n batch_walk_loss.append(walk_loss.item())\n batch_losses.append(losses.item())\n\n\n\n epochs.set_description(\"Epoch (Loss=%g)\" % round(np.mean(batch_losses), 5))\n if i and i % 500 == 0:\n\n model.evaluate_model()\n model.save_embeddings(i, save_model= False)\n\n\n\n\nif __name__ == \"__main__\":\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n train()","repo_name":"JhuoW/GraLSP_pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"2630126625","text":"#pip install pandas\r\n#pip install numpy\r\n#pip install openpyxl\r\n#pip install matplotlib\r\n#pip install seaborn\r\n#pip install scikit-learn\r\n\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.metrics import r2_score\r\n\r\n# Importar a base de dados para o python\r\ntabela = pd.read_csv(\"advertising.csv\")\r\n\r\n# Vizualizar a base e fazer ajustes na base de dados\r\nprint(tabela)\r\n\r\n# Análise exploratória -> entender como a sua base de dados está se comportando\r\n\r\n# CORRELAÇÃO\r\n# Tv- Vendas\r\n# Jornal- Vendas\r\n# Radio- Vendas\r\n\r\n\r\n# cria o gráfico\r\nsns.heatmap(tabela.corr(), annot=True, cmap=\"Wistia\")\r\n\r\n# exibe o gráfico\r\nplt.show()\r\n\r\n# Dados de Treino - 70% à 90%\r\n\r\n# Dados de Teste - 10% à 30%\r\n\r\n# O que voce está tentando prever é (y) o resto é (x) (x de teste e outro de treino) (y de teste e outro de treino)\r\n\r\ny = tabela[\"Vendas\"]\r\nx = tabela[[\"TV\", \"Radio\", \"Jornal\"]]\r\n\r\n# x_treino, x_teste, y_treino, y_teste (Nesta Ordem)\r\n# test_size -> proporção teste-treino (30%)\r\nx_treino, x_teste, y_treino, y_teste = train_test_split(x, y, test_size=0.3)\r\n\r\n# Criar a inteligência 
artificial e fazer as previsoes\r\nmodelo_regressaoLinear = LinearRegression()\r\nmodelo_arvoreDecisao = RandomForestRegressor()\r\n\r\nmodelo_regressaoLinear.fit(x_treino, y_treino)\r\nmodelo_arvoreDecisao.fit(x_treino, y_treino)\r\n\r\n# Fazer a previsão para comparar com o valor real\r\nprevisao_regressaoLinear = modelo_regressaoLinear.predict(x_teste)\r\nprevisao_arvoreDecisao = modelo_arvoreDecisao.predict(x_teste)\r\n\r\nprint(r2_score(y_teste, previsao_regressaoLinear))\r\nprint(r2_score(y_teste, previsao_arvoreDecisao))\r\n\r\n# Vizualização gráfica das previsões\r\ntabela_auxiliar = pd.DataFrame()\r\ntabela_auxiliar['y_teste'] = y_teste\r\ntabela_auxiliar['Previsoes Arvore de Decisao'] = previsao_arvoreDecisao\r\ntabela_auxiliar['Previsoes Regressao Linear'] = previsao_regressaoLinear\r\n\r\nplt.figure(figsize=(15, 6))\r\nsns.lineplot(data=tabela_auxiliar)\r\nplt.show()\r\n\r\n#Fazer uma nova previsao\r\nnovos = pd.read_csv(\"novos.csv\")\r\nprint(novos)\r\n\r\n#previsao vencedora foi a arvore de decisao\r\nprevisao = modelo_arvoreDecisao.predict(novos)\r\nprint(previsao)","repo_name":"Gustav-dev97/MachineLearning-DataScience","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"21551637663","text":"import speech_recognition as sr\r\n# from gtts import gTTS\r\nimport pyttsx3\r\n# import pyaudio\r\nimport wikipedia\r\nimport pywhatkit\r\nimport datetime\r\nimport pyjokes\r\nimport webbrowser\r\nimport os\r\n\r\nlistener = sr.Recognizer()\r\nengine=pyttsx3.init()\r\ndef talk(text):\r\n newVoiceRate = 145\r\n engine.setProperty('rate',newVoiceRate)\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\ndef take_command():\r\n try:\r\n with sr.Microphone() as source:\r\n print(\"...listeninng..\")\r\n listener.adjust_for_ambient_noise(source)\r\n voice=listener.listen(source)\r\n command=listener.recognize_google(voice)\r\n command=command.lower()\r\n\r\n if 'alexa' in command:\r\n command=command.replace('alexa','')\r\n talk(command)\r\n print(command)\r\n except:\r\n pass\r\n return command\r\n\r\ndef wishme():\r\n hour=int(datetime.datetime.now)\r\n if hour>0 and hour<12:\r\n talk(\"good morning\")\r\n elif hour<12 and hour>18:\r\n talk(\"good afternoon\")\r\n else:\r\n talk(\"good evening\")\r\n talk(\"hey i am voice assistant what can i help you\")\r\n \r\ndef run_alexa():\r\n command=take_command()\r\n if 'play' in command:\r\n song=command.replace('play','')\r\n print('playing..')\r\n talk('playing')\r\n talk('playing'+song)\r\n pywhatkit.playonyt(song)\r\n elif 'time' in command:\r\n time=datetime.datetime.now().strftime('%I,%M,%S')\r\n print(time)\r\n talk(\"current time is \"+time)\r\n elif 'who is' in command:\r\n person=command.replace('who is','')\r\n info = wikipedia.summary(person,1)\r\n print(info)\r\n talk(info)\r\n elif 'joke' in command:\r\n talk(pyjokes.get_joke())\r\n print(pyjokes.get_joke())\r\n\r\n elif 'date' in command:\r\n talk('sorry i have mingle')\r\n print('sorry i have mingle')\r\n \r\n elif 'open youtube' in command:\r\n webbrowser.open_new_tab(\"www.youtube.com\")\r\n elif 'open google' in command:\r\n webbrowser.open_new_tab(\"www.google.com\")\r\n \r\n elif 'open' in command:\r\n path=\"D:\\\\python2.0\\\\alexxa\"\r\n os.startfile(path)\r\n \r\n \r\n elif 'stop' in command:\r\n exit()\r\n\r\n else:\r\n talk('say it again')\r\n print('say it again')\r\n\r\n\r\n\r\nwishme()\r\nwhile True:\r\n 
run_alexa()\r\n\r\n","repo_name":"harshmittal246/Virtual-Assistant-Alexa","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4440827421","text":"# coding: utf-8\nimport zipfile\nfrom tqdm import tqdm\nimport pandas as pd\nfrom czsc.analyze import *\nfrom czsc.enum import Freq\nfrom czsc.signals.signals import get_default_signals, get_s_three_bi, get_s_d0_bi\n\ncur_path = os.path.split(os.path.realpath(__file__))[0]\n\n\ndef read_1min():\n with zipfile.ZipFile(os.path.join(cur_path, 'data/000001.XSHG_1min.zip'), 'r') as z:\n f = z.open('000001.XSHG_1min.csv')\n data = pd.read_csv(f, encoding='utf-8')\n\n data['dt'] = pd.to_datetime(data['dt'])\n records = data.to_dict('records')\n\n bars = []\n for row in tqdm(records, desc='read_1min'):\n bar = RawBar(**row)\n bar.freq = Freq.F1\n bars.append(bar)\n return bars\n\n\ndef read_daily():\n file_kline = os.path.join(cur_path, \"data/000001.SH_D.csv\")\n kline = pd.read_csv(file_kline, encoding=\"utf-8\")\n kline.loc[:, \"dt\"] = pd.to_datetime(kline.dt)\n bars = [RawBar(symbol=row['symbol'], id=i, freq=Freq.D, open=row['open'], dt=row['dt'],\n close=row['close'], high=row['high'], low=row['low'], vol=row['vol'])\n for i, row in kline.iterrows()]\n return bars\n\n\ndef test_find_bi():\n bars = read_daily()\n # 去除包含关系\n bars1 = []\n for bar in bars:\n if len(bars1) < 2:\n bars1.append(NewBar(symbol=bar.symbol, id=bar.id, freq=bar.freq,\n dt=bar.dt, open=bar.open,\n close=bar.close, high=bar.high, low=bar.low,\n vol=bar.vol, elements=[bar]))\n else:\n k1, k2 = bars1[-2:]\n has_include, k3 = remove_include(k1, k2, bar)\n if has_include:\n bars1[-1] = k3\n else:\n bars1.append(k3)\n\n fxs = []\n for i in range(1, len(bars1) - 1):\n fx = check_fx(bars1[i - 1], bars1[i], bars1[i + 1])\n if isinstance(fx, FX):\n fxs.append(fx)\n\n\ndef get_user_signals(c: CZSC) -> OrderedDict:\n \"\"\"在 CZSC 对象上计算信号,这个是标准函数,主要用于研究。\n 实盘时可以按照自己的需要自定义计算哪些信号\n\n :param c: CZSC 对象\n :return: 信号字典\n \"\"\"\n s = OrderedDict({\"symbol\": c.symbol, \"dt\": c.bars_raw[-1].dt, \"close\": c.bars_raw[-1].close})\n # 倒0,特指未确认完成笔\n # 倒1,倒数第1笔的缩写,表示第N笔\n # 倒2,倒数第2笔的缩写,表示第N-1笔\n # 倒3,倒数第3笔的缩写,表示第N-2笔\n # 以此类推\n for i in range(1, 3):\n s.update(get_s_three_bi(c, i))\n s.update(get_s_d0_bi(c))\n return s\n\n\ndef test_czsc_update():\n bars = read_daily()\n # 不计算任何信号\n c = CZSC(bars)\n assert not c.signals\n\n # 计算信号\n c = CZSC(bars, get_signals=get_default_signals)\n assert len(c.bi_list) == 50 and not c.last_bi_extend\n assert isinstance(c.signals, OrderedDict) and len(c.signals) == 38\n\n last_bi = c.bi_list[-1]\n assert len(last_bi.raw_bars) == 32 and last_bi.power_price == last_bi.power\n assert len(last_bi.fake_bis) == 11\n assert last_bi.fake_bis[0].direction == last_bi.fake_bis[-1].direction == last_bi.direction\n # 测试自定义信号\n c = CZSC(bars, get_signals=get_user_signals, signals_n=20)\n assert len(c.signals) == 11\n assert len(c.signals_list) == 20\n assert c.signals_list[-1] == c.signals\n\n kline = [x.__dict__ for x in c.bars_raw]\n bi = [{'dt': x.fx_a.dt, \"bi\": x.fx_a.fx} for x in c.bi_list] + \\\n [{'dt': c.bi_list[-1].fx_b.dt, \"bi\": c.bi_list[-1].fx_b.fx}]\n chart = kline_pro(kline, bi=bi, title=\"{} - {}\".format(c.symbol, c.freq))\n file_html = \"x.html\"\n chart.render(file_html)\n #os.remove(file_html)\n\n\ndef test_get_signals():\n\n def get_test_signals(c: CZSC) -> OrderedDict:\n s = OrderedDict({\"symbol\": c.symbol, \"dt\": 
c.bars_raw[-1].dt, \"close\": c.bars_raw[-1].close})\n s.update(get_s_d0_bi(c))\n return s\n\n bars = read_daily()\n # 不计算任何信号\n c = CZSC(bars, get_signals=get_test_signals)\n assert c.signals['日线_倒0笔_方向'] == '向下_任意_任意_0'\n assert c.signals['日线_倒0笔_长度'] == '5到9根K线_任意_任意_0'\n","repo_name":"gameDev-AI/CZSC_signal","sub_path":"test/test_analyze.py","file_name":"test_analyze.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"38644524433","text":"import unittest\r\nfrom unittest.mock import Mock, patch\r\nfrom datetime import timedelta, datetime\r\nfrom collections import defaultdict\r\n\r\nfrom cncparser.utils import (_convert_date, convert_timedelta,\r\n filter_by_date, get_by_date,\r\n _update_default_dict, aggregate_data,\r\n simplify_job_name, sort_descending)\r\n\r\n\r\nclass TestGeneralPurposeUtilityFunctions(unittest.TestCase):\r\n\r\n def test_convert_timedelta_converting_dates_correctly(self):\r\n time = timedelta(seconds=3600)\r\n converted = convert_timedelta(time)\r\n self.assertEqual(converted, '1h 0m 0s')\r\n with self.assertRaises(TypeError):\r\n convert_timedelta(3600)\r\n\r\n def test_update_default_dict_method_works_as_expected(self):\r\n main = defaultdict(int, [('a', 1), ('b', 1), ('c', 1)])\r\n other = defaultdict(int, [('a', 4), ('d', 1)])\r\n _update_default_dict(main, other)\r\n self.assertEqual(dict(main), {'a': 5, 'b': 1, 'c': 1, 'd': 1})\r\n\r\n def test_convert_date_function_returns_timedelta_object(self):\r\n date_string = '1995-07-04'\r\n converted = _convert_date(date_string)\r\n self.assertEqual(converted, datetime(year=1995, month=7, day=4))\r\n date_string = '1995_07_04'\r\n converted = _convert_date(date_string, s_format='%Y_%m_%d')\r\n self.assertEqual(converted, datetime(year=1995, month=7, day=4))\r\n date_string = datetime(year=1995, month=7, day=4)\r\n converted = _convert_date(date_string)\r\n self.assertEqual(converted, datetime(year=1995, month=7, day=4))\r\n with self.assertRaises(TypeError):\r\n _convert_date(12345)\r\n\r\n def test_simplify_job_name_strips_path_and_version_tag(self):\r\n job_name = 'Metalware/Prefabricated/Housings/745.234.100ver20.05.ISO'\r\n simplified_name = simplify_job_name(job_name)\r\n self.assertEqual(simplified_name, '745.234.100.ISO')\r\n\r\n def test_sort_descending_returns_list_of_tuples_in_descending_order(self):\r\n d = {'prg1': 8, 'prg2': 1, 'prg3': 12, 'prg4': 4}\r\n sorted_tuples = sort_descending(d)\r\n self.assertEqual(sorted_tuples, [('prg3', 12), ('prg1', 8),\r\n ('prg4', 4), ('prg2', 1)]\r\n )\r\n\r\n\r\nclass TestUtilityFunctionsThatWorksWithReportObjects(unittest.TestCase):\r\n\r\n def setUp(self):\r\n # Creating list of Mock objects with date attribute\r\n self.reports = [Mock() for x in range(5)]\r\n dates = (datetime(2017, 7, x) for x in range(1, 6))\r\n\r\n for x, y in zip(self.reports, dates):\r\n setattr(x, 'date', y)\r\n\r\n def test_filter_by_date_returns_desired_reports(self):\r\n max_date = datetime(2017, 7, 5)\r\n min_date = datetime(2017, 7, 3)\r\n filtered = filter_by_date(self.reports, min_date, max_date)\r\n # We should get reports with dates: 2017-07-03, 2017-07-04, 2017-07-05.\r\n self.assertCountEqual([x.date for x in filtered],\r\n [datetime(2017, 7, x) for x in range(3, 6)])\r\n\r\n def test_get_by_date_returns_report_with_desired_date(self):\r\n desired_date = '2017-07-04'\r\n report = get_by_date(self.reports, desired_date)\r\n self.assertEqual(report.date, datetime(2017, 7, 4))\r\n 
desired_date = '2017-07-08'\r\n report = get_by_date(self.reports, desired_date)\r\n self.assertEqual(None, report)\r\n\r\n def test_aggregate_data_function_calls_update_function(self):\r\n with patch('cncparser.utils._update_default_dict') as mock:\r\n aggregate_data(self.reports)\r\n self.assertEqual(mock.call_count, 5)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"JaveBychkov/primapower-dm-reports-parser","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5522145627","text":"# 线程\n # 其他一些开发语言如 java ,多线程可以利用多核\n # cpython 解释器下多个线程不能利用多核:规避了所有 io 操作的单线程\n# 协程\n # 操作系统不可见(线程是操作系统调度的最小单位)\n # 本质就是一条线程,多个任务在一条线程上进行切换,来规避 IO 操作,将一条线程的 io 操作降到最低(切换任务造成的结果)\n\n# 线程内切换任务并规避 io 的两个模块:\n# gevent :利用了 greenlet(C编写的任务切换)模块完成切换,并写了自动规避 io 的功能\n# asyncio:利用 yield 语法完成的切换,并写了自动规避 io 的功能(借鉴 tornado 异步 web 框架)\n # yield from 、send 两个语法更好的实现协程\n # 最后出现了 asynicio 模块,python 原生的协程概念被确立(3.4版本)\n # 提供了提供协程功能的关键字:async await\n\n# 进程 数据隔离,数据不安全,操作系统级别,切换效率:开销非常大\n# 线程 数据共享,数据不安全,操作系统级别,切换效率:开销小\n# 协程 数据共享,数据安全,用户级别,切换效率:更小;\n # 协程的所有切换都基于用户,那么只有在用户级别能感知到的 io 操作才会用协程模块来规避(socket\\request\\sleep等)\n # 但是文件操作的io只有���作系统可以感知,print、input等\n # 线程对 io 操作比协程更细腻\n\n# 用户级别的好处\n # 用户判断切换,减轻了操作系统的负担\n # 一条线程如果开了多个协程,从操作系统来看这个线程很忙,这样线程能运行更多的时间,降低了线程之间切换的阻塞时间,提高执行效率\n\n# # 对于操作系统,python代码 --> 编译 --> 字节码 --> 解释器 --> 二进制运行 010001010\n# # 二进制反编译成 LOAD_GLOBAL 显示给用户CPU执行的 0101,其实底层还是二进制\n# import dis\n# def func(a):\n# a += 1\n# dis.dis(func)\n# \"\"\"\n# # 实际CPU执行的内容,数据不安全是因为,一个函数没执行完,CPU就轮转了(操作系统控制的切换)\n# 9 0 LOAD_FAST 0 (a)\n# 2 LOAD_CONST 1 (1)\n# 4 INPLACE_ADD -----> 比如这里发生了CPU时间片轮转\n# 6 STORE_FAST 0 (a)\n# 8 LOAD_CONST 0 (None)\n# 10 RETURN_VALUE\n# \"\"\"\n# # 协程是用户控制的切换,代码层面进行切换,代码执行完之后再切换,而不是操作系统的字节码层面进行切换\n# def func(a):\n# # 切换\n# a += 1\n# # 切换\n\nfrom threading import Thread,current_thread\nimport time,gevent\nfrom gevent import monkey\nmonkey.patch_all()\n\ndef func():\n print(\"start\",current_thread())\n time.sleep(2)\n print(\"end\",current_thread())\n\n# # 线程\n# t_lis = []\n# for i in range(10):\n# t = Thread(target=func)\n# t.start()\n# t_lis.append(t)\n# [t.join() for t in t_lis]\n\n# # 协程\n# # 通过 monkey.patch 识别程序中的阻塞(重新socket等等类),如果没有阻塞,不会运行 gevent.spawn()\n# gevent.spawn(func) # 执行结果只有 start,因为后面没有阻塞,协程切换出去后,需要等2s再切换回来,而\n# # 这行代码后面没有阻塞,还没来得急切换回来,程序就已经结束了,所以只打印了 start\n# # 如果执行完全,则需要代码后的阻塞时间大于代码阻塞的时间,比如 sleep(3)\n# gevent.spawn(func)\n# time.sleep(3) # 这两行代码执行结果正常\n\n\n# **基本用法**\ng_lis = []\nfor _ in range(4):\n g = gevent.spawn(func)\n g_lis.append(g)\ngevent.joinall(g_lis)\n\n# 使用 gevent 协程来实现 socket 并发\n# 4c 5进程 500线程,一台机器可实现 5w 并发\n# gevent_server.py\n# client.py","repo_name":"xdai555/learn-python","sub_path":"note/并发/协程/协程.py","file_name":"协程.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7593606949","text":"from torch.utils.data.dataset import Dataset\nfrom PIL import Image\nfrom PIL import ImageFilter\nimport pandas as pd\nimport numpy as np\nimport torch\nimport os\nimport random\nimport itertools\nimport glob\n\nimport torch.utils.data.sampler as sampler\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as transforms_f\nfrom torch.utils.data.sampler import Sampler\nimport h5py\n\nclass BaseDataSetsWithIndex(Dataset):\n def __init__(self, base_dir=None, split='train', 
num=None, transform=None, index=16, label_type=0):\n self._base_dir = base_dir\n self.index = index\n self.sample_list = []\n self.split = split\n self.transform = transform\n if self.split == 'train' and 'ACDC' in base_dir:\n with open(self._base_dir + '/train_slices.list', 'r') as f1:\n self.sample_list = f1.readlines()\n self.sample_list = [item.replace('\\n', '')\n for item in self.sample_list]\n if(label_type==1):\n self.sample_list = self.sample_list[:index]\n else:\n self.sample_list = self.sample_list[index:]\n elif self.split == 'train' and 'MM' in base_dir:\n with open(self._base_dir + '/train_slices.txt', 'r') as f1:\n self.sample_list = f1.readlines()\n self.sample_list = [item.replace('.h5\\n', '')\n for item in self.sample_list]\n if(label_type==1):\n self.sample_list = self.sample_list[:index]\n else:\n self.sample_list = self.sample_list[index:]\n\n elif self.split == 'val':\n with open(self._base_dir + '/val.list', 'r') as f:\n self.sample_list = f.readlines()\n self.sample_list = [item.replace('\\n', '')\n for item in self.sample_list]\n if num is not None and self.split == \"train\":\n self.sample_list = self.sample_list[:num-index]\n print(\"total {} samples\".format(len(self.sample_list)))\n\n def __len__(self):\n return len(self.sample_list)\n\n def __getitem__(self, idx):\n case = self.sample_list[idx]\n if self.split == \"train\":\n h5f = h5py.File(self._base_dir +\n \"/data/slices/{}.h5\".format(case), 'r')\n else:\n h5f = h5py.File(self._base_dir + \"/data/{}.h5\".format(case), 'r')\n image = h5f['image'][:]\n label = h5f['label'][:]\n sample = {'image': image, 'label': label}\n if self.split == \"train\" and self.transform!=None:\n sample = self.transform(sample)\n sample[\"idx\"] = idx\n return sample\n\n# class TwoStreamBatchSampler(Sampler):\n# \"\"\"Iterate two sets of indices\n# An 'epoch' is one iteration through the primary indices.\n# During the epoch, the secondary indices are iterated through\n# as many times as needed.\n# \"\"\"\n\n# def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):\n# self.primary_indices = primary_indices\n# self.secondary_indices = secondary_indices\n# self.secondary_batch_size = secondary_batch_size\n# self.primary_batch_size = batch_size - secondary_batch_size\n\n# assert len(self.primary_indices) >= self.primary_batch_size > 0\n# assert len(self.secondary_indices) >= self.secondary_batch_size > 0\n\n# def __iter__(self):\n# primary_iter = iterate_once(self.primary_indices)\n# secondary_iter = self.iterate_eternally(self.secondary_indices)\n# return (\n# primary_batch + secondary_batch\n# for (primary_batch, secondary_batch)\n# in zip(grouper(primary_iter, self.primary_batch_size),\n# grouper(secondary_iter, self.secondary_batch_size))\n# )\n\n# def __len__(self):\n# return len(self.primary_indices) // self.primary_batch_size\n\n\n# def iterate_eternally(self,indices):\n# n = len(self.data_source)\n# # def infinite_shuffles():\n# # while True:\n# # yield np.random.permutation(indices)\n# # return itertools.chain.from_iterable(infinite_shuffles())\n# for _ in range(self.num_samples // 32):\n# yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=torch.Generator()).tolist()\n# yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=torch.Generator()).tolist()\n\n\n# def iterate_once(iterable):\n# # return np.random.permutation(iterable) # changes here\n# def infinite_shuffles():\n# while True:\n# yield np.random.permutation(iterable)\n# 
return itertools.chain.from_iterable(infinite_shuffles())\n\n\n\n# def grouper(iterable, n):\n# \"Collect data into fixed-length chunks or blocks\"\n# # grouper('ABCDEFG', 3) --> ABC DEF\"\n# args = [iter(iterable)] * n\n# return zip(*args)\n\nclass Synapse_dataset(Dataset):\n def __init__(self, base_dir, list_dir, split, transform=None):\n self.transform = transform # using transform in torch!\n self.split = split\n if (split == 'test' or split == 'val'):\n self.sample_list = open(os.path.join(list_dir, self.split+'_vol_40.txt')).readlines()\n else:\n self.sample_list = open(os.path.join(list_dir, self.split+'_40.txt')).readlines()\n self.data_dir = base_dir\n\n def __len__(self):\n return len(self.sample_list)\n\n def __getitem__(self, idx):\n if self.split == \"train\":\n slice_name = self.sample_list[idx].strip('\\n')\n data_path = os.path.join(self.data_dir, slice_name+'.npz')\n # print(data_path)\n data = np.load(data_path)\n image, label = data['image'], data['label']\n else:\n vol_name = self.sample_list[idx].strip('\\n')\n filepath = self.data_dir + \"/{}.npy.h5\".format(vol_name)\n data = h5py.File(filepath)\n image, label = data['image'][:], data['label'][:]\n\n sample = {'image': image, 'label': label}\n if self.transform:\n sample = self.transform(sample)\n sample['case_name'] = self.sample_list[idx].strip('\\n')\n return sample\n\nclass Synapse_datasetWithIndex(Dataset):\n def __init__(self, base_dir, list_dir, split, transform=None, index=221, label_type=1):\n self.transform = transform # using transform in torch!\n self.split = split\n \n if 'Lits' in list_dir:\n self.sample_list = open(os.path.join(list_dir, self.split+'_40.txt')).readlines()\n \n elif (split == \"test\" or split == 'val'):\n self.sample_list = open(os.path.join(list_dir, self.split+'_vol.txt')).readlines()\n else:\n self.sample_list = open(os.path.join(list_dir, self.split+'.txt')).readlines()\n \n self.data_dir = base_dir\n self.index = index\n self.label_type = label_type\n if(label_type==1):\n self.sample_list = self.sample_list[:index]\n else:\n self.sample_list = self.sample_list[index:]\n\n def __len__(self):\n return len(self.sample_list)\n\n def __getitem__(self, idx):\n if self.split == \"train\":\n slice_name = self.sample_list[idx].strip('\\n')\n data_path = os.path.join(self.data_dir, slice_name+'.npz')\n # print(data_path)\n data = np.load(data_path)\n image, label = data['image'], data['label']\n else:\n vol_name = self.sample_list[idx].strip('\\n')\n filepath = self.data_dir + \"/{}.npy.h5\".format(vol_name)\n data = h5py.File(filepath)\n image, label = data['image'][:], data['label'][:]\n\n sample = {'image': image, 'label': label}\n if self.transform:\n sample = self.transform(sample)\n sample['case_name'] = self.sample_list[idx].strip('\\n')\n return sample\n","repo_name":"charlesyou999648/ARCO","sub_path":"code/build_dataset.py","file_name":"build_dataset.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"} +{"seq_id":"26114001476","text":"#Entrar 4 numeros pelo teclado \n#Saida\n# Quantas vezes o valor 9 foi digitado\n# Em qual posição o valor 3 foi digitado\n# E quantos numeros pares foram digitado \n\npares = 0\nnove = 0\nMyList = []\n\nfor x in range(4):\n MyList.append(int(input('Digite um numero: ')))\n\nMyTuple = tuple(MyList)\n\nfor t in MyTuple:\n if t%2 == 0 and t != 0 :\n pares += 1 \n\nfor t in MyTuple:\n if t == 9:\n nove += 1\n\nprint('=-'*50)\ntry:\n if MyTuple.index(3) != -1:\n 
print(f'O valor 3 foi econtrado na posição {MyTuple.index(3)+1}')\nexcept ValueError:\n print(f'O valor 3 não foi digitado')\n\n\nprint(f'O valor 9 foi digitado {nove} vezes')\nprint(f'O total de numeros pares foi de {pares}')","repo_name":"Bobonimo111/Python-treinos-","sub_path":"Aula 16 tuplas/Desafio075.py","file_name":"Desafio075.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32192029572","text":"import pytest\nfrom flask_jwt_extended import create_access_token\nfrom models.users import User\n\n\n@pytest.mark.skip(\"Need to rework this endpoint.\")\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_get_users_by_id_without_auth(client):\n response = client.get(\"/api/v1/users/4\")\n assert response.status_code == 401\n\n\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_get_users_by_id_with_auth(auth_client, loaded_db):\n user = loaded_db.get(User, \"4\")\n access_token = create_access_token(identity=user)\n\n response = auth_client.get(\"/api/v1/users/4\", headers={\"Authorization\": f\"Bearer {str(access_token)}\"})\n assert response.status_code == 200\n assert response.json[\"id\"] == 4\n\n\n@pytest.mark.skip(\"Need to rework this endpoint.\")\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_get_someone_else_user_with_auth(auth_client, loaded_db):\n user = loaded_db.get(User, \"4\")\n access_token = create_access_token(identity=user)\n\n response = auth_client.get(\"/api/v1/users/1\", headers={\"Authorization\": f\"Bearer {str(access_token)}\"})\n assert response.status_code == 401\n\n\n@pytest.mark.skip(\"Need to rework this endpoint.\")\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_get_all_users_with_auth(auth_client, loaded_db):\n user = loaded_db.get(User, \"4\")\n access_token = create_access_token(identity=user)\n\n response = auth_client.get(\"/api/v1/users/\", headers={\"Authorization\": f\"Bearer {str(access_token)}\"})\n assert response.status_code == 200\n assert len(response.json) == 12\n\n\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_get_all_users_without_auth(client):\n response = client.get(\"/api/v1/users/\")\n assert response.status_code == 401\n\n\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_get_all_users_with_auth_fixture(auth_client):\n response = auth_client.get(\"/api/v1/users/\")\n assert response.status_code == 200\n\n\ndef test_user_to_dict():\n user = User(\n id=1,\n oidc_id=\"abcd\",\n email=\"example@example.com\",\n first_name=\"blah\",\n last_name=\"blah\",\n division=1,\n )\n assert user.to_dict()[\"id\"] == 1\n assert user.to_dict()[\"oidc_id\"] == \"abcd\"\n\n\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_put_user_invalid_id(auth_client):\n # Send a PUT request with an invalid user ID\n response = auth_client.put(\"/api/v1/users/999\", json={\"first_name\": \"New First Name\"})\n\n # Check that the response status code is 404 Not Found\n assert response.status_code == 400\n\n\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_put_user_unauthorized(client):\n # Send a PUT request without authorization\n response = client.put(\"/api/v1/users/4\", json={\"first_name\": \"New First Name\"})\n\n # Check that the response status code is 401 Unauthorized\n assert response.status_code == 401\n\n\n@pytest.mark.usefixtures(\"app_ctx\")\ndef test_put_user(auth_client):\n # Send a PUT request to update the user\n response = auth_client.put(\n \"/api/v1/users/4\",\n json={\"first_name\": \"New First Name\"},\n )\n\n # Check that the response status 
code is 200 OK\n assert response.status_code == 200\n\n # Check that the response data matches the updated user data\n print(response.json)\n assert response.json[\"first_name\"] == \"New First Name\"\n\n # Check that the user was updated in the database\n updated_user = User.query.get(4)\n assert updated_user.first_name == \"New First Name\"\n\n # Revert changes back to original values\n response = auth_client.put(\n \"/api/users/4\",\n json={\"first_name\": \"Amelia\"},\n )\n","repo_name":"HHS/OPRE-OPS","sub_path":"backend/ops_api/tests/ops/users/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"} +{"seq_id":"29916356750","text":"from flask import Flask, redirect, url_for, request\r\napp = Flask(__name__)\r\n\r\n@app.route('/success/')\r\ndef success(name):\r\n import time\r\n from datetime import datetime as dat\r\n\r\n # change hosts path according to your OS\r\n hosts_path = \"C:\\Windows\\System32\\Drivers\\etc\\hosts\"\r\n # localhost's IP\r\n localIP = \"127.0.0.1\"\r\n website_list = [name]\r\n\r\n while True:\r\n if dat(dat.now().year, dat.now().month, dat.now().day, 1) < dat.now() < dat(dat.now().year, dat.now().month,\r\n dat.now().day, 11):\r\n print(\"Process Executed...\")\r\n with open(hosts_path, 'r+') as file:\r\n content = file.read()\r\n for website in website_list:\r\n if website in content:\r\n pass\r\n else:\r\n file.write(localIP + \" \" + website + \"\\n\")\r\n else:\r\n with open(hosts_path, 'r+') as file:\r\n content = file.readlines()\r\n file.seek(0)\r\n for line in content:\r\n if not any(website in line for website in website_list):\r\n file.write(line)\r\n # removing hostnmes from host file\r\n file.truncate()\r\n print(\"Process not executing...\")\r\n time.sleep(5)\r\n\r\n@app.route('/login',methods = ['POST', 'GET'])\r\ndef login():\r\n if request.method == 'POST':\r\n user = request.form['nm']\r\n return redirect(url_for('success',name = user))\r\n else:\r\n user = request.args.get('nm')\r\n return redirect(url_for('success',name = user))\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)","repo_name":"Mirakklian/DEVSOC-Hackathon2019","sub_path":"local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34104670681","text":"# https://codeforces.com/problemset/problem/617/B\n# 1300\n\nn = int(input())\npieces = [int(t) for t in input().split()]\n\nnutcount = 0\ncurways = 1\nways = []\nfor i, piece in enumerate(pieces):\n if piece: # nut\n ways.append(curways)\n curways = 1\n nutcount += 1\n else:\n if nutcount:\n curways += 1\n\nresult = 1\nfor w in ways:\n result *= w\nif not ways:\n result = 0\nprint(result)\n","repo_name":"wookiekim/CodingPractice","sub_path":"codeforces/617B.py","file_name":"617B.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32094716059","text":"# -*- coding: utf-8 -*-\n\n\nfrom .models import *\nfrom .model_lookup import lookup_dict, model_aliases\nfrom ariadne import convert_kwargs_to_snake_case\nfrom datetime import datetime\nfrom sqlalchemy import func, text\n\ndef convert_date_timestamp(date):\n try:\n return datetime.strptime(date, '%d-%m-%Y %H:%M:%S').timestamp()\n except ValueError:\n return datetime.strptime(date, '%d-%m-%Y').timestamp()\n except:\n raise 
ValueError(\"Invalid date format, should be 'dd-mm-yyyy' or 'dd-mm-yyyy hh:mm:ss'\")\n\ndef selections(info):\n primary_selections = [x for x in info.field_nodes[0].selection_set.selections if x.name.value == info.field_name]\n if primary_selections:\n primary_selections = primary_selections[0]\n fields = [x.name.value for x in primary_selections.selection_set.selections]\n subfields = {x.name.value : [y.name.value for y in x.selection_set.selections] for x in primary_selections.selection_set.selections if x.selection_set}\n return fields, subfields\n else:\n return [], {}\n \n\ndef select_fields(info, results):\n def string_fields_to_sqlalchemy(fields, kind):\n models = lookup_dict[kind]\n fields_dict = {\n models['Main'] : [field for field in fields if hasattr(models['Main'], field)],\n models['Detail'] : [field for field in fields if hasattr(models['Detail'], field)]\n }\n\n all_sqlalchemy_fields = []\n fields_seen = []\n for table, table_fields in fields_dict.items():\n for field in table_fields:\n if field not in fields_seen:\n fields_seen.append(field)\n all_sqlalchemy_fields += [getattr(table, field)]\n \n return all_sqlalchemy_fields\n\n\n fields, subfields = selections(info)\n\n # if not fields, return none because its a total count request\n if fields:\n\n kind = info.field_name if not info.field_name.endswith('s') else info.field_name[:-1]\n if subfields:\n raise Exception('Subfields not supported.')\n for aliased_kind, fields in subfields.items(): \n subkind = model_aliases.get(aliased_kind) or aliased_kind\n ids = result.with_entities(text(f'sun_{subkind}_id')).subquery()\n SubModel = lookup_dict[subkind]['Main']\n SubDetail = lookup_dict[subkind]['Detail']\n subresult = SubModel.query.filter(SubModel.sun_unique_id.in_(ids))\n subresult = subresult.with_entities(*string_fields_to_sqlalchemy(fields, kind))\n\n\n results = results.with_entities(*string_fields_to_sqlalchemy(fields, kind))\n\n\n # convert list of sqlalchemy tuples to list of dicts\n\n result_dict_list = []\n for result in results.all():\n result_dict = {}\n for field in fields:\n result_dict[field] = result[field]\n result_dict_list += [result_dict]\n\n return result_dict_list\n\n\ndef payload(results, info, additional_data = None, errors = None):\n kind = info.field_name\n return_list = kind.endswith('s')\n\n if errors:\n return {\"success\": False, \"errors\": errors}\n\n result = select_fields(info, results)\n return_dict = {\"success\": True}\n\n if additional_data:\n return_dict.update(additional_data)\n\n # If no result, its a total count request\n if result:\n result = result if return_list else result[0]\n return_dict.update({kind : result})\n \n return return_dict\n\n@convert_kwargs_to_snake_case\ndef resolve_get(obj, info, **kwargs):\n kind = info.field_name\n Model = lookup_dict[kind]['Main']\n\n by_id = kwargs.get('by_id')\n reddit_id = kwargs.get('reddit_id')\n errors = []\n\n if by_id and reddit_id:\n errors += [\"Cannot specify both by_id and reddit_id\"]\n\n if by_id:\n result = Model.query.filter_by(sun_unique_id=by_id)\n if not result.count():\n errors += [f\"No {kind}s found with Sun id {by_id}\"]\n if reddit_id:\n result = Model.query.filter_by(reddit_post_id=reddit_id)\n if not result.count():\n errors += [f\"No {kind}s found with reddit_id {reddit_id}\"]\n\n return payload(result, info, errors = errors)\n\n@convert_kwargs_to_snake_case\ndef resolve_get_all(obj, info, **kwargs):\n # determine the singular form of the info.field_name\n # only if it is plural\n\n kind = info.field_name[:-1]\n\n 
Model = lookup_dict[kind]['Main']\n Detail = lookup_dict[kind]['Detail']\n\n posted_before = kwargs.get('posted_before')\n posted_after = kwargs.get('posted_after')\n updated_before = kwargs.get('updated_before')\n updated_after = kwargs.get('updated_after')\n order_by = kwargs.get('order_by')\n reddit_ids = kwargs.get('reddit_ids')\n sun_subreddit_id = kwargs.get('sun_subreddit_id')\n reddit_subreddit_id = kwargs.get('reddit_subreddit_id')\n sun_account_id = kwargs.get('sun_account_id')\n limit = kwargs.get('limit')\n offset = kwargs.get('offset')\n\n # only look at most recent version of posts\n # potentially later on it may be necessary to filter for all post versions\n\n results = Model.query.join(Detail).filter(Detail.is_most_recent_version)\n\n if reddit_ids:\n results = results.filter(Model.reddit_post_id.in_(reddit_ids))\n\n if sun_subreddit_id:\n results = results.filter(Model.sun_subreddit_id == sun_subreddit_id)\n\n if sun_account_id:\n results = results.filter(Model.sun_account_id == sun_account_id)\n\n if updated_before:\n updated_before = int(updated_before)\n results = results.filter(Detail.sun_created_at_epoch < updated_before)\n \n if reddit_subreddit_id:\n results = results.filter(Model.reddit_subreddit_id == reddit_subreddit_id)\n\n if updated_after:\n updated_after = int(updated_after)\n results = results.filter(Detail.sun_created_at_epoch > updated_after)\n\n if posted_before:\n posted_before = int(posted_before)\n results = results.filter(Model.sun_created_at_epoch < posted_before)\n\n if posted_after:\n posted_after = int(posted_after)\n results = results.filter(Model.sun_created_at_epoch > posted_after)\n\n\n if order_by: # Not sure this is necessary anymore\n order_by_to_cols = {\n 'sun_unique_id' : Model.sun_unique_id,\n 'most_recent_sun_version_id': Detail.sun_version_id,\n 'most_recent_sun_detail_id': Detail.sun_detail_id\n }\n\n for col, sort_by in order_by.items():\n col_to_order_by = order_by_to_cols.get(col)\n if sort_by == 'asc':\n results = results.order_by(col_to_order_by.asc())\n elif sort_by == 'desc':\n results = results.order_by(col_to_order_by.desc())\n\n additional_data = None\n\n if offset == 0:\n additional_data = {'total_count': results.count()}\n\n if offset:\n results = results.offset(offset)\n\n if limit:\n results = results.limit(limit)\n\n return payload(results, info, additional_data = additional_data)\n\n\n\n","repo_name":"jacob-bayer/SunbeltAPI","sub_path":"api/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":7078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25107252764","text":"from django.shortcuts import render, HttpResponse, get_object_or_404, HttpResponseRedirect, redirect\nfrom .models import Post\nfrom .forms import PostForm, CommentForm\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\n\n# Create your views here.\n\n\ndef post_index(request):\n post_list = Post.objects.all()\n query = request.GET.get('q')\n if query:\n post_list = post_list.filter(\n Q(title__icontains=query) |\n Q(question_Content__icontains=query) |\n Q(user__first_name__icontains=query) |\n Q(user__last_name__icontains=query)\n ).distinct()\n\n paginator = Paginator(post_list, 5) # Show 5 contacts per page\n\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n posts = 
paginator.page(1)\n except EmptyPage:\n\n # If page is out of range (e.g. 9999), deliver last page of results.\n posts = paginator.page(paginator.num_pages)\n return render(request, 'posts/index.html', {'posts': posts})\n\n\ndef post_detail(request, id):\n post = get_object_or_404(Post, id=id)\n\n form = CommentForm(request.POST or None)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return HttpResponseRedirect(post.get_absolute_url())\n\n context = {\n 'post': post,\n 'form': form,\n }\n return render(request, 'posts/detail.html', context)\n\n\ndef post_create(request):\n\n form = PostForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n post = form.save(commit=False)\n post.user = request.user\n post.save()\n messages.success(request, 'Sorunuz başarılı bir şekilde oluşturuldu!')\n return HttpResponseRedirect(post.get_absolute_url())\n\n context = {\n 'form': form,\n }\n return render(request, 'posts/form.html', context)\n\n\ndef post_update(request, id):\n post = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, request.FILES or None, instance=post)\n if form.is_valid():\n form.save()\n messages.success(request, 'Sorunuz başarılı bir şekilde güncellendi!')\n return HttpResponseRedirect(post.get_absolute_url())\n context = {\n 'form': form,\n }\n return render(request, 'posts/form.html', context)\n\n\ndef post_delete(request, id):\n post = get_object_or_404(Post, id=id)\n post.delete()\n return redirect('post:index')\n\n\ndef post(request):\n return HttpResponse('Burası post')\n","repo_name":"haticeufacik/DatabaseManagement","sub_path":"Veritabani/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"30631167759","text":"#!/bin/python3\r\n\r\nimport sys\r\n\r\n\r\narr = []\r\nfor arr_i in range(6):\r\n arr_t = [int(arr_temp) for arr_temp in input().strip().split(' ')]\r\n arr.append(arr_t)\r\n\r\n# print (arr)\r\nmaxSum = -sys.maxsize # maximum long in python\r\nfor i in range(len(arr) - 2):\r\n for j in range (len(arr[i]) - 2):\r\n # print (\"cur indices: i:%d j: %d\" % (i, j))\r\n # print (arr[i])\r\n #with slices, i:j j non inclusive\r\n s = sum(arr[i][j:j+3]) + arr[i+1][j+1] + sum(arr[i+2][j:j+3])\r\n # Ugly - many for loops. 
Can use sums of slices instead\r\n        # for hRow in range(3):\r\n        #     for hCol in range(3):\r\n        #         if hRow == i+1 and (hCol == j or hCol == j + 2):\r\n        #             continue\r\n        #         else:\r\n        #             # print (\"cur hterm: row:%d col: %d\" % (hRow, hCol))\r\n        #             hsum += arr[i + hRow][j + hCol] \r\n        # print (s)\r\n        maxSum = max(s, maxSum)\r\n\r\nprint (maxSum)\r\n","repo_name":"alexnguyennn/prac","sub_path":"other/2darrays.py","file_name":"2darrays.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33703432971","text":"#-*- coding:utf-8 -*-\n#use:syh\n#1. Create a wrapper for the GET method\n#2. Send the GET request via requests\n#3. Get the response content\n#4. Store the content in a dict\n#5. Return the dict\nimport requests\nfrom utils.LogsUtils import my_log\nclass Request:\n    def __init__(self):\n        self.log = my_log(\"Requests\")\n\n    def requests_api(self,url,params=None,json=None,headers=None,method=\"get\"):\n        if method == \"get\":\n            self.log.debug(\"发送get请求\")\n            r = requests.get(url, params=params, json=json, headers=headers)\n        elif method == \"post\":\n            self.log.debug(\"发送post请求\")\n            r = requests.post(url, json=json, headers=headers)\n        code = r.status_code\n        try:\n            body = r.json()\n        except Exception as e:\n            # fall back to the raw response text (r.text is a property)\n            body = r.text\n        res = dict()\n        res[\"code\"] = code\n        res[\"body\"] = body\n\n        return res\n    def get(self,url,params=None,**kwargs):\n        return self.requests_api(url,method=\"get\",params=params,**kwargs)\n    def post(self,url, json=None,**kwargs):\n        return self.requests_api(url,method=\"post\", json=json,**kwargs)","repo_name":"syhcool/ApiTest","sub_path":"utils/RequestsUtils.py","file_name":"RequestsUtils.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"16844823608","text":"\"\"\"\r\nScript to calculate distance-to-default for all stocks in our database\r\nfrom 2000 to 2012\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import norm\r\nfrom db_interactions_for_DD import connectZenithDb, getMarketValues, \\\r\n    getDebt, getRf\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndef calcVolatility(x): \r\n    return np.std(np.log(np.divide(x[1:],x[:-1])))*np.sqrt(252)\r\n\r\ndef BlackScholesCallValue(S,X,r,sigma,T):\r\n    d1=(np.log(S/X)+(r+0.5*sigma**2)*T)/(sigma*np.sqrt(T));\r\n    d2=d1-sigma*np.sqrt(T);\r\n    delta=norm.cdf(d1)\r\n    CP=S*delta-X*np.exp(-r*T)*norm.cdf(d2);\r\n    return [CP, delta] \r\n\r\ndef BlackScholesZero(C,X,r,sigma,T):\r\n    # solves Black Scholes Call option formula for S using Newton's method\r\n    if X==0:\r\n        return C\r\n\r\n    UpperS=C+X*np.exp(-r*T);\r\n    if sigma==0 or X==0:\r\n        return UpperS\r\n\r\n    LowerS=C\r\n    tol=0.0001*C\r\n    delta=C; itercount=0; x0=LowerS; x1=UpperS\r\n    while abs(delta)>tol and itercount<=100:\r\n        itercount=itercount+1\r\n        [bs0,D0]=BlackScholesCallValue(x0, X, r, sigma, T)\r\n        y0=bs0-C\r\n        [bs1,D1]=BlackScholesCallValue(x1, X, r, sigma, T)\r\n        y1=bs1-C\r\n\r\n        if D0<0.01:\r\n            xguess=(x0*y1-x1*y0)/(y1-y0);\r\n        else:\r\n            xguess = x0 - y0/D0;\r\n\r\n        if xguess>UpperS:\r\n            xguess=UpperS\r\n        elif xguesstol:\r\n        return np.nan\r\n    else:\r\n        return xguess\r\n    \r\ndef dailyDLIcalcs(Ve,X,r,sigma_a):\r\n    Va=[]\r\n    for v in Ve:\r\n        Va.append(BlackScholesZero(v,X,r,sigma_a,1.0))\r\n    Va=np.array(Va)\r\n    return [Va, calcVolatility(Va)]\r\n\r\ndef calc_DD(rf, X, Ve, T):\r\n    sigma_a=calcVolatility(Ve)\r\n    sigma_previous=sigma_a\r\n\r\n    delta=1.0\r\n    itercount=0\r\n\r\n    while not np.isnan(delta) and delta>0.0001 and itercount<100:\r\n        itercount=itercount+1\r\n        [Va, 
sigma_a]=dailyDLIcalcs(Ve, X, rf, sigma_a)\r\n delta=abs(sigma_a-sigma_previous)\r\n sigma_previous=sigma_a\r\n\r\n # compute drift term\r\n mu=np.mean(np.log(np.divide(Va[1:],Va[:-1])))\r\n\r\n if X==0: # no probability of default\r\n DD=100\r\n elif sigma_a==0: # if stock is not traded\r\n DD=np.nan\r\n else:\r\n DD=(np.log(Va[-1]/X)+(mu-(0.5*sigma_a**2))*T) / (sigma_a*np.sqrt(T))\r\n\r\n return [DD, itercount, delta]\r\n \r\n#establish a connection to the Zenith database\r\ncnxn=connectZenithDb()\r\n\r\nT=1\r\nfor yr in range(2000,2013):\r\n res=[]\r\n # get the risk-free rate for the year\r\n df=getRf(cnxn, yr,12)\r\n rf=df.BAB_rate.values[0]/100\r\n # get a list of stocks and each stock's X value (precalculated)\r\n all_stocks_X=getDebt(cnxn, yr)\r\n for r in range(len(all_stocks_X.index)):\r\n stockid=all_stocks_X.iloc[r,0]\r\n X=all_stocks_X.iloc[r,1]\r\n # get the daily market values\r\n Ve=getMarketValues(cnxn,stockid,yr)\r\n if len(Ve.index)>0:\r\n Ve=Ve['Ve'].values*1000000\r\n z=calc_DD(rf, X, Ve, T)\r\n z.insert(0,stockid)\r\n z.insert(0,yr)\r\n res.append(z)\r\n\r\ndf=pd.DataFrame(np.array(res),columns=['Year','StockID','DD','itercount','delta'])\r\ndf=df.astype({'Year':'int32', 'StockID':'int32', 'itercount':'int32'})\r\ndf.to_csv('DD.csv')\r\nprint(df.head())","repo_name":"stevetulig/distance-to-default-calculation-in-python","sub_path":"distance_to_default.py","file_name":"distance_to_default.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23127286404","text":"import pytest\n\nfrom overload.type.type import _TypeHandler\n\nfrom .data import (\n OUT_UP_TYPES_AND_EXPECTATIONS,\n CONVERTING_ARGS,\n CONVERTING_KWARGS,\n)\n\nset_custom_type_handler = _TypeHandler()\n\n\n@pytest.mark.type\n@pytest.mark.parametrize(\n 'deep,input_type,expected',\n OUT_UP_TYPES_AND_EXPECTATIONS,\n)\ndef test_out_up_types(deep, input_type, expected):\n handler = _TypeHandler()\n type_ = handler.out_up_types(input_type)\n assert type_ == expected\n\n\n@pytest.mark.type\n@pytest.mark.parametrize(\n 'args,result',\n CONVERTING_ARGS,\n)\ndef test_converting_args(args, result):\n handler = _TypeHandler()\n assert result == handler.converting_args(args)\n\n\n@pytest.mark.type\n@pytest.mark.parametrize(\n 'kwargs,result',\n CONVERTING_KWARGS,\n)\ndef test_converting_kwargs(kwargs, result):\n handler = _TypeHandler()\n assert result == handler.converting_kwargs(kwargs)\n","repo_name":"diarts/overload","sub_path":"tests/test_type/test_type.py","file_name":"test_type.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43047296012","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup as bsup\nfrom datetime import date, timedelta\n\ndef check_symbol_in_web(symbol):\n webpage = \"http://nepalstockinfo.com/todaysprice/\"\n html = urlopen(webpage)\n bs_obj = bsup(html)\n try:\n data = bs_obj.find(\"td\", text=symbol).parent\n except AttributeError:\n print(\"Sorry, this company symbol doesn't exist in nepalstockinfo db\")\n\n\n\ndef check_file_exist(symbol):\n try:\n f = open(symbol+\".txt\")\n except IOError:\n print(\"File not found\")\n while 1:\n print(\"Do you want to scrape the data, it may take 1-2 hrs? 
\\n\")\n print(\"Choice: yes, no\")\n x = input()\n value = {1:\"yes\", 2:\"no\"}\n if str(x) in value.values():\n return str(x)\n break\n else:\n continue\n \n return 2\n\ndef scrape(symbol, sdate):\n edate = date.today()\n delta = edate - sdate # as timedelta\n file1 = open(symbol+\".txt\", \"a\")\n #file1.write(\"Date,Open,High,Low,Close,Volume\\n\")\n\n for i in range(delta.days + 1):\n day = sdate + timedelta(days=i)\n #print(day)\n webpage = \"http://nepalstockinfo.com/todaysprice/\"+str(day)\n #print(webpage)\n html = urlopen(webpage)\n bs_obj = bsup(html)\n data = bs_obj.find(\"td\", text=symbol).parent\n try:\n data.a[\"data-high\"]\n except KeyError:\n continue\n data_high = data.a[\"data-high\"]\n data_low = data.a[\"data-low\"]\n data_open = data.a[\"data-open\"]\n data_prev_closing = data.a[\"data-prev_closing\"]\n date_n = str(day)\n data_volume = data.a[\"data-volumn\"]\n file1.write(date_n+\",\"+data_open+\",\"+data_high+\",\"+data_low+\",\"+data_prev_closing+\",\"+data_volume+\"\\n\")\n print(date_n, data_high, data_low, data_open, data_prev_closing)\n file1.close()\n\n","repo_name":"joyboy420/stock_price_prediction_Nepal","sub_path":"check_and_scrape.py","file_name":"check_and_scrape.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12606311663","text":"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('grades', '0009_auto_20170111_1507'),\n ]\n\n operations = [\n migrations.AlterIndexTogether(\n name='persistentsubsectiongrade',\n index_together={('modified', 'course_id', 'usage_key'), ('first_attempted', 'course_id', 'user_id')},\n ),\n ]\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/grades/migrations/0010_auto_20170112_1156.py","file_name":"0010_auto_20170112_1156.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"72486906962","text":"import math\n\nclass sparselookup:\n\n \"\"\"\n Very naive min-thing-in-range calculator thing made using a \"sparse table\"\n First off I hash things instead of using a proper array, and also apparently\n there's a better(?) solution using segment trees? 
Anyways, I should rewrite\n this at some point\n \"\"\"\n\n def __init__(self, arr):\n\n self.arr = arr\n self.minmap = {}\n\n # Sparse-table-ish stuff, I kinda use a map cuz I'm lazy :|\n for index, element in enumerate(self.arr):\n self.minmap[(index, 1)] = element\n\n # Update by going from 2^n\n rangelen = 2\n while rangelen < 2 * len(self.arr):\n\n for index, element in enumerate(self.arr):\n lastlen = int(rangelen / 2)\n self.minmap[(index, rangelen)] = min(self.minmap.get((index, lastlen), math.inf),\n self.minmap.get((index + lastlen, lastlen), math.inf))\n\n rangelen *= 2\n\n def minin(self, left, right):\n \"\"\"\n Return minimum number in the range specified by [left, right)\n \"\"\"\n right += 1\n diff = right - left\n leftlen = math.pow(2, math.floor(math.log(diff, 2)))\n leftoff = right - (left + leftlen)\n return min(self.minmap[(left, leftlen)], self.minmap[(left + leftoff, leftlen)])\n\nif __name__ == \"__main__\":\n input()\n numlist = [int(i) for i in input().split(\" \")]\n looker = sparselookup(numlist)\n numqueries = int(input())\n for r in range(numqueries):\n lookrange = (int(r) for r in input().split(\" \"))\n print(looker.minin(*lookrange))\n","repo_name":"anlsh/euler","sub_path":"rmqsq.py","file_name":"rmqsq.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29037020037","text":"import os\n\nfrom ads_pytorch.hash_embedding.optim import create_optimizer\nfrom ads_pytorch.hash_embedding.hash_embedding import (\n HashEmbedding,\n create_hash_table,\n create_item\n)\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nimport torch\nimport torch.nn\nimport numpy as np\nimport pytest\nimport math\n\n\ndef EmbeddingFTRLProximalLinearOptimizer(params, **kwargs):\n return create_optimizer(params, \"ftrllinear\", **kwargs)\n\n\ndef cmp2float(x: float, y: float, eps: float = 2e-3):\n return abs(x - y) <= eps\n\n\n@pytest.mark.parametrize(\n \"hash_type\",\n [\n \"ftrllinear\"\n ]\n)\ndef test_massive_expiration(hash_type):\n hash_table = HashEmbedding(create_hash_table(hash_type, 1))\n optim = EmbeddingFTRLProximalLinearOptimizer(hash_table.parameters(), lr=1, L1Global=0.0, L1LinearIncremental=1.0, L1SqrtIncremental=1.0, \n L2Global=0.0, L2LinearIncremental=1.0, L2SqrtIncremental=1.0, eps=1e-8, ttl=10)\n for u in optim.updaters:\n u.update_counter(1000)\n assert u.ttl_counter == 1000\n\n for i in range(100):\n item = create_item(hash_type, 1)\n item.ttl_counter = 0\n hash_table.insert_item(i, item)\n\n for i in range(100):\n item = create_item(hash_type, 1)\n item.ttl_counter = 999\n hash_table.insert_item(i + 100, item)\n\n data = torch.LongTensor(list(range(200)))\n data_len = torch.IntTensor([1] * 200)\n\n assert hash_table.size() == 200\n\n res = hash_table(data, data_len)\n res.sum().backward()\n optim.step()\n\n assert hash_table.size() == 200\n\n\n########################################################\n# EQUATIONS TESTS #\n########################################################\n\n# Here we test two use-cases:\n# 1. split jobs between threads\n# 2. 
testing proper vectorization code\n\n\ndef update_equations_impl(hash_type, num_threads, max_size_per_update_job, dim, weight_decay):\n items_count = 1000\n epoch_count = 15\n\n mul_tensor = torch.rand(1000, dim)\n mul_tensor.requires_grad = False\n\n hash_table = HashEmbedding(create_hash_table(hash_type, dim))\n\n lr = 0.001\n eps = 1e-8\n\n optim = EmbeddingFTRLProximalLinearOptimizer(hash_table.parameters(), lr=lr, L1Global=0.0, L1LinearIncremental=0.0, L1SqrtIncremental=0.0, \n L2Global=weight_decay, L2LinearIncremental=0.0, L2SqrtIncremental=0.0, eps=eps, ttl=100)\n optim.num_threads = num_threads\n\n for i in range(items_count):\n # This will generate some items\n item = hash_table.lookup_item(i)\n # check that ftrllinear items have trivial initialization\n assert torch.allclose(item.w, torch.zeros(dim))\n\n targets = torch.nn.init.normal_(torch.zeros(items_count))\n loss = torch.nn.MSELoss(reduction=\"mean\")\n\n # Usual pytorch code\n usual_tensor = torch.zeros(items_count, dim)\n with torch.no_grad():\n for i in range(items_count):\n usual_tensor[i, :] = hash_table.lookup_item(i).w\n usual_tensor.requires_grad = True\n param = torch.nn.Parameter(usual_tensor)\n optimizer = torch.optim.Adagrad(\n [param],\n lr=lr,\n weight_decay=weight_decay\n )\n\n usual_optimize_path = []\n for _ in range(epoch_count):\n param.grad = None\n out = (param * mul_tensor).sum(dim=1)\n loss_val = loss(out, targets)\n usual_optimize_path.append(float(loss_val))\n loss_val.backward()\n optimizer.step()\n\n # Hash embedding code\n hash_optimizer = EmbeddingFTRLProximalLinearOptimizer(hash_table.parameters(), lr=lr, L1Global=0.0, L1LinearIncremental=0.0, L1SqrtIncremental=0.0, \n L2Global=weight_decay, L2LinearIncremental=0.0, L2SqrtIncremental=0.0, eps=eps, ttl=100)\n hash_optimizer.num_threads = num_threads\n data = torch.LongTensor(list(range(items_count)))\n data_len = torch.IntTensor([1] * items_count)\n\n # print(\"HashTable optimization\")\n hash_optimize_path = []\n for _ in range(epoch_count):\n # hash embedding step\n tensor = hash_table(data, data_len)\n out = (tensor * mul_tensor).sum(dim=1)\n loss_val = loss(out, targets)\n hash_optimize_path.append(float(loss_val))\n loss_val.backward()\n hash_optimizer.step(max_size_per_update_job=max_size_per_update_job)\n\n for x, y in zip(usual_optimize_path, hash_optimize_path):\n assert cmp2float(x, y)\n\n\n# need comprehensive test across all dims because of manual vectorization\n@pytest.mark.parametrize(\n \"hash_type\",\n [\n \"ftrllinear\"\n ]\n)\n@pytest.mark.parametrize(\"dim\", [1])\n@pytest.mark.parametrize(\"l2\", [0, 0.001])\ndef test_different_dimensions(hash_type, dim, l2):\n update_equations_impl(\n hash_type=hash_type,\n num_threads=3,\n max_size_per_update_job=200000,\n dim=dim,\n weight_decay=l2\n )\n\ndef FTRLProximalLinearEmbeddingHashTable(dim):\n return create_hash_table(\"ftrllinear\", dim)\n\n\n# нужно ли делать такой тест, если размерность только 1 может быть?\n@pytest.mark.parametrize(\n \"hash_cls\",\n [\n FTRLProximalLinearEmbeddingHashTable\n ],\n ids=[\"RMSProp\"]\n)\ndef test_state_dict(hash_cls):\n ttl = 2\n hash_tables = [HashEmbedding(hash_cls(1))]\n optim = EmbeddingFTRLProximalLinearOptimizer(\n [\n {\"params\": hash_table.parameters(), \"lr\": 1.0 + i * 10, \"L2Global\": 0.4 + i * 0.05}\n for i, hash_table in enumerate(hash_tables)\n ],\n lr=1, eps=1e-8, ttl=ttl\n )\n for u in optim.updaters:\n assert u.ttl_counter == 0\n\n data = torch.LongTensor([1])\n data_len = torch.IntTensor([1])\n\n for i, hash_table in 
enumerate(hash_tables):\n res = hash_table(data, data_len)\n res.sum().backward()\n optim.step()\n optim.update_counter(1)\n for u in optim.updaters:\n assert u.ttl_counter == 1\n\n state = optim.state_dict()\n optim2 = EmbeddingFTRLProximalLinearOptimizer(\n [\n {\"params\": hash_table.parameters(), \"lr\": 5, \"L2Global\": 0.3}\n for i, hash_table in enumerate(hash_tables)\n ],\n lr=5, eps=1.0, ttl=100500\n )\n for u in optim2.updaters:\n assert u.ttl_counter == 0\n optim2.load_state_dict(state)\n for i, group in enumerate(optim2.param_groups):\n assert group[\"lr\"] == 1.0 + i * 10\n assert group[\"L2Global\"] == 0.4 + i * 0.05\n assert group[\"eps\"] == 1e-8\n assert group[\"ttl\"] == 2\n assert group[\"ttl_counter\"] == 1\n\n for u in optim2.updaters:\n assert u.ttl_counter == 1\n\n@pytest.mark.parametrize('max_size_per_update_job', [-1, 200, 1000000000])\n@pytest.mark.parametrize('num_threads', [1, 3, 5, 7])\ndef test_update_equations_with_different_item_occurencies(num_threads, max_size_per_update_job):\n torch.manual_seed(12345)\n dim = 1\n items_count = 1000\n first_part_epoch_count = 10\n second_part_epoch_count = 15\n hash_table = HashEmbedding(create_hash_table(\"ftrllinear\", dim))\n optim = EmbeddingFTRLProximalLinearOptimizer(hash_table.parameters(), lr=1, L1Global=0.0, L1LinearIncremental=0.0, L1SqrtIncremental=0.0, \n L2Global=0.0, L2LinearIncremental=0.0, L2SqrtIncremental=0.0, eps=1e-8, ttl=100)\n optim.num_threads = num_threads\n\n for i in range(items_count):\n # This will generate some items\n item = hash_table.lookup_item(i)\n # check that ftrllinear items have trivial initialization\n assert torch.allclose(item.w, torch.zeros(dim))\n\n targets = torch.nn.init.normal_(torch.zeros(items_count // 2))\n loss = torch.nn.MSELoss()\n\n # Usual pytorch code\n usual_params = []\n for chunk_id, epoch_count in enumerate([first_part_epoch_count, second_part_epoch_count]):\n usual_tensor = torch.zeros(items_count // 2, dim)\n with torch.no_grad():\n for i in range(items_count // 2):\n usual_tensor[i, :] = hash_table.lookup_item(i + chunk_id * (items_count // 2)).w\n usual_tensor.requires_grad = True\n param = torch.nn.Parameter(usual_tensor)\n optimizer = optimizer = torch.optim.Adagrad(\n [param],\n lr=1,\n weight_decay=0.0\n )\n\n for _ in range(epoch_count):\n param.grad = None\n loss(param.sum(dim=1), targets).backward()\n optimizer.step()\n\n usual_params.append((param, optimizer))\n\n usual_tensor = torch.cat([x[0].clone() for x in usual_params], dim=0)\n\n # Hash embedding code\n\n hash_optimizer = EmbeddingFTRLProximalLinearOptimizer([hash_table.parameter_with_hash_table], lr=1, L1Global=0.0, L1LinearIncremental=0.0, L1SqrtIncremental=0.0, \n L2Global=0.0, L2LinearIncremental=0.0, L2SqrtIncremental=0.0, eps=1e-8, ttl=100)\n\n data = torch.LongTensor(list(range(items_count // 2)))\n data_len = torch.IntTensor([1] * (items_count // 2))\n\n for _ in range(first_part_epoch_count):\n # hash embedding step\n tensor = hash_table(data, data_len)\n loss(tensor.sum(dim=1), targets).backward()\n hash_optimizer.step()\n\n data = torch.LongTensor(list(range(items_count // 2, items_count)))\n data_len = torch.IntTensor([1] * (items_count // 2))\n\n for _ in range(second_part_epoch_count):\n # hash embedding step\n tensor = hash_table(data, data_len)\n loss(tensor.sum(dim=1), targets).backward()\n hash_optimizer.step(max_size_per_update_job=max_size_per_update_job)\n\n # Check that items are equal\n data = torch.LongTensor(list(range(items_count)))\n data_len = torch.IntTensor([1] * 
(items_count))\n hash_table_tensor = hash_table(data, data_len)\n\n assert torch.allclose(hash_table_tensor, usual_tensor, rtol=1e-3, atol=1e-3)\n\n # NOW, when we have optimized several parameters, we start to optimize them jointly and\n # see whether pytorch correctly incorporate different step size counts for parameters\n\n epoch_count = 5\n\n data = torch.LongTensor(sum([[i, i + 500] for i in range(items_count // 2)], []))\n data_len = torch.IntTensor([2] * (items_count // 2))\n\n for _ in range(epoch_count):\n opt1, opt2 = [x[1] for x in usual_params]\n param1, param2 = [x[0] for x in usual_params]\n param1.grad = None\n param2.grad = None\n\n loss(param1.sum(dim=1) + param2.sum(dim=1), targets).backward()\n opt1.step()\n opt2.step()\n\n tensor = hash_table(data, data_len)\n loss(tensor.sum(dim=1), targets).backward()\n hash_optimizer.step()\n\n usual_tensor = torch.cat([x[0].clone() for x in usual_params], dim=0)\n hash_table_tensor = hash_table(\n torch.LongTensor(list(range(items_count))),\n torch.IntTensor([1] * (items_count))\n )\n\n assert torch.allclose(hash_table_tensor, usual_tensor, rtol=1e-3, atol=1e-3)\n\n\n@pytest.mark.parametrize(\n \"hash_type\",\n [\"ftrllinear\"]\n)\n@pytest.mark.parametrize(\"l2\", [0.1, 10, 100000])\ndef test_l2_regularization(hash_type, l2):\n torch.manual_seed(12345)\n dim = 1\n items_count = 1000\n hash_table_noreg = HashEmbedding(create_hash_table(hash_type, dim))\n hash_table = HashEmbedding(create_hash_table(hash_type, dim))\n\n for i in range(items_count):\n # ensure same initialization\n item = hash_table.lookup_item(i)\n hash_table_noreg.insert_item(i, item)\n\n targets = torch.nn.init.normal_(torch.zeros(items_count))\n loss = torch.nn.MSELoss()\n # Hash embedding code\n\n hash_optimizer = EmbeddingFTRLProximalLinearOptimizer([hash_table.parameter_with_hash_table], L2Global=l2)\n hash_optimizer_noreg = EmbeddingFTRLProximalLinearOptimizer([hash_table_noreg.parameter_with_hash_table])\n data = torch.LongTensor(list(range(items_count)))\n data_len = torch.IntTensor([1] * items_count)\n\n # hash embedding step\n tensor = hash_table(data, data_len)\n loss(tensor.sum(dim=1), targets).backward()\n hash_optimizer.step()\n\n tensor = hash_table_noreg(data, data_len)\n loss(tensor.sum(dim=1), targets).backward()\n hash_optimizer_noreg.step()\n\n # Check that items are equal\n hash_table_tensor = hash_table(data, data_len).abs()\n hash_table_noreg_tensor = hash_table_noreg(data, data_len).abs()\n assert torch.all(torch.abs(hash_table_noreg_tensor - hash_table_tensor) > 0)","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"ads/tests/hash_embedding/test_ftrllinear_updater.py","file_name":"test_ftrllinear_updater.py","file_ext":"py","file_size_in_byte":12303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26749605356","text":"'''\n This file is part of pyca.\n\n pyca is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n pyca is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with pyca. 
If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport sys\nimport time\nimport random\nimport traceback\nimport datetime\nimport string\n\nfrom __init__ import GLOBAL_VARS\nimport __main__\nimport config\nimport clientOrderIdObj\nimport geminiAPIHelper #todo: should be in the other class\nimport geminiTradeDCAPostOnly\n\n\nTRADE_SIDE='buy'\n\n\n\ndef getGeminiBuyDCAPostOnly():\n    configFile = config.getConfig()\n\n    strEnabled = configFile.get('GeminiBuyDCAPostOnly', 'Enabled')\n    if(strEnabled==\"False\"):\n        Enabled = False\n    elif(strEnabled==\"True\"):\n        Enabled = True\n    else:\n        raise ValueError('invalid value for GeminiBuyDCAPostOnly.Enabled')\n    \n    OrdersPerDay = configFile.get('GeminiBuyDCAPostOnly', 'OrdersPerDay')\n    OrderQuantityPerDayInFiat = configFile.get('GeminiBuyDCAPostOnly', 'OrderQuantityPerDayInFiat')\n    TradeSymbol = configFile.get('GeminiBuyDCAPostOnly', 'TradeSymbol')\n    HardMaximumCoinPrice = float(configFile.get('GeminiBuyDCAPostOnly', 'HardMaximumCoinPrice'))\n    NumberOfMinutesToConsiderOrderStale = float(configFile.get('GeminiBuyDCAPostOnly', 'NumberOfMinutesToConsiderOrderStale')) #note: when using sandbox mode, it's recommended to use \"0\" for this value\n    ChanceToProceedOnOrderPerTick = float(configFile.get('GeminiBuyDCAPostOnly', 'ChanceToProceedOnOrderPerTick'))\n    MaxDaysCatchup = float(configFile.get('GeminiBuyDCAPostOnly', 'MaxDaysCatchup')) #recommended to be at least 1.5 to catch up in case of maintenance windows up to 12 hours.\n    DesiredDiscount = float(configFile.get('GeminiBuyDCAPostOnly', 'DesiredDiscount'))\n    StartingProgressForFirstOrder = float(configFile.get('GeminiBuyDCAPostOnly', 'StartingProgressForFirstOrder'))\n\n    cfg = GeminiBuyDCAPostOnly(_Enabled=Enabled, _OrdersPerDay=OrdersPerDay, _OrderQuantityPerDayInFiat=OrderQuantityPerDayInFiat, _TradeSymbol=TradeSymbol, _HardMaximumCoinPrice=HardMaximumCoinPrice, _NumberOfMinutesToConsiderOrderStale=NumberOfMinutesToConsiderOrderStale, _ChanceToProceedOnOrderPerTick=ChanceToProceedOnOrderPerTick, _MaxDaysCatchup=MaxDaysCatchup, _DesiredDiscount=DesiredDiscount, _StartingProgressForFirstOrder=StartingProgressForFirstOrder)\n    return cfg\n\nclass GeminiBuyDCAPostOnly(geminiTradeDCAPostOnly.GeminiTradeDCAPostOnly):\n    def __init__(self, _Enabled, _OrdersPerDay, _OrderQuantityPerDayInFiat, _TradeSymbol, _HardMaximumCoinPrice, _NumberOfMinutesToConsiderOrderStale, _ChanceToProceedOnOrderPerTick, _MaxDaysCatchup, _DesiredDiscount, _StartingProgressForFirstOrder):\n        self.TradeSide = TRADE_SIDE\n        self.Enabled = bool(_Enabled)\n        self.OrdersPerDay = float(_OrdersPerDay)\n        self.OrderQuantityPerDayInFiat = float(_OrderQuantityPerDayInFiat)\n        self.TradeSymbol = str(_TradeSymbol)\n        self.NumberOfMinutesToConsiderOrderStale = float(_NumberOfMinutesToConsiderOrderStale)\n        self.MaxDaysCatchup = _MaxDaysCatchup #can purchase up to X times max per purchase if needed to \"catch up\" due to failed purchases, waiting for a better price, etc. (e.g. 2.0 = 200% max catchup single purchase)\n        self.ChanceToProceedOnOrderPerTick = _ChanceToProceedOnOrderPerTick #this value adds a random delay to purchases to mitigate exact timing prediction by an adversary\n        self.DesiredDiscount = _DesiredDiscount #uses a lower purchase price based on percent value. 
The more the discount, the less likely the purchase will go through soon (or at all).\n self.HardMaximumCoinPrice = float(_HardMaximumCoinPrice)\n self.ProcessActiveOrdersFrequencyPerDay = (24*(60/5)) #every 5 minutes\n self.StartingProgressForFirstOrder = _StartingProgressForFirstOrder #this value speeds up the first purchase after starting the program\n \n if((self.OrdersPerDay > 0) & (self.OrderQuantityPerDayInFiat > 0)):\n self.ProgressIncrementToOrderInFiatPerTick = (self.OrderQuantityPerDayInFiat/GLOBAL_VARS.TICKS_PER_DAY)\n self.CurrentProgressToOrderQuantityInFiat = (self.OrderQuantityPerDayInFiat/self.OrdersPerDay)*_StartingProgressForFirstOrder\n self.OrderQuantityInFiatPerOrder = round((self.OrderQuantityPerDayInFiat/self.OrdersPerDay), 2)\n self.OrderQuantityMaxInFiatPerOrder = round(self.MaxDaysCatchup * self.OrderQuantityInFiatPerOrder, 2)\n self.ProgressIncrementToProcessActiveOrdersPercentPerTick = (self.ProcessActiveOrdersFrequencyPerDay/GLOBAL_VARS.TICKS_PER_DAY)\n self.CurrentProgressToProcessActiveOrders = 0.995 #99.5%. process active orders pretty soon after starting\n else:\n self.ProgressIncrementToOrderInFiatPerTick = 0\n self.CurrentProgressToOrderQuantityInFiat = 0\n self.OrderQuantityInFiatPerOrder = 0\n self.OrderQuantityMaxInFiatPerOrder = 0\n self.ProgressIncrementToProcessActiveOrdersPercentPerTick = 0\n self.CurrentProgressToProcessActiveOrders = 0\n \n #input value checks\n if((self.OrdersPerDay < 0.0) | (self.OrdersPerDay > 7200.0)):\n raise ValueError('invalid value for GeminiBuyDCAPostOnly.OrdersPerDay')\n \n if((self.OrderQuantityPerDayInFiat < 0.00) | (self.OrderQuantityPerDayInFiat > 500.00)): #temporary maximum purchase per day in fiat of 500 fiat units (e.g. 500 USD)\n raise ValueError('invalid value for GeminiBuyDCAPostOnly.OrderQuantityPerDayInFiat')\n \n if(self.TradeSymbol != \"btcusd\"):\n raise ValueError('invalid value for GeminiBuyDCAPostOnly.TradeSymbol')\n \n if((self.MaxDaysCatchup < 1.0) | (self.MaxDaysCatchup > 20.0)):\n raise ValueError('invalid value for GeminiBuyDCAPostOnly.MaxDaysCatchup')\n \n if((self.ChanceToProceedOnOrderPerTick < 0.0001) | (self.ChanceToProceedOnOrderPerTick > 0.95)):\n raise ValueError('invalid value for GeminiBuyDCAPostOnly.ChanceToProceedOnOrderPerTick')\n \n if((self.DesiredDiscount < 0.0000) | (self.DesiredDiscount > 0.1000)): #maximum 10.0% discount\n raise ValueError('invalid value for GeminiBuyDCAPostOnly.DesiredDiscount')\n \n if((_StartingProgressForFirstOrder < 0.0) | (_StartingProgressForFirstOrder > (1*self.MaxDaysCatchup))):\n raise ValueError('invalid value for GeminiBuyDCAPostOnly parameter _StartingProgressForFirstOrder')\n \n\n\n def printMe(self):\n print(\"==ConfigGeminiBuyDCAPostOnly==\")\n print(\"Enabled:\" + str(self.Enabled))\n print(\"OrdersPerDay:\" + str(self.OrdersPerDay))\n print(\"OrderQuantityPerDayInFiat:\" + str(self.OrderQuantityPerDayInFiat))\n print(\"TradeSymbol:\" + self.TradeSymbol)\n print(\"MaxDaysCatchup:\" + str(self.MaxDaysCatchup))\n print(\"ChanceToProceedOnOrderPerTick:\" + str(self.ChanceToProceedOnOrderPerTick))\n print(\"DesiredDiscount:\" + str(self.DesiredDiscount))\n print(\"HardMaximumCoinPrice:\" + str(self.HardMaximumCoinPrice))\n print(\"NumberOfMinutesToConsiderOrderStale\" + str(self.NumberOfMinutesToConsiderOrderStale))\n \n print(\"ProgressIncrementToOrderInFiatPerTick:\" + str(self.ProgressIncrementToOrderInFiatPerTick))\n print(\"CurrentProgressToOrderQuantityInFiat:\" + str(self.CurrentProgressToOrderQuantityInFiat)) \n 
print(\"OrderQuantityInFiatPerOrder:\" + str(self.OrderQuantityInFiatPerOrder))\n print(\"OrderQuantityMaxInFiatPerOrder:\" + str(self.OrderQuantityMaxInFiatPerOrder))\n \n print(\"ProgressIncrementToProcessActiveOrdersPercentPerTick:\" + str(self.ProgressIncrementToProcessActiveOrdersPercentPerTick))\n print(\"CurrentProgressToProcessActiveOrders:\" + str(self.CurrentProgressToProcessActiveOrders))\n\n def printMinimal(self):\n print(\"CurrentProgressToOrderQuantityInFiat:\" + str(round(self.CurrentProgressToOrderQuantityInFiat,2)) + \"/\" + str(self.OrderQuantityInFiatPerOrder) + \" \" + str(round((self.CurrentProgressToOrderQuantityInFiat / self.OrderQuantityInFiatPerOrder) * 100, 3)) + \"%. Progress to process: \" + str(round(self.CurrentProgressToProcessActiveOrders*100.0,2))+\"%\" )\n \n def isEnabled(self):\n return self.Enabled\n \n \n","repo_name":"onyxcoyote/pyca","sub_path":"pyca/geminiBuyDCAPostOnly.py","file_name":"geminiBuyDCAPostOnly.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"72497840720","text":"import numpy\nfrom scipy.ndimage import convolve\nfrom skimage.exposure import rescale_intensity\n\nfrom aydin.features.groups.correlation import CorrelationFeatures\nfrom aydin.io.datasets import camera\n\n\ndef n(image):\n return rescale_intensity(\n image.astype(numpy.float32), in_range='image', out_range=(0, 1)\n )\n\n\ndef test_convolutional_feature_group():\n # get image:\n image = n(camera().astype(numpy.float32))\n\n # Instantiates convolutional features:\n ones = numpy.ones(shape=(3, 3))\n twos = 2 * numpy.ones(shape=(3, 3))\n convolutions = CorrelationFeatures(kernels=[ones, twos])\n assert convolutions.num_features(image.ndim) == 2\n\n # Check receptive field radius:\n assert convolutions.receptive_field_radius == 1\n\n # Set image:\n convolutions.prepare(image, [])\n\n # compute features and check their valididty:\n feature = numpy.empty_like(image)\n\n # Compute first convolution:\n convolutions.compute_feature(index=0, feature=feature)\n assert (feature == convolve(image, weights=ones)).all()\n\n # Compute second convolution:\n convolutions.compute_feature(index=1, feature=feature)\n assert (feature == convolve(image, weights=twos)).all()\n","repo_name":"royerlab/aydin","sub_path":"aydin/features/groups/test/test_correlation_feature_group.py","file_name":"test_correlation_feature_group.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"3"} +{"seq_id":"8431122476","text":"import datetime\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\n\nfrom api.routing import *\n\ndef index(request):\n return render(request, 'api/api.html')\n\n@csrf_exempt\ndef get_route_name(request, origin, destination):\n origin_id = get_system_id(origin)\n destination_id = get_system_id(destination)\n start = datetime.datetime.now()\n\n # Run algorithm\n # path = breadth_first_search(int(origin), int(destination), systems, max)\n path = dijkstra_search(int(origin_id), int(destination_id))\n\n # format path into deliminated string\n pathName = str(path).strip('[]').replace(', ', ';')\n jumps = len(path)\n end = datetime.datetime.now()\n 
return JSONResponse({'jumps': jumps, 'path': pathName, 'time': (end - start)})\n\n\n@csrf_exempt\ndef get_route_id(request, origin, destination):\n start = datetime.datetime.now()\n\n # Run algorithm\n # path = breadth_first_search(int(origin), int(destination), systems, max)\n path = dijkstra_search(int(origin), int(destination))\n\n # format path into deliminated string\n pathName = str(path).strip('[]').replace(', ', ';')\n jumps = len(path)\n end = datetime.datetime.now()\n return JSONResponse({'jumps': jumps, 'path': pathName, 'time': (end - start)})\n\n\n@csrf_exempt\ndef get_distance_id(request, origin, destination):\n distance = get_distance(origin, destination)\n return JSONResponse({'distance': distance})\n\n\n@csrf_exempt\ndef get_distance_name(request, origin, destination):\n origin_id = get_system_id(origin)\n destination_id = get_system_id(destination)\n distance = get_distance(origin_id, destination_id)\n return JSONResponse({'distance': distance})\n\n\n@csrf_exempt\ndef get_jump_range(request, origin, jump):\n origins = origin.split(\",\")\n jumps = jump.split(\",\")\n if len(origins) == len(jumps):\n systems = systems_in_range(list(map(int, origins)), list(map(int, jumps)))\n return JSONResponse({'systems': systems})\n return JSONResponse({'systems': []})\n\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n","repo_name":"erik-sn/eve_nav","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12612334573","text":"\"\"\"Catalog model tests.\"\"\"\n\n\nimport ddt\n\nfrom django.test import override_settings\n\nfrom openedx.core.djangoapps.catalog.tests import mixins\nfrom openedx.core.djangoapps.site_configuration.tests.test_util import with_site_configuration\nfrom openedx.core.djangolib.testing.utils import CacheIsolationTestCase\n\nCOURSE_CATALOG_API_URL = 'https://api.example.com/v1/'\n\n\n@ddt.ddt\nclass TestCatalogIntegration(mixins.CatalogIntegrationMixin, CacheIsolationTestCase):\n \"\"\"Tests covering the CatalogIntegration model.\"\"\"\n\n def assert_get_internal_api_url_value(self, expected):\n \"\"\" Asserts the value of get_internal_api_url matches the expected value. \"\"\"\n catalog_integration = self.create_catalog_integration()\n assert catalog_integration.get_internal_api_url() == expected\n\n @ddt.data(\n (0, False),\n (1, True),\n )\n @ddt.unpack\n def test_cache_control(self, cache_ttl, is_cache_enabled):\n \"\"\"Test the behavior of the property controlling whether API responses are cached.\"\"\"\n catalog_integration = self.create_catalog_integration(cache_ttl=cache_ttl)\n assert catalog_integration.is_cache_enabled == is_cache_enabled\n\n @override_settings(COURSE_CATALOG_API_URL=COURSE_CATALOG_API_URL)\n def test_get_internal_api_url(self):\n \"\"\" Requests made without a microsite should return the value from settings. 
\"\"\"\n self.assert_get_internal_api_url_value(COURSE_CATALOG_API_URL)\n catalog_integration = self.create_catalog_integration()\n assert catalog_integration.get_internal_api_url() == COURSE_CATALOG_API_URL\n\n @override_settings(COURSE_CATALOG_API_URL=COURSE_CATALOG_API_URL)\n @with_site_configuration(configuration={})\n def test_get_internal_api_url_without_microsite_override(self):\n \"\"\" Requests made to microsites that do not have COURSE_CATALOG_API_URL overridden should\n return the default value from settings. \"\"\"\n self.assert_get_internal_api_url_value(COURSE_CATALOG_API_URL)\n\n @override_settings(COURSE_CATALOG_API_URL=COURSE_CATALOG_API_URL)\n @with_site_configuration(configuration={'COURSE_CATALOG_API_URL': 'foo'})\n def test_get_internal_api_url_with_microsite_override(self):\n \"\"\" If a microsite has overridden the value of COURSE_CATALOG_API_URL, the overridden\n value should be returned. \"\"\"\n self.assert_get_internal_api_url_value('foo')\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/catalog/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"70214804883","text":"import itertools\r\n\r\nimport display\r\nimport world\r\nimport socket\r\nimport select\r\nimport pickle\r\nimport struct\r\nimport time\r\n\r\nNEWMAP = 1\r\nTO_INV = 2\r\n# Double backslashes so when printing they actually print.\r\ndef make_chr(data):\r\n return chr(data) if chr(data) != \"\\\\\" else \"\\\\\\\\\"\r\n\r\ndef unpack_map(data):\r\n return [[ [data[(x + y * world.WORLD_X) * 3], data[(x + y * world.WORLD_X) * 3 + 1], make_chr(data[(x + y * world.WORLD_X) * 3 + 2])] for y in range(world.WORLD_Y)] for x in range(world.WORLD_X)]\r\n\r\n# Returns number of bytes the utf-8 char will take. Byte is the first byte of the char\r\ndef unicode_bytes(byte):\r\n if byte < 0b10000000: # So a 0 in start position\r\n return 1\r\n elif byte < 0b11000000: # Not a single width, but less than a double width. Must be a continuing char.\r\n raise UnicodeDecodeError(\"Unexpected continuation character found\")\r\n elif byte < 0b11100000: # Not continuing, not 3 width. So 2\r\n return 2\r\n elif byte < 0b11110000: # Not 4 width, not 2. So 3\r\n return 3\r\n return 4\r\n\r\ndef multiplayer():\r\n\r\n # Get username and password.\r\n display.clear()\r\n display.flushinp()\r\n inpt = display.getch()\r\n curs_loc = 0\r\n char_name = \"\"\r\n\r\n display.printc(30, 9, \"Enter your name:\")\r\n while inpt != 10: # Until ENTER pressed\r\n if inpt == 8: # Backspace\r\n if curs_loc != 0:\r\n curs_loc -= 1\r\n char_name = char_name[:-1] # Remove last character\r\n display.printc(curs_loc + 30, 10, ' ')\r\n elif (inpt != -1) and (curs_loc < 45) and (chr(inpt) in \"abcdefghijklmnopqrtsuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-1234567890 \"): # Also don't let them get too long. 45 chosen arbitrarily because yeah.\r\n display.printc(curs_loc + 30, 10, chr(inpt))\r\n char_name += chr(inpt)\r\n curs_loc += 1\r\n display.refresh()\r\n inpt = display.getch()\r\n # Wait for release\r\n while display.keyDown(display.CONST.VK_RETURN):\r\n pass\r\n if char_name == len(char_name) * ' ':\r\n char_name = \"default\"\r\n\r\n curs_loc = 0\r\n password = \"\"\r\n\r\n inpt = display.getch()\r\n display.printc(30, 11, \"Enter your Password:\")\r\n while inpt != 10 or len(password) < 8: # Until ENTER pressed. 
8 char password min.\r\n if inpt == 10 and len(password) < 8:\r\n display.printc(30, 13, \"Passwords must be at least 8 characters.\")\r\n elif inpt != -1:\r\n display.printc(30, 13, \" \")\r\n if inpt == 8: # Backspace\r\n if curs_loc != 0:\r\n curs_loc -= 1\r\n password = password[:-1] # Remove last character\r\n display.printc(curs_loc + 30, 12, ' ')\r\n elif (inpt != -1) and (curs_loc < 45) and (inpt < 127) and (inpt > 31): # Most characters allowed in password. Just has to be a printable ASCI\r\n display.printc(curs_loc + 30, 12, chr(inpt))\r\n password += chr(inpt)\r\n curs_loc += 1\r\n display.refresh()\r\n inpt = display.getch()\r\n # Wait for release\r\n while display.keyDown(display.CONST.VK_RETURN):\r\n pass\r\n\r\n\r\n\r\n display.clear()\r\n display.draw_topbar()\r\n display.refresh()\r\n\r\n world.map = [[ [0, 1, '!'] for y in range(world.WORLD_Y)] for x in range(world.WORLD_X)]\r\n\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock.settimeout(1)\r\n try:\r\n sock.sendto(bytes(char_name, 'utf-8'), ('localhost', 5000))\r\n \r\n (data, new_addr) = sock.recvfrom(65507)\r\n last_update = time.clock()\r\n\r\n # All coord pairs to overwrite.\r\n to_overwrite = []\r\n \r\n sidebar_lines = 0\r\n\r\n # ID of current map we're in. Useful for server.\r\n current_map = 0\r\n\r\n class states:\r\n WORLD = 0\r\n INVENTORY = 1\r\n\r\n class inventory: # Only one of these exists so we can just modify class stuff.\r\n class item:\r\n def __init__(this, name, desc, amount, value):\r\n this.name = name\r\n this.desc = desc\r\n this.value = value\r\n this.amount = amount\r\n\r\n selected_index = 0\r\n current_type = \"weapon\"\r\n\r\n weapons = []\r\n hats = []\r\n shirts = []\r\n pants = []\r\n rings = []\r\n consumables = []\r\n\r\n def str_to_list(string):\r\n return [inventory.weapons, inventory.hats, inventory.shirts, inventory.pants, inventory.rings, inventory.consumables][[\"weapon\", \"hat\", \"shirt\", \"pants\", \"ring\", \"consumable\"].index(string)] \r\n\r\n def clear(): # Clears entire inventory.\r\n inventory.weapons.clear()\r\n inventory.hats.clear()\r\n inventory.shirts.clear()\r\n inventory.pants.clear()\r\n inventory.rings.clear()\r\n inventory.consumables.clear()\r\n def add_item(data): # Adds an item based off of current data\r\n split_data = [bytearray(g) for k,g in itertools.groupby(data, lambda x: x == 0) if not k] # Data is packed as type, name, desc, amount, value. 
First 3 are null terminated strings\r\n type = split_data[0].decode('utf-8')\r\n name = split_data[1].decode('utf-8')\r\n desc = split_data[2].decode('utf-8')\r\n amount = struct.unpack(\"!I\", data[len(data) - 8: len(data) - 4])[0] # Unpack last bytes.\r\n value = struct.unpack(\"!I\", data[len(data) - 4: len(data)])[0]\r\n inventory.str_to_list(type).append(inventory.item(name, desc, amount, value))\r\n\r\n state = states.WORLD\r\n while True:\r\n # Let players force quit with Ctrl+Q\r\n if display.keyDown(display.CONST.VK_CONTROL) and display.keyDown(ord('Q')) or (time.clock() - last_update > 1.0):\r\n while display.keyDown(ord('Q')):\r\n pass\r\n sock.close()\r\n return\r\n \r\n if select.select([sock], [], [], 0) != ([], [], []):\r\n data, addr = sock.recvfrom(65507)\r\n while select.select([sock], [], [], 0) != ([], [], []):\r\n sock.recvfrom(65507)\r\n last_update = time.clock()\r\n\r\n if state == states.WORLD:\r\n index = 1 # Current data index\r\n if data[0] == NEWMAP:\r\n current_map = data[index]\r\n index += 1\r\n map_size = struct.unpack(\"!I\", data[index:index + 4])[0] # 4 bytes for size of map\r\n index += 4\r\n world.map = unpack_map(data[index: index + map_size]) \r\n index += map_size\r\n world.dispworld()\r\n display.refresh()\r\n elif data[0] == TO_INV:\r\n display.printc(0, 5, ((\" \" * 80) + \"\\n\") * 25) # Should clear main screen.\r\n # Now print out inventory.\r\n display.printc(0, 5, \"Inventory: \\\\fyWeapons(1)\\\\fw Hats(2) Shirts(3) Pants(4) Rings(5) Consumables(6)\")\r\n display.printc(0, 6, \"Name/Description Value Amount\")\r\n display.printc(0, 7, '-' * 80)\r\n inventory.clear() # Clear old inv data.\r\n index = 1 # Current data index.\r\n while index < len(data): \r\n num_bytes = struct.unpack(\"!I\", data[index: index + 4])[0] # Number of bytes in item\r\n index += 4 # increment index\r\n inventory.add_item(data[index: index + num_bytes]) # Add the item.\r\n index += num_bytes\r\n\r\n # Set last tracking vals\r\n inventory.current_type = \"weapon\"\r\n inventory.selected_index = 0\r\n # Now print out inventory\r\n display.printc(0, 8, \">\") # Draw cursor.\r\n \r\n loc = 8 # y location to print item at.\r\n for itm in inventory.weapons: # Default to weapons\r\n display.printc(1, loc, itm.name)\r\n display.printc(50, loc, str(itm.value))\r\n display.printc(65, loc, str(itm.amount))\r\n display.printc(2, loc + 1, itm.desc)\r\n loc += 2\r\n if loc > 23: # Can't print more. 
So we get 12 items per sheet.\r\n break\r\n display.refresh()\r\n\r\n state = states.INVENTORY\r\n continue # Finish loop\r\n # Here we do our updating\r\n # So we need to redraw objects, HP/MP, gold, sidebar, and possibly equipment/spellbox/itembox\r\n # Remove all previous objects\r\n for elem in to_overwrite:\r\n display.printc(elem[0], elem[1], world.map[elem[0]][elem[1] - 5][2], world.map[elem[0]][elem[1] - 5][0])\r\n to_overwrite.clear()\r\n \r\n num_objs = data[index]\r\n index += 1\r\n # Redraw all new objects\r\n \r\n while num_objs > 0:\r\n x_loc = data[index]\r\n y_loc = data[index + 1]\r\n index += 2\r\n char = data[index: index + unicode_bytes(data[index])].decode('utf-8') # Total char length for unicode char\r\n index += unicode_bytes(data[index]) # Increment counter\r\n color = data[index]\r\n index += 1\r\n display.printc(x_loc, 5 + y_loc, char, color, world.map[x_loc][y_loc][1])\r\n to_overwrite.append((x_loc, 5 + y_loc))\r\n num_objs -= 1\r\n \r\n # Draw HP and MP\r\n HP = struct.unpack(\"!I\", data[index:index + 4])[0]\r\n maxHP = struct.unpack(\"!I\", data[index + 4 : index + 8])[0]\r\n display.printc(8, 0, ' ' * 17)\r\n display.printc(8, 0, str(HP) + \"/\" + str(maxHP))\r\n index += 8\r\n\r\n MP = struct.unpack(\"!I\", data[index:index + 4])[0]\r\n maxMP = struct.unpack(\"!I\", data[index + 4 : index+8])[0]\r\n display.printc(8, 1, ' ' * 17)\r\n display.printc(8, 1, str(MP) + \"/\" + str(maxMP))\r\n index += 8\r\n\r\n # Draw level, EXP, gold:\r\n level = struct.unpack(\"!I\", data[index: index + 4])[0]\r\n exp = struct.unpack(\"!I\", data[index + 4: index + 8])[0]\r\n gold = struct.unpack(\"!I\", data[index + 8: index + 12])[0]\r\n\r\n display.printc(12, 3, str(level))\r\n display.printc(5, 4, ' ' * 20)\r\n display.printc(5, 4, str(int(exp)) + \" to level\")\r\n display.printc(10, 2, ' ' * 15)\r\n display.printc(10, 2, str(gold))\r\n index += 12\r\n\r\n # Print spell box, item box\r\n spell_len = struct.unpack(\"!I\", data[index: index + 4])[0]\r\n index += 4\r\n display.printc(display.SPELL_BOX_START + 1, 1, data[index: index + spell_len].decode('utf-8'))\r\n index += spell_len\r\n\r\n item_len = struct.unpack(\"!I\", data[index: index + 4])[0]\r\n index += 4\r\n display.printc(display.ITEM_BOX_START + 1, 1, data[index: index + item_len].decode('utf-8'))\r\n index += item_len\r\n\r\n # Prints equipment\r\n y_print = 0\r\n \r\n for equip in [display.WEAPON_X, display.HAT_X, display.SHIRT_X, display.PANTS_X, display.RING_X]:\r\n equip_len = struct.unpack(\"!I\", data[index : index + 4])[0]\r\n index += 4\r\n display.printc(equip, y_print, ' ' * 37)\r\n display.printc(equip, y_print, data[index : index + equip_len].decode('utf-8'))\r\n index += equip_len\r\n y_print += 1\r\n\r\n # Now, we see if we have sidebar stuff to print.\r\n # So overwrite previous sidebar\r\n for ind in range(sidebar_lines):\r\n display.printc(50, ind + 5, ' ' * 30)\r\n\r\n sidebar_length = struct.unpack(\"!I\", data[index: index + 4])[0]\r\n index += 4\r\n display.printc(50, 5, data[index : index + sidebar_length].decode('utf-8'))\r\n sidebar_lines = display.getyx(display.stdscr)[0] - 4 # We know how far down we printed by where the cursor is.\r\n display.refresh()\r\n elif state == states.INVENTORY:\r\n # We can ignore data sent. 
So all we do is check kbd input and update based on that\r\n last_a = False\r\n if last_a and not display.keyDown(ord('A')):\r\n display.printc(0, 0, 'B')\r\n if display.keyDown(ord('A')):\r\n display.printc(0, 0, 'A')\r\n last_a = True\r\n display.refresh()\r\n \r\n # Sends what keys are down.\r\n # Byte Key\r\n # 0 W \r\n # 1 A\r\n # 2 S\r\n # 3 D\r\n # 4 I\r\n # 5 J\r\n # 6 K\r\n # 7 L\r\n # 8 SHIFT \r\n # 9 SPACE\r\n # 10 ENTER\r\n # 11 Q/UP\r\n # 12 E/DOWN\r\n # 13 U\r\n # 14 O\r\n # 15 ESC\r\n # 16 Special: Current map ID. Not actually a key. \r\n\r\n to_send = bytearray(20)\r\n if state == states.WORLD: # Only send kbd input if we're in the world.\r\n to_send[0] = states.WORLD # Sending world input\r\n to_send[1] = display.keyDown(ord('W'))\r\n to_send[2] = display.keyDown(ord('A'))\r\n to_send[3] = display.keyDown(ord('S'))\r\n to_send[4] = display.keyDown(ord('D'))\r\n to_send[5] = display.keyDown(ord('I'))\r\n to_send[6] = display.keyDown(ord('J'))\r\n to_send[7] = display.keyDown(ord('K'))\r\n to_send[8] = display.keyDown(ord('L'))\r\n to_send[9] = display.keyDown(display.CONST.VK_LSHIFT)\r\n to_send[10] = display.keyDown(ord(' '))\r\n to_send[11]= display.keyDown(display.CONST.VK_RETURN)\r\n to_send[12]= display.keyDown(ord('Q')) or display.keyDown(display.CONST.VK_UP)\r\n to_send[13]= display.keyDown(ord('E')) or display.keyDown(display.CONST.VK_DOWN)\r\n to_send[14]= display.keyDown(ord('E'))\r\n to_send[15]= display.keyDown(ord('U'))\r\n to_send[16]= display.keyDown(ord('O'))\r\n to_send[17]= display.keyDown(ord('V'))\r\n to_send[18]= display.keyDown(display.CONST.VK_ESCAPE)\r\n to_send[19] = current_map\r\n elif to_send == states.INVENTORY:\r\n to_send[0] = states.INVENTORY\r\n sock.sendto(to_send, new_addr)\r\n\r\n except ConnectionResetError as ex:\r\n sock.close()\r\n return\r\n except Exception as ex:\r\n return","repo_name":"fuzzything44/pyRPG","sub_path":"pyRPG/multiplayer.py","file_name":"multiplayer.py","file_ext":"py","file_size_in_byte":15887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"21215584195","text":"def unique(string):\n counter = 0\n new_string = \"\"\n for char in string:\n if char not in new_string:\n new_string += char\n counter += 1\n return counter\n\n\ntext = input().upper()\nnew_text = \"\"\ncurrent_text = \"\"\nfor index in range(len(text)):\n char = text[index]\n if char.isdigit():\n if index < len(text) - 1:\n if text[index + 1].isdigit():\n number = int(char + text[index + 1])\n else:\n number = int(char)\n new_text += current_text * number\n current_text = \"\"\n else:\n number = int(char)\n new_text += current_text * number\n current_text = \"\"\n else:\n current_text += char\n\nif new_text:\n counter = unique(new_text)\n print(f\"Unique symbols used: {counter}\")\n print(new_text)\nelse:\n counter = unique(current_text)\n print(f\"Unique symbols used: {counter}\")\n print(current_text)\n","repo_name":"Tsveti1103/Python-Fundamentals","sub_path":"text_processing_exercise/rage_quit.py","file_name":"rage_quit.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31562940880","text":"from pypy.interpreter.pyparser.grammar import Alternative, \\\n Sequence, KleeneStar, Token, Parser\n\nclass TestLookAheadBasics:\n\n def setup_method(self, method):\n self.parser = Parser()\n self.tok1 = self.parser.Token_n(\"t1\", 'foo')\n self.tok2 = self.parser.Token_n(\"t2\", 'bar')\n self.tok3 = 
self.parser.Token_n(\"t3\", 'foobar')\n self.tokens = [self.tok1, self.tok2, self.tok3]\n self.parser.build_first_sets() \n\n def test_basic_token(self):\n assert self.tok1.first_set == [self.tok1]\n\n def test_basic_alternative(self):\n alt = self.parser.Alternative_n(\"a1t\", self.tokens)\n self.parser.build_first_sets()\n assert alt.first_set == self.tokens\n\n\n def test_basic_sequence(self):\n seq = self.parser.Sequence_n(\"seq\", self.tokens)\n self.parser.build_first_sets()\n assert seq.first_set == [self.tokens[0]]\n\n def test_basic_kleenstar(self):\n tok1, tok2, tok3 = self.tokens\n kstar1 = self.parser.KleeneStar_n(\"k\", 1, 3, tok1)\n kstar2 = self.parser.KleeneStar_n(\"k2\", 0, 3, tok1)\n self.parser.build_first_sets()\n assert kstar1.first_set == [tok1]\n assert kstar2.first_set == [tok1, self.parser.EmptyToken]\n\n\n def test_maybe_empty_sequence(self):\n \"\"\"S -> tok1{0,2} tok2{0,2}\n ==> S.first_set = [tok1, tok2, EmptyToken]\n \"\"\"\n tok1, tok2, tok3 = self.tokens\n k1 = self.parser.KleeneStar_n( \"k1\", 0, 2, tok1)\n k2 = self.parser.KleeneStar_n(\"k2\", 0, 2, tok2)\n seq = self.parser.Sequence_n( \"seq\", [k1, k2])\n self.parser.build_first_sets()\n assert seq.first_set == [tok1, tok2, self.parser.EmptyToken]\n\n\n def test_not_empty_sequence(self):\n \"\"\"S -> tok1{0,2} tok2{1,2}\n ==> S.first_set = [tok1, tok2]\n \"\"\"\n tok1, tok2, tok3 = self.tokens\n k1 = self.parser.KleeneStar_n(\"k1\", 0, 2, tok1)\n k2 = self.parser.KleeneStar_n(\"k2\", 1, 2, tok2)\n seq = self.parser.Sequence_n(\"seq\", [k1, k2])\n self.parser.build_first_sets()\n assert seq.first_set == [tok1, tok2]\n\n def test_token_comparison(self):\n tok1 = self.parser.Token_n( \"tok1\", \"foo\" )\n tok1b = self.parser.Token_n( \"tok1\", \"foo\" )\n tok2 = self.parser.Token_n( \"tok2\", \"foo\" )\n tok3 = self.parser.Token_n( \"tok2\", None )\n assert tok1 == tok1b\n assert tok1 != tok2\n assert tok2 != tok3\n\n\n\nclass TestLookAhead:\n\n def setup_method(self, method):\n p = self.parser = Parser()\n self.LOW = p.Token_n( 'LOW', 'low')\n self.CAP = p.Token_n( 'CAP' ,'cap')\n self.A = p.Alternative_n( 'R_A', [])\n k1 = p.KleeneStar_n( 'R_k1', 0, rule=self.LOW)\n k2 = p.KleeneStar_n( 'R_k2', 0, rule=self.CAP)\n self.B = p.Sequence_n( 'R_B', [k1, self.A])\n self.C = p.Sequence_n( 'R_C', [k2, self.A])\n self.A.args = [self.B, self.C]\n p.build_first_sets()\n \n def test_S_first_set(self):\n p = self.parser\n LOW = p.tokens['LOW']\n CAP = p.tokens['CAP']\n for s in [Token(p, LOW, 'low'), p.EmptyToken, Token(p, CAP, 'cap')]:\n assert s in self.A.first_set\n assert s in self.B.first_set\n assert s in self.C.first_set\n","repo_name":"camillobruni/pygirl","sub_path":"pypy/interpreter/pyparser/test/test_lookahead.py","file_name":"test_lookahead.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"41583696512","text":"from random import randint\nimport math\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm_notebook as tqdm\nfrom fastai.vision import ItemLists, bb_pad_collate, tensor\nfrom object_detection_fastai.helper.wsi_loader import SlideContainer, ObjectItemListSlide, SlideObjectCategoryList\n\n\ndef sample_function_train(y, classes, size, level_dimensions, level):\n width, height = level_dimensions[level]\n if len(y[0]) == 0:\n return randint(0, width - size[0]), randint(0, height -size[1])\n else:\n if randint(0, 5) < 2: ##upsample patches containing annotations\n class_id = 
np.random.choice(classes, 1)[0] # select a random class\n ids = np.array(y[1]) == class_id # filter the annotations according to the selected class\n xmin, ymin, _, _ = np.array(y[0])[ids][randint(0, np.count_nonzero(ids) - 1)] # randomly select one of the filtered annotatons as seed for the training patch\n \n # To have the selected annotation not in the center of the patch and an random offset.\n xmin += randint(-size[0]/2, size[0]/2) \n ymin += randint(-size[1]/2, size[1]/2)\n xmin, ymin = max(0, int(xmin - size[0] / 2)), max(0, int(ymin -size[1] / 2))\n xmin, ymin = min(xmin, width - size[0]), min(ymin, height - size[1])\n return xmin, ymin\n else:\n return randint(0, width - size[0]), randint(0, height -size[1])\n \n \n \ndef sample_function_test(y, classes, size, level_dimensions, level):\n width, height = level_dimensions[level]\n return randint(0, width - size[0]), randint(0, height -size[1])\n\n \n \ndef create_wsi_container(annotations_df: pd.DataFrame, res_level, patch_size, input_folder=\"/drive/MyDrive/MIDOG_Challenge/images/\",train=True):\n\n container = []\n\n for image_name in tqdm(annotations_df[\"file_name\"].unique()):\n\n image_annos = annotations_df[annotations_df[\"file_name\"] == image_name]\n\n bboxes = [box for box in image_annos[\"box\"]]\n labels = [label for label in image_annos[\"cat\"]]\n \n if train==True:\n container.append(SlideContainer(input_folder+str(image_name), y=[bboxes, labels], level=res_level,width=patch_size, height=patch_size, sample_func=sample_function_train))\n else:\n container.append(SlideContainer(input_folder+str(image_name), y=[bboxes, labels], level=res_level,width=patch_size, height=patch_size, sample_func=sample_function_test))\n return container\n \n \n \ndef sample_selector(dataframe, train_samples, val_samples, batch_size, transforms, patch_size=256, res_level=0, \n train_scanner = \"Hamamatsu XR\", val_scanner = \"Hamamatsu S360\",\n random_seed = None, normalise = True, testdataframe = None,\n input_folder=\"/drive/MyDrive/MIDOG_Challenge/images/\"):\n \n if patch_size not in [256,512,1024]:\n print(\"Suggested patch sizes are 256, 512 or 1024\")\n if random_seed:\n np.random.seed(random_seed)\n\n train_annos = dataframe[dataframe[\"scanner\"].isin(train_scanner.split(\",\"))]\n train_container = create_wsi_container(train_annos, res_level, patch_size,input_folder,train=True)\n\n if testdataframe is not None:\n val_annos = testdataframe[testdataframe[\"scanner\"].isin(val_scanner.split(\",\"))]\n valid_container = create_wsi_container(val_annos, res_level, patch_size,input_folder,train=False)\n else:\n val_annos = dataframe[dataframe[\"scanner\"].isin(val_scanner.split(\",\"))]\n valid_container = create_wsi_container(val_annos, res_level, patch_size,input_folder,train=False)\n\n train_images = list(np.random.choice(train_container, train_samples))\n valid_images = list(np.random.choice(valid_container, val_samples))\n\n train, valid = ObjectItemListSlide(train_images), ObjectItemListSlide(valid_images)\n\n item_list = ItemLists(\".\", train, valid)\n lls = item_list.label_from_func(lambda x: x.y, label_cls=SlideObjectCategoryList)\n lls = lls.transform(transforms, tfm_y=True, size=train_images[0].height)\n\n norm_mean=[0,0,0]\n norm_sd=[0,0,0]\n if \"Hamamatsu XR\" in train_scanner:\n norm_mean=[sum(x) for x in zip(norm_mean, [197.53/255,143.54/255,202.30/255])]\n norm_sd=[sum(x) for x in zip(norm_sd,[math.sqrt(690.74)/255,math.sqrt(1279.30)/255,math.sqrt(237.16)/255])]\n if \"Hamamatsu S360\" in train_scanner:\n 
norm_mean=[sum(x) for x in zip(norm_mean,[206.11/255,144.28/255,187.62/255])]\n norm_sd=[sum(x) for x in zip(norm_sd,[math.sqrt(670.45)/255,math.sqrt(1522.41)/255,math.sqrt(601.91)/255])]\n if \"Aperio CS\" in train_scanner:\n norm_mean=[sum(x) for x in zip(norm_mean,[202.74/255,149.97/255,174.83/255])]\n norm_sd=[sum(x) for x in zip(norm_sd,[math.sqrt(731.78)/255,math.sqrt(1480.70)/255,math.sqrt(855.76)/255])]\n if \"Leica GT450\" in train_scanner:\n norm_mean=[sum(x) for x in zip(norm_mean,[231.52/255,197.26/255,230.18/255])]\n norm_sd=[sum(x) for x in zip(norm_sd,[math.sqrt(317.51)/255,math.sqrt(797.85)/255,math.sqrt(134.20)/255])]\n\n norm_mean=[i/len(train_scanner.split(\",\")) for i in norm_mean]\n norm_sd=[i/len(train_scanner.split(\",\")) for i in norm_sd]\n \n if normalise:\n data = lls.databunch(bs=batch_size, collate_fn=bb_pad_collate,num_workers=0\n ).normalize([tensor(norm_mean),tensor(norm_sd)])\n print(\"Validation data normalised on scanner \"+str(train_scanner))\n else:\n data = lls.databunch(bs=batch_size, collate_fn=bb_pad_collate,num_workers=0\n )\n return(data)\n\n\n## Code below here taken from MIDOG competition reference docker image\n\ndef activ_to_bbox(acts, anchors, flatten=True):\n \"\"\"Extrapolate bounding boxes on anchors from the model activations.\"\"\"\n if flatten:\n if anchors.shape[-1] == 4:\n acts.mul_(acts.new_tensor([[0.1, 0.1, 0.2, 0.2]]))\n centers = anchors[..., 2:] * acts[..., :2] + anchors[..., :2]\n sizes = anchors[..., 2:] * torch.exp(acts[..., 2:])\n else:\n acts.mul_(acts.new_tensor([[0.1, 0.1, 0.2]]))\n centers = anchors[..., 2:] * acts[..., :2] + anchors[..., :2]\n sizes = anchors[..., 2:] * torch.exp(acts[..., 2:])\n return torch.cat([centers, sizes], -1)\n else:\n return [activ_to_bbox(act, anc) for act, anc in zip(acts, anchors)]\n\ndef nms_patch(boxes, scores, thresh=0.5):\n idx_sort = scores.argsort(descending=True)\n boxes, scores = boxes[idx_sort], scores[idx_sort]\n to_keep, indexes = [], torch.LongTensor(np.arange(len(scores)))\n\n while len(scores) > 0:\n to_keep.append(idx_sort[indexes[0]])\n iou_vals = IoU_values(boxes, boxes[:1]).squeeze()\n mask_keep = iou_vals <= thresh\n if len(mask_keep.nonzero()) == 0: break\n boxes, scores, indexes = boxes[mask_keep], scores[mask_keep], indexes[mask_keep]\n return torch.LongTensor(to_keep)\n\n\ndef intersection(anchors, targets):\n \"\"\"Compute the sizes of the intersections of `anchors` by `targets`.\"\"\"\n ancs, tgts = cthw2tlbr(anchors), cthw2tlbr(targets)\n a, t = ancs.size(0), tgts.size(0)\n ancs, tgts = ancs.unsqueeze(1).expand(a, t, 4), tgts.unsqueeze(0).expand(a, t, 4)\n top_left_i = torch.max(ancs[..., :2], tgts[..., :2])\n bot_right_i = torch.min(ancs[..., 2:], tgts[..., 2:])\n sizes = torch.clamp(bot_right_i - top_left_i, min=0)\n return sizes[..., 0] * sizes[..., 1]\n\n\n\ndef IoU_values(anchors, targets):\n \"\"\"Compute the IoU values of `anchors` by `targets`.\"\"\"\n if anchors.shape[-1] == 4:\n\n inter = intersection(anchors, targets)\n anc_sz, tgt_sz = anchors[:, 2] * anchors[:, 3], targets[:, 2] * targets[:, 3]\n union = anc_sz.unsqueeze(1) + tgt_sz.unsqueeze(0) - inter\n\n return inter / (union + 1e-8)\n\n else: # circular anchors\n a, t = anchors.size(0), targets.size(0)\n ancs = anchors.unsqueeze(1).expand(a, t, 3)\n tgts = targets.unsqueeze(0).expand(a, t, 3)\n diff = (ancs[:, :, 0:2] - tgts[:, :, 0:2])\n distances = (diff ** 2).sum(dim=2).sqrt()\n radius1 = ancs[..., 2]\n radius2 = tgts[..., 2]\n acosterm1 = (((distances ** 2) + (radius1 ** 2) - (radius2 ** 2)) / 
(2 * distances * radius1)).clamp(-1,\n 1).acos()\n acosterm2 = (((distances ** 2) - (radius1 ** 2) + (radius2 ** 2)) / (2 * distances * radius2)).clamp(-1,\n 1).acos()\n secondterm = ((radius1 + radius2 - distances) * (distances + radius1 - radius2) * (\n distances + radius1 + radius2) * (distances - radius1 + radius2)).clamp(min=0).sqrt()\n\n intersec = (radius1 ** 2 * acosterm1) + (radius2 ** 2 * acosterm2) - (0.5 * secondterm)\n\n union = np.pi * ((radius1 ** 2) + (radius2 ** 2)) - intersec\n\n return intersec / (union + 1e-8)\n\n\n\ndef rescale_box(bboxes, size: torch.Tensor):\n bboxes[:, :2] = bboxes[:, :2] - bboxes[:, 2:] / 2\n bboxes[:, :2] = (bboxes[:, :2] + 1) * size / 2\n bboxes[:, 2:] = bboxes[:, 2:] * size / 2\n bboxes = bboxes.long()\n return bboxes\n\n\n## Code from MIDOG reference docker for use in get_model_prediction \ndef cthw2tlbr(boxes):\n \"\"\"Convert center/size format `boxes` to top/left bottom/right corners.\"\"\"\n top_left = boxes[:, :2] - boxes[:, 2:]/2\n bot_right = boxes[:, :2] + boxes[:, 2:]/2\n return torch.cat([top_left, bot_right], 1)\n\ndef tlbr2cthw(boxes):\n \"\"\"Convert top/left bottom/right format `boxes` to center/size corners.\"\"\"\n center = (boxes[:, :2] + boxes[:, 2:])/2\n sizes = boxes[:, 2:] - boxes[:, :2]\n return torch.cat([center, sizes], 1)\n","repo_name":"scjjb/MIDOG_Domain_Adaptation","sub_path":"code/fastai_sampling.py","file_name":"fastai_sampling.py","file_ext":"py","file_size_in_byte":9754,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"27652164917","text":"#!/usr/bin/env python\nimport fitsio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nimport yaml\nsys.path.append('../utils')\n\n#./plot_counts_richness.py ../yml/mini_uchuu/mini_uchuu_fid_hod.yml\n#./plot_counts_richness.py yml/abacus_summit_fid_hod.yml\n\nclass PlotCountsRichness(object):\n def __init__(self, yml_fname):\n\n with open(yml_fname, 'r') as stream:\n try:\n self.para = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n self.depth = self.para['depth']\n perc = self.para['perc']\n output_loc = self.para['output_loc']\n model_name = self.para['model_name']\n self.rich_name = self.para['rich_name']\n self.out_path = f'{output_loc}/model_{model_name}'\n redshift = self.para['redshift']\n self.survey = self.para.get('survey', 'desy1')\n\n los = self.para.get('los', 'z')\n # if los == 'xyz':\n # self.los = sys.argv[2]\n # else:\n # self.los = los\n\n #use_pmem = self.para.get('use_pmem', False)\n #pec_vel = self.para.get('pec_vel', False)\n #sat_from_part = self.para.get('sat_from_part', False)\n\n #if use_pmem == True:\n # self.rich_name = f'pmem'\n #else:\n # self.rich_name = f'd{self.depth:.0f}'\n\n #if self.los == 'x' or self.los == 'y':\n # self.rich_name = f'{self.rich_name}_{self.los}'\n\n #if pec_vel == True:\n # self.rich_name += '_vel'\n #if perc == False:\n # self.rich_name += '_noperc'\n #if sat_from_part == True:\n # self.rich_name += '_from_part'\n\n\n # if os.path.isdir(f'{self.out_path}/obs_{self.rich_name}/')==False: \n # os.makedirs(f'{self.out_path}/obs_{self.rich_name}/')\n self.obs_path = f'{self.out_path}/obs_{self.rich_name}_{self.survey}/'\n if os.path.isdir(self.obs_path)==False: \n os.makedirs(self.obs_path)\n\n if self.para['nbody'] == 'mini_uchuu':\n from read_mini_uchuu import ReadMiniUchuu\n self.readcat = ReadMiniUchuu(self.para['nbody_loc'], redshift)\n\n if self.para['nbody'] == 'uchuu':\n from read_uchuu import ReadUchuu\n 
self.readcat = ReadUchuu(self.para['nbody_loc'], redshift)\n\n if self.para['nbody'] == 'abacus_summit':\n sys.path.append('../abacus_summit')\n from read_abacus_summit import ReadAbacusSummit\n self.readcat = ReadAbacusSummit(self.para['nbody_loc'], redshift)\n\n if self.para['nbody'] == 'tng_dmo':\n from read_tng_dmo import ReadTNGDMO\n halofinder = self.para.get('halofinder', 'rockstar')\n self.readcat = ReadTNGDMO(self.para['nbody_loc'], halofinder, redshift)\n print('halofinder', halofinder)\n\n #self.mpart = self.readcat.mpart\n self.boxsize = self.readcat.boxsize\n #self.hubble = self.readcat.hubble\n self.vol = self.boxsize**3\n\n #self.ofname = f'{self.out_path}/obs_{self.rich_name}/counts_richness.dat'\n self.ofname = f'{self.obs_path}/counts_richness.dat'\n \n def calc_counts_richness(self):\n if self.depth == 'pmem' or self.depth==-1:\n fname = f'{self.out_path}/richness_{self.rich_name}.fit'\n else:\n fname = f'{self.out_path}/richness_{self.rich_name}.fit'\n\n #print('self.rich_name', self.rich_name)\n data = fitsio.read(fname)\n lam = data['lambda']\n lam_min_list = 10**np.linspace(np.log10(20), np.log10(100), 20)\n\n den_list = []\n for lam_min in lam_min_list:\n sel = (lam >= lam_min)\n den_list.append(len(lam[sel])/self.vol)\n\n # get galaxy density\n fname = f'{self.out_path}/gal_density.dat'\n if os.path.exists(fname) == False:\n \n # read in galaxies\n gal_cat_format = self.para.get('gal_cat_format', 'fits')\n\n if gal_cat_format == 'fits':\n gal_fname = f'{self.out_path}/gals.fit'\n data, header = fitsio.read(gal_fname, header=True)\n x_gal_in = data['px']\n\n if gal_cat_format == 'h5':\n import h5py\n loc = '/bsuhome/hwu/scratch/abacus_summit/'\n gal_fname = loc + 'NHOD_0.10_11.7_11.7_12.9_1.00_0.0_0.0_1.0_1.0_0.0_c000_ph000_z0p300.hdf5'\n f = h5py.File(gal_fname,'r')\n data = f['particles']\n #print(data.dtype)\n x_gal_in = data['x']\n\n ngal = len(x_gal_in)/self.vol\n data = np.array([ngal]).transpose()\n np.savetxt(fname, data, fmt='%-12g', header='ngal (h^3 Mpc^-3)')\n\n data = np.array([lam_min_list, den_list]).transpose()\n np.savetxt(self.ofname, data, fmt='%-12g', header='lam_min, den')\n\n def plot_counts_richness(self, axes=None, label=None):\n if axes is None:\n fig, axes = plt.subplots(1, 1, figsize=(7, 7))\n if label is None: \n label = ''\n \n ngal = np.loadtxt(f'{self.out_path}/gal_density.dat')\n label += r'$, \\rm n_{gal}$=%.2e'%(ngal)\n\n lam_min_list, den_list = np.loadtxt(self.ofname, unpack=True)\n plt.loglog(lam_min_list, den_list, label=label)\n plt.xlabel(r'$\\lambda$')\n plt.ylabel(r'$n(>\\lambda)$')\n plt.legend()\n plt.xlim(10, None)\n\n def plot_y1_counts_richness(self):\n lam_min_list, den_list, den_low, den_high = np.loadtxt('../y1/data/des_y1_space_density_lambda_z_0.2_0.35.dat', unpack=True)\n plt.plot(lam_min_list, den_list, label='DES Y1', c='k')\n plt.fill_between(lam_min_list, den_low, den_high, facecolor='gray', alpha=0.2)\n\n\nif __name__ == \"__main__\":\n yml_fname = sys.argv[1]\n\n ccr = PlotCountsRichness(yml_fname)\n ccr.calc_counts_richness()\n ccr.plot_counts_richness()\n plt.show()\n","repo_name":"hywu/hod-selection-bias","sub_path":"repo/pipeline/plot_counts_richness.py","file_name":"plot_counts_richness.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"38675365440","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nclass Display:\n\n @staticmethod\n def 
individual_variable(data, vars, plot, id=None):\n \"\"\"\n Plot the individual in a grid fashion\n :param data: the raw data\n :param vars: variables will be plot\n :param plot: plot type\n :param id: id variable will be compared\n :return: None\n \"\"\"\n if not isinstance(data, pd.DataFrame):\n raise TypeError(\"The data needs to be Pandas DataFrame\")\n\n if id and isinstance(id, str):\n f = pd.melt(data, id_vars=[id], value_vars=vars)\n g = sns.FacetGrid(f, col=\"variable\", col_wrap=2, sharex=False, sharey=False, size=5)\n g = g.map(plot, \"value\", id)\n else:\n f = pd.melt(data, value_vars=vars)\n g = sns.FacetGrid(f, col=\"variable\", col_wrap=2, sharex=False, sharey=False,size=5)\n g = g.map(plot, \"value\")\n\n @staticmethod\n def boxplot(x, y, **kwargs):\n sns.boxplot(x=x, y=y)\n x=plt.xticks(rotation=90)\n\n\n @staticmethod\n def corr_heatmap(data):\n if not isinstance(data. pd.Dataframe):\n raise TypeError(\"Input data should be a Pandas DataFrame\")\n\n corr = data.select_dtypes(include = [\"float64\", \"int64\"]).corr()\n plt.figure(figsize=(12,12))\n sns.heatmap(corr, vmax=1, square=True)","repo_name":"dolremi/Housing_Price","sub_path":"notebook/Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2385387663","text":"#! /usr/bin/python3\n\nimport rospy\nimport numpy as np\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import JointState\nfrom tf.transformations import quaternion_from_euler\n\nclass odometryNode:\n\n def __init__(self, node_name):\n\n rospy.init_node(node_name)\n\n self.load()\n\n self.compute_inverse_H0_matrix()\n\n rospy.Subscriber(self.wheel_state_topic, JointState, self.joint_state_callback)\n self.odom_publisher = rospy.Publisher(self.odom_topic, Odometry, queue_size=1)\n\n rospy.loginfo('odom_node setup complete.')\n rospy.spin()\n rospy.loginfo('odom_node shutting down')\n\n def load(self):\n\n # load parameters from the yaml file\n self.cmd_vel_topic = rospy.get_param('cmd_vel_topic','/cmd_vel')\n self.number_of_wheels = rospy.get_param('number_of_wheels',None)\n self.wheel_radius = rospy.get_param('wheel_radius',None)\n self.wheel_driving_angles = rospy.get_param('wheel_driving_angles',None)\n self.wheel_x_coords = rospy.get_param('wheel_x_coords',None)\n self.wheel_y_coords = rospy.get_param('wheel_y_coords',None)\n\n self.robot_config = rospy.get_param('robot_start_config', [0,0,0])\n self.wheel_state_topic = rospy.get_param('wheel_state_topic', None)\n\n self.odom_topic = rospy.get_param('odom_topic','/odom')\n\n self.prev_time_stamp = None\n self.chassis_velocity = None\n self.odom_msg = Odometry()\n\n # check if all have been entered\n k = 0\n if self.number_of_wheels == None:\n rospy.logerr('parameter not given')\n k = 1\n if self.wheel_radius == None:\n rospy.logerr('parameter not given')\n k = 1\n if self.wheel_driving_angles == None:\n rospy.logerr('parameter not given')\n k = 1\n if self.wheel_x_coords == None:\n rospy.logerr('parameter not given')\n k = 1\n if self.wheel_y_coords == None:\n rospy.logerr('parameter not given')\n k = 1\n if self.wheel_state_topic == None:\n rospy.logerr('parameter not given')\n k = 1\n\n if k:\n exit(1)\n\n def compute_inverse_H0_matrix(self):\n \n self.compute_H0_elements()\n\n temp = np.array([[ a for a in self.w_coeff],\n [ a for a in self.x_coeff],\n [ a for a in self.y_coeff]])\n\n H0 = np.transpose(temp)\n\n self.H0_inv = 
np.linalg.pinv(H0,rcond=1e-6)\n\n def compute_H0_elements(self):\n \n self.w_coeff = []\n self.x_coeff = []\n self.y_coeff = []\n for i in range(self.number_of_wheels):\n\n self.w_coeff.append((self.wheel_x_coords[i]*np.sin(self.wheel_driving_angles[i]) - self.wheel_y_coords[i]*np.cos(self.wheel_driving_angles[i]))/self.wheel_radius)\n\n self.x_coeff.append(np.cos(self.wheel_driving_angles[i])/self.wheel_radius)\n\n self.y_coeff.append(np.sin(self.wheel_driving_angles[i])/self.wheel_radius)\n\n def stamp_to_seconds(self, stamp):\n return float(stamp.secs) + float(stamp.nsecs)/1000000000\n\n def joint_state_callback(self, joint_state_msg):\n\n if self.prev_time_stamp == None:\n\n # save the timestamp\n self.prev_time_stamp = self.stamp_to_seconds(joint_state_msg.header.stamp)\n # calc and save the chassis velocity \n self.chassis_velocity = self.compute_chassis_velocity(np.array(joint_state_msg.velocity))\n return\n \n # update robot state by integrating velocity\n current_time_stamp = self.stamp_to_seconds(joint_state_msg.header.stamp)\n self.robot_config = self.robot_config + self.chassis_velocity*(current_time_stamp-self.prev_time_stamp)\n self.prev_time_stamp = current_time_stamp\n\n # map phi to [-180, 180]\n self.robot_config[0] = (self.robot_config[0] + np.pi)%(2*np.pi) - np.pi\n\n # calc and save the chassis velocity\n self.chassis_velocity = self.compute_chassis_velocity(np.array(joint_state_msg.velocity))\n\n # display robot state\n # print(\"robot_state: {:.2f} {:.2f} {:.2f}\".format(self.robot_config[0], self.robot_config[1], self.robot_config[2]))\n\n self.odom_msg.header.stamp = joint_state_msg.header.stamp\n\n # convert bot orientation to quaternion\n quat = quaternion_from_euler(0,0,self.robot_config[0])\n\n self.odom_msg.pose.pose.orientation.x = quat[0]\n self.odom_msg.pose.pose.orientation.y = quat[1]\n self.odom_msg.pose.pose.orientation.z = quat[2]\n self.odom_msg.pose.pose.orientation.w = quat[3]\n\n self.odom_msg.pose.pose.position.x = self.robot_config[1]\n self.odom_msg.pose.pose.position.y = self.robot_config[2]\n self.odom_msg.pose.pose.position.z = self.wheel_radius\n\n try:\n self.odom_publisher.publish(self.odom_msg)\n except rospy.ROSException:\n pass\n\n def compute_chassis_velocity(self,wheel_speeds):\n cvel = np.matmul(self.H0_inv, np.transpose(wheel_speeds))\n\n self.odom_msg.twist.twist.linear.x = cvel[1]\n self.odom_msg.twist.twist.linear.y = cvel[2]\n self.odom_msg.twist.twist.angular.z = cvel[0]\n\n # transform chassis_velocity\n c_inv = np.linalg.inv(np.array([[1, 0, 0],\n [0, np.cos(self.robot_config[0]), np.sin(self.robot_config[0])],\n [0,-np.sin(self.robot_config[0]), np.cos(self.robot_config[0])]]))\n \n cvel = np.matmul(c_inv,cvel)\n return cvel\n\nif __name__ == '__main__':\n odometryNode('odom_node')\n\n","repo_name":"arthurgomes4/omni_wheels_simulation","sub_path":"omni_wheel_control/scripts/odom_node.py","file_name":"odom_node.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"9339220581","text":"# coding: utf-8\n\n\"\"\"\nRunning\n$ celery -A tasks worker --loglevel=info\n>>> from tasks import add\n>>> add.delay(4, 4)\n$ celery beat\nStart beat as the producer and make sure celeryd is working as the consumer.\n\nRunning in daemon\n $ sudo service celeryd/celerybeat start\n >>> from tasks import add\n >>> add.delay(4, 4)\nNotes 1:\n Make sure you have CELERY_INCLUDE defined in celeryconfig.py otherwise daemon not working\nNotes 2:\n 
celery/celerybeat configuration goes to celeryconfig.py\n celeryd/celerybeat configuration goes to /etc/default/celeryd\nNotes 3:\n if you change .py like tasks.py or celeryconfig.py make sure you restart like below:\n $ sudo service celeryd/celerybeat restart\n Otherwise the change will not be applied.\n\nRemote Control\ncelery -A tasks inspect active\ncelery -A tasks status\n\n# lookup current events\ncelery -A tasks control enable_events\ncelery -A tasks events / celery -A tasks events --dump\ncelery -A tasks control disable_events\n\"\"\"\n\nfrom celery import Celery\n\n# Set no backend\n#\n# celery = Celery('tasks', broker='amqp://guest@localhost//')\n#\n# You can't play with result, if there is no backend set like below\n\n# Set backend if you want to store or send the states somewhere\n#\n# celery = Celery('tasks', backend='amqp', broker='amqp://guest@localhost//')\n#\n# Thus, you could do this:\n# >>> result = add.deplay(4, 4)\n# >>> result.ready()\n# >>> result.get(timeout=1)\n# >>> result.get(propagate=True) # default is True equals to result.get()\n# >>> result.traceback\n\n# Go with configuration\ncelery = Celery('tasks') # specify the module name 'tasks' which is same to current module name\n# load config from celeryconfig.py, if there is package with module, check https://groups.google.com/forum/#!topic/celery-users/D-5PtgAqdLI otherwise error happens with celerybeat\ncelery.config_from_object('celeryconfig')\n\n# Set some configurations as list here:\n# celery.conf.update(\n # CELERYD_PREFETCH_MULTIPLIER = 1,\n# )\n\n\nfrom celery.utils.log import get_task_logger\nfrom celery import current_task\n\nlogger = get_task_logger(__name__)\n\n# Put task decorator the first one if there are multiple decorators, which means @celery.task will be excuted last.\n@celery.task(max_retries=3) # default max_retries=3, default_retry_delay=180, rate_limite='1/s' '1/m' '1/h', second, minute, hour\ndef add(x, y):\n try:\n request = current_task.request\n logger.info('==================add retries: {0}======================'.format(request.retries))\n logger.info('add.name={0}'.format(add.name))\n logger.info('Caculate adding expression x={0}, y={1}'.format(x, y)) \n logger.info('request.delivery_info={0} request.retries={1} request.hostname={2}'.format(request.delivery_info, request.retries, request.hostname))\n # fail deliberately until retry = 3\n if request.retries < 3:\n raise Exception('ERROR HAPPENS HERE.')\n return x + y\n except Exception as exc:\n raise add.retry(exc=exc, countdown=5)\n # 1. overwrite 180s above to 5s to retry, 'Retry' will be logged into logs\n # 2. worker will notify the queue to resend message for retry\n # 3. 
if it excceds max_retries, the detail exception(exc here) will be thrown and logged.\n\n@celery.task\ndef mul(x, y):\n logger.info('========================mul=================================')\n logger.info('mul.name={0}'.format(mul.name))\n logger.info('Caculate mul expression x={0}, y={1}'.format(x, y)) \n return x * y\n\n@celery.task\ndef pdf(content):\n import pdfkit\n pdf_result_file = pdfkit.from_string(content, False)\n # raise Exception('========pdf=======')\n return pdf_result_file\n\n# There is a race condition if the task starts executing before the transaction has been committed; The database object does not exist yet!\n\n# The solution is to always commit transactions before sending tasks depending on state from the current transaction:\n\n# Django sample\n# @transaction.commit_manually\n# def create_article(request):\n # try:\n # article = Article.objects.create(...)\n # except:\n # transaction.rollback()\n # raise\n # else:\n # transaction.commit()\n # expand_abbreviations.delay(article.pk)\n\n","repo_name":"ghosert/VimProject","sub_path":"StudyPyramid/celery/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"3"} +{"seq_id":"34668680384","text":"import contextlib\nimport os\nimport shutil\nimport unittest\nfrom pxr import Sdf, Tf, Usd, Vt, Gf\n\n@contextlib.contextmanager\ndef InterpolationType(stage, interpolationType):\n oldInterpolationType = stage.GetInterpolationType()\n try:\n stage.SetInterpolationType(interpolationType)\n yield\n finally:\n stage.SetInterpolationType(oldInterpolationType)\n\n@contextlib.contextmanager\ndef LayerChangeListener():\n class _Listener(object):\n def __init__(self):\n self.changedLayers = []\n self._listener = Tf.Notice.RegisterGlobally(\n Sdf.Notice.LayersDidChange, self._HandleNotice)\n def _HandleNotice(self, notice, sender):\n self.changedLayers += notice.GetLayers()\n\n l = _Listener()\n yield l\n\nclass TestUsdValueClips(unittest.TestCase):\n def CheckTimeSamples(self, attr):\n \"\"\"Verifies attribute time samples are as expected via\n the time sample API\"\"\"\n allTimeSamples = attr.GetTimeSamples()\n self.assertEqual(attr.GetNumTimeSamples(), len(allTimeSamples))\n for i in range(0, len(allTimeSamples) - 1):\n (lowerSample, upperSample) = allTimeSamples[i], allTimeSamples[i+1]\n \n # The attribute's bracketing time samples at each time returned\n # by GetTimeSamples() should be equal to the time.\n self.assertEqual(attr.GetBracketingTimeSamples(lowerSample), \n (lowerSample, lowerSample))\n self.assertEqual(attr.GetBracketingTimeSamples(upperSample), \n (upperSample, upperSample))\n\n # The attribute's bracketing time samples should be the same\n # at every time in the interval (lowerSample, upperSample)\n for t in range(int(lowerSample) + 1, int(upperSample)):\n self.assertEqual(attr.GetBracketingTimeSamples(t), \n (lowerSample, upperSample))\n\n # Check the midpoint between lower and upper samples as an\n # extra sanity check -- this catches issues for non-integer\n # time sample times.\n if lowerSample != upperSample:\n self.assertEqual(\n attr.GetBracketingTimeSamples(\n lowerSample + ((upperSample - lowerSample) / 2.0)),\n (lowerSample, upperSample))\n\n # The attribute should return the same value at every time in the\n # interval [lowerSample, upperSample) if the stage's interpolation\n # type is held.\n with InterpolationType(attr.GetStage(), Usd.InterpolationTypeHeld):\n for t in range(int(lowerSample) 
+ 1, int(upperSample)):\n self.assertEqual(attr.Get(t), attr.Get(lowerSample))\n\n # Verify that the value before the first time sample and after the\n # last time sample are held.\n if len(allTimeSamples) > 0:\n firstTimeSample = min(allTimeSamples)\n self.assertEqual(attr.GetBracketingTimeSamples(firstTimeSample - 1),\n (firstTimeSample, firstTimeSample))\n self.assertEqual(attr.Get(firstTimeSample - 1), \n attr.Get(firstTimeSample))\n\n lastTimeSample = max(allTimeSamples)\n self.assertEqual(attr.GetBracketingTimeSamples(lastTimeSample + 1),\n (lastTimeSample, lastTimeSample))\n self.assertEqual(attr.Get(lastTimeSample + 1), \n attr.Get(lastTimeSample))\n\n # Verify that getting the complete time sample map for this\n # attribute is equivalent to asking for the value at each time\n # returned by GetTimeSamples()\n def _GetValue(attr, t):\n v = attr.Get(t)\n if v == None:\n return Sdf.ValueBlock()\n return v\n\n timeSampleMap = dict([(t, _GetValue(attr, t)) for t in allTimeSamples])\n\n self.assertEqual(timeSampleMap, attr.GetMetadata('timeSamples'))\n\n # Verify that getting ranges of time samples works\n if len(allTimeSamples) > 2:\n startClip = min(allTimeSamples) \n endClip = startClip\n\n self.assertEqual(\n attr.GetTimeSamplesInInterval(\n Gf.Interval(startClip - 1, endClip, True, False)),\n [])\n\n while endClip < max(allTimeSamples):\n self.assertEqual(\n attr.GetTimeSamplesInInterval(\n Gf.Interval(startClip, endClip)), \n [t for t in allTimeSamples if t <= endClip])\n endClip += 1\n\n self.assertEqual(\n attr.GetTimeSamplesInInterval(\n Gf.Interval(endClip, endClip + 1, False, True)),\n [])\n\n def CheckValue(self, attr, expected, time=None, query=True):\n if time is not None:\n self.assertEqual(attr.Get(time), expected)\n if query:\n self.assertEqual(Usd.AttributeQuery(attr).Get(time), expected)\n else:\n self.assertEqual(attr.Get(), expected)\n if query:\n self.assertEqual(Usd.AttributeQuery(attr).Get(), expected)\n\n def test_BasicClipBehavior(self):\n \"\"\"Exercises basic clip behavior.\"\"\"\n stage = Usd.Stage.Open('basic/root.usda')\n\n model = stage.GetPrimAtPath('/Model_1')\n\n localAttr = model.GetAttribute('local')\n refAttr = model.GetAttribute('ref')\n clsAttr = model.GetAttribute('cls')\n payloadAttr = model.GetAttribute('payload')\n varAttr = model.GetAttribute('var')\n self.assertTrue(localAttr)\n self.assertTrue(clsAttr)\n self.assertTrue(refAttr)\n self.assertTrue(payloadAttr)\n self.assertTrue(varAttr)\n\n # No clip layers should be loaded yet\n self.assertEqual(stage.GetUsedLayers(includeClipLayers=True), \n stage.GetUsedLayers(includeClipLayers=False))\n self.assertFalse(Sdf.Layer.Find('basic/clip.usda'))\n self.assertFalse(Sdf.Layer.Find('basic/manifest.usda'))\n\n # Clips are never consulted for default values. This implies also\n # that no clips should even get loaded as a result of the queries.\n # However, we must tell CheckValue() not to construct UsdAttributeQuery\n # objects, since that act *does* need to load clips is the attr is \n # affected by clips\n self.CheckValue(localAttr, expected=1.0, query=False)\n self.CheckValue(refAttr, expected=1.0, query=False)\n self.CheckValue(clsAttr, expected=1.0, query=False)\n self.CheckValue(payloadAttr, expected=1.0, query=False)\n self.CheckValue(varAttr, expected=1.0, query=False)\n \n # Still shouldn't have loaded any clip layers! 
\n self.assertEqual(stage.GetUsedLayers(includeClipLayers=True), \n stage.GetUsedLayers(includeClipLayers=False))\n self.assertFalse(Sdf.Layer.Find('basic/clip.usda'))\n self.assertFalse(Sdf.Layer.Find('basic/manifest.usda'))\n\n # These attributes all have multiple time samples either locally\n # or from the single clip, so they all might be time varying.\n self.assertTrue(localAttr.ValueMightBeTimeVarying())\n self.assertTrue(clsAttr.ValueMightBeTimeVarying())\n self.assertTrue(refAttr.ValueMightBeTimeVarying())\n self.assertTrue(payloadAttr.ValueMightBeTimeVarying())\n self.assertTrue(varAttr.ValueMightBeTimeVarying())\n\n # Model_1 has active clips authored starting at time 10. However, the first\n # active clip is \"held active\" to -inf, and for any given time t, the prior\n # active clip at time t is still considered active. So even when querying\n # a timeSample prior to the first \"active time\", we expect the first clip\n # to be loaded and consulted, with a linear time-mapping from stage time\n # to time-within-clip-earlier-than-first-clipTimes-knot. In our test case,\n # this means all attrs except localAttr (which has local timeSamples in the\n # clip-anchoring layer) should get their values from the first clip.\n self.CheckValue(localAttr, time=5, expected=5.0)\n self.CheckValue(refAttr, time=5, expected=-5.0)\n self.CheckValue(clsAttr, time=5, expected=-5.0)\n self.CheckValue(payloadAttr, time=5, expected=-5.0)\n self.CheckValue(varAttr, time=5, expected=-5.0)\n\n # We expect the manifest and the first clip to be opened at this point.\n self.assertTrue(Sdf.Layer.Find('basic/clip.usda'))\n self.assertTrue(Sdf.Layer.Find('basic/manifest.usda'))\n\n # Starting at time 10, clips should be consulted for values.\n #\n # The strength order using during time sample resolution is \n # L1(ocal)C(lip)L2(ocal)I(nherit)V(ariant)R(eference)P(ayload), so\n # local opinions in layers stronger than the layer that anchors the clip\n # metadata (L1 above, which *includes* the anchoring subLayer) should win\n # over the clip, but the clip should win over all other opinions, including\n # those from loal subLayers weaker than the anchoring layer (L2).\n self.CheckValue(localAttr, time=10, expected=10.0)\n self.CheckValue(refAttr, time=10, expected=-10.0)\n self.CheckValue(clsAttr, time=10, expected=-10.0)\n self.CheckValue(payloadAttr, time=10, expected=-10.0)\n self.CheckValue(varAttr, time=10, expected=-10.0)\n\n # Attributes in prims that are descended from where the clip\n # metadata was authored should pick up opinions from the clip\n # too, just like above.\n child = stage.GetPrimAtPath('/Model_1/Child')\n childAttr = child.GetAttribute('attr')\n\n self.CheckValue(childAttr, expected=1.0)\n self.CheckValue(childAttr, time=5, expected=-5.0)\n self.CheckValue(childAttr, time=10, expected=-10.0)\n\n self.CheckTimeSamples(localAttr)\n self.CheckTimeSamples(refAttr)\n self.CheckTimeSamples(clsAttr)\n self.CheckTimeSamples(payloadAttr)\n self.CheckTimeSamples(varAttr)\n self.CheckTimeSamples(childAttr)\n\n # Before reload, stage should still be getting the old value\n clipAttr = stage.GetPrimAtPath('/Model_1/Child').GetAttribute('attr')\n self.CheckValue(clipAttr, expected=-5, time=5)\n\n # Ensure that UsdStage::Reload reloads clip layers\n # by editing one of the clip layers values.\n try:\n # Make a copy of the original layer and restore it\n # afterwards so we don't leave unwanted state behind\n # and cause subsequent test runs to fail.\n shutil.copy2('basic/clip.usda', 
'basic/clip.usda.old')\n\n clip = Sdf.Layer.FindOrOpen('basic/clip.usda')\n clip.SetTimeSample(Sdf.Path('/Model/Child.attr'), 5, 1005)\n clip.Save()\n\n # After, it should get the newly set value in our clip layer\n stage.Reload()\n self.CheckValue(clipAttr, expected=1005, time=5)\n finally:\n shutil.move('basic/clip.usda.old', 'basic/clip.usda')\n\n def test_ClipTiming(self):\n \"\"\"Exercises clip retiming via clipTimes metadata\"\"\"\n stage = Usd.Stage.Open('timing/root.usda')\n \n model = stage.GetPrimAtPath('/Model')\n attr = model.GetAttribute('size')\n\n # Default value should come through regardless of clip timing.\n self.CheckValue(attr, expected=1.0)\n\n # The 'clipTimes' metadata authored in the test asset offsets the \n # time samples in the clip by 10 frames and scales it slower by 50%,\n # repeating at frame 21.\n with InterpolationType(stage, Usd.InterpolationTypeHeld):\n self.CheckValue(attr, time=0, expected=10.0)\n self.CheckValue(attr, time=5, expected=10.0)\n self.CheckValue(attr, time=10, expected=15.0)\n self.CheckValue(attr, time=15, expected=15.0)\n self.CheckValue(attr, time=20, expected=10.0)\n self.CheckValue(attr, time=25, expected=10.0)\n self.CheckValue(attr, time=30, expected=15.0)\n self.CheckValue(attr, time=35, expected=15.0)\n self.CheckValue(attr, time=40, expected=20.0)\n\n # Requests for samples before and after the mapping specified in\n # 'clipTimes' just pick up the first or last time sample.\n self.CheckValue(attr, time=-1, expected=10.0)\n self.CheckValue(attr, time=41, expected=20.0)\n\n # Repeat the test with linear interpolation\n with InterpolationType(stage, Usd.InterpolationTypeLinear):\n self.CheckValue(attr, time=0, expected=10.0)\n self.CheckValue(attr, time=5, expected=12.5)\n self.CheckValue(attr, time=10, expected=15.0)\n self.CheckValue(attr, time=15, expected=17.5)\n self.CheckValue(attr, time=20, expected=10.0)\n self.CheckValue(attr, time=25, expected=12.5)\n self.CheckValue(attr, time=30, expected=15.0)\n self.CheckValue(attr, time=35, expected=17.5)\n self.CheckValue(attr, time=40, expected=20.0)\n\n self.CheckValue(attr, time=-1, expected=10.0)\n self.CheckValue(attr, time=41, expected=20.0)\n\n # The clip has time samples authored every 5 frames, but\n # since we've scaled everything by 50%, we should have samples\n # every 10 frames.\n self.assertEqual(\n attr.GetTimeSamples(), \n [-10, 0, 10, 20 - Usd.TimeCode.SafeStep(), 20, 30, 40])\n self.assertEqual(\n attr.GetTimeSamplesInInterval(Gf.Interval(0, 30)),\n [0, 10, 20 - Usd.TimeCode.SafeStep(), 20, 30])\n\n # Test trickier cases where time samples in the clip fall outside\n # of the time domain specified by the 'clipTimes' metadata.\n model2 = stage.GetPrimAtPath('/Model2')\n attr2 = model2.GetAttribute('size')\n\n self.CheckValue(attr2, time=20, expected=20.0)\n self.CheckValue(attr2, time=30, expected=25.0)\n\n # Repeat the test with held interpolation\n with InterpolationType(stage, Usd.InterpolationTypeHeld):\n self.CheckValue(attr2, time=20, expected=15.0)\n self.CheckValue(attr2, time=30, expected=25.0)\n\n self.assertEqual(attr2.GetTimeSamples(),\n [0.0, 10.0, 20.0, 25.0, 30.0])\n self.assertEqual(attr2.GetTimeSamplesInInterval(Gf.Interval(0, 25)), \n [0.0, 10.0, 20.0, 25.0])\n\n self.CheckTimeSamples(attr)\n self.CheckTimeSamples(attr2)\n\n def test_ClipTimeSamples(self):\n \"\"\"Test that each stage time in a clips time mapping is treated as\n a time sample.\"\"\"\n stage = Usd.Stage.Open('timeSamples/root.usda')\n\n model = stage.GetPrimAtPath('/Model')\n attr = 
model.GetAttribute('size')\n\n self.assertEqual(\n attr.GetTimeSamples(),\n [0.0, 2.0, 4.0, 5.0 - Usd.TimeCode.SafeStep(), 5.0, 6.0, 7.0, 8.0, \n 9.0])\n self.CheckTimeSamples(attr)\n\n def test_ClipTimingOutsideRange(self):\n \"\"\"Tests clip retiming behavior when the mapped clip times are outside\n the range of time samples in the clip\"\"\"\n stage = Usd.Stage.Open('timingOutsideClip/root.usda')\n\n model = stage.GetPrimAtPath('/Model')\n attr = model.GetAttribute('size')\n\n # Asking for frames outside the mapped times should also clamp to\n # the nearest time sample.\n for t in range(-10, 0):\n self.CheckValue(attr, time=t, expected=25.0)\n self.assertEqual(attr.GetBracketingTimeSamples(t), (0.0, 0.0))\n\n for t in range(11, 20):\n self.CheckValue(attr, time=t, expected=25.0)\n self.assertEqual(attr.GetBracketingTimeSamples(t), (10.0, 10.0))\n\n self.assertEqual(attr.GetTimeSamples(), \n [0.0, 10.0])\n self.assertEqual(attr.GetTimeSamplesInInterval(Gf.Interval(-1.0, 1.0)), \n [0.0])\n self.assertEqual(attr.GetTimeSamplesInInterval(Gf.Interval(0.0, 0.0)), \n [0.0])\n self.CheckTimeSamples(attr)\n\n def test_ClipTimeCodeTiming(self):\n \"\"\"Exercises clip retiming via clipTimes metadata for timecode value \n attributes\"\"\"\n stage = Usd.Stage.Open('timeCodeTiming/root.usda')\n\n model = stage.GetPrimAtPath('/Model')\n attr = model.GetAttribute('time')\n attr2 = model.GetAttribute('timeArray')\n\n # Default value should come through regardless of clip timing.\n self.CheckValue(attr, expected=1.0)\n self.CheckValue(attr2, expected=Sdf.TimeCodeArray([1.0,2.0]))\n\n stage.SetInterpolationType(Usd.InterpolationTypeLinear)\n\n # The 'clipTimes' metadata authored in the test asset offsets the \n # time samples in the clip by 10 frames and scales it slower by 50%,\n # then at frame 21 it repeats the clip from frame 0 without the offset\n # and scaling..\n self.CheckValue(attr, time=0, expected=5.0)\n self.CheckValue(attr, time=5, expected=5.0)\n self.CheckValue(attr, time=10, expected=5.0)\n self.CheckValue(attr, time=15, expected=5.0)\n self.CheckValue(attr, time=20, expected=45.0)\n self.CheckValue(attr, time=25, expected=40.0)\n self.CheckValue(attr, time=30, expected=35.0)\n self.CheckValue(attr, time=35, expected=30.0)\n self.CheckValue(attr, time=40, expected=25.0)\n\n # Requests for samples before and after the mapping specified in\n # 'clipTimes' just pick up the first or last time sample.\n self.CheckValue(attr, time=-1, expected=5.0)\n self.CheckValue(attr, time=41, expected=25.0)\n\n # Repeat getting values at the same times for the SdfTimeCodeArray \n # valued attribute.\n self.CheckValue(attr2, time=0, expected=Sdf.TimeCodeArray([0.0, 5.0]))\n self.CheckValue(attr2, time=5, expected=Sdf.TimeCodeArray([5.0, 5.0]))\n self.CheckValue(attr2, time=10, expected=Sdf.TimeCodeArray([10.0, 5.0]))\n self.CheckValue(attr2, time=15, expected=Sdf.TimeCodeArray([15.0, 5.0]))\n self.CheckValue(attr2, time=20, expected=Sdf.TimeCodeArray([20.0, 45.0]))\n self.CheckValue(attr2, time=25, expected=Sdf.TimeCodeArray([25.0, 40.0]))\n self.CheckValue(attr2, time=30, expected=Sdf.TimeCodeArray([30.0, 35.0]))\n self.CheckValue(attr2, time=35, expected=Sdf.TimeCodeArray([35.0, 30.0]))\n self.CheckValue(attr2, time=40, expected=Sdf.TimeCodeArray([40.0, 25.0]))\n\n self.CheckValue(attr2, time=-1, expected=Sdf.TimeCodeArray([0.0, 5.0]))\n self.CheckValue(attr2, time=41, expected=Sdf.TimeCodeArray([40.0, 25.0]))\n\n # Repeat the test over again with held interpolation.\n 
stage.SetInterpolationType(Usd.InterpolationTypeHeld)\n\n # The 'clipTimes' metadata authored in the test asset offsets the \n # time samples in the clip by 10 frames and scales it slower by 50%,\n # then at frame 21 it repeats the clip from frame 0 without the offset\n # and scaling..\n self.CheckValue(attr, time=0, expected=5.0)\n self.CheckValue(attr, time=5, expected=5.0)\n self.CheckValue(attr, time=10, expected=5.0)\n self.CheckValue(attr, time=15, expected=5.0)\n self.CheckValue(attr, time=20, expected=45.0)\n self.CheckValue(attr, time=25, expected=40.0)\n self.CheckValue(attr, time=30, expected=35.0)\n self.CheckValue(attr, time=35, expected=30.0)\n self.CheckValue(attr, time=40, expected=25.0)\n\n # Requests for samples before and after the mapping specified in\n # 'clipTimes' just pick up the first or last time sample.\n self.CheckValue(attr, time=-1, expected=5.0)\n self.CheckValue(attr, time=41, expected=25.0)\n\n # Repeat getting values at the same times for the SdfTimeCodeArray \n # valued attribute.\n self.CheckValue(attr2, time=0, expected=Sdf.TimeCodeArray([0.0, 5.0]))\n self.CheckValue(attr2, time=5, expected=Sdf.TimeCodeArray([0.0, 5.0]))\n self.CheckValue(attr2, time=10, expected=Sdf.TimeCodeArray([10.0, 5.0]))\n self.CheckValue(attr2, time=15, expected=Sdf.TimeCodeArray([10.0, 5.0]))\n self.CheckValue(attr2, time=20, expected=Sdf.TimeCodeArray([20.0, 45.0]))\n self.CheckValue(attr2, time=25, expected=Sdf.TimeCodeArray([25.0, 40.0]))\n self.CheckValue(attr2, time=30, expected=Sdf.TimeCodeArray([30.0, 35.0]))\n self.CheckValue(attr2, time=35, expected=Sdf.TimeCodeArray([35.0, 30.0]))\n self.CheckValue(attr2, time=40, expected=Sdf.TimeCodeArray([40.0, 25.0]))\n\n self.CheckValue(attr2, time=-1, expected=Sdf.TimeCodeArray([0.0, 5.0]))\n self.CheckValue(attr2, time=41, expected=Sdf.TimeCodeArray([40.0, 25.0]))\n\n # The clip has time samples authored every 5 frames, but\n # since we've scaled everything by 50%, we should have samples\n # every 10 frames.\n self.assertEqual(\n attr.GetTimeSamples(), \n [0, 10, 20 - Usd.TimeCode.SafeStep(), 20, 25, 30, 35, 40])\n self.assertEqual(\n attr.GetTimeSamplesInInterval(Gf.Interval(0, 30)),\n [0, 10, 20 - Usd.TimeCode.SafeStep(), 20, 25, 30])\n\n self.CheckTimeSamples(attr)\n self.CheckTimeSamples(attr2)\n\n def test_ClipsWithLayerOffsets(self):\n \"\"\"Tests behavior of clips when layer offsets are involved\"\"\"\n stage = Usd.Stage.Open('layerOffsets/root.usda')\n\n model1 = stage.GetPrimAtPath('/Model_1')\n attr1 = model1.GetAttribute('size')\n model2 = stage.GetPrimAtPath('/Model_2')\n attr2 = model2.GetAttribute('size')\n model3 = stage.GetPrimAtPath('/Model_3')\n attr3 = model3.GetAttribute('size')\n\n # Default value should be unaffected by layer offsets.\n self.CheckValue(attr1, expected=1.0)\n\n # The clip should be active starting from frame +10.0 due to the\n # offset; before that, we get the held value of the clip's first \n # time sample,\n self.CheckValue(attr1, time=9, expected=-5.0)\n\n # Sublayer offset of 10 frames is present, so attribute value at\n # frame 20 should be from the clip at frame 10, etc.\n self.CheckValue(attr1, time=20, expected=-10.0)\n self.CheckValue(attr1, time=15, expected=-5.0)\n self.CheckValue(attr1, time=10, expected=-5.0)\n self.assertEqual(attr1.GetTimeSamples(), \n [10.0, 15.0, 20.0, 25.0, 30.0])\n self.assertEqual(attr1.GetTimeSamplesInInterval(\n Gf.Interval(-10, 10)), [10.0])\n \n # Test that layer offsets on layers where\n # clipTimes/clipActive are authored are taken into\n # 
account. The test case is similar to above, except\n # clipTimes/clipActive have been authored in a sublayer that\n # is offset by 20 frames instead of 10.\n self.CheckValue(attr2, expected=1.0)\n self.CheckValue(attr2, time=19, expected=-5.0)\n self.CheckValue(attr2, time=40, expected=-20.0)\n self.CheckValue(attr2, time=35, expected=-15.0)\n self.CheckValue(attr2, time=30, expected=-10.0)\n self.assertEqual(attr2.GetTimeSamples(), \n [20.0, 25.0, 30.0, 35.0, 40.0])\n self.assertEqual(attr2.GetTimeSamplesInInterval(\n Gf.Interval(-17, 21)), \n [20.0])\n\n # Test that reference offsets are taken into account. An offset\n # of 10 frames is authored on the reference; this should be combined\n # with the offset of 10 frames on the sublayer.\n self.CheckValue(attr3, expected=1.0)\n self.CheckValue(attr3, time=19, expected=-5.0)\n self.CheckValue(attr3, time=40, expected=-20.0)\n self.CheckValue(attr3, time=35, expected=-15.0)\n self.CheckValue(attr3, time=30, expected=-10.0)\n self.assertEqual(attr3.GetTimeSamples(), \n [20.0, 25.0, 30.0, 35.0, 40.0])\n self.assertEqual(attr3.GetTimeSamplesInInterval(\n Gf.Interval(-5, 5)), \n [])\n\n self.CheckTimeSamples(attr1)\n self.CheckTimeSamples(attr2)\n self.CheckTimeSamples(attr3)\n\n # Verify GetPropertyStackWithLayerOffsets run on an attribute with \n # clips returns the clip spec's layer offset matching the source spec's\n # layer offset.\n self.assertEqual(attr3.GetPropertyStackWithLayerOffsets(40),\n [(Sdf.Find('layerOffsets/clip.usda', '/Model.size'), \n Sdf.LayerOffset(20)), \n (Sdf.Find('layerOffsets/ref.usda', '/Model.size'), \n Sdf.LayerOffset(20))])\n\n def test_TimeCodeClipsWithLayerOffsets(self):\n \"\"\"Tests behavior of clips when layer offsets are involved and the\n attributes are SdfTimeCode values. This test is almost identical to \n test_ClipsWithLayerOffsets except that values returned themselves are\n also offset by the layer offsets.\"\"\"\n stage = Usd.Stage.Open('layerOffsets/root.usda')\n\n model1 = stage.GetPrimAtPath('/Model_1')\n attr1 = model1.GetAttribute('time')\n model2 = stage.GetPrimAtPath('/Model_2')\n attr2 = model2.GetAttribute('time')\n model3 = stage.GetPrimAtPath('/Model_3')\n attr3 = model3.GetAttribute('time')\n\n # Default time code value will be affected by layer offsets.\n self.CheckValue(attr1, expected=11.0)\n\n # The first time sample from the clip should be active starting from \n # frame +10.0 due to the offset; before that, we get the held value\n # of the clip's first time sample, which is itself then adjusted by\n # the offset.\n self.CheckValue(attr1, time=9, expected=5.0)\n\n # Sublayer offset of 10 frames is present, so attribute value at\n # frame 20 should be from the clip at frame 10, etc. plus the value of\n # the offset.\n self.CheckValue(attr1, time=20, expected=0.0)\n self.CheckValue(attr1, time=15, expected=5.0)\n self.CheckValue(attr1, time=10, expected=5.0)\n self.assertEqual(attr1.GetTimeSamples(), \n [10.0, 15.0, 20.0, 25.0, 30.0])\n self.assertEqual(attr1.GetTimeSamplesInInterval(\n Gf.Interval(-10, 10)), [10.0])\n\n # Test that layer offsets on layers where\n # clipTimes/clipActive are authored are taken into\n # account. 
The test case is similar to above, except\n # clipTimes/clipActive have been authored in a sublayer that\n # is offset by 20 frames instead of 10.\n self.CheckValue(attr2, expected=11.0)\n self.CheckValue(attr2, time=19, expected=15.0)\n self.CheckValue(attr2, time=40, expected=0.0)\n self.CheckValue(attr2, time=35, expected=5.0)\n self.CheckValue(attr2, time=30, expected=10.0)\n self.assertEqual(attr2.GetTimeSamples(), \n [20.0, 25.0, 30.0, 35.0, 40.0])\n self.assertEqual(attr2.GetTimeSamplesInInterval(\n Gf.Interval(-17, 21)), \n [20.0])\n\n # Test that reference offsets are taken into account. An offset\n # of 10 frames is authored on the reference; this should be combined\n # with the offset of 10 frames on the sublayer.\n self.CheckValue(attr3, expected=21.0)\n self.CheckValue(attr3, time=19, expected=15.0)\n self.CheckValue(attr3, time=40, expected=0.0)\n self.CheckValue(attr3, time=35, expected=5.0)\n self.CheckValue(attr3, time=30, expected=10.0)\n self.assertEqual(attr3.GetTimeSamples(), \n [20.0, 25.0, 30.0, 35.0, 40.0])\n self.assertEqual(attr3.GetTimeSamplesInInterval(\n Gf.Interval(-5, 5)), \n [])\n\n self.CheckTimeSamples(attr1)\n self.CheckTimeSamples(attr2)\n self.CheckTimeSamples(attr3)\n\n def test_ClipTimingDiscontinuities(self):\n \"\"\"Tests behavior of clip timing with discontinuities to control\n looping\"\"\"\n stage = Usd.Stage.Open('timingDiscontinuity/root.usda')\n attr = stage.GetAttributeAtPath('/World.value')\n\n # Test that values interpolate up to the discontinuity at\n # time 10, then loop back to the start of the clip at time 10.\n self.CheckValue(attr, time=6, expected=6)\n self.CheckValue(attr, time=7, expected=7)\n self.CheckValue(attr, time=8, expected=8)\n self.CheckValue(attr, time=9, expected=9)\n self.CheckValue(attr, time=9.5, expected=9.5)\n self.CheckValue(attr, \n time=10 - Usd.TimeCode.SafeStep(), \n expected=10 - Usd.TimeCode.SafeStep())\n self.CheckValue(attr, time=10, expected=0)\n self.CheckValue(attr, time=11, expected=1)\n self.CheckValue(attr, time=12, expected=2)\n self.CheckValue(attr, time=13, expected=3)\n\n # The list of time samples includes an entry at each discontinuity.\n # If there's a discontinuity at time t, there will be time samples\n # at t and t - Usd.TimeCode.SafeStep(). 
This allows us to represent\n # the discontinuity consistently when flattening the attribute.\n self.assertEqual(\n attr.GetTimeSamples(), \n [-10, 0, 3, 6, 10 - Usd.TimeCode.SafeStep(), 10, \n 13, 16, 20 - Usd.TimeCode.SafeStep(), 20])\n\n self.CheckTimeSamples(attr)\n\n def test_ClipReverseTiming(self):\n '''Tests behavior when reversing time samples in clips'''\n stage = Usd.Stage.Open('reversing/root.usda')\n attr = stage.GetAttributeAtPath('/Model.size')\n\n # From time [0, 4] we retrieve values from the clip at times [0, 4]\n self.CheckValue(attr, time=0, expected=0)\n self.CheckValue(attr, time=1, expected=2)\n self.CheckValue(attr, time=2, expected=4)\n self.CheckValue(attr, time=3, expected=6)\n self.CheckValue(attr, time=4, expected=8)\n\n # From time (4, 8] the times metadata reverse the clip times, so at\n # time = 5 we get the value in the clip at time 3, at time = 6 we get\n # the value in the clip at time 2, etc.\n self.CheckValue(attr, time=5, expected=6)\n self.CheckValue(attr, time=6, expected=4)\n self.CheckValue(attr, time=7, expected=2)\n self.CheckValue(attr, time=8, expected=0)\n\n self.assertEqual(\n attr.GetTimeSamples(),\n [0, 2, 4, 6, 8])\n\n self.CheckTimeSamples(attr)\n\n def test_ClipStrengthOrdering(self):\n '''Tests strength of clips during resolution'''\n\n rootLayerFile = 'ordering/root.usda'\n clipFile = 'ordering/clip.usda'\n subLayerClipIntroFile = \\\n 'ordering/sublayer_with_clip_intro.usda'\n subLayerWithOpinionFile = \\\n 'ordering/sublayer_with_opinion.usda'\n\n clipLayer = Sdf.Layer.FindOrOpen(clipFile)\n subLayerClipIntroLayer = Sdf.Layer.FindOrOpen(subLayerClipIntroFile)\n subLayerWithOpinionLayer = Sdf.Layer.FindOrOpen(subLayerWithOpinionFile)\n\n primPath = Sdf.Path('/Model')\n \n stage = Usd.Stage.Open(rootLayerFile)\n\n model = stage.GetPrimAtPath(primPath)\n\n # Ensure that a stronger layer wins over clips\n propName = 'baz'\n attr = model.GetAttribute(propName)\n self.assertEqual(attr.GetPropertyStack(10.0),\n [p.GetPropertyAtPath(primPath.AppendProperty(propName)) for p in \n [subLayerClipIntroLayer, clipLayer, subLayerWithOpinionLayer]])\n # With a default time code, clips won't show up\n self.assertEqual(attr.GetPropertyStack(Usd.TimeCode.Default()),\n [p.GetPropertyAtPath(primPath.AppendProperty(propName)) for p in \n [subLayerClipIntroLayer, subLayerWithOpinionLayer]])\n self.CheckValue(attr, time=10, expected=5.0)\n\n # Ensure that a clip opinion wins out over a weaker sublayer\n propName = 'foo'\n attr = model.GetAttribute(propName)\n self.assertEqual(attr.GetPropertyStack(5.0),\n [p.GetPropertyAtPath(primPath.AppendProperty(propName)) for p in \n [clipLayer, subLayerWithOpinionLayer]])\n # With a default time code, clips won't show up\n self.assertEqual(attr.GetPropertyStack(Usd.TimeCode.Default()),\n [p.GetPropertyAtPath(primPath.AppendProperty(propName)) for p in \n [subLayerWithOpinionLayer]])\n self.CheckValue(attr, time=5, expected=50.0) \n\n # Ensure fallback to weaker layers works as intended \n propName = 'bar'\n attr = model.GetAttribute(propName)\n self.assertEqual(attr.GetPropertyStack(15.0),\n [p.GetPropertyAtPath(primPath.AppendProperty(propName)) for p in \n [subLayerWithOpinionLayer]])\n # With a default time code, clips won't show up\n self.assertEqual(attr.GetPropertyStack(Usd.TimeCode.Default()),\n [p.GetPropertyAtPath(primPath.AppendProperty(propName)) for p in \n [subLayerWithOpinionLayer]])\n self.CheckValue(attr, time=15, expected=500.0)\n\n def test_SingleClip(self):\n \"\"\"Verifies behavior with a single 
clip being applied to a prim\"\"\"\n stage = Usd.Stage.Open('singleclip/root.usda')\n\n model = stage.GetPrimAtPath('/SingleClip')\n\n # This prim has a single clip that contributes just one time sample\n # for this attribute. That value will be used over all time.\n attr_1 = model.GetAttribute('attr_1')\n\n self.assertFalse(attr_1.ValueMightBeTimeVarying())\n self.CheckValue(attr_1, time=0, expected=10.0)\n self.assertEqual(attr_1.GetBracketingTimeSamples(0.0), (0.0, 0.0))\n self.assertEqual(attr_1.GetTimeSamples(), [0.0])\n self.assertEqual(attr_1.GetTimeSamplesInInterval(\n Gf.Interval.GetFullInterval()), [0.0])\n\n self.CheckTimeSamples(attr_1)\n\n # This attribute has no time samples in the clip or elsewhere. Value \n # resolution will fall back to the default value, which will be used over \n # all time.\n attr_2 = model.GetAttribute('attr_2')\n\n self.assertFalse(attr_2.ValueMightBeTimeVarying())\n self.CheckValue(attr_2, time=0, expected=2.0)\n self.assertEqual(attr_2.GetBracketingTimeSamples(0.0), ())\n self.assertEqual(attr_2.GetTimeSamples(), [])\n self.assertEqual(attr_2.GetTimeSamplesInInterval( \n Gf.Interval.GetFullInterval()), [])\n\n self.CheckTimeSamples(attr_2)\n\n def test_MultipleClips(self):\n \"\"\"Verifies behavior with multiple clips being applied to a single prim\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n\n model = stage.GetPrimAtPath('/Model_1')\n attr = model.GetAttribute('size')\n\n # This prim has multiple clips that contribute values to this attribute,\n # so it should be detected as potentially time varying.\n self.assertTrue(attr.ValueMightBeTimeVarying())\n \n # clip1 is active in the range [..., 16)\n # clip2 is active in the range [16, ...)\n # Check that we get time samples from the right clip when querying\n # in those ranges.\n self.CheckValue(attr, time=5, expected=-5)\n self.CheckValue(attr, time=10, expected=-10)\n self.CheckValue(attr, time=15, expected=-15)\n self.CheckValue(attr, time=16, expected=-23)\n self.CheckValue(attr, time=19, expected=-23)\n self.CheckValue(attr, time=22, expected=-26)\n self.CheckValue(attr, time=25, expected=-29)\n\n # Value clips introduce time samples at their boundaries, even if there\n # isn't an actual time sample in the clip at that time. This is to\n # isolate them from surrounding clips. 
So, the value from frame 16 comes\n # from clip 2.\n self.CheckValue(attr, time=16, expected=-23)\n self.assertEqual(attr.GetBracketingTimeSamples(16), (16, 16))\n\n # Verify that GetTimeSamples() returns time samples from both clips.\n self.assertEqual(\n attr.GetTimeSamples(), \n [0.0, 5.0, 10.0, 15.0, 16.0 - Usd.TimeCode.SafeStep(), 16.0, 19.0, \n 22.0, 25.0, 32.0])\n self.assertEqual(\n attr.GetTimeSamplesInInterval(Gf.Interval(0, 30)), \n [0.0, 5.0, 10.0, 15.0, 16.0 - Usd.TimeCode.SafeStep(), 16.0, 19.0,\n 22.0, 25.0])\n self.CheckTimeSamples(attr)\n\n def test_MultipleClipsWithNoTimeSamples(self):\n \"\"\"Tests behavior when multiple clips are specified on a prim and none\n have time samples for an attribute owned by that prim.\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n\n model = stage.GetPrimAtPath('/ModelWithNoClipSamples')\n attr = model.GetAttribute('size')\n \n # Since none of the clips provide samples for this attribute, we should\n # fall back to the default value and report that this attribute's values\n # are constant over time.\n self.assertFalse(attr.ValueMightBeTimeVarying())\n self.assertEqual(attr.GetResolveInfo(0).GetSource(),\n Usd.ResolveInfoSourceDefault)\n\n # This prim has multiple clips specified from frames [0.0, 31.0] but\n # none provide samples for the size attribute. The value in this\n # time range should be equal to the default value from the reference.\n # The value outside this time range should also be the default\n # value, since no clips are active in those times.\n for t in range(-10, 40):\n self.CheckValue(attr, time=t, expected=1.0)\n\n # Since none of the clips provide samples, there should be no\n # time samples or bracketing time samples at any of these times.\n for t in range(-10, 40):\n self.assertEqual(attr.GetBracketingTimeSamples(t), ())\n\n self.assertEqual(attr.GetTimeSamples(), [])\n self.assertEqual(attr.GetTimeSamplesInInterval(\n Gf.Interval.GetFullInterval()), [])\n\n self.CheckTimeSamples(attr)\n\n def test_MultipleClipsWithSomeTimeSamples(self):\n \"\"\"Tests behavior when multiple clips are specified on a prim and\n some of them have samples for an attribute owned by that prim, while\n others do not.\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n \n model = stage.GetPrimAtPath('/ModelWithSomeClipSamples')\n attr = model.GetAttribute('size')\n \n # The clip in the range [..., 16) has no samples for the attribute,\n # so the value should be the default value from the manifest. Since\n # no default value is specified, we get a value of None.\n with InterpolationType(stage, Usd.InterpolationTypeLinear):\n for t in range(-10, 16):\n self.CheckValue(attr, time=t, expected=None)\n\n with InterpolationType(stage, Usd.InterpolationTypeHeld):\n for t in range(-10, 16):\n self.CheckValue(attr, time=t, expected=None)\n\n # This attribute should be detected as potentially time-varying\n # since multiple clips are involved and at least one of them has\n # samples.\n self.assertTrue(attr.ValueMightBeTimeVarying())\n\n # The clip in the range [16, ...) 
has samples on frames 3, 6, 9 so\n # we expect time samples for this attribute at frames 19, 22, and 25.\n with InterpolationType(stage, Usd.InterpolationTypeHeld):\n self.CheckValue(attr, time=16, expected=-23.0)\n self.CheckValue(attr, time=17, expected=-23.0)\n self.CheckValue(attr, time=18, expected=-23.0)\n self.CheckValue(attr, time=19, expected=-23.0)\n self.CheckValue(attr, time=20, expected=-23.0)\n self.CheckValue(attr, time=21, expected=-23.0)\n self.CheckValue(attr, time=22, expected=-26.0)\n self.CheckValue(attr, time=23, expected=-26.0)\n self.CheckValue(attr, time=24, expected=-26.0)\n self.CheckValue(attr, time=25, expected=-29.0)\n self.CheckValue(attr, time=26, expected=-29.0)\n self.CheckValue(attr, time=27, expected=-29.0)\n self.CheckValue(attr, time=28, expected=-29.0)\n self.CheckValue(attr, time=29, expected=-29.0)\n self.CheckValue(attr, time=30, expected=-29.0)\n self.CheckValue(attr, time=31, expected=-29.0)\n\n # Repeat test with linear interpolation\n with InterpolationType(stage, Usd.InterpolationTypeLinear):\n self.CheckValue(attr, time=16, expected=-23.0)\n self.CheckValue(attr, time=17, expected=-23.0)\n self.CheckValue(attr, time=18, expected=-23.0)\n self.CheckValue(attr, time=19, expected=-23.0)\n self.CheckValue(attr, time=20, expected=-24.0)\n self.CheckValue(attr, time=21, expected=-25.0)\n self.CheckValue(attr, time=22, expected=-26.0)\n self.CheckValue(attr, time=23, expected=-27.0)\n self.CheckValue(attr, time=24, expected=-28.0)\n self.CheckValue(attr, time=25, expected=-29.0)\n self.CheckValue(attr, time=26, expected=-29.0)\n self.CheckValue(attr, time=27, expected=-29.0)\n self.CheckValue(attr, time=28, expected=-29.0)\n self.CheckValue(attr, time=29, expected=-29.0)\n self.CheckValue(attr, time=30, expected=-29.0)\n self.CheckValue(attr, time=31, expected=-29.0)\n\n self.assertEqual(\n attr.GetTimeSamples(), \n [0.0, 16.0 - Usd.TimeCode.SafeStep(), 16.0, 19.0, 22.0, 25.0, 32.0])\n self.assertEqual(\n attr.GetTimeSamplesInInterval(Gf.Interval(-5, 50)), \n [0.0, 16.0 - Usd.TimeCode.SafeStep(), 16.0, 19.0, 22.0, 25.0, 32.0])\n\n self.CheckTimeSamples(attr)\n\n def test_MultipleClipsWithSomeTimeSamples2(self):\n \"\"\"Another test case similar to TestMultipleClipsWithSomeTimeSamples.\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n\n model = stage.GetPrimAtPath('/ModelWithSomeClipSamples2')\n attr = model.GetAttribute('size')\n\n # This attribute should be detected as potentially time-varying\n # since multiple clips are involved and at least one of them has\n # samples.\n self.assertTrue(attr.ValueMightBeTimeVarying())\n\n # Clips are active in the range [..., 4.0), [4.0, 8.0), and [8.0, ...).\n # The first and last clips have time samples for the size attribute,\n # while the middle clip does not.\n with InterpolationType(stage, Usd.InterpolationTypeHeld):\n # First clip.\n self.CheckValue(attr, time=-1, expected=-23.0)\n self.CheckValue(attr, time=0, expected=-23.0)\n self.CheckValue(attr, time=1, expected=-23.0)\n self.CheckValue(attr, time=2, expected=-23.0)\n self.CheckValue(attr, time=3, expected=-26.0)\n\n # Middle clip with no samples. 
Since the middle clip has no \n # time samples and there is no default value specified in the\n # manifest, we get a value of None.\n self.CheckValue(attr, time=4, expected=None)\n self.CheckValue(attr, time=5, expected=None)\n self.CheckValue(attr, time=6, expected=None)\n self.CheckValue(attr, time=7, expected=None)\n\n # Last clip.\n self.CheckValue(attr, time=8, expected=-26.0)\n self.CheckValue(attr, time=9, expected=-26.0)\n self.CheckValue(attr, time=10, expected=-26.0)\n self.CheckValue(attr, time=11, expected=-29.0)\n self.CheckValue(attr, time=12, expected=-29.0)\n\n # Repeat test with linear interpolation\n with InterpolationType(stage, Usd.InterpolationTypeLinear):\n # First clip.\n self.CheckValue(attr, time=-1, expected=-23.0)\n self.CheckValue(attr, time=0, expected=-23.0)\n self.CheckValue(attr, time=1, expected=-24.0)\n self.CheckValue(attr, time=2, expected=-25.0)\n self.CheckValue(attr, time=3, expected=-26.0)\n\n # Middle clip with no samples. Since the middle clip has no \n # time samples and there is no default value specified in the\n # manifest, we get a value of None.\n self.CheckValue(attr, time=4, expected=None)\n self.CheckValue(attr, time=5, expected=None)\n self.CheckValue(attr, time=6, expected=None)\n self.CheckValue(attr, time=7, expected=None)\n\n # Last clip.\n self.CheckValue(attr, time=8, expected=-26.0)\n self.CheckValue(attr, time=9, expected=-27.0)\n self.CheckValue(attr, time=10, expected=-28.0)\n self.CheckValue(attr, time=11, expected=-29.0)\n self.CheckValue(attr, time=12, expected=-29.0)\n\n self.assertEqual(\n attr.GetTimeSamples(), \n [0.0, 3.0, 4.0 - Usd.TimeCode.SafeStep(), 4.0, 7.0, 8.0, 11.0])\n self.assertEqual(\n attr.GetTimeSamplesInInterval(Gf.Interval(0, 10)), \n [0.0, 3.0, 4.0 - Usd.TimeCode.SafeStep(), 4.0, 7.0, 8.0])\n\n self.CheckTimeSamples(attr)\n\n def test_MultipleClipsWithSomeTimeSamples3(self):\n \"\"\"Tests multi-clip case where first clip has no time samples\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n\n attr = stage.GetAttributeAtPath('/ModelWithSomeClipSamples3.size')\n\n # The first active clip has no time samples, so at the first clip's\n # start time we should get None since no default value is declared\n # in the manifest.\n self.CheckValue(attr, time=0, expected=None)\n\n # At time 1 we should interpolate between the value at the first\n # clip's start time and the value at t=2, which is the next clip's\n # start time.\n self.CheckValue(attr, time=1, expected=None)\n \n # Verify the time samples from the second clip.\n self.CheckValue(attr, time=2, expected=-23)\n self.CheckValue(attr, time=3, expected=-23)\n self.CheckValue(attr, time=6, expected=-26)\n self.CheckValue(attr, time=9, expected=-29)\n\n # There must be a time sample for each clip at their start and end times.\n self.assertEqual(attr.GetTimeSamples(), [0.0, 2.0, 3.0, 6.0, 9.0])\n\n self.CheckTimeSamples(attr)\n\n def test_MultipleClipsWithTimesSpanningClips(self):\n \"\"\"Tests that clip time mappings that span multiple clips work as\n expected\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n\n model = stage.GetPrimAtPath('/ModelWithTimesSpanningClips')\n attr = model.GetAttribute('size')\n\n # The clip time mappings specified for this prim span a time range\n # where two different clips are active. For a given stage time, the\n # corresponding clip time should be determined from the mapping first,\n # independent of what clip is active. 
The active clip should then be\n # consulted at that clip time to retrieve the final value.\n self.CheckValue(attr, time=1, expected=100.0)\n self.CheckValue(attr, time=2, expected=200.0)\n self.CheckValue(attr, time=3, expected=300.0)\n self.CheckValue(attr, time=4, expected=400.0)\n\n self.assertEqual(attr.GetTimeSamples(), [1.0, 2.0, 3.0, 4.0])\n self.assertEqual(attr.GetTimeSamplesInInterval(Gf.Interval(0, 3)), \n [1.0, 2.0, 3.0])\n\n def test_MultipleClipsWithTimesSpanningClips2(self):\n \"\"\"Another test similar to test_MultipleClipsWithTimesSpanningClips\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n\n model = stage.GetPrimAtPath('/ModelWithTimesSpanningClips_2')\n attr = model.GetAttribute('size')\n\n self.CheckValue(attr, time=100.5, expected=100.5)\n self.CheckValue(attr, time=100.75, expected=100.75)\n self.CheckValue(attr, time=101.0, expected=101.0)\n self.CheckValue(attr, time=101.25, expected=151.25)\n self.CheckValue(attr, time=101.5, expected=201.5)\n self.CheckValue(attr, time=101.75, expected=201.75)\n self.CheckValue(attr, time=102, expected=202)\n\n self.assertEqual(attr.GetTimeSamples(), \n [100.5, 100.75, 101.0, 101.5, 101.75, 102.0])\n self.assertEqual(attr.GetTimeSamplesInInterval(Gf.Interval(101, 102)), \n [101.0, 101.5, 101.75, 102.0])\n\n self.CheckTimeSamples(attr)\n\n def test_MultipleClipsWithNoTimes(self):\n \"\"\"Test sequencing multiple clips together with no times metadata\n to remap times.\"\"\"\n stage = Usd.Stage.Open('multiclip/root.usda')\n\n attr = stage.GetAttributeAtPath('/ModelWithNoTimes.size')\n\n # In this test case there is no times metadata specified,\n # so the stage times map to the clip times directly. \n self.CheckValue(attr, time=0.0, expected=-5)\n self.CheckValue(attr, time=5.0, expected=-5)\n self.CheckValue(attr, time=7.0, expected=-27)\n self.CheckValue(attr, time=9.0, expected=-29)\n\n # At t=6 we are between the last time sample in the active range\n # of the first clip and the first time sample in the second clip.\n # We should interpolate between these two sample values.\n self.CheckValue(attr, time=6.0, expected=-16)\n\n # The clips should only contribute time samples from their active\n # range, so we should only get the samples at 5.0 from the first\n # clip, 9.0 from the second clip, and 0.0 and 7.0 from the active\n # metadata.\n self.assertEqual(attr.GetTimeSamples(), [0.0, 5.0, 7.0, 9.0])\n self.CheckTimeSamples(attr)\n\n def test_InterpolateMissingClipValues(self):\n \"\"\"Tests interpolation of values for clips that do not have time\n samples for attributes that have been declared in the manifest.\"\"\"\n def _Test(prim):\n # The first clip active in the range [0, 2) has no samples for\n # this attribute. 
We should hold the value in this time range\n # from the first sample from the second clip.\n attrNotInFirstClip = prim.GetAttribute('attrNotInFirstClip')\n\n self.CheckValue(attrNotInFirstClip, time=-1, expected=300)\n self.CheckValue(attrNotInFirstClip, time=-0.5, expected=300)\n self.CheckValue(attrNotInFirstClip, time=0, expected=300)\n self.CheckValue(attrNotInFirstClip, time=0.5, expected=300)\n self.CheckValue(attrNotInFirstClip, time=1, expected=300)\n self.CheckValue(attrNotInFirstClip, time=1.5, expected=300)\n self.CheckValue(attrNotInFirstClip, time=2, expected=300)\n self.CheckValue(attrNotInFirstClip, time=2.5, expected=350)\n self.CheckValue(attrNotInFirstClip, time=3, expected=400)\n self.CheckValue(attrNotInFirstClip, time=3.5, expected=450)\n self.CheckValue(attrNotInFirstClip, time=4, expected=500)\n self.CheckValue(attrNotInFirstClip, time=4.5, expected=550)\n self.CheckValue(attrNotInFirstClip, time=5, expected=600)\n self.CheckValue(attrNotInFirstClip, time=5.5, expected=650)\n self.CheckValue(attrNotInFirstClip, time=6, expected=700)\n self.CheckValue(attrNotInFirstClip, time=6.5, expected=750)\n self.CheckValue(attrNotInFirstClip, time=7, expected=800)\n self.CheckValue(attrNotInFirstClip, time=7.5, expected=800)\n self.CheckValue(attrNotInFirstClip, time=8, expected=800)\n\n self.assertEqual(attrNotInFirstClip.GetTimeSamples(),\n [2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n \n self.CheckTimeSamples(attrNotInFirstClip)\n\n # The middle clips that are active in the range [2, 6) have no\n # samples for this attribute. We should interpolate the value in\n # this time range from the first clip and the last clip.\n attrNotInMiddleClips = prim.GetAttribute('attrNotInMiddleClips')\n\n self.CheckValue(attrNotInMiddleClips, time=-1, expected=100)\n self.CheckValue(attrNotInMiddleClips, time=-0.5, expected=100)\n self.CheckValue(attrNotInMiddleClips, time=0, expected=100)\n self.CheckValue(attrNotInMiddleClips, time=0.5, expected=150)\n self.CheckValue(attrNotInMiddleClips, time=1, expected=200)\n self.CheckValue(attrNotInMiddleClips, time=1.5, expected=250)\n self.CheckValue(attrNotInMiddleClips, time=2, expected=300)\n self.CheckValue(attrNotInMiddleClips, time=2.5, expected=350)\n self.CheckValue(attrNotInMiddleClips, time=3, expected=400)\n self.CheckValue(attrNotInMiddleClips, time=3.5, expected=450)\n self.CheckValue(attrNotInMiddleClips, time=4, expected=500)\n self.CheckValue(attrNotInMiddleClips, time=4.5, expected=550)\n self.CheckValue(attrNotInMiddleClips, time=5, expected=600)\n self.CheckValue(attrNotInMiddleClips, time=5.5, expected=650)\n self.CheckValue(attrNotInMiddleClips, time=6, expected=700)\n self.CheckValue(attrNotInMiddleClips, time=6.5, expected=750)\n self.CheckValue(attrNotInMiddleClips, time=7, expected=800)\n self.CheckValue(attrNotInMiddleClips, time=7.5, expected=800)\n self.CheckValue(attrNotInMiddleClips, time=8, expected=800)\n\n self.assertEqual(attrNotInMiddleClips.GetTimeSamples(),\n [0.0, 1.0, 6.0, 7.0])\n self.CheckTimeSamples(attrNotInMiddleClips)\n\n # The last clip active in the range [6, ...) has no samples for\n # this attribute. 
We should hold the value in this time range\n # from the last sample in the second-to-last clip.\n attrNotInLastClip = prim.GetAttribute('attrNotInLastClip')\n\n self.CheckValue(attrNotInLastClip, time=-1, expected=100)\n self.CheckValue(attrNotInLastClip, time=-0.5, expected=100)\n self.CheckValue(attrNotInLastClip, time=0, expected=100)\n self.CheckValue(attrNotInLastClip, time=0.5, expected=150)\n self.CheckValue(attrNotInLastClip, time=1, expected=200)\n self.CheckValue(attrNotInLastClip, time=1.5, expected=250)\n self.CheckValue(attrNotInLastClip, time=2, expected=300)\n self.CheckValue(attrNotInLastClip, time=2.5, expected=350)\n self.CheckValue(attrNotInLastClip, time=3, expected=400)\n self.CheckValue(attrNotInLastClip, time=3.5, expected=450)\n self.CheckValue(attrNotInLastClip, time=4, expected=500)\n self.CheckValue(attrNotInLastClip, time=4.5, expected=550)\n self.CheckValue(attrNotInLastClip, time=5, expected=600)\n self.CheckValue(attrNotInLastClip, time=5.5, expected=600)\n self.CheckValue(attrNotInLastClip, time=6, expected=600)\n self.CheckValue(attrNotInLastClip, time=6.5, expected=600)\n self.CheckValue(attrNotInLastClip, time=7, expected=600)\n self.CheckValue(attrNotInLastClip, time=7.5, expected=600)\n self.CheckValue(attrNotInLastClip, time=8, expected=600)\n\n self.assertEqual(attrNotInLastClip.GetTimeSamples(),\n [0.0, 1.0, 2.0, 3.0, 4.0, 5.0])\n self.CheckTimeSamples(attrNotInLastClip)\n\n # This attribute is in the manifest but not in any clip. We\n # expect to get 1 time sample with a value of None since no \n # default value is declared in the manifest.\n attrNotInAnyClip = prim.GetAttribute('attrNotInAnyClip')\n\n self.CheckValue(attrNotInAnyClip, time=-1, expected=None)\n self.CheckValue(attrNotInAnyClip, time=0, expected=None)\n self.CheckValue(attrNotInAnyClip, time=1, expected=None)\n\n self.assertEqual(attrNotInAnyClip.GetTimeSamples(), [0.0])\n self.CheckTimeSamples(attrNotInAnyClip)\n\n stage = Usd.Stage.Open('missingValueInterpolation/root.usda')\n _Test(stage.GetPrimAtPath('/Model'))\n _Test(stage.GetPrimAtPath('/ModelWithManifestBlocks'))\n \n def test_InterpolateMissingClipValuesWithBlocksInManifest(self):\n \"\"\"Tests that interpolation of values for empty clips avoids opening\n layers for clips that are declared to have no values in the manifest.\"\"\"\n def _OpenTestStage():\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda'))\n return Usd.Stage.Open('missingValueInterpolation/root.usda')\n\n # These attributes have been marked as not having time samples in various\n # clips, so when we query for values none of those clips should ever be\n # opened.\n stage = _OpenTestStage()\n attrNotInFirstClip = stage.GetAttributeAtPath(\n '/ModelWithManifestBlocks.attrNotInFirstClip')\n for i in range(0, 8):\n attrNotInFirstClip.Get(i)\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n\n del stage\n stage = _OpenTestStage()\n attrNotInMiddleClips = stage.GetAttributeAtPath(\n '/ModelWithManifestBlocks.attrNotInMiddleClips')\n for i in range(0, 8):\n attrNotInMiddleClips.Get(i)\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n\n del stage\n stage = _OpenTestStage()\n 
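# This attribute is declared (via a block in the manifest) as having no\n # samples in the last clip, so querying its values should never cause\n # clip4.usda to be opened.\n 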
attrNotInLastClip = stage.GetAttributeAtPath(\n '/ModelWithManifestBlocks.attrNotInLastClip')\n for i in range(0, 8):\n attrNotInLastClip.Get(i)\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda'))\n\n del stage\n stage = _OpenTestStage()\n attrNotInAnyClip = stage.GetAttributeAtPath(\n '/ModelWithManifestBlocks.attrNotInAnyClip')\n for i in range(0, 8):\n attrNotInAnyClip.Get(i)\n # Note that even though this attribute is indicated as not having\n # samples in any clip, we do still wind up opening clip1.usda\n # when querying time samples. Avoiding this would incur an\n # additional check on the manifest in the more common cases,\n # so we choose not to do that.\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda'))\n\n def test_InterpolateMissingClipValuesWithFallbacksInManifest(self):\n \"\"\"Tests that interpolation for missing clip values will be\n skipped for attributes with a fallback value declared in the\n manifest.\"\"\"\n stage = Usd.Stage.Open('missingValueInterpolation/root.usda')\n\n attrNotInFirstClip = stage.GetAttributeAtPath(\n '/ModelWithManifestFallbacks.attrNotInFirstClip')\n\n self.CheckValue(attrNotInFirstClip, time=-1, expected=42)\n self.CheckValue(attrNotInFirstClip, time=-0.5, expected=42)\n self.CheckValue(attrNotInFirstClip, time=0, expected=42)\n self.CheckValue(attrNotInFirstClip, time=0.5, expected=106.5)\n self.CheckValue(attrNotInFirstClip, time=1, expected=171)\n self.CheckValue(attrNotInFirstClip, time=1.5, expected=235.5)\n self.CheckValue(attrNotInFirstClip, time=2, expected=300)\n\n self.assertEqual(attrNotInFirstClip.GetTimeSamples(),\n [0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n self.CheckTimeSamples(attrNotInFirstClip)\n\n attrNotInMiddleClips = stage.GetAttributeAtPath(\n '/ModelWithManifestFallbacks.attrNotInMiddleClips')\n\n self.CheckValue(attrNotInMiddleClips, time=1, expected=200)\n self.CheckValue(attrNotInMiddleClips, time=1.5, expected=121)\n self.CheckValue(attrNotInMiddleClips, time=2, expected=42)\n self.CheckValue(attrNotInMiddleClips, time=2.5, expected=42)\n self.CheckValue(attrNotInMiddleClips, time=3, expected=42)\n self.CheckValue(attrNotInMiddleClips, time=3.5, expected=42)\n self.CheckValue(attrNotInMiddleClips, time=4, expected=42)\n self.CheckValue(attrNotInMiddleClips, time=4.5, expected=206.5)\n self.CheckValue(attrNotInMiddleClips, time=5, expected=371)\n self.CheckValue(attrNotInMiddleClips, time=5.5, expected=535.5)\n self.CheckValue(attrNotInMiddleClips, time=6, expected=700)\n\n self.assertEqual(attrNotInMiddleClips.GetTimeSamples(),\n [0.0, 1.0, 2.0, 4.0, 6.0, 7.0])\n self.CheckTimeSamples(attrNotInMiddleClips)\n\n attrNotInLastClip = stage.GetAttributeAtPath(\n '/ModelWithManifestFallbacks.attrNotInLastClip')\n\n self.CheckValue(attrNotInLastClip, time=5, expected=600.0)\n self.CheckValue(attrNotInLastClip, time=5.5, expected=321.0)\n self.CheckValue(attrNotInLastClip, time=6, expected=42.0)\n self.CheckValue(attrNotInLastClip, time=7, expected=42.0)\n self.CheckValue(attrNotInLastClip, time=8, expected=42.0)\n\n self.assertEqual(attrNotInLastClip.GetTimeSamples(),\n [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n self.CheckTimeSamples(attrNotInLastClip)\n\n attrNotInAnyClip = stage.GetAttributeAtPath(\n '/ModelWithManifestFallbacks.attrNotInAnyClip')\n\n 
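# This attribute has no samples in any clip, so the fallback value\n # declared in the manifest (42.0 in this test data) is returned when\n # querying values.\n 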
self.CheckValue(attrNotInAnyClip, time=-1, expected=42.0)\n self.CheckValue(attrNotInAnyClip, time=0, expected=42.0)\n self.CheckValue(attrNotInAnyClip, time=1, expected=42.0)\n\n self.assertEqual(attrNotInAnyClip.GetTimeSamples(), \n [0.0, 2.0, 4.0, 6.0, 7.0])\n self.CheckTimeSamples(attrNotInAnyClip)\n\n def test_GetTimeSamplesInIntervalWithoutInterpolation(self):\n \"\"\"Tests behavior of GetTimeSamplesInInterval with clip sets\n that are missing time samples with interpolation between\n missing clip values turned off.\"\"\"\n def _OpenTestStage():\n # Use the test case from missingValueInterpolation but turn off\n # the interpolation behavior for this test case.\n stage = Usd.Stage.Open('missingValueInterpolation/root.usda')\n Sdf.CreatePrimInLayer(stage.GetSessionLayer(), '/Model').SetInfo(\n 'clips', {'default': {'interpolateMissingClipValues' : False}})\n\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda'))\n\n return stage\n\n # When interpolation is turned off, querying time samples in an\n # interval should only need to open the clips that are active\n # during that interval. In this case, only clip 1 is active in\n # the time interval [0, 1] with time samples at 0.0 and 1.0.\n stage = _OpenTestStage()\n attrNotInFirstClip = stage.GetAttributeAtPath(\n '/Model.attrNotInLastClip')\n self.assertEqual(\n attrNotInFirstClip.GetTimeSamplesInInterval(\n Gf.Interval(0.0, 1.0)),\n [0.0, 1.0])\n\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda')) \n\n # If there are no values in any clips, we should still only need\n # to open the clip that is active during that interval, which is\n # clip 1. 
This is because each clip introduces a time sample at\n # its start time when interpolation is turned off, even if it\n # has no authored samples.\n del stage\n stage = _OpenTestStage()\n attrNotInAnyClip = stage.GetAttributeAtPath('/Model.attrNotInAnyClip')\n self.assertEqual(\n attrNotInAnyClip.GetTimeSamplesInInterval(\n Gf.Interval(0.0, 1.0)),\n [0.0])\n\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda')) \n\n def test_GetTimeSamplesInIntervalWithInterpolation(self):\n \"\"\"Tests behavior of GetTimeSamplesInInterval with clip sets\n that are missing time samples with interpolation between\n missing clip values turned on.\"\"\"\n def _OpenTestStage():\n stage = Usd.Stage.Open('missingValueInterpolation/root.usda')\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda'))\n return stage\n\n # When interpolation is turned on, querying time samples in an\n # interval should only need to open the clips that are active\n # during that interval if any of them contain time samples. In\n # this case, only clip 1 is active in the time interval [0, 1]\n # with time samples at 0.0 and 1.0.\n stage = _OpenTestStage()\n attrNotInFirstClip = stage.GetAttributeAtPath(\n '/Model.attrNotInLastClip')\n self.assertEqual(\n attrNotInFirstClip.GetTimeSamplesInInterval(\n Gf.Interval(0.0, 1.0)),\n [0.0, 1.0])\n\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda')) \n\n # However, if the active clip does not contain time samples,\n # we currently need to scan to see if any other clips provide\n # time samples. In the worst case, when no clips provide samples,\n # this will cause all clips to be opened.\n del stage\n stage = _OpenTestStage()\n attrNotInAnyClip = stage.GetAttributeAtPath('/Model.attrNotInAnyClip')\n self.assertEqual(\n attrNotInAnyClip.GetTimeSamplesInInterval(\n Gf.Interval(0.0, 1.0)),\n [0.0])\n\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertTrue(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda')) \n\n # This can be mitigated by authoring value blocks in the manifest to\n # indicate that certain clips do not provide samples. 
In this case,\n # we've authored blocks for all clips so none of them should be opened.\n del stage\n stage = _OpenTestStage()\n attrNotInAnyClip = stage.GetAttributeAtPath(\n '/ModelWithManifestBlocks.attrNotInAnyClip')\n self.assertEqual(\n attrNotInAnyClip.GetTimeSamplesInInterval(\n Gf.Interval(0.0, 1.0)),\n [0.0])\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip1.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip2.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip3.usda'))\n self.assertFalse(\n Sdf.Layer.Find('missingValueInterpolation/clip4.usda'))\n\n def test_AncestralClips(self):\n \"\"\"Tests that clips specified on a descendant model will override\n clips specified on an ancestral model\"\"\"\n stage = Usd.Stage.Open('ancestral/root.usda')\n\n ancestor = stage.GetPrimAtPath('/ModelGroup')\n ancestorAttr = ancestor.GetAttribute('attr')\n \n self.assertEqual(ancestorAttr.GetTimeSamples(), [0, 5, 10, 15])\n self.assertEqual(ancestorAttr.GetTimeSamplesInInterval(Gf.Interval(0, 15)), \n [0, 5, 10, 15])\n self.CheckValue(ancestorAttr, time=5, expected=-5)\n self.CheckValue(ancestorAttr, time=10, expected=-10)\n self.CheckValue(ancestorAttr, time=15, expected=-15)\n\n # Tests that attributes on prims will receive values from clips specified\n # on ancestors.\n descendant = stage.GetPrimAtPath('/ModelGroup/Subgroup')\n descendantAttr = descendant.GetAttribute('attr')\n\n self.assertEqual(descendantAttr.GetTimeSamples(), [0, 5, 10, 15])\n self.assertEqual(descendantAttr.GetTimeSamplesInInterval(Gf.Interval(0, 15)), \n [0, 5, 10, 15])\n self.CheckValue(descendantAttr, time=5, expected=-5)\n self.CheckValue(descendantAttr, time=10, expected=-10)\n self.CheckValue(descendantAttr, time=15, expected=-15)\n\n # Tests that clips specified on a descendant model will override\n # clips specified on an ancestral model\n descendant = stage.GetPrimAtPath('/ModelGroup/Subgroup/Model')\n descendantAttr = descendant.GetAttribute('attr')\n\n self.assertEqual(descendantAttr.GetTimeSamples(), [0, 1, 2, 3])\n self.assertEqual(descendantAttr.GetTimeSamplesInInterval(Gf.Interval(0, 2.95)), \n [0, 1, 2])\n self.CheckValue(descendantAttr, time=1, expected=-1)\n self.CheckValue(descendantAttr, time=2, expected=-2)\n self.CheckValue(descendantAttr, time=3, expected=-3)\n\n self.CheckTimeSamples(ancestorAttr)\n self.CheckTimeSamples(descendantAttr)\n\n def test_ClipFlatten(self):\n \"\"\"Ensure that UsdStages with clips are flattened as expected.\n In particular, the time samples in the flattened stage should incorporate\n data from clips, and no clip metadata should be present\"\"\"\n\n stage = Usd.Stage.Open('flatten/root.usda')\n expectedFlatStage = Sdf.Layer.FindOrOpen(\n 'flatten/flat.usda')\n\n self.assertEqual(stage.ExportToString(addSourceFileComment=False),\n expectedFlatStage.ExportToString())\n\n def test_ClipValidation(self):\n \"\"\"Tests validation of clip metadata\"\"\"\n\n # class Listener(object):\n # def __init__(self):\n # self.warnings = []\n # self._listener = Tf.Notice.RegisterGlobally(\n # 'TfDiagnosticNotice::IssuedWarning', \n # self._OnNotice)\n\n # def _OnNotice(self, notice, sender):\n # self.warnings.append(notice.warning)\n\n # l = Listener()\n\n stage = Usd.Stage.Open('validation/root.usda')\n\n # XXX: The notice listening portion of this test is disabled for now, since\n # parallel UsdStage population causes these warnings to be emitted from\n # separate threads. 
The diagnostic system does not issue notices for\n # warnings and errors not issued from \"the main thread\".\n\n # self.assertEqual(len(l.warnings), numExpectedWarnings)\n\n # # Each 'Error' prim should have caused a warning to be posted.\n # for i in range(1, numExpectedWarnings):\n # errorPrimName = 'Error%d' % i\n # numErrorsForPrim = sum(1 if errorPrimName in str(e) else 0 \n # for e in l.warnings)\n # self.assertEqual(numErrorsForPrim, 1)\n\n # # The 'NoError' prims should not have caused any errors to be posted.\n # self.assertFalse(any(['NoError' in str(e) for e in l.warnings]))\n\n def test_ClipsOnNonModel(self):\n \"\"\"Verifies that clips authored on non-models work\"\"\"\n stage = Usd.Stage.Open('nonmodel/root.usda')\n\n nonModel = stage.GetPrimAtPath('/NonModel')\n self.assertFalse(nonModel.IsModel())\n attr = nonModel.GetAttribute('a')\n self.CheckValue(attr, time=1.0, expected=-100.0)\n\n def test_ClipsCannotIntroduceNewTopology(self):\n \"\"\"Verifies that clips cannot introduce new scenegraph topology\"\"\"\n stage = Usd.Stage.Open('topology/root.usda')\n\n prim = stage.GetPrimAtPath('/Model')\n self.assertTrue(prim.IsModel())\n\n # Clips cannot introduce new topology. Prims and properties defined only\n # in the clip should not be visible on the stage.\n self.assertFalse(prim.GetAttribute('clipOnly'))\n self.assertEqual(prim.GetChildren(), [])\n\n def test_ClipAuthoring(self):\n \"\"\"Tests clip authoring API on Usd.ClipsAPI\"\"\"\n allFormats = ['usd' + x for x in 'ac']\n for fmt in allFormats:\n stage = Usd.Stage.CreateInMemory('TestClipAuthoring.'+fmt)\n\n prim = stage.DefinePrim('/Model')\n model = Usd.ClipsAPI(prim)\n\n prim2 = stage.DefinePrim('/Model2')\n model2 = Usd.ClipsAPI(prim2)\n\n # Clip authoring API supports the use of lists as well as Vt arrays.\n clipAssetPaths = [Sdf.AssetPath('clip1.usda'), \n Sdf.AssetPath('clip2.usda')]\n model.SetClipAssetPaths(clipAssetPaths)\n self.assertEqual(model.GetClipAssetPaths(), clipAssetPaths)\n\n model2.SetClipAssetPaths(\n Sdf.AssetPathArray([Sdf.AssetPath('clip1.usda'),\n Sdf.AssetPath('clip2.usda')]))\n self.assertEqual(model2.GetClipAssetPaths(), clipAssetPaths)\n\n clipPrimPath = \"/Clip\"\n model.SetClipPrimPath(clipPrimPath)\n self.assertEqual(model.GetClipPrimPath(), clipPrimPath)\n\n clipTimes = Vt.Vec2dArray([(0.0, 0.0),(10.0, 10.0),(20.0, 20.0)])\n model.SetClipTimes(clipTimes)\n self.assertEqual(model.GetClipTimes(), clipTimes)\n\n model2.SetClipTimes(\n Vt.Vec2dArray([Gf.Vec2d(0.0, 0.0),\n Gf.Vec2d(10.0, 10.0),\n Gf.Vec2d(20.0, 20.0)]))\n self.assertEqual(model2.GetClipTimes(), clipTimes)\n\n clipActive = [(0.0, 0.0),(10.0, 1.0),(20.0, 0.0)]\n model.SetClipActive(clipActive)\n self.assertEqual(model.GetClipActive(), Vt.Vec2dArray(clipActive))\n\n model2.SetClipActive(\n Vt.Vec2dArray([Gf.Vec2d(0.0, 0.0),\n Gf.Vec2d(10.0, 1.0),\n Gf.Vec2d(20.0, 0.0)]))\n self.assertEqual(model2.GetClipActive(), Vt.Vec2dArray(clipActive))\n\n clipManifestAssetPath = Sdf.AssetPath('clip_manifest.usda')\n model.SetClipManifestAssetPath(clipManifestAssetPath)\n self.assertEqual(model.GetClipManifestAssetPath(), clipManifestAssetPath)\n\n model.SetInterpolateMissingClipValues(True)\n self.assertEqual(model.GetInterpolateMissingClipValues(), True)\n\n # Test authoring of template clip metadata\n model.SetClipTemplateAssetPath('clip.###.usda')\n self.assertEqual(model.GetClipTemplateAssetPath(), 'clip.###.usda')\n\n model.SetClipTemplateStride(4.5)\n self.assertEqual(model.GetClipTemplateStride(), 4.5)\n\n 
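# The template start/end times bound the range of times that are\n # substituted into the template asset path when resolving clips.\n 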
model.SetClipTemplateStartTime(1)\n self.assertEqual(model.GetClipTemplateStartTime(), 1)\n\n model.SetClipTemplateEndTime(5)\n self.assertEqual(model.GetClipTemplateEndTime(), 5)\n \n # Ensure we can't set the clipTemplateStride to 0\n with self.assertRaises(Tf.ErrorException) as e:\n model.SetClipTemplateStride(0)\n\n # Ensure we can't set the clipTemplateStride to <0\n with self.assertRaises(Tf.ErrorException) as e:\n model.SetClipTemplateStride(-1)\n\n model.SetClipTemplateActiveOffset(2)\n self.assertEqual(model.GetClipTemplateActiveOffset(), 2)\n\n model.SetClipTemplateActiveOffset(-5)\n self.assertEqual(model.GetClipTemplateActiveOffset(), -5)\n\n def test_ClipSetAuthoring(self):\n \"\"\"Tests clip authoring API with clip sets on Usd.ClipsAPI\"\"\"\n allFormats = ['usd' + x for x in 'ac']\n for fmt in allFormats:\n stage = Usd.Stage.CreateInMemory('TestClipSetAuthoring.'+fmt)\n\n prim = stage.DefinePrim('/Model')\n model = Usd.ClipsAPI(prim)\n\n prim2 = stage.DefinePrim('/Model2')\n model2 = Usd.ClipsAPI(prim2)\n\n clipSetName = \"my_clip_set\"\n\n # Clip authoring API supports the use of lists as well as Vt arrays.\n clipAssetPaths = [Sdf.AssetPath('clip1.usda'), \n Sdf.AssetPath('clip2.usda')]\n model.SetClipAssetPaths(clipAssetPaths, clipSetName)\n self.assertEqual(model.GetClipAssetPaths(clipSetName), \n clipAssetPaths)\n\n model2.SetClipAssetPaths(\n Sdf.AssetPathArray([Sdf.AssetPath('clip1.usda'),\n Sdf.AssetPath('clip2.usda')]),\n clipSetName)\n self.assertEqual(model2.GetClipAssetPaths(clipSetName), \n clipAssetPaths)\n\n clipPrimPath = \"/Clip\"\n model.SetClipPrimPath(clipPrimPath, clipSetName)\n self.assertEqual(model.GetClipPrimPath(clipSetName), clipPrimPath)\n\n clipTimes = Vt.Vec2dArray([(0.0, 0.0),(10.0, 10.0),(20.0, 20.0)])\n model.SetClipTimes(clipTimes, clipSetName)\n self.assertEqual(model.GetClipTimes(clipSetName), clipTimes)\n\n model2.SetClipTimes(\n Vt.Vec2dArray([Gf.Vec2d(0.0, 0.0),\n Gf.Vec2d(10.0, 10.0),\n Gf.Vec2d(20.0, 20.0)]),\n clipSetName)\n self.assertEqual(model2.GetClipTimes(clipSetName), clipTimes)\n\n clipActive = [(0.0, 0.0),(10.0, 1.0),(20.0, 0.0)]\n model.SetClipActive(clipActive, clipSetName)\n self.assertEqual(model.GetClipActive(clipSetName), \n Vt.Vec2dArray(clipActive))\n\n model2.SetClipActive(\n Vt.Vec2dArray([Gf.Vec2d(0.0, 0.0),\n Gf.Vec2d(10.0, 1.0),\n Gf.Vec2d(20.0, 0.0)]),\n clipSetName)\n self.assertEqual(model2.GetClipActive(clipSetName), \n Vt.Vec2dArray(clipActive))\n\n clipManifestAssetPath = Sdf.AssetPath('clip_manifest.usda')\n model.SetClipManifestAssetPath(clipManifestAssetPath, clipSetName)\n self.assertEqual(model.GetClipManifestAssetPath(clipSetName), \n clipManifestAssetPath)\n\n # Test authoring of template clip metadata\n model.SetClipTemplateAssetPath('clip.###.usda', clipSetName)\n self.assertEqual(model.GetClipTemplateAssetPath(clipSetName), \n 'clip.###.usda')\n\n model.SetClipTemplateStride(4.5, clipSetName)\n self.assertEqual(model.GetClipTemplateStride(clipSetName), 4.5)\n\n model.SetClipTemplateStartTime(1, clipSetName)\n self.assertEqual(model.GetClipTemplateStartTime(clipSetName), 1)\n\n model.SetClipTemplateEndTime(5, clipSetName)\n self.assertEqual(model.GetClipTemplateEndTime(clipSetName), 5)\n \n # Ensure we can't set the clipTemplateStride to 0\n with self.assertRaises(Tf.ErrorException) as e:\n model.SetClipTemplateStride(0, clipSetName)\n\n def test_ClipTimesBracketingTimeSamplePrecision(self):\n stage = Usd.Stage.Open('precision/root.usda')\n prim = 
stage.GetPrimAtPath('/World/fx/Particles_Splash/points')\n attr = prim.GetAttribute('points')\n\n self.assertEqual(attr.GetTimeSamples(), [101.0, 101.99, 102.0, 103.0])\n self.assertEqual(attr.GetBracketingTimeSamples(101), (101.00, 101.00))\n self.assertEqual(attr.GetBracketingTimeSamples(101.99), (101.99, 101.99))\n self.assertEqual(attr.GetBracketingTimeSamples(101.90), (101.00, 101.99))\n self.assertEqual(attr.GetTimeSamplesInInterval(Gf.Interval(101.0,102.0)), \n [101.00, 101.99, 102.00])\n\n def test_ClipManifest(self):\n \"\"\"Verifies behavior with value clips when a clip manifest is \n specified.\"\"\"\n stage = Usd.Stage.Open('manifest/root.usda')\n prim = stage.GetPrimAtPath('/WithManifestClip')\n\n # No clip layers should be loaded yet. We have a manifest explicitly\n # specified so we don't need to open any of the clip layers to\n # generate one.\n self.assertFalse(Sdf.Layer.Find('manifest/clip_1.usda'))\n self.assertFalse(Sdf.Layer.Find('manifest/clip_2.usda'))\n\n # This attribute doesn't exist in the manifest, so we should\n # not have looked in any clips for samples, and its value should\n # fall back to its default value.\n notInManifestAndInClip = prim.GetAttribute('notInManifestAndInClip')\n self.assertFalse(notInManifestAndInClip.ValueMightBeTimeVarying())\n self.assertFalse(Sdf.Layer.Find('manifest/clip_1.usda'))\n self.assertFalse(Sdf.Layer.Find('manifest/clip_2.usda'))\n self.CheckValue(notInManifestAndInClip, time=0, expected=3.0)\n self.assertEqual(notInManifestAndInClip.GetTimeSamples(), [])\n self.assertEqual(notInManifestAndInClip.GetTimeSamplesInInterval(\n Gf.Interval.GetFullInterval()), [])\n self.CheckTimeSamples(notInManifestAndInClip)\n\n # This attribute also doesn't exist in the manifest and also\n # does not have any samples in the clips. It should behave exactly\n # as above; we should not have to open any of the clips.\n notInManifestNotInClip = prim.GetAttribute('notInManifestNotInClip')\n self.assertFalse(notInManifestNotInClip.ValueMightBeTimeVarying())\n self.assertFalse(Sdf.Layer.Find('manifest/clip_1.usda'))\n self.assertFalse(Sdf.Layer.Find('manifest/clip_2.usda'))\n self.CheckValue(notInManifestNotInClip, time=0, expected=4.0)\n self.assertEqual(notInManifestNotInClip.GetTimeSamplesInInterval(\n Gf.Interval.GetFullInterval()), [])\n self.CheckTimeSamples(notInManifestNotInClip)\n \n # This attribute is in the manifest but is declared uniform,\n # so we should also not look in any clips for samples.\n uniformInManifestAndInClip = prim.GetAttribute('uniformInManifestAndInClip')\n self.assertFalse(uniformInManifestAndInClip.ValueMightBeTimeVarying())\n self.assertFalse(Sdf.Layer.Find('manifest/clip_1.usda'))\n self.assertFalse(Sdf.Layer.Find('manifest/clip_2.usda'))\n self.CheckValue(uniformInManifestAndInClip, time=0, expected=5.0)\n self.assertEqual(uniformInManifestAndInClip.GetTimeSamples(), [])\n self.assertEqual(uniformInManifestAndInClip.GetTimeSamplesInInterval(\n Gf.Interval.GetFullInterval()), [])\n self.CheckTimeSamples(uniformInManifestAndInClip)\n\n # This attribute is in the manifest and has samples in the\n # first clip, but not the other. 
We should get the clip's samples\n # in the first time range, and the default value in the second\n # range.\n inManifestAndInClip = prim.GetAttribute('inManifestAndInClip')\n self.assertTrue(inManifestAndInClip.ValueMightBeTimeVarying())\n # Since there's more than one clip we don't need to open any \n # layers to determine if the attribute might be varying.\n self.assertFalse(Sdf.Layer.Find('manifest/clip_1.usda'))\n self.assertFalse(Sdf.Layer.Find('manifest/clip_2.usda'))\n self.CheckValue(inManifestAndInClip, time=0, expected=0.0)\n self.CheckValue(inManifestAndInClip, time=1, expected=-1.0)\n\n # Note that the clip at t=2 does not have a value for this attribute,\n # and the manifest has no default value specified, so we get None.\n self.CheckValue(inManifestAndInClip, time=2, expected=None)\n\n self.assertEqual(inManifestAndInClip.GetTimeSamples(), \n [0.0, 1.0, 2.0, 3.0])\n self.assertEqual(inManifestAndInClip.GetTimeSamplesInInterval(\n Gf.Interval(0, 2.1)), [0.0, 1.0, 2.0])\n self.CheckTimeSamples(inManifestAndInClip)\n\n # Close and reopen the stage to ensure the clip layers are closed\n # before we do the test below.\n del stage\n self.assertFalse(Sdf.Layer.Find('manifest/clip_1.usda'))\n self.assertFalse(Sdf.Layer.Find('manifest/clip_2.usda'))\n\n # Lastly, this attribute is in the manifest but has no\n # samples in the clip and no default in the manifest, so we should\n # get None.\n stage = Usd.Stage.Open('manifest/root.usda')\n prim = stage.GetPrimAtPath('/WithManifestClip')\n\n inManifestNotInClip = prim.GetAttribute('inManifestNotInClip')\n self.assertTrue(inManifestNotInClip.ValueMightBeTimeVarying())\n # Since there's more than one clip we don't need to open any \n # layers to determine if the attribute might be varying.\n self.assertFalse(Sdf.Layer.Find('manifest/clip_1.usda'))\n self.assertFalse(Sdf.Layer.Find('manifest/clip_2.usda'))\n self.CheckValue(inManifestNotInClip, time=0, expected=None)\n self.assertEqual(inManifestNotInClip.GetTimeSamples(), \n [0.0, 1.0, 2.0, 3.0])\n self.assertEqual(inManifestNotInClip.GetTimeSamplesInInterval(\n Gf.Interval.GetFullInterval()), [0.0, 1.0, 2.0, 3.0])\n self.CheckTimeSamples(inManifestNotInClip)\n\n def test_ClipManifestFallback(self):\n \"\"\"Verifies fallback values from manifest when a clip does not\n have values for an attribute that is in the manifest.\"\"\"\n stage = Usd.Stage.Open('manifestFallback/root.usda')\n\n # In the following test cases, the clip that is active at t=2.0\n # contains no attributes.\n\n # If the attribute is declared with a default value in the\n # manifest, we fall back to that value.\n fallbackInManifest = \\\n stage.GetAttributeAtPath('/Model.fallbackInManifest')\n self.CheckValue(fallbackInManifest, time=0.0, expected=10.0)\n self.CheckValue(fallbackInManifest, time=2.0, expected=50.0)\n self.CheckValue(fallbackInManifest, time=4.0, expected=20.0)\n self.assertEqual(fallbackInManifest.GetTimeSamples(),\n [0.0, 1.0, 2.0 - Usd.TimeCode.SafeStep(), 2.0,\n 4.0 - Usd.TimeCode.SafeStep(), 4.0])\n self.CheckTimeSamples(fallbackInManifest)\n \n fallbackBlockInManifest = \\\n stage.GetAttributeAtPath('/Model.fallbackBlockInManifest')\n self.CheckValue(fallbackBlockInManifest, time=0.0, expected=10.0)\n self.CheckValue(fallbackBlockInManifest, time=2.0, expected=None)\n self.CheckValue(fallbackBlockInManifest, time=4.0, expected=20.0)\n self.assertEqual(fallbackBlockInManifest.GetTimeSamples(),\n [0.0, 1.0, 2.0 - Usd.TimeCode.SafeStep(), 2.0,\n 4.0 - Usd.TimeCode.SafeStep(), 4.0])\n 
self.CheckTimeSamples(fallbackBlockInManifest)\n\n # If the attribute is declared without a default value in the\n # manifest, we get a value of None.\n noFallbackInManifest = \\\n stage.GetAttributeAtPath('/Model.noFallbackInManifest')\n self.CheckValue(noFallbackInManifest, time=0.0, expected=10.0)\n self.CheckValue(noFallbackInManifest, time=2.0, expected=None)\n self.CheckValue(noFallbackInManifest, time=4.0, expected=20.0)\n self.assertEqual(noFallbackInManifest.GetTimeSamples(),\n [0.0, 1.0, 2.0 - Usd.TimeCode.SafeStep(), 2.0,\n 4.0 - Usd.TimeCode.SafeStep(), 4.0])\n self.CheckTimeSamples(noFallbackInManifest)\n\n def test_ClipManifestFallback2(self):\n \"\"\"Verifies fallback values from manifest when using a single clip\n that does not have values for an attribute that is in the manifest.\"\"\"\n stage = Usd.Stage.Open('manifestFallback/root.usda')\n\n # In this test case the prim has a single clip with no samples\n # that is active at time=0.\n\n # If the attribute is declared with a default value in the\n # manifest, we fall back to that value.\n fallbackInManifest = \\\n stage.GetAttributeAtPath('/Model_2.fallbackInManifest')\n self.CheckValue(fallbackInManifest, time=-1.0, expected=50.0)\n self.CheckValue(fallbackInManifest, time=0.0, expected=50.0)\n self.CheckValue(fallbackInManifest, time=1.0, expected=50.0)\n self.assertEqual(fallbackInManifest.GetTimeSamples(), [0.0])\n self.CheckTimeSamples(fallbackInManifest)\n \n fallbackBlockInManifest = \\\n stage.GetAttributeAtPath('/Model_2.fallbackBlockInManifest')\n self.CheckValue(fallbackBlockInManifest, time=-1.0, expected=None)\n self.CheckValue(fallbackBlockInManifest, time=0.0, expected=None)\n self.CheckValue(fallbackBlockInManifest, time=1.0, expected=None)\n self.assertEqual(fallbackBlockInManifest.GetTimeSamples(), [0.0])\n self.CheckTimeSamples(fallbackBlockInManifest)\n\n # If the attribute is declared without a default value in the\n # manifest, we get a value of None.\n noFallbackInManifest = \\\n stage.GetAttributeAtPath('/Model_2.noFallbackInManifest')\n self.CheckValue(noFallbackInManifest, time=-1.0, expected=None)\n self.CheckValue(noFallbackInManifest, time=0.0, expected=None)\n self.CheckValue(noFallbackInManifest, time=1.0, expected=None)\n self.assertEqual(noFallbackInManifest.GetTimeSamples(), [0.0])\n self.CheckTimeSamples(noFallbackInManifest)\n\n def test_ClipManifestGeneration(self):\n \"\"\"Tests generating a manifest using UsdClipsAPI\"\"\"\n def _ValidateManifest(manifest):\n def _CheckAttribute(path):\n attr = manifest.GetAttributeAtPath(path)\n self.assertTrue(attr)\n self.assertIsNone(attr.default)\n\n _CheckAttribute('/Clip/A.a')\n _CheckAttribute('/Clip/A.b')\n _CheckAttribute('/Clip/A{v=a}.c')\n _CheckAttribute('/Clip/A.z')\n\n # We should not have an entry for d in the manifest since it has\n # no time samples in any of the clips.\n self.assertFalse(manifest.GetAttributeAtPath('/Clip/A.d'))\n\n # Test UsdClipsAPI.GenerateManifestFromLayers\n clip1 = Sdf.Layer.FindOrOpen('manifestGeneration/clip_1.usda')\n self.assertTrue(clip1)\n clip2 = Sdf.Layer.FindOrOpen('manifestGeneration/clip_2.usda')\n self.assertTrue(clip2)\n\n manifest = Usd.ClipsAPI.GenerateClipManifestFromLayers(\n [clip1, clip2], '/Clip/A')\n _ValidateManifest(manifest)\n\n # Test errors when passing in a non-prim path\n with self.assertRaises(Tf.ErrorException):\n manifest = Usd.ClipsAPI.GenerateClipManifestFromLayers(\n [clip1, clip2], '/')\n\n with self.assertRaises(Tf.ErrorException):\n manifest = 
Usd.ClipsAPI.GenerateClipManifestFromLayers(\n [clip1, clip2], '/Foo.bar')\n\n # Test errors when passing in an invalid clip layer\n with self.assertRaises(Tf.ErrorException):\n manifest = Usd.ClipsAPI.GenerateClipManifestFromLayers(\n [clip1, None], '/Model')\n\n # Test UsdClipsAPI.GenerateManifest on authored clip sets.\n stage = Usd.Stage.Open('manifestGeneration/root.usda')\n prim = stage.GetPrimAtPath('/Model')\n\n clipsAPI = Usd.ClipsAPI(prim)\n clipsAPI.SetClipAssetPaths([Sdf.AssetPath('./clip_1.usda'), \n Sdf.AssetPath('./clip_2.usda')])\n clipsAPI.SetClipActive([(0, 0), (2, 1)])\n\n _ValidateManifest(clipsAPI.GenerateClipManifest())\n _ValidateManifest(clipsAPI.GenerateClipManifest('default'))\n\n def test_ClipManifestGenerationWithMissingValues(self):\n \"\"\"Tests generating a manifest using UsdClipsAPI and\n writeBlocksForClipsWithMissingValues=True\"\"\"\n def _ValidateManifest(manifest):\n def _CheckAttribute(path, expectedTimeSamples):\n self.assertEqual(\n manifest.ListTimeSamplesForPath(path), expectedTimeSamples)\n\n for time in expectedTimeSamples:\n self.assertEqual(\n manifest.QueryTimeSample(path, time), Sdf.ValueBlock())\n\n # These attributes don't exist in clip 2, which is active at time\n # 2.0 so we expect to see value blocks authored at that time.\n _CheckAttribute('/Clip/A.a', [2.0])\n _CheckAttribute('/Clip/A.b', [2.0])\n _CheckAttribute('/Clip/A{v=a}.c', [2.0])\n\n # This attribute doesn't exist in clip 1, which is active at times\n # 0.0 and 4.0, so we expect to see value blocks authored at those\n # times.\n _CheckAttribute('/Clip/A.z', [0.0, 4.0])\n\n stage = Usd.Stage.Open('manifestGeneration/missingValues/root.usda')\n prim = stage.GetPrimAtPath('/Model')\n\n clipsAPI = Usd.ClipsAPI(prim)\n\n _ValidateManifest(clipsAPI.GenerateClipManifest(\n writeBlocksForClipsWithMissingValues=True))\n _ValidateManifest(clipsAPI.GenerateClipManifest(\n 'default', writeBlocksForClipsWithMissingValues=True))\n\n def test_ClipManifestAutoGeneration(self):\n \"\"\"Verifies behavior with automatic generation of clip manifest\n when no manifest is explicitly specified.\"\"\"\n # Temporarily modify the first clip to test Reload functionality\n clipLayer = Sdf.Layer.FindOrOpen('manifestGeneration/clip_1.usda')\n del clipLayer.GetPrimAtPath('/Clip/A').properties['b']\n\n stage = Usd.Stage.Open('manifestGeneration/root.usda')\n prim = stage.GetPrimAtPath('/Model')\n\n def _GetManifestLayer(stage):\n def _IsManifestLayer(l):\n return l.anonymous and 'generated_manifest' in l.identifier\n layers = [l for l in stage.GetUsedLayers() if _IsManifestLayer(l)]\n self.assertEqual(len(layers), 1)\n return layers[0]\n\n # The prim at /Model has one active clip specified and no manifest.\n # A manifest should have automatically been generated for this\n # clip containing declarations for the attributes in the clip.\n #\n # Note that the manifest does not contain /Clip/A.d. Although it\n # is declared in clip_1.usda, it has no time samples so it's ignored.\n #\n # Note that the manifest does not contain /Clip/B. 
Since the clip set's\n # primPath is set to /Clip/A, no values under /Clip/B will ever be used\n # so it's ignored.\n manifestLayer = _GetManifestLayer(stage)\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.a'))\n self.assertFalse(manifestLayer.GetAttributeAtPath('/Clip/A.b'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A{v=a}.c'))\n self.assertFalse(manifestLayer.GetAttributeAtPath('/Clip/A.d'))\n self.assertFalse(manifestLayer.GetPrimAtPath('/Clip/B'))\n\n # Reloading the stage should cause the manifest to be regenerated.\n with LayerChangeListener() as l:\n stage.Reload()\n\n # Note that unlike test cases below, the manifestLayer will remain\n # valid and will just be modified in place.\n self.assertTrue(clipLayer in l.changedLayers)\n self.assertTrue(manifestLayer in l.changedLayers)\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.a'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.b'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A{v=a}.c'))\n self.assertFalse(manifestLayer.GetPrimAtPath('/Clip/B'))\n\n # Now add on a new active clip. A new manifest should be created\n # containing attributes from clip_1.usda and clip_2.usda.\n with LayerChangeListener() as l:\n clipsAPI = Usd.ClipsAPI(prim)\n clipsAPI.SetClipAssetPaths([Sdf.AssetPath('./clip_1.usda'), \n Sdf.AssetPath('./clip_2.usda')])\n clipsAPI.SetClipActive([(0, 0), (2, 1)])\n\n self.assertFalse(manifestLayer)\n manifestLayer = _GetManifestLayer(stage)\n\n self.assertTrue(manifestLayer in l.changedLayers)\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.a'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.b'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.z'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A{v=a}.c'))\n self.assertFalse(manifestLayer.GetPrimAtPath('/Clip/B'))\n\n # Authoring more clip metadata causes the prim to resync, but\n # should reuse the existing manifest instead of regenerating.\n with LayerChangeListener() as l:\n clipsAPI.SetClipTimes([(0, 0), (1, 1), (2, 0), (2, 1)])\n\n self.assertTrue(manifestLayer)\n\n self.assertFalse(manifestLayer in l.changedLayers)\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.a'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.b'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A.z'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/A{v=a}.c'))\n\n # Change the clip prim path. 
A new manifest should be created\n # containing only attributes under the new path.\n with LayerChangeListener() as l:\n clipsAPI.SetClipPrimPath('/Clip/B')\n\n self.assertFalse(manifestLayer)\n manifestLayer = _GetManifestLayer(stage)\n\n self.assertTrue(manifestLayer in l.changedLayers)\n self.assertFalse(manifestLayer.GetPrimAtPath('/Clip/A'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/B.g'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/B.h'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/Clip/B.y'))\n\n def test_ClipTemplateManifestGeneration(self):\n \"\"\"Tests generating a manifest using UsdClipsAPI with template-based\n value clips\"\"\"\n def _ValidateManifest(manifest):\n def _CheckAttribute(path):\n attr = manifest.GetAttributeAtPath(path)\n self.assertTrue(attr)\n self.assertIsNone(attr.default)\n\n _CheckAttribute('/points.extent')\n _CheckAttribute('/points.a')\n \n # This attribute exists in a clip that should not be picked up\n # by the template specification.\n self.assertFalse(manifest.GetAttributeAtPath('/points.b'))\n\n stage = Usd.Stage.Open('template/manifestGeneration/root.usda')\n prim = stage.GetPrimAtPath('/World/points')\n\n clipsAPI = Usd.ClipsAPI(prim)\n _ValidateManifest(clipsAPI.GenerateClipManifest())\n _ValidateManifest(clipsAPI.GenerateClipManifest('default'))\n\n def test_ClipTemplateManifestGenerationWithMissingValues(self):\n \"\"\"Tests generating a manifest using UsdClipsAPI with template-based\n value clips and writeBlocksForClipsWithMissingValues=True\"\"\"\n def _ValidateManifest(manifest):\n def _CheckAttribute(path, expectedTimeSamples):\n self.assertEqual(\n manifest.ListTimeSamplesForPath(path), expectedTimeSamples)\n\n for time in expectedTimeSamples:\n self.assertEqual(\n manifest.QueryTimeSample(path, time), Sdf.ValueBlock())\n\n # This attribute exists in all clips, so we expect no blocks to be\n # authored in the manifest.\n _CheckAttribute('/points.extent', [])\n\n # This attribute does not exist in clips 1 and 3 which are active\n # at times 1 and 3, so we expect blocks at those times.\n _CheckAttribute('/points.a', [1.0, 3.0])\n\n # This attribute does not exist in clips 1 and 2 which are active\n # at times 1 and 2, so we expect blocks at those times.\n _CheckAttribute('/points.b', [1.0, 2.0])\n\n stage = Usd.Stage.Open(\n 'template/manifestGeneration/missingValues/root.usda')\n prim = stage.GetPrimAtPath('/World/points')\n\n clipsAPI = Usd.ClipsAPI(prim)\n _ValidateManifest(clipsAPI.GenerateClipManifest(\n writeBlocksForClipsWithMissingValues=True))\n _ValidateManifest(clipsAPI.GenerateClipManifest(\n 'default', writeBlocksForClipsWithMissingValues=True))\n\n def test_ClipTemplateManifestAutoGeneration(self):\n \"\"\"Verifies automatic generation of clip manifest for template-based\n value clip specification\"\"\"\n stage = Usd.Stage.Open('template/manifestGeneration/root.usda')\n prim = stage.GetPrimAtPath('/World/points')\n\n def _GetManifestLayer(stage):\n def _IsManifestLayer(l):\n return l.anonymous and 'generated_manifest' in l.identifier\n layers = [l for l in stage.GetUsedLayers() if _IsManifestLayer(l)]\n self.assertEqual(len(layers), 1)\n return layers[0]\n \n # The prim at /World/points has template-based clips specified\n # that should pick up p.001.usda and p.002.usda.\n # A manifest should have automatically been generated for this\n # clip containing declarations for the attributes in these clips.\n manifestLayer = _GetManifestLayer(stage)\n 
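# Attribute 'b' only appears in a clip outside the current template\n # range, so it should not be declared in the generated manifest.\n 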
self.assertTrue(manifestLayer.GetAttributeAtPath('/points.extent'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/points.a'))\n self.assertFalse(manifestLayer.GetAttributeAtPath('/points.b'))\n\n # Extend the template end time to pick up an additional clip.\n # This should cause a resync and a new manifest to be generated.\n with LayerChangeListener() as l:\n clipsAPI = Usd.ClipsAPI(prim)\n clipsAPI.SetClipTemplateEndTime(3)\n\n self.assertFalse(manifestLayer)\n manifestLayer = _GetManifestLayer(stage)\n self.assertTrue(manifestLayer in l.changedLayers)\n\n self.assertTrue(manifestLayer.GetAttributeAtPath('/points.extent'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/points.a'))\n self.assertTrue(manifestLayer.GetAttributeAtPath('/points.b'))\n\n def test_ClipTemplateBehavior(self):\n primPath = Sdf.Path('/World/fx/Particles_Splash/points')\n attrName = 'extent'\n\n stage = Usd.Stage.Open('template/int1/result_int_1.usda')\n prim = stage.GetPrimAtPath(primPath)\n attr = prim.GetAttribute(attrName)\n self.CheckValue(attr, time=1, expected=Vt.Vec3fArray(2, (1,1,1)))\n self.CheckValue(attr, time=1.5, \n expected=Vt.Vec3fArray(2, (1.5,1.5,1.5)))\n self.CheckValue(attr, time=2, expected=Vt.Vec3fArray(2, (2,2,2)))\n self.CheckValue(attr, time=2.5, \n expected=Vt.Vec3fArray(2, (2.5,2.5,2.5)))\n self.CheckValue(attr, time=3, expected=Vt.Vec3fArray(2, (3,3,3)))\n self.CheckValue(attr, time=3.5, \n expected=Vt.Vec3fArray(2, (3.5,3.5,3.5)))\n self.CheckValue(attr, time=4, expected=Vt.Vec3fArray(2, (4,4,4)))\n\n stage = Usd.Stage.Open('template/int2/result_int_2.usda')\n prim = stage.GetPrimAtPath(primPath)\n attr = prim.GetAttribute(attrName)\n self.CheckValue(attr, time=1, expected=Vt.Vec3fArray(2, (1,1,1)))\n self.CheckValue(attr, time=10, expected=Vt.Vec3fArray(2, (10,10,10)))\n self.CheckValue(attr, time=17, expected=Vt.Vec3fArray(2, (17,17,17)))\n self.CheckValue(attr, time=26, expected=Vt.Vec3fArray(2, (26,26,26)))\n self.CheckValue(attr, time=33, expected=Vt.Vec3fArray(2, (33,33,33)))\n self.CheckValue(attr, time=43, expected=Vt.Vec3fArray(2, (43,43,43)))\n self.CheckValue(attr, time=49, expected=Vt.Vec3fArray(2, (49,49,49)))\n\n # Test with template offsets applied\n stage = Usd.Stage.Open('template/int3/result_int_3.usda')\n prim = stage.GetPrimAtPath(primPath)\n attr = prim.GetAttribute(attrName)\n self.CheckValue(attr, time=2.5, expected=Vt.Vec3fArray(2, (1,1,1)))\n self.CheckValue(attr, time=3.0, expected=Vt.Vec3fArray(2, (1,1,1)))\n self.CheckValue(attr, time=3.25, expected=Vt.Vec3fArray(2, (2,2,2)))\n self.CheckValue(attr, time=3.5, expected=Vt.Vec3fArray(2, (3,3,3)))\n self.CheckValue(attr, time=4.0, expected=Vt.Vec3fArray(2, (3,3,3)))\n self.CheckValue(attr, time=4.5, expected=Vt.Vec3fArray(2, (3,3,3)))\n\n # XXX: bug/155441 precludes us from adding the following test case\n # stage = Usd.Stage.Open('template/int4/result_int_4.usda')\n # prim = stage.GetPrimAtPath(primPath)\n # attr = prim.GetAttribute(attrName)\n # self.CheckValue(attr, time=0, expected=Vt.Vec3fArray(2, (0,0,0)))\n # self.CheckValue(attr, time=1, expected=Vt.Vec3fArray(2, (1,1,1)))\n # self.CheckValue(attr, time=3.5, expected=Vt.Vec3fArray(2, (3,3,3)))\n # self.CheckValue(attr, time=4.0, expected=Vt.Vec3fArray(2, (4,4,4)))\n\n stage = Usd.Stage.Open('template/subint1/result_subint_1.usda')\n prim = stage.GetPrimAtPath(primPath)\n attr = prim.GetAttribute(attrName)\n self.CheckValue(attr, time=101, expected=Vt.Vec3fArray(2, (101,101,101)))\n self.CheckValue(attr, time=101.5, \n 
expected=Vt.Vec3fArray(2, (101.5,101.5,101.5)))\n self.CheckValue(attr, time=102, expected=Vt.Vec3fArray(2, (102,102,102)))\n self.CheckValue(attr, time=102.5, \n expected=Vt.Vec3fArray(2, (102.5,102.5,102.5)))\n self.CheckValue(attr, time=103, expected=Vt.Vec3fArray(2, (103,103,103)))\n self.CheckValue(attr, time=103.5, \n expected=Vt.Vec3fArray(2, (103.5,103.5,103.5)))\n self.CheckValue(attr, time=104, expected=Vt.Vec3fArray(2, (104,104,104)))\n\n stage = Usd.Stage.Open('template/subint2/result_subint_2.usda')\n prim = stage.GetPrimAtPath(primPath)\n attr = prim.GetAttribute(attrName)\n self.CheckValue(attr, time=10.00, \n expected=Vt.Vec3fArray(2, (10.00, 10.00, 10.00)))\n self.CheckValue(attr, time=10.025, \n expected=Vt.Vec3fArray(2, (10.025, 10.025, 10.025)))\n self.CheckValue(attr, time=10.05, \n expected=Vt.Vec3fArray(2, (10.05, 10.05, 10.05)))\n self.CheckValue(attr, time=10.08, \n expected=Vt.Vec3fArray(2, (10.08, 10.08, 10.08)))\n self.CheckValue(attr, time=10.10, \n expected=Vt.Vec3fArray(2, (10.10, 10.10, 10.10)))\n self.CheckValue(attr, time=10.125, \n expected=Vt.Vec3fArray(2, (10.125, 10.125, 10.125)))\n self.CheckValue(attr, time=10.15, \n expected=Vt.Vec3fArray(2, (10.15, 10.15, 10.15)))\n\n # Test with template offsets applied\n stage = Usd.Stage.Open('template/subint3/result_subint_3.usda')\n prim = stage.GetPrimAtPath(primPath)\n attr = prim.GetAttribute(attrName)\n\n self.CheckValue(attr, time=9.95, expected=Vt.Vec3fArray(2, (10, 10, 10)))\n self.CheckValue(attr, time=10.00, expected=Vt.Vec3fArray(2, (10, 10, 10)))\n self.CheckValue(attr, time=10.025, \n expected=Vt.Vec3fArray(2, (10.05, 10.05, 10.05)))\n self.CheckValue(attr, time=10.05, expected=Vt.Vec3fArray(2, (10.1, 10.1, 10.1)))\n self.CheckValue(attr, time=10.10, expected=Vt.Vec3fArray(2, (10.1, 10.1, 10.1)))\n self.CheckValue(attr, time=10.15, expected=Vt.Vec3fArray(2, (10.1, 10.1, 10.1)))\n self.CheckValue(attr, time=10.20, expected=Vt.Vec3fArray(2, (10.1, 10.1, 10.1)))\n self.CheckValue(attr, time=10.25, expected=Vt.Vec3fArray(2, (10.1, 10.1, 10.1)))\n\n def test_ClipTemplateWithOffsets(self):\n stage = Usd.Stage.Open('template/layerOffsets/root.usda')\n prim = stage.GetPrimAtPath('/Model')\n attr = prim.GetAttribute('a')\n\n # Times are offset by 2 via reference and layer offsets,\n # so we expect the value at time 4 to read from clip 2, etc.\n self.CheckValue(attr, time=3.0, expected=1.0)\n self.CheckValue(attr, time=4.0, expected=2.0)\n self.CheckValue(attr, time=5.0, expected=3.0)\n\n # Because of the time offset, this should try to read clip 4,\n # but since we only have 3 clips we hold the value from the\n # last one.\n self.CheckValue(attr, time=6.0, expected=3.0)\n\n def test_ClipsWithSparseOverrides(self):\n # This layer overrides the clipActive metadata to flip\n # the active clips\n stage = Usd.Stage.Open('sparseOverrides/over_root.usda')\n prim = stage.GetPrimAtPath('/main')\n attr = prim.GetAttribute('foo')\n\n self.CheckValue(attr, time=101.0, expected=3.0)\n self.CheckValue(attr, time=103.0, expected=1.0)\n\n # This is the original layer with the clip metadata authored.\n stage = Usd.Stage.Open('sparseOverrides/root.usda')\n prim = stage.GetPrimAtPath('/main')\n attr = prim.GetAttribute('foo')\n\n self.CheckValue(attr, time=101.0, expected=1.0)\n self.CheckValue(attr, time=103.0, expected=3.0)\n\n # This layer overrides the startTime from the template metadata\n # to be equal to the endTime, effectively giving us only one clip\n stage = 
Usd.Stage.Open('sparseOverrides/template_over_root.usda')\n prim = stage.GetPrimAtPath('/main')\n attr = prim.GetAttribute('foo')\n\n self.CheckValue(attr, time=101.0, expected=3.0)\n self.CheckValue(attr, time=103.0, expected=3.0)\n\n # This is the original layer with the template metadata authored. \n stage = Usd.Stage.Open('sparseOverrides/template_root.usda')\n prim = stage.GetPrimAtPath('/main')\n attr = prim.GetAttribute('foo')\n\n self.CheckValue(attr, time=101.0, expected=1.0)\n self.CheckValue(attr, time=103.0, expected=3.0)\n\n def test_MultipleClipSets(self):\n \"\"\"Verifies behavior with multiple clip sets defined on\n the same prim that affect different prims\"\"\"\n stage = Usd.Stage.Open('clipsets/root.usda')\n\n prim = stage.GetPrimAtPath('/Set/Child_1')\n attr = prim.GetAttribute('attr')\n self.CheckValue(attr, time=0, expected=-5.0)\n self.CheckValue(attr, time=1, expected=-10.0)\n self.CheckValue(attr, time=2, expected=-15.0)\n self.CheckTimeSamples(attr)\n\n prim = stage.GetPrimAtPath('/Set/Child_2')\n attr = prim.GetAttribute('attr')\n self.CheckValue(attr, time=0, expected=-50.0)\n self.CheckValue(attr, time=1, expected=-100.0)\n self.CheckValue(attr, time=2, expected=-200.0)\n self.CheckTimeSamples(attr)\n\n def test_ListEditClipSets(self):\n \"\"\"Verifies reordering and deleting clip sets via list editing\n operations\"\"\"\n stage = Usd.Stage.Open('clipsetListEdits/root.usda')\n\n prim = stage.GetPrimAtPath('/DefaultOrderTest')\n attr = prim.GetAttribute('attr')\n self.CheckValue(attr, time=0, expected=10.0)\n self.CheckValue(attr, time=1, expected=20.0)\n self.CheckValue(attr, time=2, expected=30.0)\n self.CheckTimeSamples(attr)\n\n prim = stage.GetPrimAtPath('/ReorderTest')\n attr = prim.GetAttribute('attr')\n self.CheckValue(attr, time=0, expected=100.0)\n self.CheckValue(attr, time=1, expected=200.0)\n self.CheckValue(attr, time=2, expected=300.0)\n self.CheckTimeSamples(attr)\n\n prim = stage.GetPrimAtPath('/DeleteTest')\n attr = prim.GetAttribute('attr')\n self.CheckValue(attr, time=0, expected=100.0)\n self.CheckValue(attr, time=1, expected=200.0)\n self.CheckValue(attr, time=2, expected=300.0)\n self.CheckTimeSamples(attr)\n\n def test_InterpolateSamplesInClip(self):\n \"\"\"Tests that time samples in clips are interpolated\n when a clip time is specified and no sample exists in\n the clip at that time.\"\"\"\n stage = Usd.Stage.Open('interpolation/root.usda')\n\n prim = stage.GetPrimAtPath('/InterpolationTest')\n attr = prim.GetAttribute('attr')\n self.CheckValue(attr, time=0, expected=0.0)\n self.CheckValue(attr, time=1, expected=5.0)\n self.CheckValue(attr, time=2, expected=10.0)\n self.CheckValue(attr, time=3, expected=15.0)\n self.CheckValue(attr, time=4, expected=20.0)\n self.CheckTimeSamples(attr)\n\n def test_InterpolateSamplesToNextClip(self):\n \"\"\"Tests that time samples in clips are interpolated using the\n value from the next clip if necessary.\"\"\"\n stage = Usd.Stage.Open('interpolation/root.usda')\n\n attr = stage.GetAttributeAtPath('/ClipInterpolationTest.attr')\n self.CheckValue(attr, time=0, expected=10.0)\n self.CheckValue(attr, time=1, expected=20.0)\n\n # At t=0.5, we're beyond the last (and only) time sample in the first\n # clip that is inside the clip's active time. 
We should ignore the\n # time sample authored at t=1.0 in the first clip (since that's outside\n # the active time for that clip) and interpolate using the time sample\n # in the next clip at t=1.0\n self.CheckValue(attr, time=0.5, expected=15.0)\n\n self.assertEqual(attr.GetTimeSamples(), [0.0, 1.0, 3.0])\n self.CheckTimeSamples(attr)\n\n def test_InterpolateSamplesToNextClip2(self):\n \"\"\"Tests that additional entries in the times metadata can be used\n to 'block' interpolating values using the next clip.\"\"\"\n stage = Usd.Stage.Open('interpolation/root.usda')\n\n attr = stage.GetAttributeAtPath('/ClipInterpolationTest2.attr')\n self.CheckValue(attr, time=0, expected=10.0)\n self.CheckValue(attr, time=1, expected=20.0)\n\n # At t=0.5, we're beyond the last (and only) time sample in the first\n # clip that is inside the clip's active time. In this case we do *not*\n # ignore the time sample authored at t=1.0 in the first clip because\n # we have an entry in the times metadata that explicitly says to use\n # the value from the first clip at t=1.0 when interpolating values\n # up to the end of the clip.\n self.CheckValue(attr, time=0.5, expected=55.0)\n\n self.assertEqual(attr.GetTimeSamples(), \n [0.0, 1.0 - Usd.TimeCode.SafeStep(), 1.0, 3.0])\n self.CheckTimeSamples(attr)\n\n def test_AssetPathValuesInClips(self):\n \"\"\"Tests that asset path values in clips are resolved\n properly.\"\"\"\n stage = Usd.Stage.Open('assetPathValues/root.usda')\n\n def _CheckPaths(p1, p2):\n self.assertEqual(os.path.normcase(p1), os.path.normcase(p2))\n\n def _CheckAssetPathValue(attr, time, expected):\n _CheckPaths(attr.Get(time).resolvedPath, expected)\n _CheckPaths(Usd.AttributeQuery(attr).Get(time).resolvedPath,\n expected)\n\n def _CheckAssetPathArrayValue(attr, time, expected):\n array = attr.Get(time)\n self.assertEqual(len(array), len(expected))\n for (p1, p2) in zip(array, expected):\n _CheckPaths(p1.resolvedPath, p2)\n\n # Test that relative asset paths from clips are anchored to the\n # clip layer. Note that at time 1 we have a clip with no samples\n # so we should get the default value defined in the manifest;\n # the resolved path there should be anchored to the manifest layer.\n #\n # The stage variable expressions authored in the asset paths in\n # clip3.usda are evaluated using the variables authored in the\n # stage's root and session layer. 
Variables in the clip itself\n # are currently ignored.\n\n attr = stage.GetAttributeAtPath('/Model.assetPath')\n _CheckAssetPathValue(\n attr, time=0, \n expected=os.path.abspath('assetPathValues/clip1/clip1.usda'))\n _CheckAssetPathValue(\n attr, time=1, \n expected=os.path.abspath('assetPathValues/manifest/manifest.usda'))\n _CheckAssetPathValue(\n attr, time=2,\n expected=os.path.abspath('assetPathValues/clip2/clip2.usda'))\n _CheckAssetPathValue(\n attr, time=3,\n expected=os.path.abspath('assetPathValues/clip3/clip3.usda'))\n\n attr = stage.GetAttributeAtPath('/Model.assetPathArray')\n _CheckAssetPathArrayValue(\n attr, time=0, \n expected=[os.path.abspath('assetPathValues/clip1/clip1.usda')])\n _CheckAssetPathArrayValue(\n attr, time=1, \n expected=[os.path.abspath('assetPathValues/manifest/manifest.usda')])\n _CheckAssetPathArrayValue(\n attr, time=2,\n expected=[os.path.abspath('assetPathValues/clip2/clip2.usda')])\n _CheckAssetPathArrayValue(\n attr, time=3,\n expected=[os.path.abspath('assetPathValues/clip3/clip3.usda')])\n\n def test_ComputeClipAssetPaths(self):\n \"\"\"Test Usd.ClipsAPI.ComputeClipAssetPaths\"\"\"\n def _CheckPaths(p1, p2):\n self.assertEqual(os.path.normcase(p1), os.path.normcase(p2))\n\n def _CheckAssetPathArrays(array, expected):\n self.assertEqual(len(array), len(expected))\n for (p1, p2) in zip(array, expected):\n _CheckPaths(p1, p2)\n\n stage = Usd.Stage.Open('assetPathValues/root.usda')\n clipsAPI = Usd.ClipsAPI(stage.GetPrimAtPath('/Model'))\n computedAssetPaths = clipsAPI.ComputeClipAssetPaths()\n _CheckAssetPathArrays(\n [p.resolvedPath for p in computedAssetPaths],\n [os.path.abspath('assetPathValues/clip1/clip1.usda'),\n os.path.abspath('assetPathValues/nosamples.usda'),\n os.path.abspath('assetPathValues/clip2/clip2.usda'),\n os.path.abspath('assetPathValues/clip3/clip3.usda')])\n\n stage = Usd.Stage.Open('template/int1/result_int_1.usda')\n clipsAPI = Usd.ClipsAPI(\n stage.GetPrimAtPath('/World/fx/Particles_Splash/points'))\n computedAssetPaths = clipsAPI.ComputeClipAssetPaths()\n _CheckAssetPathArrays(\n [p.resolvedPath for p in computedAssetPaths],\n [os.path.abspath('template/int1/p.001.usd'),\n os.path.abspath('template/int1/p.002.usd'),\n os.path.abspath('template/int1/p.003.usd'),\n os.path.abspath('template/int1/p.004.usd')])\n\n def test_TemplateFileFormatArguments(self):\n stage = Usd.Stage.Open('template/args/root.usda')\n prim = stage.GetPrimAtPath('/World/points')\n attr = prim.GetAttribute('extent')\n\n self.CheckValue(attr, time=1, expected=Vt.Vec3fArray(2, (1,1,1)))\n self.CheckValue(attr, time=2, expected=Vt.Vec3fArray(2, (2,2,2)))\n\n layerId = Sdf.Layer.CreateIdentifier(\n os.path.abspath(\"template/args/p.002.usd\"), \n {'a': '1', 'b': 'str'})\n layer = Sdf.Layer.Find(layerId)\n self.assertTrue(layer)\n self.assertEqual(layer.GetFileFormatArguments(), {'a': '1', 'b': 'str'})\n\n def test_SublayerChanges(self):\n \"\"\"Test that clip layers are loaded successfully when sublayers\n are added or removed before the clip layers are pulled on.\"\"\"\n\n def _test(stage):\n # Query our test attribute's property stack and verify that it\n # contains the opinions we expect. 
This will open the clip layer.\n a = stage.GetAttributeAtPath('/SingleClip.attr_1')\n propertyStack = a.GetPropertyStack(0)\n\n rootLayer = stage.GetRootLayer()\n sublayerWithClip = Sdf.Layer.FindRelativeToLayer(\n rootLayer, 'layers/sublayer.usda')\n self.assertTrue(sublayerWithClip)\n\n clipLayer = Sdf.Layer.FindRelativeToLayer(\n sublayerWithClip, 'clip.usda')\n self.assertTrue(clipLayer)\n\n refLayer = Sdf.Layer.FindRelativeToLayer(\n rootLayer, 'layers/ref.usda')\n self.assertTrue(refLayer)\n\n self.assertEqual(\n propertyStack,\n [sublayerWithClip.GetAttributeAtPath('/SingleClip.attr_1'),\n clipLayer.GetAttributeAtPath('/Model.attr_1'),\n refLayer.GetAttributeAtPath('/Model.attr_1')])\n\n # Test combinations of inserting and removing sublayers prior to\n # pulling on attributes and opening clip layers. Clip layers are\n # opened the first time the _test function is called, so these\n # tests are separated into insert-first and remove-first to cover\n # both cases. Empty and non-empty sublayers are also tested \n # separately since there's an optimization that avoids significant\n # resyncs in the former case.\n\n def _TestInsertAndRemoveEmptySublayer():\n dummySublayer = Sdf.Layer.CreateAnonymous('.usda')\n rootLayer = Sdf.Layer.FindOrOpen('sublayerChanges/root.usda')\n\n stage = Usd.Stage.Open(rootLayer)\n rootLayer.subLayerPaths.insert(0, dummySublayer.identifier)\n _test(stage)\n\n del rootLayer.subLayerPaths[0]\n _test(stage)\n\n def _TestRemoveAndInsertEmptySublayer():\n dummySublayer = Sdf.Layer.CreateAnonymous('.usda')\n\n rootLayer = Sdf.Layer.FindOrOpen('sublayerChanges/root.usda')\n rootLayer.subLayerPaths.insert(0, dummySublayer.identifier)\n\n stage = Usd.Stage.Open(rootLayer)\n del rootLayer.subLayerPaths[0]\n _test(stage)\n\n rootLayer.subLayerPaths.insert(0, dummySublayer.identifier)\n _test(stage)\n\n def _TestInsertAndRemoveNonEmptySublayer():\n dummySublayer = Sdf.Layer.CreateAnonymous('.usda')\n Sdf.CreatePrimInLayer(dummySublayer, '/Dummy')\n\n rootLayer = Sdf.Layer.FindOrOpen('sublayerChanges/root.usda')\n\n stage = Usd.Stage.Open(rootLayer)\n rootLayer.subLayerPaths.insert(0, dummySublayer.identifier)\n _test(stage)\n\n del rootLayer.subLayerPaths[0]\n _test(stage)\n\n def _TestRemoveAndInsertNonEmptySublayer():\n dummySublayer = Sdf.Layer.CreateAnonymous('.usda')\n Sdf.CreatePrimInLayer(dummySublayer, '/Dummy')\n\n rootLayer = Sdf.Layer.FindOrOpen('sublayerChanges/root.usda')\n rootLayer.subLayerPaths.insert(0, dummySublayer.identifier)\n\n stage = Usd.Stage.Open(rootLayer)\n del rootLayer.subLayerPaths[0]\n _test(stage)\n\n rootLayer.subLayerPaths.insert(0, dummySublayer.identifier)\n _test(stage)\n \n _TestInsertAndRemoveNonEmptySublayer()\n _TestRemoveAndInsertNonEmptySublayer()\n _TestInsertAndRemoveEmptySublayer()\n _TestRemoveAndInsertEmptySublayer()\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"PixarAnimationStudios/OpenUSD","sub_path":"pxr/usd/usd/testenv/testUsdValueClips.py","file_name":"testUsdValueClips.py","file_ext":"py","file_size_in_byte":124854,"program_lang":"python","lang":"en","doc_type":"code","stars":5042,"dataset":"github-code","pt":"3"} +{"seq_id":"70544675601","text":"'''\nTypedLeaf: a marker for typechecking leaves.\n\n>>> int_or_bool = TypedLeaf(int, bool)\n>>> validate(int_or_bool, 12)\n>>> validate(int_or_bool, True)\n>>> validate(int_or_bool, \"12\")\nTraceback (most recent call last):\n...\nInvalid: type_error\n\n>>> try:\n... validate(int_or_bool, \"12\")\n... except Invalid as e:\n... 
print(e)\ntype_error - Expected one of [int, bool]; got str\n\ndictify and undictify always pass through values unchanged, regardless of type:\n\n>>> dictify(int_or_bool, True)\nTrue\n>>> dictify(int_or_bool, \"Not a bool\")\n'Not a bool'\n>>> undictify(int_or_bool, 100)\n100\n>>> undictify(int_or_bool, \"100\")\n'100'\n\nAdditionally, this module provides several TypedLeaf subclasses with the types\nprefilled:\n\n>>> s = String()\n>>> validate(s, \"foo\")\n>>> validate(s, 12)\nTraceback (most recent call last):\n...\nInvalid: type_error\n\n>>> b = Boolean()\n>>> validate(b, True)\n>>> validate(b, False)\n>>> validate(b, 1)\nTraceback (most recent call last):\n...\nInvalid: type_error\n\n>>> i = Int()\n>>> validate(i, 12)\n>>> validate(i, 12.1)\nTraceback (most recent call last):\n...\nInvalid: type_error\n\n>>> num = Number()\n>>> validate(num, 1)\n>>> validate(num, 1.1)\n>>> validate(num, 1+1j)\nTraceback (most recent call last):\n...\nInvalid: type_error\n>>> validate(num, 'ten')\nTraceback (most recent call last):\n...\nInvalid: type_error\n\n>>> com = Complex()\n>>> validate(com, 1)\n>>> validate(com, 1.1)\n>>> validate(com, 1+1j)\n>>> validate(com, 'ten')\nTraceback (most recent call last):\n...\nInvalid: type_error\n\n'''\n\nimport numbers\nimport sys\n\nif sys.version < '3': # pragma: no cover\n bytes_type = str\nelse: # pragma: no cover\n bytes_type = bytes\n basestring = str\n\nfrom .base import Leaf, dictify, undictify, validate, IGNORE, CHECK\nfrom .invalid import Invalid\n\ndef _type_to_str(typ):\n return getattr(typ, '__name__', str(typ))\n\nclass TypedLeaf(Leaf):\n '''Marker for simple types, such as integers and strings.\n\n Parameterized by a list of allowed types.\n\n TypedLeaf(*types) simply means \"A value of one of these types.\"\n '''\n def __init__(self, *types):\n if types:\n self.types = types\n\n def error_msg_for(self, value):\n '''\n >>> TypedLeaf().error_msg_for(12)\n 'Expected nothing; got int'\n >>> TypedLeaf(bool).error_msg_for(12)\n 'Expected bool; got int'\n >>> TypedLeaf(str, dict).error_msg_for(12)\n 'Expected one of [str, dict]; got int'\n '''\n got = _type_to_str(type(value))\n if not self.types: # This shouldn't really happen\n expected = 'nothing'\n elif len(self.types) == 1:\n expected = _type_to_str(self.types[0])\n else:\n tstr = ', '.join(_type_to_str(t) for t in self.types)\n expected = 'one of [{}]'.format(tstr)\n return 'Expected {}; got {}'.format(expected, got)\n\n types = None\n\n# validate checks .types,\n\n@validate.when(TypedLeaf)\ndef validate_tl(dispgraph, value, **kwargs):\n if kwargs.get('error_mode', CHECK) == IGNORE:\n # why would you call validate with error_mode IGNORE? 
Nonetheless, it'll\n # do what you apparently want.\n return\n if not isinstance(value, dispgraph.marker.types):\n raise Invalid('type_error', dispgraph.marker.error_msg_for(value))\n\nBoolean = TypedLeaf.subclass(types=(bool,), __class_name=\"Boolean\")\nString = TypedLeaf.subclass(types=(basestring,), __class_name=\"String\")\nBytes = TypedLeaf.subclass(types=(bytes_type,), __class_name=\"Bytes\")\nInt = TypedLeaf.subclass(types=(numbers.Integral,), __class_name=\"Int\")\nNumber = TypedLeaf.subclass(types=(numbers.Real,), __class_name=\"Number\")\nComplex = TypedLeaf.subclass(types=(numbers.Complex,), __class_name=\"Complex\")\n\n# temporary hack for JSON compatilibity\n# TODO make a separate JSON-aware dfier?\nimport base64\n\n@dictify.when(Bytes)\ndef df_bytes(dispgraph, value, **kwargs):\n '''\n >>> dictify(Bytes(), b'\\\\xc3\\\\xbf') == u'w78='\n True\n '''\n return base64.b64encode(value).decode('ascii')\n\n@undictify.when(Bytes)\ndef udf_bytes(dispgraph, value, **kwargs):\n '''\n >>> undictify(Bytes(), u'w78=') == b'\\\\xc3\\\\xbf'\n True\n >>> undictify(Bytes(), 'invalid_base64\\\\xc3')\n Traceback (most recent call last):\n ...\n Invalid: ...\n >>> undictify(Bytes(), ['not', 'a', 'string'])\n Traceback (most recent call last):\n ...\n Invalid: ...\n '''\n try:\n return base64.b64decode(value)\n except Exception as e:\n raise Invalid(\"bad_value\", str(e))\n","repo_name":"dplepage/travesty","sub_path":"travesty/typed_leaf.py","file_name":"typed_leaf.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"431148387","text":"from tempfile import NamedTemporaryFile\n\nimport pytest\nimport copy\n\nfrom cobald.daemon.config.mapping import ConfigurationError\nfrom cobald.daemon.core.config import load, COBalDLoader, yaml_constructor\nfrom cobald.controller.linear import LinearController\n\nfrom ...mock.pool import MockPool\n\n\n# register test pool as safe for YAML configurations\nCOBalDLoader.add_constructor(tag=\"!MockPool\", constructor=yaml_constructor(MockPool))\n\n\n# Helpers for testing lazy/eager YAML evaluation\n# Since YAML defaults to lazy evaluation, the arguments available during evaluation\n# are not necessarily complete.\nclass TagTracker:\n \"\"\"Helper to track the arguments supplied to YAML !Tags\"\"\"\n\n def __init__(self, *args, **kwargs):\n # the state of arguments *during* YAML evaluation\n self.orig_args = copy.deepcopy(args)\n self.orig_kwargs = copy.deepcopy(kwargs)\n # the state of arguments *after* YAML evaluation\n self.final_args = args\n self.final_kwargs = kwargs\n\n\nCOBalDLoader.add_constructor(\n tag=\"!TagTrackerEager\", constructor=yaml_constructor(TagTracker, eager=True)\n)\nCOBalDLoader.add_constructor(\n tag=\"!TagTrackerLazy\", constructor=yaml_constructor(TagTracker, eager=False)\n)\n\n\ndef get_config_section(config: dict, section: str):\n return next(\n content for plugin, content in config.items() if plugin.section == section\n )\n\n\nclass TestYamlConfig:\n def test_load(self):\n \"\"\"Load a valid YAML config\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n high_allocation: 1.1\n - !MockPool\n \"\"\"\n )\n with load(config.name):\n assert True\n assert True\n\n def test_load_invalid(self):\n \"\"\"Load a invalid YAML config (invalid keyword argument)\"\"\"\n with 
NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n foo: 0\n - !MockPool\n \"\"\"\n )\n with pytest.raises(TypeError):\n with load(config.name):\n assert False\n\n def test_load_dangling(self):\n \"\"\"Forbid loading a YAML config with dangling content\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n high_allocation: 1.1\n - !MockPool\n random_things:\n foo: bar\n \"\"\"\n )\n with pytest.raises(ConfigurationError):\n with load(config.name):\n assert False\n\n def test_load_missing(self):\n \"\"\"Forbid loading a YAML config with missing content\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n logging:\n version: 1.0\n \"\"\"\n )\n with pytest.raises(ConfigurationError):\n with load(config.name):\n assert False\n\n def test_load_mixed_creation(self):\n \"\"\"Load a YAML config with mixed pipeline step creation methods\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - __type__: cobald.controller.linear.LinearController\n low_utilisation: 0.9\n high_allocation: 0.9\n - !MockPool\n \"\"\"\n )\n with load(config.name) as config:\n pipeline = get_config_section(config, \"pipeline\")\n assert isinstance(pipeline[0], LinearController)\n assert isinstance(pipeline[0].target, MockPool)\n\n def test_load_tags_substructure(self):\n \"\"\"Load !Tags with substructure\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !MockPool\n __config_test:\n tagged: !TagTrackerEager\n host: 127.0.0.1\n port: 1234\n algorithm: HS256\n users:\n - user_name: tardis\n scopes:\n - user:read\n \"\"\"\n )\n with load(config.name) as config:\n tagged = get_config_section(config, \"__config_test\")[\"tagged\"]\n assert isinstance(tagged, TagTracker)\n assert tagged.final_kwargs[\"host\"] == \"127.0.0.1\"\n assert tagged.final_kwargs[\"port\"] == 1234\n assert tagged.final_kwargs[\"algorithm\"] == \"HS256\"\n assert tagged.final_kwargs[\"users\"][0][\"user_name\"] == \"tardis\"\n assert tagged.final_kwargs[\"users\"][0][\"scopes\"] == [\"user:read\"]\n\n def test_load_tags_eager(self):\n \"\"\"Load !Tags with substructure, immediately using them\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !MockPool\n __config_test:\n tagged: !TagTrackerEager\n top: \"top level value\"\n nested:\n - leaf: \"leaf level value\"\n \"\"\"\n )\n with load(config.name) as config:\n tagged = get_config_section(config, \"__config_test\")[\"tagged\"]\n assert isinstance(tagged, TagTracker)\n # eager loading => all data should exist immediately\n assert tagged.orig_kwargs[\"top\"] == \"top level value\"\n assert tagged.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]\n assert tagged.orig_kwargs == tagged.final_kwargs\n\n def test_load_tags_lazy(self):\n \"\"\"Load !Tags with substructure, lazily using them\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n 
pipeline:\n - !MockPool\n __config_test:\n tagged: !TagTrackerLazy\n top: \"top level value\"\n nested:\n - leaf: \"leaf level value\"\n \"\"\"\n )\n with load(config.name) as config:\n tagged = get_config_section(config, \"__config_test\")[\"tagged\"]\n assert isinstance(tagged, TagTracker)\n # eager loading => only some data should exist immediately...\n assert tagged.orig_kwargs[\"top\"] == \"top level value\"\n assert tagged.orig_kwargs[\"nested\"] == []\n # ...but should be there in the end\n assert tagged.final_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]\n\n def test_load_tags_nested(self):\n \"\"\"Load !Tags with nested !Tags\"\"\"\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !MockPool\n __config_test:\n top_eager: !TagTrackerEager\n nested:\n - leaf: \"leaf level value\"\n - leaf_lazy: !TagTrackerLazy\n nested:\n - leaf: \"leaf level value\"\n \"\"\"\n )\n with load(config.name) as config:\n top_eager = get_config_section(config, \"__config_test\")[\"top_eager\"]\n # eager tags are evaluated eagerly\n assert top_eager.orig_kwargs[\"nested\"][0] == {\n \"leaf\": \"leaf level value\"\n }\n leaf_lazy = top_eager.orig_kwargs[\"nested\"][1][\"leaf_lazy\"]\n # eagerness overrides laziness\n assert leaf_lazy.orig_kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]\n\n def test_load_tag_settings(self):\n \"\"\"Load !Tags with decorator settings\"\"\"\n # __yaml_tag_test is provided by the cobald package\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !MockPool\n __config_test:\n settings_tag: !__yaml_tag_test\n top: \"top level value\"\n nested:\n - leaf: \"leaf level value\"\n \"\"\"\n )\n with load(config.name) as config:\n section = get_config_section(config, \"__config_test\")\n args, kwargs = section[\"settings_tag\"]\n assert args == ()\n assert kwargs[\"top\"] == \"top level value\"\n assert kwargs[\"nested\"] == [{\"leaf\": \"leaf level value\"}]\n","repo_name":"MatterMiners/cobald","sub_path":"cobald_tests/daemon/core/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":10338,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"31728644882","text":"import random\nimport os\n\nfrom mimesis import Person, Datetime, Finance, Development, Code, Numeric, Address\nfrom mimesis.enums import EANFormat\n\nfrom utils import get_database_connection, get_string_buffer, remove_duplicates\n\nperson = Person()\ndatetime = Datetime()\nfinance = Finance()\ndevelopment = Development()\ncode = Code()\nnumeric = Numeric()\naddress = Address()\n\nNUMBER_CUSTOMERS = int(os.getenv(\"NUMBER_CUSTOMERS\", \"1000\"))\nNUMBER_COMPANIES = int(os.getenv(\"NUMBER_COMPANIES\", \"50\"))\nNUMBER_PRODUCTS = int(os.getenv(\"NUMBER_PRODUCTS\", \"300\"))\nNUMBER_COMPANY_CATALOG_ITEMS = int(os.getenv(\"NUMBER_COMPANY_CATALOG_ITEMS\", \"2500\"))\nNUMBER_ORDERS = int(os.getenv(\"NUMBER_ORDERS\", \"5000\"))\n\n## Generate data\nrows_customer = [\n {\n \"document_number\": person.identifier(mask=\"@@###-###\"),\n \"full_name\": person.full_name(),\n \"date_of_birth\": datetime.date(start=1950, end=2005),\n }\n for _ in range(NUMBER_CUSTOMERS)\n]\nrows_company = [\n {\n \"cuit_number\": person.identifier(mask=\"@@#####@@\"),\n \"name\": finance.stock_name(),\n \"is_supplier\": development.boolean(),\n }\n for _ in 
range(NUMBER_COMPANIES)\n]\nrows_product = [{\"ean\": code.ean(fmt=EANFormat.EAN13)} for _ in range(NUMBER_PRODUCTS)]\n\nlist_document_number = [customer[\"document_number\"] for customer in rows_customer]\nlist_cuit_number = [company[\"cuit_number\"] for company in rows_company]\nlist_cuit_number_companies = [\n company[\"cuit_number\"] for company in rows_company if not company[\"is_supplier\"]\n]\nlist_cuit_number_suppliers = [\n company[\"cuit_number\"] for company in rows_company if company[\"is_supplier\"]\n]\nlist_ean = [product[\"ean\"] for product in rows_product]\n\nrows_company_catalog = [\n {\n \"cuit_number\": random.choice(list_cuit_number),\n \"ean\": random.choice(list_ean),\n \"price\": numeric.float_number(start=0.01, end=100, precision=2),\n }\n for _ in range(NUMBER_COMPANY_CATALOG_ITEMS)\n]\nrows_company_catalog_clean = list(\n remove_duplicates(rows_company_catalog, lambda d: (d[\"cuit_number\"], d[\"ean\"]))\n)\n\nrows_order = [\n {\n \"cuit_number_company\": random.choice(list_cuit_number_companies),\n \"cuit_number_supplier\": random.choice(list_cuit_number_suppliers),\n \"ean\": random.choice(list_ean),\n \"document_number\": random.choice(list_document_number),\n \"shipping_country_code\": address.country_code(),\n \"price\": numeric.float_number(start=0.01, end=100, precision=2),\n \"timestamp\": datetime.datetime(),\n }\n for _ in range(NUMBER_ORDERS)\n]\n\n\ndatabase_connection = get_database_connection()\n\n## Write to the Database\nwith database_connection.cursor() as cursor:\n print(\"Writing to the b2b database...\", end=\" \")\n cursor.execute(\"truncate table b2b_database.customer cascade;\")\n cursor.copy_expert(\n \"\"\"\n COPY b2b_database.customer (document_number, full_name, date_of_birth) \n FROM STDIN WITH CSV HEADER\n \"\"\",\n get_string_buffer(\n rows_customer, [\"document_number\", \"full_name\", \"date_of_birth\"]\n ),\n )\n\n cursor.execute(\"truncate table b2b_database.company cascade;\")\n cursor.copy_expert(\n \"COPY b2b_database.company (cuit_number, name, is_supplier) FROM STDIN WITH CSV HEADER\",\n get_string_buffer(rows_company, [\"cuit_number\", \"name\", \"is_supplier\"]),\n )\n\n cursor.execute(\"truncate table b2b_database.product cascade;\")\n cursor.copy_expert(\n \"COPY b2b_database.product (ean) FROM STDIN WITH CSV HEADER\",\n get_string_buffer(rows_product, [\"ean\"]),\n )\n\n cursor.execute(\"truncate table b2b_database.company_catalog;\")\n cursor.copy_expert(\n \"COPY b2b_database.company_catalog (cuit_number, ean, price) FROM STDIN WITH CSV HEADER\",\n get_string_buffer(rows_company_catalog_clean, [\"cuit_number\", \"ean\", \"price\"]),\n )\n\n cursor.execute(\"truncate table b2b_database.order;\")\n cursor.copy_expert(\n \"\"\"\n COPY b2b_database.order (cuit_number_company, cuit_number_supplier, ean, document_number, shipping_country_code, price, timestamp) \n FROM STDIN WITH CSV HEADER\n \"\"\",\n get_string_buffer(\n rows_order,\n [\n \"cuit_number_company\",\n \"cuit_number_supplier\",\n \"ean\",\n \"document_number\",\n \"shipping_country_code\",\n \"price\",\n \"timestamp\",\n ],\n ),\n )\n\ndatabase_connection.commit()\nprint(\"done.\")\n","repo_name":"HugoNeves/Airflow-Play","sub_path":"generate_data_sources/b2b_database.py","file_name":"b2b_database.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10395898387","text":"import requests\nimport json\nimport pandas as pd\nimport os\n\n\n\nclass 
GoogleBooks():\n\n def __init__(self):\n self.row_list = []\n\n\n def get_search_result(self, user_input, maxResults):\n\n response = requests.get(f\"https://www.googleapis.com/books/v1/volumes?q={user_input}&fields=items(volumeInfo(title, authors, publisher))&maxResults={maxResults}&key={os.environ['GOOGLE_BOOKS_API']}\")\n response_text = response.json()\n return response_text\n\n def converts_json_response_to_list(self, response_text):\n vinfo = [v['volumeInfo'] for v in response_text[\"items\"]]\n return vinfo\n\n\n def store_list_in_dataframe(self,vinfo):\n df = pd.DataFrame(vinfo, index =[1, 2, 3, 4, 5])\n return df\n\n def displays_dataframe(self, df):\n display_book_list = df\n print (display_book_list)\n return display_book_list\n\n\n def get_user_selection(self, df):\n\n user_selection = input(\"Please select a book title you want to add to your reading list, your entry must match the text case:\")\n book_selected = df['title'] == user_selection\n if book_selected.any():\n selected_book = df[book_selected]\n print(\"You have chosen:\")\n return selected_book\n else:\n print(\"Sorry there was no match found in the list. Check your entry for spelling errors or if it is in the correct case and select again. \")\n return self.get_user_selection(df)\n\n def adds_selection_to_reading_list(self, selected_book):\n if selected_book is None:\n return self.row_list\n for index, rows in selected_book.iterrows():\n my_list = [rows.title, rows.authors, rows.publisher]\n self.row_list.append(my_list)\n self.display_update_reading_list()\n return self.row_list\n\n\n def asks_user_to_search_again(self):\n search_again = input(\"Would you like to search for another author? Enter y for Yes and n for No:\")\n new_search = None\n if search_again == 'y':\n new_search = self.get_search_result(input(\"Please enter your new author search here:\"), 5)\n json_response_new = self.converts_json_response_to_list((new_search))\n new_dataframe = self.store_list_in_dataframe(json_response_new)\n print(new_dataframe)\n new_book_selection = self.get_user_selection(new_dataframe)\n reading_list = self.adds_selection_to_reading_list(new_book_selection)\n self.asks_user_to_search_again()\n\n else:\n print('This is your current reading list:')\n self.display_update_reading_list()\n print(\"Happy Reading!\")\n\n def display_update_reading_list(self):\n for item in self.row_list:\n print(\"-\", item[0], item[1], item[2])\n\n\nif __name__ == \"__main__\":\n googlebooks = GoogleBooks()\n user_input = input(\"Which author would you like to search for?:\")\n search_result = googlebooks.get_search_result(user_input, 5)\n json_response = googlebooks.converts_json_response_to_list(search_result)\n book_list = googlebooks.store_list_in_dataframe(json_response)\n display_book_list = googlebooks.displays_dataframe(book_list)\n book_to_read = googlebooks.get_user_selection(display_book_list)\n reading_list = googlebooks.adds_selection_to_reading_list(book_to_read)\n search_for_new_book = googlebooks.asks_user_to_search_again()\n","repo_name":"emmassr/Reading-List-GoogleBooksAPI","sub_path":"lib/reading_list.py","file_name":"reading_list.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25672774312","text":"from django.urls import path, include\n\nfrom . 
import views\n\napp_name = 'blog'\n\n\npost_urls = [\n path('/',\n views.PostDetailView.as_view(), name='post_detail'),\n path('create/',\n views.PostCreateView.as_view(), name='create_post'),\n path('/edit/',\n views.PostUpdateView.as_view(), name='edit_post'),\n path('/delete/',\n views.PostDeleteView.as_view(), name='delete_post'),\n path('/comment',\n views.CommentCreateView.as_view(), name='add_comment'),\n path('/edit_comment/',\n views.CommentUpdateView.as_view(), name='edit_comment'),\n path('/delete_comment//',\n views.CommentDeleteView.as_view(), name='delete_comment'),\n]\nurlpatterns = [\n path('', views.PostListView.as_view(), name='index'),\n path('posts/', include(post_urls)),\n path('profile//',\n views.ProfileListView.as_view(),\n name='profile'),\n path('account/edit/',\n views.ProfileUpdateView.as_view(),\n name='edit_profile'),\n path('category//',\n views.CategoryPostListView.as_view(),\n name='category_posts'),\n\n]\n","repo_name":"stanlyzera/django_sprint4","sub_path":"blogicum/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40262164166","text":"\"\"\"NICE model\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributions.transforms import SigmoidTransform, AffineTransform\nfrom torch.distributions import Uniform, TransformedDistribution\nimport numpy as np\n\n\nclass AdditiveCoupling(nn.Module):\n \"\"\"Additive coupling layer.\n \"\"\"\n\n def __init__(self, in_out_dim, mid_dim, hidden, mask_config):\n \"\"\"Initialize an additive coupling layer.\n\n Args:\n in_out_dim: input/output dimensions.\n mid_dim: number of units in a hidden layer.\n hidden: number of hidden layers.\n mask_config: 1 if transform odd units, 0 if transform even units.\n \"\"\"\n super(AdditiveCoupling, self).__init__()\n\n # input size is half of the in_out dimension.\n input_size = in_out_dim // 2\n\n # create a Sequential linear block with ReLu activation.\n self.input_layer = nn.Sequential(\n nn.Linear(input_size, mid_dim),\n nn.ReLU())\n\n # create a ModuleList of Sequential blocks with ReLu activations.\n self.hidden_layers = nn.ModuleList([\n nn.Sequential(\n nn.Linear(mid_dim, mid_dim),\n nn.ReLU()) for i in range(hidden - 1)])\n\n # output size is half the in_out dimension.\n out_size = in_out_dim // 2\n\n # create a linear output block\n self.output_layer = nn.Linear(mid_dim, out_size)\n\n # mask-config determines units to transform on.\n self.mask = mask_config\n\n def forward(self, x, log_det_J, reverse=False):\n \"\"\"Forward pass.\n\n Args:\n x: input tensor.\n log_det_J: log determinant of the Jacobian\n reverse: True in inference mode, False in sampling mode.\n Returns:\n transformed tensor and updated log-determinant of Jacobian.\n \"\"\"\n # reshape x to match NICE shapes.\n size0, size1 = x.size()\n\n x = x.reshape((x.shape[0], x.shape[1] // 2, 2))\n\n # determine units to transform on using mask-config.\n x1, x2 = x[:, :, 1], x[:, :, 0]\n if self.mask:\n x1, x2 = x[:, :, 0], x[:, :, 1]\n\n # transform half of the data using the predefines model\n out = self.input_layer(x2)\n for i in range(len(self.hidden_layers)):\n out = self.hidden_layers[i](out)\n out = self.output_layer(out)\n\n # apply additive function\n if reverse:\n x1 = x1 - out\n else:\n x1 = x1 + out\n\n # return x with proper stack\n x = torch.stack((x2, x1), dim=2)\n if self.mask:\n x = torch.stack((x1, x2), dim=2)\n\n out_param = x.reshape((size0, size1)), 
log_det_J\n return out_param\n\n\nclass AffineCoupling(nn.Module):\n def __init__(self, in_out_dim, mid_dim, hidden, mask_config):\n \"\"\"Initialize an affine coupling layer.\n\n Args:\n in_out_dim: input/output dimensions.\n mid_dim: number of units in a hidden layer.\n hidden: number of hidden layers.\n mask_config: 1 if transform odd units, 0 if transform even units.\n \"\"\"\n super(AffineCoupling, self).__init__()\n\n # mask-config determines units to transform on.\n self.mask = mask_config\n\n # input size is half of the in_out dimension.\n input_size = in_out_dim // 2\n\n # create a Sequential linear block with ReLu activation.\n self.input_layer = nn.Sequential(\n nn.Linear(input_size, mid_dim),\n nn.ReLU())\n\n # create a ModuleList of Sequential blocks with ReLu activations.\n self.hidden_layers = nn.ModuleList([\n nn.Sequential(\n nn.Linear(mid_dim, mid_dim),\n nn.ReLU()) for i in range(hidden - 1)])\n\n # create a linear output block\n self.output_layer = nn.Linear(mid_dim, in_out_dim)\n\n def forward(self, x, log_det_J, reverse=False):\n \"\"\"Forward pass.\n\n Args:\n x: input tensor.\n log_det_J: log determinant of the Jacobian\n reverse: True in inference mode, False in sampling mode.\n Returns:\n transformed tensor and updated log-determinant of Jacobian.\n \"\"\"\n # split data using mask_config\n if self.mask:\n x1, x2 = (x[:, 1::2], x[:, 0::2])\n else:\n x1, x2 = (x[:, 0::2], x[:, 1::2])\n\n out = self.input_layer(x2)\n for i in range(len(self.hidden_layers)):\n out = self.hidden_layers[i](out)\n out = self.output_layer(out)\n\n log_s, t = out[:, 0::2, ...], out[:, 1::2, ...]\n s = torch.sigmoid(log_s)\n\n if reverse:\n x1 = (x1 - t) / s\n else:\n x1 = s * x1 + t\n\n log_det_J = torch.sum(torch.log(torch.abs(s)))\n\n x = torch.ones_like(x)\n if self.mask:\n x[:, 1::2], x[:, 0::2] = (x1, x2)\n else:\n x[:, 1::2], x[:, 0::2] = (x2, x1)\n\n return x, log_det_J\n\n\nclass Scaling(nn.Module):\n \"\"\"Log-scaling layer.\n \"\"\"\n\n def __init__(self, dim):\n \"\"\"Initialize a (log-)scaling layer.\n\n Args:\n dim: input/output dimensions.\n \"\"\"\n super(Scaling, self).__init__()\n self.scale = nn.Parameter(\n torch.zeros((1, dim)), requires_grad=True)\n self.eps = 1e-5\n\n def forward(self, x, reverse=False):\n \"\"\"Forward pass.\n\n Args:\n x: input tensor.\n reverse: True in inference mode, False in sampling mode.\n Returns:\n transformed tensor and log-determinant of Jacobian.\n \"\"\"\n scale = torch.exp(self.scale)\n log_det_j = torch.sum(self.scale) + self.eps\n\n # scale the data by the Jacobian\n if reverse:\n x = x * (scale ** -1)\n else:\n x = x * scale\n\n return x, log_det_j\n\n\nclass NICE(nn.Module):\n \"\"\"NICE main model.\n \"\"\"\n\n def __init__(self, prior, coupling, coupling_type, in_out_dim, mid_dim, hidden, device):\n \"\"\"Initialize a NICE.\n\n Args:\n coupling_type: 'additive' or 'adaptive'\n coupling: number of coupling layers.\n in_out_dim: input/output dimensions.\n mid_dim: number of units in a hidden layer.\n hidden: number of hidden layers.\n device: run on cpu or gpu\n \"\"\"\n super(NICE, self).__init__()\n # choose device\n self.device = device\n\n # choose type of distribution\n if prior == 'gaussian':\n self.prior = torch.distributions.Normal(\n torch.tensor(0.).to(device), torch.tensor(1.).to(device))\n\n elif prior == 'logistic':\n self.prior = TransformedDistribution(Uniform(0, 1),\n [SigmoidTransform().inv, AffineTransform(loc=0., scale=1.)])\n\n else:\n raise ValueError('Prior not implemented.')\n\n self.in_out_dim = in_out_dim\n 
self.mid_dim = mid_dim\n self.coupling = coupling\n self.hidden = hidden\n self.coupling_type = coupling_type\n self.scaling = Scaling(self.in_out_dim)\n\n # choose coupling type\n if self.coupling_type == \"additive\":\n self.coupling = nn.ModuleList([\n AdditiveCoupling(in_out_dim=self.in_out_dim,\n mid_dim=self.mid_dim,\n hidden=self.hidden,\n mask_config=i % 2) for i in range(coupling)])\n\n elif self.coupling_type == 'affine':\n self.coupling = nn.ModuleList([\n AffineCoupling(in_out_dim=self.in_out_dim,\n mid_dim=self.mid_dim,\n hidden=self.hidden,\n mask_config=i % 2) for i in range(coupling)])\n\n else:\n raise ValueError('Coupling type not implemented.')\n\n def f_inverse(self, z):\n \"\"\"Transformation g: Z -> X (inverse of f).\n\n Args:\n z: tensor in latent space Z.\n Returns:\n transformed tensor in data space X.\n \"\"\"\n # reverse scaling\n x, _ = self.scaling(z, reverse=True)\n # reverse coupling layers\n for i in reversed(range(len(self.coupling))):\n x, _ = self.coupling[i](x, 0, reverse=True)\n # return reversed value -> x\n return x\n\n def f(self, x):\n \"\"\"Transformation f: X -> Z (inverse of g).\n\n Args:\n x: tensor in data space X.\n Returns:\n transformed tensor in latent space Z and log determinant Jacobian\n \"\"\"\n # initiate log det of Jacobian\n log_det_j = 0\n # pipe into the coupling layers\n for i in range(len(self.coupling)):\n x, ldj = self.coupling[i](x, log_det_j)\n log_det_j += ldj\n\n # return scaled x and log det of Jacobian\n x, ldj = self.scaling(x, reverse=False)\n log_det_j += ldj\n\n return x, log_det_j\n\n def log_prob(self, x):\n \"\"\"Computes data log-likelihood.\n\n (See Section 3.3 in the NICE paper.)\n\n Args:\n x: input minibatch.\n Returns:\n log-likelihood of input.\n \"\"\"\n z, log_det_J = self.f(x)\n log_det_J -= np.log(256) * self.in_out_dim # log det for rescaling from [0.256] (after dequantization) to [0,1]\n log_ll = torch.sum(self.prior.log_prob(z), dim=1)\n return log_ll + log_det_J\n\n def sample(self, size):\n \"\"\"Generates samples.\n\n Args:\n size: number of samples to generate.\n Returns:\n samples from the data space X.\n \"\"\"\n z = self.prior.sample((size, self.in_out_dim)).to(self.device)\n return self.f_inverse(z)\n\n def forward(self, x):\n \"\"\"Forward pass.\n\n Args:\n x: input minibatch.\n Returns:\n log-likelihood of input.\n \"\"\"\n return self.log_prob(x)\n","repo_name":"ofek181/NICE","sub_path":"nice.py","file_name":"nice.py","file_ext":"py","file_size_in_byte":10010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21492879986","text":"import numpy as np\nimport datetime, sys, pymysql, time\n\nlist1 = [1, 2, 3, 4, 5]\ntotal = sum(list1)\navg = total / len(list1)\nprint(avg)\n\n# zero = np.zeros([3, 3], dtype=np.uint8)\n# print(zero[0, :])\n# print(zero.ndim)\n# print(zero.shape)\n# print(zero.dtype)\n\n# list2 = np.array(list1, dtype=np.float)\n# print(list2.dtype)\n# print(list2)\n\n# a = np.array([[1,2,3],[4,5,6]])\n# # 从现有的数组当中创建\n# a1 = np.array(a)\n# # 相当于索引的形式,并没有真正的创建一个新的\n# a2 = np.asarray(a)\n\n# print(a2)\n\n# ls = np.logspace(0, 100, num=10)\n# print(ls)\n\n\"\"\" \nint8 signed -2^(8-1) ~ 2^(8-1) - 1\nint16 signed -2^(16-1) ~ 2^(16-1) - 1\nint32 signed -2^(32-1) ~ 2^(32-1) - 1\nint64 signed -2^(64-1) ~ 2^(64-1) - 1\nuint8 unsigned 0 ~ 2^8 - 1\nuint16 unsigned 0 ~ 2^16 - 1\nuint32 unsigned 0 ~ 2^32 - 1\nuint64 unsigned 0 ~ 2^64 - 1\n\"\"\"\n\n# simple:np.ndarray = np.array([4, 1, 5, 6, 9, 7])\n# m = simple.mean() # 均值\n# v = 
simple.var() # 方差\n# s = simple.std() # 标准差\n# mi = simple.min() # 获取最小值\n# mx = simple.max() # 获取最大值\n# print(m)\n# print(v)\n# print(s)\n# print(mi)\n# print(mx)\n\ndef GeneratorDate(s_year, s_month, s_day, e_year, e_month, e_day, exculdeWeekday=True) -> list:\n start_year = s_year\n start_month = s_month\n start_day = s_day\n date_list = []\n while True:\n try:\n d = datetime.date(start_year, start_month, start_day)\n start_day += 1\n if exculdeWeekday:\n if d.isoweekday() == 6 or d.isoweekday() == 7:\n if d.year == e_year and d.month == e_month and d.day == e_day:\n break\n continue\n date_list.append(d)\n if d.year == e_year and d.month == e_month and d.day == e_day:\n break\n except ValueError:\n err_info = str(sys.exc_info()[1])\n if err_info[:3] == 'day':\n start_day = 1\n start_month += 1\n if err_info[:3] == 'mon':\n start_month = 1\n start_year += 1\n return date_list\n\nstock_day_rise:np.ndarray = np.random.normal(0, 1, (1631, 522))\ndate_list: np.ndarray = np.array(GeneratorDate(2018, 1, 1, 2019, 12, 31))\n\nstock_list = []\ntry:\n conn = pymysql.Connect('localhost', 'root', 'vkchow', 'bilibili')\n cur:pymysql.cursors.Cursor = conn.cursor()\n row = 1631\n if row % 100 == 0:\n page_num = row / 100\n else:\n page_num = row // 100 + 1\n for i in range(page_num):\n sql = 'select * from new_stock_final limit {0}, {1}'.format(\n i * 100,\n 100\n )\n cur.execute(sql)\n data = cur.fetchall()\n for v in data:\n stock_list.append(list(v))\n\n print(len(stock_list))\n conn.close()\nexcept:\n sys.exit(1)\n\nstock_final = []\ncount = 0\nfor row in stock_day_rise:\n date_count = 0\n for col in row:\n num = stock_list[count][0]\n name = stock_list[count][1]\n d = date_list[date_count]\n rise = col\n stock_final.append([num, name, d, rise])\n date_count = date_count + 1\n count = count + 1\n\nprint('链接数据库...')\ntry:\n conn = pymysql.Connect('localhost', 'root', 'vkchow', 'bilibili')\n print('数据库链接成功')\n start = time.time()\n print('开始执行数据库写入')\n # 获取游标\n cur: pymysql.cursors.Cursor = conn.cursor()\n # 游标执行excute(sql语句)\n count = 1\n for v in stock_final:\n if count % 100 == 0:\n conn.commit()\n print('已提交:{}条'.format(count))\n sql = 'insert into stock_two_years_data(`update`, `num`, `stock_name`, `rise`) values(\"{0}\", {1}, \"{2}\", {3})'.format(v[2], v[0], v[1], v[3])\n # print(sql)\n cur.execute(sql)\n count = count + 1\n print('数据库写入完毕,耗时:{:.4f}秒'.format(time.time() - start))\n conn.close()\n print('数据库链接关闭...成功')\nexcept:\n print(sys.exc_info()[1])\n sys.exit(1)","repo_name":"gemark/numpy-note","sub_path":"numpy-002/numpy01.py","file_name":"numpy01.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14919297396","text":"# Any problem that asks to find the top/smallest/frequent\n# 'K' elements amond a given set falls under this pattern.\n\n## EXAMPLE PROBLEM ##\n# Given an unsorted array of numbers, find the 'K' largest\n# numbers in it.\n\n## Brute force approach:\n# Sort the array and return the largest K numbers\n# Time: O(n logn) \n\n######################################################\n\n## Heap approach:\n\nfrom heapq import *\n\ndef find_k_largest_nums(nums, k):\n minHeap = []\n # put first 'K' numbers in the min heap\n for i in range(k):\n heappush(minHeap, nums[i])\n \n # go thru the remaining nums of the arr, if the number from the array\n # is bigger than the top(smallest) number of the min-heap, remove the top num from \n # heap and add the number from array\n for i in 
range(k, len(nums)):\n if nums[i] > minHeap[0]:\n heappop(minHeap)\n heappush(minHeap, nums[i])\n \n # the heap has the top 'K' numbers, return them in a list\n return list(minHeap)\n\ndef main():\n\n print(\"Here are the top K numbers: \" +\n str(find_k_largest_nums([3, 1, 5, 12, 2, 11], 3)))\n\n print(\"Here are the top K numbers: \" +\n str(find_k_largest_nums([5, 12, 11, -1, 12], 3)))\n\nmain()\n\n# Time: O(N logK)\n# Space: O(K) top K numbers in the heap","repo_name":"ciciswann/problem-solving-patterns","sub_path":"top-k-elem.py","file_name":"top-k-elem.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5972037364","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom colors import color\nfrom pylibs import influxdb\nfrom pylibs import utils\nimport argparse\nimport os\nimport time\n\nSEC_IN_DAY = 60 * 60 * 24 # number of seconds in a day\nMAX_CERT_AGE = 90 # maximum certificate age in days\nTHRESHOLD = 25 # minimum time before expiration alert\n\n\ndef save_to_influxdb(timestamp, domain, check_result: bool):\n try:\n json_body = [{\n \"time\": timestamp,\n \"measurement\": \"monitoring-certificate\",\n \"tags\": {'domain': domain},\n \"fields\": {'check_result': check_result}\n }]\n client = influxdb.InfluxDBClient(args.influxdb_host, args.influxdb_port, args.influxdb_user,\n args.influxdb_password, args.influxdb_database)\n client.write_points(json_body, time_precision='s')\n utils.message('Domain {}, check result {} was saved to InfluxDB on timestamp {}'.\n format(domain, check_result, timestamp))\n except BaseException:\n utils.message('Error saving domain {}, check result {} to InfluxDB on timestamp {}!'.\n format(domain, check_result, timestamp))\n\n\ndef print_check_result(domain, age_file_check, age_cert_check, check_result: bool):\n if check_result:\n print(color('[PASS] {}, file age check {}, cert age check {}, result: {}'.format(\n domain, age_file_check, age_cert_check, check_result), fg='white', bg='green', style='bold'))\n else:\n print(color('[FAIL] {}, file age check {}, cert age check {}, result: {}'.format(\n domain, age_file_check, age_cert_check, check_result), fg='white', bg='red', style='bold'))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Get SSL certificates expiration info')\n\n parser.add_argument('-d', '--domains', nargs='+', metavar='example.com example.org', default=os.environ.get(\n 'DOMAINS', []), help='Domains to check certificates on. Use whitespace to separate domains. '\n 'Do not use schemes (http, https). If this option is not set, '\n 'DOMAINS environment variable is used')\n\n parser.add_argument('-p', '--path', nargs=1, metavar='PATH', default=os.environ.get(\n 'CERTBOT_ETC_PATH', os.environ.get('LETSENCRYPT_ETC_PATH', ['/etc/letsencrypt'])),\n help='Path to certbot/letsencrypt certificate directory. 
If not passed, '\n 'CERTBOT_ETC_PATH or LETSENCRYPT_ETC_PATH environment variable are used '\n '(CERTBOT_ETC_PATH take precedence), if none of them are present, '\n '/etc/letsencrypt used as default')\n\n parser.add_argument('--save-to-influxdb', action='store_true', help='Save domains check results to influxdb '\n 'or just output them to console')\n\n influxdb.add_influxdb_options(parser)\n\n args = parser.parse_args()\n\n domains_traversed = {} # we will set True for domain traversed in filesystem loop\n t = time.time()\n file_threshold = SEC_IN_DAY * (MAX_CERT_AGE - THRESHOLD)\n for entry in os.scandir(args.path[0].rstrip(os.sep) + '/live'):\n if not entry.name.startswith('.') and entry.is_dir():\n age_file_check = t - entry.stat().st_mtime < file_threshold\n age_cert_check = utils.get_cert_expiration_timestamp(entry.name) - t > THRESHOLD\n check_result = age_file_check and age_cert_check\n domains_traversed[entry.name] = True\n if args.save_to_influxdb:\n save_to_influxdb(t, entry.name, check_result)\n else:\n print_check_result(entry.name, age_file_check, age_cert_check, check_result)\n\n for domain in args.domains:\n if domain not in domains_traversed:\n age_cert_check = utils.get_cert_expiration_timestamp(domain) - t > THRESHOLD\n if args.save_to_influxdb:\n save_to_influxdb(t, domain, age_cert_check)\n else:\n print_check_result(domain, 'unavailable', age_cert_check, age_cert_check)\n","repo_name":"andyceo/bash_scripts","sub_path":"monitoring-certificate/monitoring-domain.py","file_name":"monitoring-domain.py","file_ext":"py","file_size_in_byte":4196,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"} +{"seq_id":"13715471288","text":"import sys\nfrom urllib.request import urlopen\n\ndef fetch_words(url = 'http://sixty-north.com/c/t.txt'): \n story = urlopen(url)\n story_words = []\n for line in story:\n line_words = line.split()\n for word in line_words:\n story_words.append(word.decode('utf8'))\n story.close()\n return story_words\n\ndef print_items(items):\n for word in items:\n print(word)\n\ndef main(url):\n words = fetch_words(url)\n print_items(words)\n\nif __name__ == '__main__':\n main(sys.argv[1])","repo_name":"yuriy-wellsky/Learning-Python","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38202570950","text":"from django.conf.urls import patterns, include, url\nfrom django.conf import settings\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n url(r'^$', 'news.views.home', name='home'),\n url(r'^noticia/(?P\\d+)', 'news.views.noticia', name='noticia'),\n url(r'^noticias/(?P\\w+)', 'news.views.noticias_categoria', name='noticias_categoria'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^static-files/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n)\n","repo_name":"paulovianna/tcc-jornalismo","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13576514032","text":"from tkinter import *\r\nimport Back\r\n\r\ndata = Back.Database()\r\n\r\nprint(data.select())\r\n\r\n\r\ndef add_entry():\r\n list.delete(1, END)\r\n data.insert(title.get(), author.get(), year.get(), isbn.get())\r\n list.insert(END, (title.get(), author.get(), year.get(), isbn.get()))\r\n\r\n\r\ndef 
clear_all_entries():\r\n title_entry.delete(0, END)\r\n author_entry.delete(0, END)\r\n year_entry.delete(0, END)\r\n isbn_entry.delete(0, END)\r\n\r\n\r\ndef search_entry():\r\n list.delete(0, END)\r\n for row in data.search(title.get(), author.get(), year.get(), isbn.get()):\r\n list.insert(END, row)\r\n\r\n\r\ndef view_all():\r\n list.delete(0, END)\r\n for row in data.select():\r\n list.insert(END, row)\r\n\r\n\r\ndef get_selected_row(event):\r\n global selected_tuple\r\n index = list.curselection()[0]\r\n selected_tuple = list.get(index)\r\n title_entry.delete(0, END)\r\n title_entry.insert(END, selected_tuple[1])\r\n author_entry.delete(0, END)\r\n author_entry.insert(END, selected_tuple[2])\r\n year_entry.delete(0, END)\r\n year_entry.insert(END, selected_tuple[3])\r\n isbn_entry.delete(0, END)\r\n isbn_entry.insert(END, selected_tuple[4])\r\n\r\n\r\ndef delete_entry_command():\r\n data.delete(selected_tuple[0])\r\n\r\n\r\ndef update_entry_command():\r\n data.update(selected_tuple[0], title.get(),\r\n author.get(), year.get(), isbn.get())\r\n\r\n\r\nwindow = Tk()\r\n\r\nwindow.wm_title(\"Book Store\")\r\nl1 = Label(window, text='Title')\r\nl1.grid(row=0, column=0)\r\n\r\ntitle = StringVar()\r\ntitle_entry = Entry(window, textvariable=title)\r\ntitle_entry.grid(row=0, column=1)\r\n\r\nl2 = Label(window, text='Author')\r\nl2.grid(row=0, column=2)\r\n\r\nauthor = StringVar()\r\nauthor_entry = Entry(window, textvariable=author)\r\nauthor_entry.grid(row=0, column=3)\r\n\r\nl3 = Label(window, text='Year')\r\nl3.grid(row=1, column=0)\r\n\r\nyear = StringVar()\r\nyear_entry = Entry(window, textvariable=year)\r\nyear_entry.grid(row=1, column=1)\r\n\r\nl4 = Label(window, text='ISBN')\r\nl4.grid(row=1, column=2)\r\n\r\nisbn = StringVar()\r\nisbn_entry = Entry(window, textvariable=isbn)\r\nisbn_entry.grid(row=1, column=3)\r\n\r\nlist = Listbox(window, height=6, width=35)\r\nlist.grid(row=2, column=0, rowspan=6, columnspan=2)\r\n\r\nsb = Scrollbar(window)\r\nsb.grid(row=2, column=2, rowspan=6)\r\n\r\nlist.configure(yscrollcommand=sb.set)\r\nsb.configure(command=list.yview)\r\n\r\nlist.bind('<>', get_selected_row)\r\n\r\nb1 = Button(window, text='View All', width=12, command=view_all)\r\nb1.grid(row=2, column=3)\r\n\r\nb2 = Button(window, text='Search Entry', width=12, command=search_entry)\r\nb2.grid(row=3, column=3)\r\n\r\nb3 = Button(window, text='Add Entry', width=12, command=add_entry)\r\nb3.grid(row=4, column=3)\r\n\r\nb4 = Button(window, text='Update', width=12, command=update_entry_command)\r\nb4.grid(row=5, column=3)\r\n\r\nb5 = Button(window, text='Delete', width=12, command=delete_entry_command)\r\nb5.grid(row=6, column=3)\r\n\r\nb6 = Button(window, text='Clear Entries', width=12, command=clear_all_entries)\r\nb6.grid(row=7, column=3)\r\n\r\nwindow.mainloop()\r\n","repo_name":"shubhamgogri/Books-Store-","sub_path":"front.py","file_name":"front.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72437120400","text":"import tkinter as tki\nimport tkinter.ttk as ttk\n\n\nclass Words:\n \"\"\"Words component class.\n Displays valid founded words\n \"\"\"\n def __init__(self, root):\n \"\"\"Init function\"\"\"\n self._root = root\n self.words = tki.StringVar(self._root)\n self.word_list = ttk.Label(self._root, textvariable=self.words)\n self.word_list.configure(padding=4)\n self.word_list.grid(row=6, columnspan=4)\n\n def add_word(self, word: str) -> None:\n \"\"\"Adds a new word to used 
words\"\"\"\n words = self.words.get().split(\"\\n\")\n if word not in words:\n self.words.set(value=self.words.get() + \"\\n\" + word)\n\n def reset(self) -> None:\n \"\"\"Resets used words for a new game\"\"\"\n self.words.set(value=\"\")\n\n","repo_name":"yuvalNakav/ex11","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43665820996","text":"import numpy as np\n\n\ndef calculate_prune_dict(file_name='tmp/head_pruning/prune_single_head.csv', k=6):\n acc = np.loadtxt(open(file_name,'rb'), delimiter=',')\n idx = np.argsort(acc, axis=1)[:,:k]\n return_dict = {layer_idx:idx[layer_idx].tolist() for layer_idx in range(idx.shape[0])}\n return return_dict\n\nif __name__ == \"__main__\":\n print(calculate_prune_dict())\n\n","repo_name":"ChandlerGuan/blockskim","sub_path":"src/utils/calculate_prune_dict.py","file_name":"calculate_prune_dict.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"762308906","text":"import tkinter as tk\r\nfrom tkinter import filedialog\r\n\r\ndef open_file():\r\n filepath = filedialog.askopenfilename(filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")])\r\n if filepath:\r\n with open(filepath, \"r\") as file:\r\n text.delete(1.0, tk.END)\r\n text.insert(tk.END, file.read())\r\n\r\ndef save_file():\r\n filepath = filedialog.asksaveasfilename(defaultextension=\".txt\", filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")])\r\n if filepath:\r\n with open(filepath, \"w\") as file:\r\n file.write(text.get(1.0, tk.END))\r\n\r\ndef cut_text():\r\n text.event_generate(\"<>\")\r\n\r\ndef copy_text():\r\n text.event_generate(\"<>\")\r\n\r\ndef paste_text():\r\n text.event_generate(\"<>\")\r\n\r\ndef select_all_text():\r\n text.tag_add(tk.SEL, \"1.0\", tk.END)\r\n text.mark_set(tk.INSERT, \"1.0\")\r\n text.see(tk.INSERT)\r\n return 'break'\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Text Editor\")\r\n\r\ntext = tk.Text(root)\r\ntext.pack()\r\n\r\nmenu = tk.Menu(root)\r\nroot.config(menu=menu)\r\n\r\nfile_menu = tk.Menu(menu)\r\nmenu.add_cascade(label=\"File\", menu=file_menu)\r\nfile_menu.add_command(label=\"Open\", command=open_file)\r\nfile_menu.add_command(label=\"Save\", command=save_file)\r\nfile_menu.add_separator()\r\nfile_menu.add_command(label=\"Exit\", command=root.quit)\r\n\r\nedit_menu = tk.Menu(menu)\r\nmenu.add_cascade(label=\"Edit\", menu=edit_menu)\r\nedit_menu.add_command(label=\"Cut\", command=cut_text)\r\nedit_menu.add_command(label=\"Copy\", command=copy_text)\r\nedit_menu.add_command(label=\"Paste\", command=paste_text)\r\nedit_menu.add_command(label=\"Select All\", command=select_all_text)\r\n\r\nroot.mainloop()\r\n","repo_name":"Madalacharitavya/TextEditor-Using-Python","sub_path":"TextEditor.py","file_name":"TextEditor.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5244814781","text":"import os\nimport json\nimport datetime\nimport requests\nimport threading\n\nimport common\nfrom paths import PathFinder\n\nclass ArukanaDownloader:\n\n def __init__(self):\n path_finder = PathFinder()\n self.download_folder = path_finder.get_arukana_folder()\n self.history_path = path_finder.get_arukana_download_history_path()\n self.url = 'http://dl.sega-pc.com/chruser/Resource/Card/'\n self.token = 
datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')\n self.json = None\n self.log_lock = threading.Lock()\n self.thread_list = []\n self.force_list = [1, 3, 14, 1003, 3002, 4001, 4003, 4016, 4017,\n 4022, 5003, 6008, 7501, 7508]\n\n def download_all(self, cid_range):\n for cid in cid_range:\n download_thread = threading.Thread(target=self.download,\n args=(range(cid, cid+1)))\n download_thread.start()\n self.thread_list.append(download_thread)\n if len(self.thread_list)==20:\n for td in self.thread_list:\n if td.is_alive():\n td.join()\n self.thread_list.clear()\n\n def download(self, cid):\n force_download = False\n if cid in self.force_list:\n force_download = True\n\n local_path = os.path.join(self.download_folder,\n '{0:05d}.scr'.format(cid))\n if not force_download:\n if os.path.exists(local_path):\n # print(\"download skipped: {0}\".format(cid))\n return 304\n\n #print(\"download started: {0}\".format(cid))\n download_url = '{0}cha_2d_card_{1:05d}.scr'.format(self.url, cid)\n retry_count = 10\n while retry_count > 0:\n retry_count -= 1\n try:\n download_response = requests.get(download_url, stream=True,\n timeout=180)\n ret_code = download_response.status_code\n if ret_code != 200:\n #print(\"downaload failed: {0}:{1}\".format(cid, ret_code))\n return ret_code\n with open(local_path, 'wb') as local_fd:\n for chunk in download_response.iter_content(1024):\n local_fd.write(chunk)\n if not force_download:\n self.log_history(cid)\n print('Download successfully: {0}'.format(cid))\n return ret_code\n except requests.RequestException:\n continue\n print('Download timeout: {0}'.format(cid))\n return ret_code\n\n def init_history(self):\n if os.path.exists(self.history_path):\n return\n with open(self.history_path, 'wt', encoding='utf-8') as history_fd:\n json.dump({self.token: []}, history_fd)\n\n def log_history(self, cid):\n if self.log_lock.acquire(True, timeout=10):\n self.init_history()\n if (self.json is None):\n with open(self.history_path, 'rt', encoding='utf-8') \\\n as history_fd:\n self.json = json.load(history_fd)\n if self.json.get(self.token) is None:\n self.json[self.token] = [cid]\n else:\n self.json.get(self.token).append(cid)\n with open(self.history_path, 'wt', encoding='utf-8') \\\n as history_fd:\n json.dump(self.json, history_fd)\n self.log_lock.release()\n\nif __name__ == \"__main__\":\n test_downloader = ArukanaDownloader()\n test_downloader.download_all(range(1,70001))\n # test_downloader.download(1063)\n # for cid in range(1, 70001):\n # test_downloader.download(cid)\n","repo_name":"deepwellice/ChainChronicle","sub_path":"parser/scripts/arukana.py","file_name":"arukana.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36422347710","text":"import hashlib\nfrom datetime import datetime\n\n\nclass Block:\n\n def __init__(self, timestamp, data):\n self.timestamp = timestamp\n self.data = data\n self.previous_hash = None\n self.hash = self.calc_hash()\n\n def calc_hash(self):\n sha = hashlib.sha256()\n\n hash_str = self.data.encode(\n 'utf-8')\n\n sha.update(hash_str)\n\n return sha.hexdigest()\n\n def __repr__(self):\n s = \"Timestamp: \" + self.timestamp.strftime(\"%m/%d/%Y, %H:%M:%S\")\n s += \"\\nData: \"+self.data\n s += \"\\nSHA256 Hash: \"+self.hash\n if self.previous_hash is not None:\n s += \"\\nPrivious Hash: \"+self.previous_hash\n else:\n s += \"\\nPrivious Hash: None\"\n return s\n\n\nclass Blockchain:\n def __init__(self, timestamp, data):\n root_block = 
Block(timestamp, data)\n self.current_block = root_block\n\n def add_block(self, block):\n block.previous_hash = self.current_block.hash\n self.current_block = block\n\n def get_current_block(self):\n return self.current_block\n\n# Add your own test cases: include at least three test cases\n# and two of them must include edge cases, such as null, empty or very large values\n\nprint(\"--------------------------------------------------------\")\nprint(\"#Block root: \")\nblock_chain = Blockchain(datetime.now(), \"information of root block data\")\nprint(block_chain.get_current_block())\n\n# Test Case 1\nblock1 = Block(datetime.now(), 'information-block1')\nblock_chain.add_block(block1)\nprint(\"--------------------------------------------------------\")\nprint(\"#Block 1: \")\nprint(block_chain.get_current_block())\n# Test Case 2\nblock2 = Block(datetime.now(), 'information-block2')\nblock_chain.add_block(block2)\nprint(\"--------------------------------------------------------\")\nprint(\"#Block 2: \")\nprint(block_chain.get_current_block())\n# Test Case 3\nblock3 = Block(datetime.now(), 'information-block3')\nblock_chain.add_block(block3)\nprint(\"--------------------------------------------------------\")\nprint(\"#Block 3: \")\nprint(block_chain.get_current_block())\n","repo_name":"NguyenTranQuocThang/Project_DataStructure_02","sub_path":"Project_DataStructure_02/problem5/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14268094209","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"usbipy\",\n version=\"0.0.1\",\n author=\"Filippo Valle\",\n author_email=\"fvalle@elemento.cloud\",\n description=\"Package to use usbip\",\n license=\"GPL\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/fvalle1/usbipy\",\n packages=setuptools.find_packages(),\n py_modules=[\"usbipy\"],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GPL License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=[\"argparse\"],\n dependency_links=[],\n python_requires='>=3.8',\n)\n","repo_name":"Elemento-Modular-Cloud/usbipy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70940902481","text":"import time\n\nfrom django.contrib.auth.models import User\nfrom django.forms import widgets\nfrom django.utils.log import getLogger\n\nfrom rest_framework import serializers\n\nfrom .models import Units, Location, Manufacturer, TimeStamp, DataValue,\\\n DeviceInstance, PhysicalSignal, Device, DeviceGateway\n\nlogger = getLogger(\"app\")\n#######################################################################################\n\nclass UserSerializer(serializers.ModelSerializer):\n device_instance = serializers.PrimaryKeyRelatedField(many=True)\n class Meta:\n model = User\n fields = ('id', 'username', 'device_instance')\n\nclass UnitsSerializer(serializers.Serializer):\n \"\"\"\n Adaptation of tutorial example to Units.\n based on http://django-rest-framework.org/tutorial/1-serialization.html\n \"\"\"\n pk = serializers.Field() # Note: `Field` is an untyped read-only field.\n \n name = serializers.CharField(max_length=25)\n symbol = 
serializers.CharField(max_length=30, blank=True)\n system = serializers.CharField(max_length=8, blank=True)\n notes = serializers.CharField(blank=True)\n \n def restore_object(self, attrs, instance=None):\n \"\"\"\n Create or update a new snippet instance, given a dictionary\n of deserialized field values.\n\n Note that if we don't define this method, then deserializing\n data will simply return a dictionary of items.\n \"\"\"\n if instance:\n # Update existing instance\n instance.name = attrs.get('name', instance.name)\n instance.symbol = attrs.get('symbol', instance.symbol)\n instance.system = attrs.get('system', instance.system)\n instance.notes = attrs.get('notes', instance.notes) \n return instance\n\n # Create new instance\n return Units(**attrs)\n\nclass UnitsSerializer2(serializers.ModelSerializer):\n class Meta:\n model = Units\n\nclass LocationSerializer(serializers.ModelSerializer):\n class Meta:\n model = Location\n\nclass ManufacturerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Manufacturer\n fields = ('id', 'name', 'url', 'notes')\n\nclass TimeStampSerializer(serializers.ModelSerializer):\n class Meta:\n model = TimeStamp\n fields = ('id', 'server_timestamp', 'measurement_timestamp')\n\nclass PhysicalSignalSerializer(serializers.ModelSerializer):\n class Meta:\n model = PhysicalSignal\n\nclass DeviceSerializer(serializers.ModelSerializer):\n class Meta:\n model = Device\n\nclass DeviceGatewaySerializer(serializers.ModelSerializer):\n class Meta:\n model = DeviceGateway\n # fields = ('id', 'name', 'address','port','protocol','url','mac_address','active','process_name','process_pid','description')\n\nclass DeviceInstanceSerializer(serializers.ModelSerializer):\n #user = serializers.Field(source='user.username')\n #device = serializers.Field(source='device.device_name')\n class Meta:\n model = DeviceInstance\n # fields = ('id','user', 'device','gateway','accept_from_gateway_only',\\\n # 'location','physical_signal','update_rate','active','private','serial_number')\n # fields = ('id','user', 'accept_from_gateway_only','update_rate','active','private','serial_number')\n\nclass DataValueSerializer(serializers.ModelSerializer):\n # device_instance = serializers.Field(source='device_instance')\n class Meta:\n model = DataValue\n # fields = ('data_timestamp', 'device_instance','value')\n\nclass DataValuePairSerializer(serializers.ModelSerializer):\n data_timestamp = serializers.Field(source='data_timestamp.measurement_timestamp')\n # device_instance = serializers.Field(source='device_instance.serial_number')\n # device = serializers.Field(source='device_instance.device')\n value = serializers.Field(source='get_value_pair')\n class Meta:\n model = DataValue\n # fields = ('data_timestamp','value')\n fields = ('value',)\n\nclass DataValuePairSerializer2(serializers.Serializer):\n \"\"\"\n Adaptation of tutorial example to Units.\n based on http://django-rest-framework.org/tutorial/1-serialization.html\n \"\"\"\n \n data_timestamp = serializers.DateTimeField()\n value = serializers.FloatField()\n\n @property\n def data(self):\n\n to = time.time()\n logger.debug(\"Found %d items matching criteria \" % self.object.count())\n values_list = self.object.values_list('data_timestamp__measurement_timestamp','value') \n v = self.object.values_list('value',flat=True)\n t = self.object.values_list('data_timestamp__measurement_timestamp',flat=True)\n logger.debug(\"Making the list = %.3f\" % (time.time() - to))\n\n logger.debug(\"Preparing array for json transmision\")\n data = 
[]\n for data_pt in values_list: \n data.append([time.mktime(data_pt[0].timetuple()),data_pt[1]])\n\n logger.debug(\"Making value pair list = %.3f\" % (time.time() - to))\n return data\n\n","repo_name":"borand/sensoredweb","sub_path":"sensordata/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"26722283189","text":"from Trainer import Trainer\nfrom models.BERTModel import BERTModel\nfrom data.UniRefDataLoader import UniRefDataLoader\nfrom comp.TokenizerBERT import TokenizerBERT\nimport os\nimport torch.optim as optim\nfrom torch.nn import CrossEntropyLoss\nimport torch\nfrom metrics.Metrics import Metrics\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom datetime import datetime\nimport json\n\n\nclass BERT_Trainer(Trainer):\n def __init__(self, learning_rate, max_sequence_length, embedding_dim, hidden_dim, dropout_prob, num_heads, num_layers, vocab_length, batch_size, epochs, checkpoint_paths, vocab_path, data_path):\n tokenizer = TokenizerBERT(vocab_path, max_sequence_length)\n data = UniRefDataLoader(data_path, batch_size, tokenizer, max_sequence_length)\n self.train_loader = data.get_train_data_loader()\n self.test_loader = data.get_test_data_loader() # This will be used as validation set\n self.model = BERTModel(max_sequence_length, embedding_dim, hidden_dim, dropout_prob, num_heads, num_layers, vocab_length)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.model.to(self.device)\n self.log_interval = 1_000\n self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)\n self.pad_token_id = 1\n self.epochs = epochs\n self.checkpoint_paths = checkpoint_paths\n self.hyperparameters = {\n 'max_sequence_length': max_sequence_length,\n 'embedding_dim': embedding_dim,\n 'hidden_dim': hidden_dim,\n 'dropout_prob': dropout_prob,\n 'num_heads': num_heads,\n 'num_layers': num_layers,\n 'vocab_length': vocab_length,\n 'batch_size': batch_size,\n 'epochs': epochs\n }\n self.current_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.metric_path, self.weights_path = self.make_dirs()\n self.metrics = Metrics(self.metric_path)\n \n\n def make_dirs(self):\n folder_path = os.path.join(self.checkpoint_paths, self.current_time)\n os.mkdir(folder_path)\n metric_path = os.path.join(folder_path, \"metrics\")\n weights_path = os.path.join(folder_path, \"weights\")\n config_path = os.path.join(folder_path, \"configs.json\")\n os.mkdir(metric_path)\n os.mkdir(weights_path)\n with open(config_path, 'w') as file:\n json.dump(self.hyperparameters, file)\n return metric_path, weights_path\n\n def save_checkpoint(self, epoch, filename=\"checkpoint.pth.tar\"):\n state = {'epoch': epoch, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict()}\n torch.save(state, filename)\n\n def calculate_accuracy_and_f1(self, outputs, targets):\n predictions = torch.argmax(outputs, dim=1)\n accuracy = accuracy_score(targets.cpu(), predictions.cpu())\n f1 = f1_score(targets.cpu(), predictions.cpu(), average='weighted')\n return accuracy, f1\n\n def evaluate(self, data_loader):\n self.model.eval()\n total_loss, total_accuracy, total_f1 = 0, 0, 0\n criterion = CrossEntropyLoss(ignore_index=-100)\n with torch.no_grad():\n for data, target in data_loader:\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n output = output.view(-1, self.model.vocab_size)\n target 
= target.view(-1)\n loss = criterion(output, target)\n accuracy, f1 = self.calculate_accuracy_and_f1(output, target)\n total_loss += loss.item()\n total_accuracy += accuracy\n total_f1 += f1\n return total_loss / len(data_loader), total_accuracy / len(data_loader), total_f1 / len(data_loader)\n\n def train(self):\n print(\"Starting training\")\n criterion = CrossEntropyLoss(ignore_index=-100)\n for epoch in range(self.epochs):\n print(f\"{epoch} Epoch Ran\")\n self.model.train()\n for batch_idx, (data, target) in enumerate(self.train_loader):\n data, target = data.to(self.device), target.to(self.device)\n self.optimizer.zero_grad()\n output = self.model(data)\n output = output.view(-1, self.model.vocab_size)\n target = target.view(-1)\n loss = criterion(output, target)\n loss.backward()\n self.optimizer.step()\n if batch_idx % self.log_interval == 0:\n print(f'Train Epoch: [{batch_idx * len(data)}/{len(self.train_loader.dataset)} ({100. * batch_idx / len(self.train_loader):.0f}%)]\\tLoss: {loss.item():.6f}')\n val_loss, val_accuracy, val_f1 = self.evaluate(self.test_loader)\n print(f'Epoch {epoch} - Validation Loss: {val_loss:.4f}, Validation Accuracy: {val_accuracy:.4f}, Validation F1: {val_f1:.4f}')\n self.metrics.track_loss_point(loss.item(), val_loss)\n self.metrics.track_accuracy_point(val_accuracy, val_accuracy)\n self.metrics.track_f1_point(val_f1, val_f1)\n weights = os.path.join(self.weights_path, f\"checkpoint_epoch_{epoch}.pth.tar\")\n self.save_checkpoint(epoch, filename=weights)\n self.metrics.save_loss_plot()\n self.metrics.save_accuracy_plot()\n\n\n# Hyperparameters\nlearning_rate = 1e-4\nembedding_dim = 512\nnum_encoder_layers = 3\nmax_sequence_length = 500\nhidden_dim = 2048\ndropout_prob = 0.1\nnum_heads = 8\nvocab_length = 30\nbatch_size = 10\nepochs = 11\ncheckpoint_path = \"./checkpoints/\"\nvocab_path = \"comp/vocab.json\"\ndata_path = \"data/raw_data/100k.fasta\"\n\ntrainer = BERT_Trainer(learning_rate, max_sequence_length, embedding_dim, hidden_dim, dropout_prob, num_heads, num_encoder_layers, vocab_length, batch_size, epochs,checkpoint_path, vocab_path, data_path)\ntrainer.train()\n","repo_name":"eugenechoi2004/COS597N","sub_path":"templates/BERT_Trainer.py","file_name":"BERT_Trainer.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32269845981","text":"import requests as rq\nimport pandas as pd\nfrom base64 import b64encode\nfrom urllib.parse import urlencode\nimport re\nimport json\n\nfrom typing import Dict, List, Union\n\nfrom .utils import (\n get_cubes_from_discovery,\n parse_headers,\n detect_error,\n convert_store_to_dataframe,\n cubes_leaves,\n)\nfrom .mdx import convert_mdx_to_dataframe, builder\nfrom .query import Query\nfrom .authentication import AuthenticationBuilder\nfrom .autotype import Types\n\n\nJSON_Flat = Dict[str, Union[str, int, None]]\n\n\nclass Connector:\n # ==== Definition ====\n\n cubes = None\n\n def __init__(self, endpoint: str, authentication: AuthenticationBuilder):\n tools = authentication(endpoint.rstrip(\"/\"))\n self.get = tools.get\n self.post = tools.post\n self.discover()\n\n # ==== Get data about every cubes ====\n\n def discover(self):\n \"\"\"\n Get the infos of the cube, the number of axes, their labels, ...\n \"\"\"\n response = self.get(\"pivot/rest/v4/cube/discovery\")\n detect_error(response)\n\n self.cubes = get_cubes_from_discovery(response)\n self.cubes_leaves = cubes_leaves(self.cubes)\n # 
print(self.cubes_leaves)\n\n def stores(self) -> Types:\n \"\"\"\n Get the list of stores of a cube\n \"\"\"\n response = self.get(\"pivot/rest/v4/datastore/discovery/storeNames\")\n detect_error(response)\n return response[\"data\"]\n\n def store_fields(self, store: str):\n \"\"\"\n Get the list of available fields of a specific store with their respective types\n \"\"\"\n response = self.get(f\"pivot/rest/v4/datastore/data/stores/{store}\")\n detect_error(response)\n return parse_headers(response[\"data\"][\"headers\"])\n\n def store_references(self, store: str):\n \"\"\"\n Get the references of the provided store\n \"\"\"\n response = self.get(f\"pivot/rest/v4/datastore/discovery/references/{store}\")\n detect_error(response)\n return response[\"data\"]\n\n def mdx_builder(self, cube_name: str, fields: List[str], types: Types = {}):\n if cube_name not in self.cubes_leaves:\n raise Exception(\n f\"{cube_name} isn't a valid cube.\\nThe available cubes are: {', '.join(self.cubes_leaves.keys())}\"\n )\n return self.mdx_query(builder(cube_name, fields, self.cubes_leaves[cube_name]), types)\n\n def mdx_query(self, mdx_request: str, types: Types = {}) -> Query:\n \"\"\"\n Execute a MDX query\n \"\"\"\n\n def refresh():\n response = self.post(\"pivot/rest/v4/cube/query/mdx\", body={\"mdx\": mdx_request})\n detect_error(response)\n\n return convert_mdx_to_dataframe(response, self.cubes)\n\n return Query(refresh, types)\n\n def store_query(\n self,\n store: str,\n fields: List[str],\n branch: str = \"master\",\n conditions: JSON_Flat = None,\n epoch: Union[str, None] = None,\n timeout: int = 30000,\n limit: int = 100,\n offset: int = 0,\n types: Types = {},\n ) -> Query:\n \"\"\"\n Execute a query directly on the data store.\n Must specify the name of the data store and the fields that you want to retrieve.\n\n You can specify:\n - the conditions if you want to filter the data\n - the timeout in ms of the request (delay to wait at most for the query to complete in Pivot)\n - the epoch (optional category for the message)\n - the branch on which the request will be done\n - the limit and the offset for pagination\n - the returned types\n \"\"\"\n limit = int(limit)\n offset = int(offset)\n timeout = int(timeout)\n\n page_size = min(limit, 10)\n start_extras = offset % page_size\n end_extras = (offset + limit) % page_size\n nb_pages = (start_extras != 0) + limit // page_size + (end_extras != 0)\n page_offset = offset // page_size + 1\n\n def refresh():\n cond = conditions\n base = \"pivot/rest/v4/datastore\"\n body = {\"fields\": fields, \"branch\": branch, \"timeout\": timeout}\n if epoch != None:\n body[\"epoch\"] = epoch\n if cond:\n if type(cond) == str:\n cond = json.loads(cond)\n body[\"conditions\"] = cond\n\n response = self.post(\n f\"{base}/data/stores/{store}?query=&page={page_offset}&pageSize={page_size}\",\n body=body,\n )\n detect_error(response)\n headers = parse_headers(response[\"data\"][\"headers\"])\n rows = response[\"data\"][\"rows\"]\n page_index = 1\n while page_index < nb_pages and response[\"data\"][\"pagination\"].get(\"nextPageUrl\"):\n page_index += 1\n response = self.post(\n f'{base}{response[\"data\"][\"pagination\"][\"nextPageUrl\"]}&query=', body=body\n )\n detect_error(response)\n rows.extend(response[\"data\"][\"rows\"])\n real_end_extras = max(len(rows) - offset - limit, 0)\n # Trimming\n rows = rows[start_extras : (len(rows) - real_end_extras)]\n return convert_store_to_dataframe(headers, rows)\n\n return Query(refresh, 
types)\n\n","repo_name":"anupsingh/paris-internship-python-connector","sub_path":"python/pivot/pivot.py","file_name":"pivot.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42011226371","text":"import yaml\nfrom rknn.api import RKNN\nimport cv2\n\n_model_load_dict = {\n 'caffe': 'load_caffe',\n 'tensorflow': 'load_tensorflow',\n 'tflite': 'load_tflite',\n 'onnx': 'load_onnx',\n 'darknet': 'load_darknet',\n 'pytorch': 'load_pytorch',\n 'mxnet': 'load_mxnet',\n 'rknn': 'load_rknn',\n }\n\nyaml_file = './config.yaml'\n\n\ndef main():\n with open(yaml_file, 'r') as F:\n config = yaml.load(F)\n # print('config is:')\n # print(config)\n\n model_type = config['running']['model_type']\n print('model_type is {}'.format(model_type))#检查模型的类型\n\n rknn = RKNN(verbose=True)\n\n\n\n#配置文件\n print('--> config model')\n rknn.config(**config['config'])\n print('done')\n\n\n print('--> Loading model')\n load_function = getattr(rknn, _model_load_dict[model_type])\n ret = load_function(**config['parameters'][model_type])\n if ret != 0:\n print('Load yolo failed! Ret = {}'.format(ret))\n exit(ret)\n print('done')\n\n ####\n #print('hybrid_quantization')\n #ret = rknn.hybrid_quantization_step1(dataset=config['build']['dataset'])\n\n\n if model_type != 'rknn':\n print('--> Building model')\n ret = rknn.build(**config['build'])\n print('acc_eval')\n rknn.accuracy_analysis(inputs='./dataset1.txt', target='rk3399pro')\n print('acc_eval done!')\n\n if ret != 0:\n print('Build yolo failed!')\n exit(ret)\n else:\n print('--> skip Building model step, cause the model is already rknn')\n\n\n#导出RKNN模型\n if config['running']['export'] is True:\n print('--> Export RKNN model')\n ret = rknn.export_rknn(**config['export_rknn'])\n if ret != 0:\n print('Init runtime environment failed')\n exit(ret)\n else:\n print('--> skip Export model')\n\n\n#初始化\n print('--> Init runtime environment')\n ret = rknn.init_runtime(**config['init_runtime'])\n if ret != 0:\n print('Init runtime environment failed')\n exit(ret)\n print('done')\n\n\n print('--> load img')\n img = cv2.imread(config['img']['path'])\n print('img shape is {}'.format(img.shape))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n inputs = [img]\n print(inputs[0][0:10,0,0])\n#推理\n if config['running']['inference'] is True:\n print('--> Running model')\n config['inference']['inputs'] = inputs\n #print(config['inference'])\n outputs = rknn.inference(inputs)\n #outputs = rknn.inference(config['inference'])\n print('len of output {}'.format(len(outputs)))\n print('outputs[0] shape is {}'.format(outputs[0].shape))\n print(outputs[0][0][0:2])\n else:\n print('--> skip inference')\n#评价\n if config['running']['eval_perf'] is True:\n print('--> Begin evaluate model performance')\n config['inference']['inputs'] = inputs\n perf_results = rknn.eval_perf(inputs=[img])\n else:\n print('--> skip eval_perf')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"littledeep/YOLOv5-RK3399Pro","sub_path":"convert/rknn_convert.py","file_name":"rknn_convert.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"3"} +{"seq_id":"2536622312","text":"from time import sleep\nfrom setting.keyType import WORKER_STATE\nfrom ui_folder import saveItem\nfrom ui_folder.uiDetailPage import uiDetailWindow\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom common_server.data_module import DataCenter\nfrom common_server.timer 
import TimerManager\n\nclass DetailWindow(QtWidgets.QMainWindow, uiDetailWindow):\n def __init__(self, key, detailList):\n super(DetailWindow, self).__init__()\n self.key = key\n self.data_center = DataCenter()\n self.setupUi(self)\n self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)\n self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setRowCount(len(detailList))\n saveItem(detailList, QtWidgets.QTableWidgetItem, self)\n # self.timer = None\n # self.timer = TimerManager.addRepeatTimer(self.data_center.getCfgValue('client', 'tick_time', 1.0), self.update)\n self.updateThread = UpdateThread(self)\n self.updateThread.start()\n self.updateThread.update.connect(self.update)\n self.showLingtou = True\n self.checkBox.stateChanged.connect(self.onLingtouChecked)\n\n def update(self, detailList):\n if detailList[0] == \"disconnected\":\n self.close()\n if detailList[0] != 'no stock':\n if not self.showLingtou:\n detailList = [i for i in detailList if i[5] >= 100]\n else: detailList = []\n if len(detailList) == 0:\n detailList = 'no stock'\n saveItem(detailList, QtWidgets.QTableWidgetItem, self)\n pass\n\n def onLingtouChecked(self):\n self.showLingtou = True if self.checkBox.isChecked() else False\n\n def closeEvent(self, a0: QtGui.QCloseEvent) -> None:\n # if self.timer:\n # TimerManager.cancel(self.timer)\n if self.updateThread:\n self.updateThread.stop()\n return super(DetailWindow, self).closeEvent(a0)\n\nclass UpdateThread(QtCore.QThread):\n update = QtCore.pyqtSignal(list)\n\n def __init__(self, parent) -> None:\n super().__init__(None)\n self.key = parent.key\n self.data_center = DataCenter()\n self.running = True\n\n def run(self):\n while self.running:\n state = self.data_center.getState()\n if state == WORKER_STATE.RUNNING:\n detailList = self.data_center.getDetailDataByKey(self.key)\n self.update.emit(detailList)\n elif state == WORKER_STATE.DISCONNECTED:\n self.update.emit([\"disconnected\"])\n sleep(self.data_center.getCfgValue('client', 'tick_time', 1.0))\n\n def stop(self):\n self.running = False","repo_name":"zxc916443179/fengkong","sub_path":"Worker/ui_folder/DetailWindow.py","file_name":"DetailWindow.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6252442893","text":"from collections import namedtuple\nimport itertools\nimport pydecode.nlp.decoding as decoding\nimport pydecode.constraints as constraints\nimport pydecode as ph\nimport random\n\nclass PermutationProblem(decoding.DecodingProblem):\n def __init__(self, size):\n self.size = size\n\n def feasible_set(self):\n for mid in itertools.permutations(range(1, self.size)):\n perm = Permutation(list(mid))\n assert perm.check_valid()\n yield perm\n\nclass Permutation(object):\n def __init__(self, perm):\n self.perm = perm\n\n def __eq__(self, other):\n return self.perm == other.perm\n\n def __cmp__(self, other):\n return cmp(self.perm, other.perm)\n\n def __repr__(self):\n return str(self.perm)\n\n def transition(self):\n yield (0, self.perm[0])\n for r in itertools.izip(self.perm, self.perm[1:]):\n yield r\n yield (self.perm[-1], 0)\n\n\n def check_valid(self):\n d = set()\n for i in self.perm:\n if i in d: return False\n d.add(i)\n return True\n\n\nclass PermutationScorer(object):\n @staticmethod\n def random(dependency_problem):\n n = dependency_problem.size\n return numpy.random.random([n, n])\n\n @staticmethod\n def score(scores, perm):\n return sum((scores[i, j]\n for i, j 
in perm.transition()))\n\ndef make_lattice(width, height, transitions):\n w, h = width, height\n\n blank = np.array([], dtype=np.int64)\n\n coder = np.arange(w * h, dtype=np.int64)\\\n .reshape([w+2, h])\n out = np.arange(w * h * h, dtype=np.int64)\\\n .reshape([w, h, h])\n\n c = ph.ChartBuilder(coder.size,\n unstrict=True,\n output_size=out.size)\n\n c.init(coder[0, 0])\n for i in range(1, w + 1):\n for j in range(h):\n c.set2(coder[i, j],\n coder[i-1, transitions[j]],\n blank,\n out[i-1, j, transitions[j]])\n c.set(coder[w+1, 0],\n coder[w, :h],\n blank,\n blank)\n return c\n\n\nclass PermutationDecoder(decoding.ConstrainedHypergraphDecoder):\n def output_to_instance(self, problem, items):\n w, h = problem.size-1, problem.size\n trans = numpy.unravel_index(items.nonzero()[0], (w, h, h))\n trans = zip(*trans)\n perms = [-1] * (problem.size - 1)\n for i, j, _ in trans:\n perms[i] = j\n return Permutation(perms)\n\n def constraints(self, hypergraph, problem):\n cons = constraints.Constraints(hypergraph,\n [(i, -1) for i in range(problem.size)])\n def make_constraint(edge):\n if edge.head.label.i == 0 or edge.head.label.i > problem.size:\n return []\n return [(edge.head.label.j, 1)]\n\n cons.from_vector([make_constraint(edge)\n for edge in hypergraph.edges])\n return cons\n\n def hypergraph(self, problem):\n return make_lattice(problem.size-1, problem.size,\n np.repeat(np.arange(problem.size), problem.size))\n\n def special_decode(self, method, problem, hypergraph, scores, constraints,\n scorer):\n if method == \"CUBE\":\n groups = [node.label.i for node in hypergraph.nodes]\n ins = ph.inside(hypergraph, scores)\n out = ph.outside(hypergraph, scores, ins)\n\n beam_chart = ph.beam_search_BinaryVector(\n hypergraph, scores, constraints.to_binary_potentials(),\n out, -10000, groups, [1000] * len(groups), cube_pruning=True)\n return beam_chart.path(0)\n\n elif method == \"BEAM\":\n groups = [node.label.i for node in hypergraph.nodes]\n ins = ph.inside(hypergraph, scores)\n out = ph.outside(hypergraph, scores, ins)\n\n beam_chart = ph.beam_search_BinaryVector(\n hypergraph, scores, constraints.to_binary_potentials(),\n out, -10000, groups, [1000] * len(groups))\n return beam_chart.path(0)\n elif method == \"MULTIDFA\":\n old = hypergraph\n old_hmap = None\n\n for j in range(problem.size):\n states = 2\n symbols = 2\n dfa = ph.DFA(states, symbols, [{0:0, 1:1} , {0:1}], [1])\n vec = [(1 if (edge.head.label.j == j) else 0)\n for edge in old.edges]\n counts = ph.CountingPotentials(old).from_vector(vec)\n hmap = ph.extend_hypergraph_by_dfa(old, counts, dfa)\n old = hmap.domain_hypergraph\n old.labeling = ph.Labeling(old, [hmap[node].label\n for node in old.nodes],\n None)\n #new_scores = old_scores.up_project(old, hmap)\n if old_hmap is not None:\n old_hmap = old_hmap.compose(hmap)\n else:\n old_hmap = hmap\n # old_scores = new_scores\n new_scores = scores.up_project(old, old_hmap)\n #new_scores = self.potentials(old, scorer)\n return ph.best_path(old, new_scores)\n\n elif method == \"BIGDFA\":\n old = hypergraph\n states = 2**problem.size\n symbols = problem.size + 1\n final_state = 0\n for i in range(problem.size):\n final_state |= 2**i\n\n transitions = \\\n [{j : i | 2**j for j in range(symbols) if i & 2**j == 0}\n for i in range(states)]\n dfa = ph.DFA(states, symbols,\n transitions,\n [final_state])\n vec = [edge.head.label.j for edge in old.edges]\n counts = ph.CountingPotentials(old).from_vector(vec)\n hmap = ph.extend_hypergraph_by_dfa(old, counts, dfa)\n old = hmap.domain_hypergraph\n 
old.labeling = ph.Labeling(old, [hmap[node].label\n for node in old.nodes],\n None)\n new_scores = scores.up_project(old, hmap)\n return ph.best_path(old, new_scores)\n","repo_name":"srush/PyDecode","sub_path":"python/pydecode/nlp/permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"3"} +{"seq_id":"35176003626","text":"#!/usr/bin/python3 \n# -*- coding: utf-8 -*-\n# @File : return-yield.py\n# @Author : BAOSHUNCHIN\n# @Email : baoshunchin@qq.com\n# @Time : 2020-12-06 22:11\n\n# def foo(num):\n# print(\"starting...\")\n# while num<10:\n# num=num+1\n# yield num\n# for n in foo(0):\n# print(n)\n\ndef foo(num):\n print(\"starting...\")\n while num<10:\n num = num+1\n return num\nfor n in foo(0):\n print(n)\n\n# def foo():\n# print(\"starting...\")\n# while True:\n# res = 4\n# return res\n# print(\"res:\", res)\n#\n# g = foo()\n# print(next(g))\n# print(\"*\"*20)\n# print(next(g))","repo_name":"chenbaoshun/AutomationTesting","sub_path":"Course0823/Week01/return-yield.py","file_name":"return-yield.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36901393830","text":"import streamlit as st\nfrom jinja2 import Environment, FileSystemLoader\nfrom appDataClass import App\n\n# __app = {\n# 'name': 'test',\n# 'replicas': 1,\n# 'namespace': 'test'\n# }\n\nnamespaceMap = {\n '生产环境': 'production',\n '预发布环境': 'pre',\n '开发环境': 'dev',\n '测试环境': 'test'\n}\n\ndef generate_k8s_yaml(app_name):\n __app = App(app_name)\n\n namespace = st.selectbox(\"部署环境\", namespaceMap.keys())\n __app.namespace = namespaceMap[namespace]\n __app.update_replicas()\n\n __app.first_image_tag = st.text_input(\"镜像tag\", \"\")\n st.write(__app)\n\n if st.button(\"生成k8s.yaml\"):\n file_loader = FileSystemLoader('templates')\n env = Environment(loader=file_loader)\n template = env.get_template('k8s_deployment.j2')\n\n body = template.render(app = __app)\n st.code(body, language='yaml')\n\n\napp_name = st.text_input(\"app name\", \"\")\nif app_name == '':\n st.error('you must set app name first')\nelse:\n generate_k8s_yaml(app_name)","repo_name":"nizijing/pp3","sub_path":"streamlit/pages/02_generate_k8s_yaml.py","file_name":"02_generate_k8s_yaml.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"23637270053","text":"import torch\nimport numpy as np\nimport datetime\nfrom utils.converters import Converters\n\n\nconverters = Converters()\n__MEAN = converters.get_mean()\n__STD = converters.get_std()\n\n\ndef get_labels(output):\n output = torch.nn.functional.softmax(output, dim=1)\n output = torch.squeeze(output)\n _, indices = torch.max(output, 0)\n return indices.detach().cpu().numpy()\n\n\ndef prepare_s_image_for_pt(img, device):\n img_pt = img.astype(np.float32) / 255.0\n for i in range(3):\n img_pt[..., i] -= __MEAN[i]\n img_pt[..., i] /= __STD[i]\n img_pt = img_pt.transpose(2, 0, 1)\n return torch.from_numpy(img_pt[None, ...]).to(device)\n\n\ndef load_model(modelClass, path, nClasses):\n device_str = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n device = torch.device(device_str)\n model = modelClass(nClasses, device)\n model.load_state_dict(torch.load(path, map_location=device_str))\n model.eval()\n model.to(device)\n return model, device\n\n\ndef save_model_with_meta(file, model, optimizer, 
additional_info):\n dict_to_save = {'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(),\n 'date': str(datetime.datetime.now()), 'additional_info': {}}\n dict_to_save['additional_info'].update(additional_info)\n torch.save(dict_to_save, file)\n\n\ndef load_model_with_meta(modelClass, path, nClasses, device_name=None):\n if device_name is not None:\n loaded_torch = torch.load(path, map_location=device_name)\n device = torch.device(device_name)\n else:\n device_str = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n loaded_torch = torch.load(path, map_location=device_str)\n device = torch.device(device_str)\n\n model = modelClass(nClasses)\n\n if 'model_state_dict' in loaded_torch:\n model.load_state_dict(loaded_torch['model_state_dict'])\n else:\n model.load_state_dict(loaded_torch)\n\n res = loaded_torch['additional_info']['resolution'] if loaded_torch['additional_info']['resolution'] is not None else None\n\n model.eval()\n model.to(device)\n\n if 'model_state_dict' in loaded_torch:\n print('Model Info:')\n for key, item in loaded_torch.items():\n if key == 'model_state_dict' or key == 'optimizer_state_dict':\n continue\n print('%s: %s' % (key, item))\n\n return model, device, res\n","repo_name":"dsjardim/ComputerVisionUFRGS","sub_path":"TF/deep_learning/src/utils/pt_utils.py","file_name":"pt_utils.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2490050255","text":"import json\nimport jsonFileHandler\n#import writeJsonHandler\n\n# reading from json file and populating data object\n\ndata = jsonFileHandler.readJsonFile('customers.json')\n\n# initializing var to calculate last customer id\nlastCustID = 0\n\n# extracting and printing fields from data from json file\n\nfor i in data :\n print(f\"Customer ID : {i['customerID']}\", end = \" -->\")\n print(f\" {i['customerName']}\", end = \", \")\n print(f\"{i['customerAddress']}, {i['customerCity']}, {i['customerState']}\", end = \" -->\")\n print(f\" {i['customerPhone']}\\n\")\n lastCustID = int(i['customerID']) # storing last customer id in variable\n\n\n#initializing a dictionary to store the new customer data\nnewCust = { \"customerID\":\"\", \n \"customerName\": \"\", \n \"customerAddress\": \"\", \n \"customerCity\": \"\", \n \"customerState\": \"\",\n \"customerPhone\": \"\"\n }\n\n\n# getting new customer details for adding new customers\n\ncustName = input(\"Enter new customer name: \")\ncustAddress = input(\"Enter street address: \")\ncustCity = input(\"Enter city: \")\ncustState = input(\"Enter State: \")\ncustPhone = input(\"Enter phone: \")\n\n\n# populating the new customer dictionary with user inputs\n\nnewCust['customerID'] = str(lastCustID + 1)\nnewCust['customerName'] = custName\nnewCust['customerAddress'] = custAddress\nnewCust['customerCity'] = custCity\nnewCust['customerState'] = custState\nnewCust['customerPhone'] = custPhone\n\n\n# calling funcion to write the new record in json file\nif jsonFileHandler.writeJsonFile(newCust, \"customers.json\") :\n print(\"Successfully added new customer!!\")\n","repo_name":"zeemeeran/Python-challenges","sub_path":"customersReadWriteJson.py","file_name":"customersReadWriteJson.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25691827497","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\n# Function to 
scrape the data from a given URL and list name\r\ndef scrape_data(url, list_name):\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n table = soup.find(\"tbody\", class_=\"lister-list\")\r\n data = []\r\n\r\n for tr in table.find_all(\"tr\"):\r\n title = tr.find(\"td\", class_=\"titleColumn\").a.get_text()\r\n rank = tr.find(\"td\", class_=\"titleColumn\").get_text().split(\".\")[0]\r\n imdb_id = tr.find(\"td\", class_=\"posterColumn\").a[\"href\"].split(\"/\")[2]\r\n year = tr.find(\"span\", class_=\"secondaryInfo\").get_text().strip(\"()\")\r\n try:\r\n rating = tr.find(\"strong\").get_text()\r\n except AttributeError:\r\n rating = None\r\n image_url = tr.find(\"td\", class_=\"posterColumn\").img[\"src\"]\r\n\r\n # Fetch additional attributes\r\n attributes = tr.find_all(\"span\")\r\n rank_attribute = None\r\n imdb_rating_attribute = None\r\n user_votes_attribute = None\r\n num_votes_attribute = None\r\n user_rating_attribute = None\r\n\r\n for attribute in attributes:\r\n attribute_name = attribute.get(\"name\")\r\n if attribute_name == \"rk\":\r\n rank_attribute = attribute.get(\"data-value\")\r\n elif attribute_name == \"ir\":\r\n imdb_rating_attribute = attribute.get(\"data-value\")\r\n elif attribute_name == \"us\":\r\n user_votes_attribute = attribute.get(\"data-value\")\r\n elif attribute_name == \"nv\":\r\n num_votes_attribute = attribute.get(\"data-value\")\r\n elif attribute_name == \"ur\":\r\n user_rating_attribute = attribute.get(\"data-value\")\r\n\r\n data.append([rank,title,imdb_id,year,rating,image_url,list_name,rank_attribute,imdb_rating_attribute,user_votes_attribute,num_votes_attribute,user_rating_attribute])\r\n\r\n return data\r\n\r\n# Main code\r\ndata = []\r\n\r\n# Scrape data from IMDB Top 250\r\ndata.extend(scrape_data(\"https://www.imdb.com/chart/top\", \"IMDB Top 250\"))\r\n\r\n# Scrape data from Most Popular\r\ndata.extend(scrape_data(\"https://www.imdb.com/chart/moviemeter/\", \"Most Popular\"))\r\n\r\n# Scrape data from Most Top Rated English Movies\r\ndata.extend(scrape_data(\"https://www.imdb.com/chart/top-english-movies\", \"Most Top Rated English Movies\"))\r\n\r\n# Convert the list to a pandas dataframe\r\ndf = pd.DataFrame(data, columns=[\r\n \"Rank\",\"Title\",\"IMDB ID\",\"Year\",\"Rating\",\"Image URL\",\"List Name\",\"Rank Attribute\",\"IMDB Rating Attribute\",\"User Votes Attribute\",\"Number of Votes Attribute\",\"User Rating Attribute\"])\r\n","repo_name":"johnneycao/PowerBI-Open-Data-for-fun","sub_path":"_Asset Library/Source_Files/IMDB_Charts.py","file_name":"IMDB_Charts.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"3978201382","text":"#!/usr/bin/python3 \n# test the swing buffers with usrp_driver and cuda_driver \nimport unittest \nimport numpy as np \nimport sys \nimport os\nimport posix_ipc \nimport pdb \nimport time \nimport subprocess \nimport configparser \nimport datetime \n \nfrom termcolor import cprint \nsys.path.insert(0, '../python_include') \nsys.path.insert(0, '../cuda_driver') \n \nfrom drivermsg_library import * \nfrom socket_utils import * \nfrom cuda_driver import * \nfrom radar_config_constants import * \nimport clear_frequency_search \nimport rosmsg \nimport test_cuda_driver \n\n \n \nSTART_DRIVER = False \nANTENNA_UNDER_TEST = [0, 1, 2, 3, 4 ,5, 6, 7] \n#ANTENNA_UNDER_TEST = [0] \nINTEGRATION_PERIOD_SYNC_TIME = 0.3 # todo: get from file\n \n\n# init logging\nlogFile = 
open(\"../log/test_swing.log\", \"wt\")\n\ndef logmsg(msg):\n logFile.write(\"{} - {}\\n\".format(datetime.now().strftime(\"%H:%M:%S:%f\"), msg) )\n\n\n\n# parse gpu config file\ndriverconfig = configparser.ConfigParser()\ndriverconfig.read('../driver_config.ini')\nshm_settings = driverconfig['shm_settings']\ncuda_settings = driverconfig['cuda_settings']\nnetwork_settings = driverconfig['network_settings']\n\nrxshm_size = shm_settings.getint('rxshm_size')\ntxshm_size = shm_settings.getint('txshm_size')\n\n\ncuda_port = network_settings.getint('CUDADriverPort')\nusrp_port = network_settings.getint('USRPDriverPort')\n\nsamplingRate_rx = float(cuda_settings['FSampTX'])\nsamplingRate_tx = float(cuda_settings['FSampRX'])\n\n\n\nrx_shm_list = [[],[]]\ntx_shm_list = []\nrx_semaphore_list = [[],[]] # [side][swing]\nswings = [SWING0, SWING1]\nsides = [SIDEA]\n\n# list of all semaphores/shared memory paths for cleaning up\nshm_list = []\nsem_list = []\n\ndef connect_to_cuda_driver():\n serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n max_connect_attempts = 5\n for i in range(max_connect_attempts):\n print('attempting connection to usrp_driver')\n try:\n serversock.connect(('localhost', cuda_port))\n break\n except:\n print('connecting to cuda driver failed on attempt ' + str(i + 1))\n time.sleep(5)\n return [serversock]\n\n\ndef connect_to_usrp_driver():\n max_connect_attempts = 5\n\n serversock = []\n for iUSRP, antennaNumber in enumerate(ANTENNA_UNDER_TEST):\n serversock.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))\n for i in range(max_connect_attempts):\n print('attempting connection to usrp_driver ( at localhost:{} )'.format(usrp_port + antennaNumber))\n try:\n serversock[iUSRP].connect(('localhost', usrp_port + antennaNumber))\n break;\n except:\n print('connecting to usrp driver failed on attempt ' + str(i + 1))\n time.sleep(3)\n\n if i == (max_connect_attempts - 1):\n print('connecting to usrp driver failed, exiting')\n sys.exit(1)\n return serversock\n\ndef sync_usrps(sock):\n logmsg(\"Start sync_usrps\")\n cprint('testing usrp trigger with one period','red')\n\n cmd = usrp_sync_time_command(sock)\n cmd.transmit()\n ret = cmd.client_return()\n logmsg(\"End sync_usrps\")\n\nclass swingParameter():\n def __init__(self, seq):\n self.seq = seq\n self.rx_rate = 5e6\n self.tx_rate = 5e6\n self.nPulses_per_period = 9\n self.nSamples_per_pulse = 4200\n self.sample_offsets = None\n self.swing = 0\n\n @property\n def tx_freq(self):\n self.seq.ctrlprm['tfreq']\n \n @tx_freq.setter\n def tx_freq(self, value):\n self.seq.ctrlprm['tfreq'] = value\n\n @property\n def rx_freq(self):\n self.seq.ctrlprm['rfreq']\n \n @rx_freq.setter\n def rx_freq(self, value):\n self.seq.ctrlprm['rfreq'] = value\n\n\n\ndef generate_parameter(integration_period = 2):\n seq = test_cuda_driver.create_testsequence()\n usrp_par = swingParameter(seq)\n \n\n # determine the length of integration periods for all channels in seconds\n PULSE_SEQUENCE_PADDING_TIME = 35e3 * 75 * 2 / 3e8 # without offset\n nPulses_in_sequence = 8 # seq.npulses\n\n\n# nSamples_per_pulse = np.uint64(int(cuda_settings['TXUpsampleRate']) * ( np.floor(samplingRate_tx * seq.pulse_lens[0]/1e6 ) + 2 * np.floor(samplingRate_tx * seq.tr_to_pulse_delay/1e6 )))\n nSamples_per_pulse = np.uint64( ( np.floor(samplingRate_tx * seq.pulse_lens[0]/1e6 ) + 2 * np.floor(samplingRate_tx * seq.tr_to_pulse_delay/1e6 )))\n\n # to find out how much time is available in an integration period for pulse sequences, subtract out startup delay\n 
sampling_duration = integration_period - INTEGRATION_PERIOD_SYNC_TIME\n\n # calculate the pulse sequence period with padding\n time_sequence = PULSE_SEQUENCE_PADDING_TIME + seq.pulse_offsets_vector[-1] + seq.pulse_lens[-1] /1e6\n nSamples_sequence = int(np.round(time_sequence * samplingRate_tx))\n usrp_par.seq.ctrlprm['number_of_samples'] = int(time_sequence*usrp_par.seq.ctrlprm['baseband_samplerate'])\n # calculate the number of pulse sequences that fit in the available time within an integration period\n nSequences_in_period = int(np.floor(sampling_duration / time_sequence))\n\n # then calculate sample indicies at which pulse sequences start within a pulse sequence\n pulse_sequence_offsets_samples = [int(offset* samplingRate_tx) for offset in seq.pulse_offsets_vector]\n pulse_sequence_offsets_vector = seq.pulse_offsets_vector\n\n # then, calculate sample indicies at which pulses start within an integration period\n nPulses_per_period = int(nPulses_in_sequence * nSequences_in_period )\n integration_period_pulse_sample_offsets = np.zeros(nPulses_per_period, dtype=np.uint64)\n for iSequence in range(nSequences_in_period):\n for iPulse in range(nPulses_in_sequence):\n integration_period_pulse_sample_offsets[iSequence * nPulses_in_sequence + iPulse] = iSequence * nSamples_sequence + pulse_sequence_offsets_samples[iPulse]\n\n # calculate the number of RF transmit and receive samples \n usrp_par.seq.nbb_rx_samples_per_integration_period = nSamples_sequence * nSequences_in_period\n\n print(\"nSamples_rx:{}, nSamples_per_pulse:{}, integration_period_pulse_sample_offsets:\".format(usrp_par.seq.nbb_rx_samples_per_integration_period , nSamples_per_pulse))\n print(\"nSequences_in_period:{}, nPulses_per_period:{}, \".format(nSequences_in_period, nPulses_per_period))\n print(integration_period_pulse_sample_offsets)\n\n\n swing = 0 \n\n\n usrp_par.rx_rate = samplingRate_rx\n usrp_par.tx_rate = samplingRate_tx\n usrp_par.nPulses_per_period = nPulses_per_period\n\n usrp_par.nSamples_per_pulse = nSamples_per_pulse\n usrp_par.sample_offsets = integration_period_pulse_sample_offsets\n usrp_par.swing = swing\n\n return usrp_par\n\ndef usrp_setup(sock, usrp_par):\n logmsg(\"Start usrp_setup (swing{})\".format(usrp_par.swing))\n start_setup = time.time()\n cmd = usrp_setup_command(sock, usrp_par.tx_freq, usrp_par.rx_freq, usrp_par.rx_rate , usrp_par.tx_rate, usrp_par.nPulses_per_period, usrp_par.seq.nbb_rx_samples_per_integration_period, usrp_par.nSamples_per_pulse, usrp_par.sample_offsets, usrp_par.swing)\n cmd.transmit()\n client_returns = cmd.client_return()\n\n for r in client_returns:\n assert(r == UHD_SETUP)\n time_needed_for_setup = time.time() - start_setup\n print(\"Time for: setup: {}\".format(time_needed_for_setup))\n logmsg(\"End usrp_setup (swing{})\".format(usrp_par.swing))\n\ndef usrp_trigger(sock, usrp_par):\n logmsg(\"Start usrp_tigger (swing{})\".format(usrp_par.swing))\n # grab current usrp time from one usrp_driver\n cmd = usrp_get_time_command(sock[0])\n cmd.transmit()\n usrp_time = cmd.recv_time(sock[0])\n cmd.client_return()\n\n trigger_time = usrp_time + INTEGRATION_PERIOD_SYNC_TIME\n cmd = usrp_trigger_pulse_command(sock, trigger_time, usrp_par.seq.tr_to_pulse_delay / 1e9, usrp_par.swing)\n cmd.transmit()\n client_returns = cmd.client_return()\n for r in client_returns:\n assert(r == UHD_TRIGGER_PULSE)\n logmsg(\"End usrp_tigger (swing{})\".format(usrp_par.swing))\n\ndef usrp_ready_data(sock, usrp_par):\n logmsg(\"Start usrp_ready (swing{})\".format(usrp_par.swing))\n\n # request pulse 
data\n cmd = usrp_ready_data_command(sock, usrp_par.swing)\n cmd.transmit()\n logmsg(\"Start usrp_ready waiting (swing{})\".format(usrp_par.swing))\n for iSock in sock:\n ret = cmd.recv_metadata(iSock)\n print(\" recieved READY STATUS: status:{}, ant: {}, nSamples: {}, fault: {}\".format(ret['status'], ret['antenna'], ret['nsamples'], ret['fault']))\n logmsg(\"End usrp_ready waiting (swing{})\".format(usrp_par.swing))\n\n client_returns = cmd.client_return()\n for r in client_returns:\n assert(r == UHD_READY_DATA)\n logmsg(\"End usrp_ready (swing{})\".format(usrp_par.swing))\n\n\n\n\ndef plot():\n # plot data\n import matplotlib.pyplot as plt\n print(\"Reading data from shm:\")\n for ant in ANTENNA_UNDER_TEST:\n rx_shm = rx_shm_list[0][ant*2]\n rx_shm.seek(0)\n ar = np.frombuffer(rx_shm, dtype=np.int16, count=nSamples_rx*2)\n arp = np.float32(ar[0::2]) ** 2 + np.float32(ar[1::2]) ** 2\n print(\" ant {}: rms: {:5.3f} max: {:5.3f}\".format(ant, np.sqrt(np.mean(arp) ), np.sqrt(np.max(arp)) ))\n if True:\n plt.plot(ar[::2])\n plt.plot(ar[1::2])\n plt.plot(np.sqrt(arp))\n plt.show()\n\n print('sampled phase')\n # pdb.set_trace() \n\ndef cuda_add_channel(sock, parClass):\n logmsg(\"Start cuda_add_channel (swing{})\".format(parClass.swing))\n cmd = cuda_add_channel_command(sock, parClass.seq, parClass.swing)\n cmd.transmit()\n cmd.client_return()\n logmsg(\"End cuda_add_channel (swing{})\".format(parClass.swing))\n\ndef cuda_generate_pulse(sock, parClass):\n logmsg(\"Start cuda_generate (swing{})\".format(parClass.swing))\n cmd = cuda_generate_pulse_command(sock, parClass.swing)\n cmd.transmit()\n cmd.client_return()\n logmsg(\"End cuda_generate (swing{})\".format(parClass.swing))\n\n\ndef cuda_process(sock, parClass):\n logmsg(\"Start cuda_process (swing{})\".format(parClass.swing))\n # process samples from shared memory\n cmd = cuda_process_command(sock, parClass.swing)\n cmd.transmit()\n cmd.client_return()\n logmsg(\"End cuda_process (swing{})\".format(parClass.swing))\n\ndef cuda_get_data(sock, parClass, channel_number):\n logmsg(\"Start cuda_get_data (swing{})\".format(parClass.swing))\n # copy processed samples\n cmd = cuda_get_data_command(sock, parClass.swing)\n cmd.transmit()\n\n main_samples = None\n back_samples = None\n \n for cudasock in sock:\n\n cprint('waiting for number of antennas from cuda_driver', 'red')\n nAntennas = recv_dtype(cudasock, np.uint32)\n cprint('collecting data from {} antennas'.format(nAntennas), 'red')\n transmit_dtype(cudasock, channel_number, np.int32)\n\n for iAntenna in range(nAntennas):\n antIdx = recv_dtype(cudasock, np.uint16)\n\n cprint('collecting samples from antenna {}'.format(antIdx), 'red')\n num_samples = recv_dtype(cudasock, np.uint32)\n samples = recv_dtype(cudasock, np.float32, num_samples)\n samples = samples[0::2] + 1j * samples[1::2] # unpacked interleaved i/q\n\n\n #... 
initialize main/back sample arrays once num_samples is known\n if main_samples is None:\n main_samples = np.zeros((4, 16, num_samples/2))\n back_samples = np.zeros((4, 4, num_samples/2))\n\n if antIdx < 16:\n main_samples[channel_number-1][antIdx] = samples[:]\n\n else:\n back_samples[channel_number-1][antIdx - nMainAntennas] = samples[:]\n\n\n transmit_dtype(cudasock, -1, np.int32) # send channel -1 to cuda driver to end transfer process\n cprint('finished collecting samples!', 'red')\n\n cmd.client_return()\n logmsg(\"End cuda_get_data (swing{})\".format(parClass.swing))\n return [main_samples, back_samples]\n\n\ndef cuda_exit(sock):\n cmd = cuda_exit_command(sock)\n cmd.transmit()\n\n\n\ndef test_one_swing():\n cuda_sock = connect_to_cuda_driver()\n usrp_sock = connect_to_usrp_driver()\n par_swing0 = generate_parameter()\n \n usrp_setup(usrp_sock, par_swing0)\n cuda_add_channel(cuda_sock, par_swing0)\n cuda_generate_pulse(cuda_sock, par_swing0)\n usrp_trigger(usrp_sock, par_swing0)\n usrp_ready_data(usrp_sock, par_swing0)\n cuda_process(cuda_sock, par_swing0)\n channel_number = 1\n data = cuda_get_data(cuda_sock, par_swing0, channel_number)\n\n cuda_exit(cuda_sock)\n\n\ndef test_both_swings():\n cuda_sock = connect_to_cuda_driver()\n usrp_sock = connect_to_usrp_driver()\n par_swing0 = generate_parameter()\n par_swing1 = generate_parameter()\n par_swing1.swing = 1\n\n # swing A\n usrp_setup(usrp_sock, par_swing0)\n cuda_add_channel(cuda_sock, par_swing0)\n cuda_generate_pulse(cuda_sock, par_swing0)\n usrp_trigger(usrp_sock, par_swing0)\n \n # B\n cuda_add_channel(cuda_sock, par_swing1)\n cuda_generate_pulse(cuda_sock, par_swing1)\n\n # wait for A\n usrp_ready_data(usrp_sock, par_swing0)\n cuda_process(cuda_sock, par_swing0)\n \n usrp_trigger(usrp_sock, par_swing1)\n \n channel_number = 1\n data = cuda_get_data(cuda_sock, par_swing0, channel_number) # swin A finished\n cuda_add_channel(cuda_sock, par_swing0)\n cuda_generate_pulse(cuda_sock, par_swing0)\n\n # wait for B\n usrp_ready_data(usrp_sock, par_swing1)\n cuda_process(cuda_sock, par_swing1)\n \n\n# usrp_setup(usrp_sock, par_swing0)\n channel_number = 1\n data = cuda_get_data(cuda_sock, par_swing1, channel_number) # swing B finished\n\n cuda_exit(cuda_sock)\n\ndef test_nSequences(nSequences):\n \n if nSequences < 1:\n print(\"nSequences have to be >= 1. 
Setting it to 1\")\n nSequences = 1\n\n # connect and setup\n cuda_sock = connect_to_cuda_driver()\n usrp_sock = connect_to_usrp_driver()\n par_swing0 = generate_parameter()\n par_swing1 = generate_parameter()\n par_swing1.swing = 1\n par_swing1.tx_freq = 15000\n par_swing1.rx_freq = 15000\n par_vec = [par_swing0, par_swing1]\n rx_data_list = []\n\n # prepare first swing\n usrp_setup(usrp_sock, par_swing0)\n cuda_add_channel(cuda_sock, par_swing0)\n cuda_generate_pulse(cuda_sock, par_swing0)\n usrp_trigger(usrp_sock, par_swing0)\n \n channel_number = 1\n active_swing = 0 # swing finishing tx/rx, signal processing and tranismitting of final data\n prep_swing = 1 # preparing swing (add cuda channel, generate pulse, usrp setup) and start transmitting\n\n for iSequence in range(nSequences-1):\n # prepare prep_swing in cuda\n cuda_add_channel(cuda_sock, par_vec[prep_swing])\n cuda_generate_pulse(cuda_sock, par_vec[prep_swing])\n \n # wait active_swing and start cuda processing\n usrp_ready_data(usrp_sock, par_vec[active_swing])\n# cuda_process(cuda_sock, par_vec[active_swing])\n \n # setup usrp for prep_swing and trigger usrp\n usrp_setup(usrp_sock, par_vec[prep_swing])\n usrp_trigger(usrp_sock, par_vec[prep_swing])\n\n cuda_process(cuda_sock, par_vec[active_swing])\n # get data of active_swing\n rx_data_list.append( cuda_get_data(cuda_sock, par_vec[active_swing], channel_number) )\n\n # switch prep and active swing\n active_swing = 1 - active_swing\n prep_swing = 1 - prep_swing\n\n usrp_ready_data(usrp_sock, par_vec[active_swing])\n cuda_process(cuda_sock, par_vec[active_swing])\n rx_data_list.append(cuda_get_data(cuda_sock, par_vec[active_swing], channel_number) )\n\n# cuda_exit(cuda_sock)\n os.system(\"../srr.py stop\") \n return rx_data_list\n\n\n\ndef plot_sequences(rx_data_list):\n import matplotlib.pyplot as plt\n # rx_data[iSequence|iSwing][mainArray|backArray][iChannel][iAntenne][iComplexSample]\n nSequences = len(rx_data_list)\n for iSequence, seqRxData in enumerate(rx_data_list):\n # plt.figure()\n for i, antennaData in enumerate(seqRxData[0][0]):\n if i > 7:\n break\n plt.subplot(nSequences,8, i+1+iSequence*8)\n if i == 0:\n plt.title(\"seq {} ant {}\".format(iSequence, i))\n\n plt.plot(np.real(antennaData))\n plt.plot(np.imag(antennaData))\n power = np.abs(antennaData)**2\n print(\"sequence {}: ant {: >2}, rms {: >9.3f} max {: >9.3f}\".format(iSequence, i, np.sqrt(np.sum(power)), np.sqrt(max(power)) ))\n plt.figure()\n data =rx_data_list[0][0][0][0] \n plt.plot([i/3333.33 for i in range(len(data))], data)\n plt.title(\"sequence 0, ant 0\")\n plt.show()\n\n\ndef tmp():\n cuda_sock = connect_to_cuda_driver()\n par_swing0 = generate_parameter()\n cuda_add_channel(cuda_sock, par_swing0)\n cuda_generate_pulse(cuda_sock, par_swing0)\n cuda_exit(cuda_sock)\n\n\nrx_data_list = test_nSequences(3)\nplot_sequences(rx_data_list)\n","repo_name":"UAF-SuperDARN-OPS/SuperDARN_UHD_Server","sub_path":"usrp_server/test_swing_drivers.py","file_name":"test_swing_drivers.py","file_ext":"py","file_size_in_byte":16880,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"34993610703","text":"'''\n!/usr/bin/env python\n@author:ayanava_dutta,shivam_gupta,rohan_sasmal\n-*-coding:utf-8-*-\n'''\n#--------Packages-----------\nimport streamlit as st\nst.set_page_config(page_title=\"HighRadius™ | CASH APPLICATION CLOUD\", page_icon='/root/caascript/res/bg/logo.png')\nimport pandas as pd\nimport numpy as np\nimport time\nfrom res.bg_css import 
page_bg\n#----------RP---------------\nfrom rpscript.rpmodelling import login\nfrom rpscript.rpmodelling import mod2\nfrom rpscript.rpmodelling import data_prep\nfrom rpscript.rpmodelling import DataExt\n#from rpscript.rpmodelling import dashdem\nfrom rpscript.rpmodelling import auto_pilot\nfrom rpscript.rpmonitor import RP_monitoring\nfrom rpscript.rpmonitor import RP_Monitoring_Automated_modified\nfrom rpscript.rpanalysis import rp_analysis_functions\nfrom rpscript.rpanalysis import RP_Analysis\n#----------LITM-------------\nfrom litmscript.litmmonitor import LITM_Monitoring_automation_modified\nfrom litmscript.litmmonitor import Monitor_start\nfrom litmscript.litmanalysis import LITM_Analysis\n#st.title(\"CASH APPS\")\n\ndef all_screen(choice):\n img_path='/root/caascript/res/bg/'\n #st.set_page_config(page_title=\"HighRadius™ |CASH APPLICATION CLOUD\", page_icon='/root/caascript/res/bg/logo.png')\n if choice:\n page_bg(img_path+'Picture_Login.png')\n login.login()\n \n\n else:\n side_bar = st.sidebar.selectbox(label='What do you want to do?', options=['RP','LITM'])\n\n if side_bar =='RP':\n rp_bar = st.sidebar.selectbox(label='What do you want to do?', options=['Data Extraction','RP-Modelling','Rp-Monitoring','RP-Analysis'],key=1)\n\n if rp_bar=='Data Extraction':\n page_bg(img_path+'dataext_pic.png')\n st.header(\"Data Extraction\")\n DataExt.main()\n \n \n elif rp_bar=='RP-Modelling':\n modelling_bar=st.sidebar.selectbox(label='What do you want to do?', options=['Data Preparation','Modelling','Auto-Pilot Mode'],key=2)\n \n if modelling_bar=='Data Preparation':\n page_bg(img_path+'dataprep_pic.png')\n st.header(\"Data Preparation\") \n data_prep.data_prep()\n \n elif modelling_bar=='Modelling':\n page_bg(img_path+'modelling_pic.png')\n st.header(\"Model Training\") \n mod2.modelling_main()\n\n else:\n page_bg(img_path+'autopilot_pic.png')\n st.header(\"Auto-Pilot Mode\")\n auto_pilot.auto_pilot()\n\n\n \n elif rp_bar=='Rp-Monitoring':\n page_bg(img_path+'rpmonitor_pic.png')\n st.header(\"RP-Monitoring\")\n RP_monitoring.main()\n\n\n \n elif rp_bar=='RP-Analysis':\n page_bg(img_path+'rpanalysis_pic.png')\n st.header(\"RP-Analysis\")\n if st.checkbox(\"Show Warning\",value=True):\n st.warning(\"This feature is still in devolopement phase \\n\\n Some features may or may not run properly\")\n RP_Analysis.main()\n\n\n else:\n litm_bar = st.sidebar.selectbox(label='What do you want to do?', options=['LITM Monitoring','LITM Analysis'],key=3)\n if litm_bar=='LITM Monitoring':\n page_bg(img_path+'litmmonitor_pic.png')\n st.header(\"LITM Monitoring\")\n Monitor_start.main()\n else:\n page_bg(img_path+'litm_analysis.png')\n st.header(\"LITM Analysis\")\n if st.checkbox(\"Show Warning\",value=True):\n st.warning(\"This feature is still in devolopement phase \\n\\n Some features may or may not run properly\")\n LITM_Analysis.analysis()\n\n\n\n\n\n\nif __name__ == '__main__':\n \n choice = st.sidebar.checkbox('Login',value=True)\n all_screen(choice)\n\n","repo_name":"rohansasmal123/continuous-testing","sub_path":"all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39523837012","text":"# list\n\n# myList = [2,4,6,5,7,8]\n\n# for i in myList:\n# print(i)\n# newList = [expression for item iterable if condition else some statement]\nnewList = [\"Musah\", \"Rahman\", \"Sufyan\", \"Haqq\", \"Rafiu\", \"Book\", \"Going\"]\n# traditional program\n# li = []\n# for item in 
newList:\n# if \"a\" in item:\n# li.append(item)\n# print(li)\n# ll = []\n# myList = [ll.append(x) for x in newList if \"a\" in x]\nmyList = [x for x in newList if \"a\" in x]\n# print(myList)\n\nnumber = [1,2,3,4,5,6,7,8,9]\nselectedNumber = [num for num in number if num > 5]\n# print(selectedNumber)\ngeneratedList = myList + selectedNumber\nprint(generatedList)\n\nnl = myList.copy()\n# print(nl)\nnl.extend(selectedNumber)\nprint(nl)","repo_name":"musahibrahimali/python-tutorial","sub_path":"source/Four/list-cont.py","file_name":"list-cont.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13844763520","text":"\nimport json\nimport requests\n\n# Target address\naddress = '
'\n\nresp = requests.get('https://blockchain.info/unspent?active=%s' % address)\nutxo_set = json.loads(resp.text)[\"unspent_outputs\"]\n\nfor utxo in utxo_set:\n print(\"{tx_hash}:{tx_output_n} - {value} Satoshis\".format(**utxo))\n","repo_name":"apogiatzis/comp1830-lab2","sub_path":"accounting-models/get-utxo.py","file_name":"get-utxo.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"9430263790","text":"#print('Hello')\na = 3\nb = 4\nc = a+b\na = 777\nn = a\nname = 'Dima,Fedor,Yeugen,Ilya'\nname2 = 'Dima,Fedor,Yeugen,Ilya'\n#name[0] = 'B'\n#name2 = name\n\nprint(id(name))\nprint(id(name2))\n\nname = 'onethow'\nprint(name2)\n\nprint(id(name))\nprint(id(name2))\n\nlst = [1,2,3]\n#print(dir(lst))\ntuple = (1,2)\n\n\nfrom libs_me.utils import add\n\nrez = add(2,6,3,4,5,6)\nprint(rez)\n\n#name = 'Dima-Fedor-Yeugen-Ilya'\n#name = '1.Dima;2.Fedor;3.Yeugen;4.Ilya'\n#arr = 'fff;fff;ff'.split(',')\n#arr = name.split(',')\n#d = '-'.join(arr)\n#print(d)\n#print(dir(name))\n# for index, el in enumetare(name):\n# print(i)\n\n\n\n\n\n\n\n\n#print(id(a))\n#f = print(n)\n#print(f)\n","repo_name":"zdimon/wezom-python-course2","sub_path":"students/DmytroSerhiyovich/start.1.py","file_name":"start.1.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26302331064","text":"def return_text_by_filepath(path):\n with open(path) as f:\n return f.read()\n\n\nopinions = {\n 'клево': {\n 'description': {\n 'len': 4,\n 1: ['спокойно', 'наслаждение', 'весело', 'блаженство', 'удовольствие', 'удовлетворение', 'легко'],\n 2: ['любопытно', 'влечение', 'интересно', 'вожделение', 'позыв', 'вдохновение', 'стремлюсь'],\n 3: ['мило', 'симпатия', 'по-доброму', 'обожаю', 'тепло на душе', 'принятие', 'нежность'],\n 4: ['другое'] * 7,\n },\n 'state_is': {\n 'len': 4,\n 1: ['поддержка', 'информация', 'забота', 'запасной план', 'полезность', 'разнообразие', 'деньги',\n 'эффективность', 'план действий', 'достоверные источники', 'доверие', 'авторитетное мнение',\n 'стабильность', 'развитиеуверенность'],\n 2: ['близость', 'чувства', 'внимание', 'индивидуальность', '��расота', 'привилегии', 'творчество',\n 'признание', 'похвала', 'искренность', 'благодарность', 'восхищение', 'связь', 'привлекательность'],\n 3: ['власть', 'преданность', 'сила', 'честь', 'возможность противостоять', 'ответственность',\n 'профессионализм', 'справедливость', 'возможность оказать сопротивление', 'автономность',\n 'однозначность', 'свобода', 'независимость', 'достижения'],\n 4: ['другое'] * 14,\n },\n 'wants': {\n 1: 'безопасности',\n 2: 'любви',\n 3: 'уважения',\n },\n },\n 'не очень': {\n 'description': {\n 'len': 6,\n 1: ['скучно', 'горюю', 'апатия', 'грусть', 'счастья нет', 'печальненько', 'расстройство'],\n 2: ['стремно', 'беспокойство', 'волнение', 'ужас', 'тревожненько', 'растерянность', 'паника'],\n 3: ['мерзко', 'тошниловка', 'отвращение', 'гадко', 'антипатия', 'брезгую', 'отторжение'],\n 4: ['бесит', 'я протестую', 'сейчас порву', 'недоволен', 'я в ярости', 'гневаюсь', 'злюсь'],\n 5: ['чувствую себя нелепо', 'позор мне', 'ущербность', 'некчемность', 'стесняюсь', 'смущаюсь', 'неловко'],\n 6: ['другое'] * 7,\n },\n 'state_is': {\n 'len': 4,\n 1: ['поддержка', 'информация', 'забота', 'запасной план', 'полезность', 'разнообразие', 'деньги', 'эффективность', 'план действий', 'достоверные источники', 'доверие', 'авторитетное мнение', 'стабильность', 
'развитиеуверенность'],\n 2: ['близость', 'чувства', 'внимание', 'индивидуальность', 'красота', 'привилегии', 'творчество', 'признание', 'похвала', 'искренность', 'благодарность', 'восхищение', 'связь', 'привлекательность'],\n 3: ['власть', 'преданность', 'сила', 'честь', 'возможность противостоять', 'ответственность', 'профессионализм', 'справедливость', 'возможность оказать сопротивление', 'автономность', 'однозначность', 'свобода', 'независимость', 'достижения', 'личные границы'],\n 4: ['другое'] * 14,\n },\n 'wants': {\n 1: 'безопасности',\n 2: 'любви',\n 3: 'уважения',\n },\n 'my_advices': {\n 'len': 5,\n 1: return_text_by_filepath('advices/my/1.txt'),\n 2: return_text_by_filepath('advices/my/2.txt'),\n 3: return_text_by_filepath('advices/my/3.txt'),\n 4: return_text_by_filepath('advices/my/4.txt'),\n 5: return_text_by_filepath('advices/my/5.txt'),\n },\n 'partner_advices': {\n 'len': 5,\n 1: return_text_by_filepath('advices/partner/1.txt'),\n 2: return_text_by_filepath('advices/partner/2.txt'),\n 3: return_text_by_filepath('advices/partner/3.txt'),\n 4: return_text_by_filepath('advices/partner/4.txt'),\n 5: return_text_by_filepath('advices/partner/5.txt'),\n },\n },\n}\nanother_form_words = {\n\n}","repo_name":"Pro1ooEgor/IFeelYou_bot","sub_path":"answers.py","file_name":"answers.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11958600083","text":"import jieba\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom diy_exception import DeleteAllWordsError\n\n# 读入停用词表\nstop_words = open('hit_stopwords.txt', 'r', encoding='utf-8').read()\n\n\nclass JaccardSimilarity:\n\n def add_stop_words(self, list):\n temp = []\n for item in list:\n if item not in stop_words:\n temp.append(item)\n if len(temp) == 0:\n # 如果执行停用词删除后,文本为空,抛出异常\n raise DeleteAllWordsError\n return temp\n\n # 杰卡德相似度计算\n # @profile\n def jaccard_similarity(self, s1, s2):\n # 字符表\n chars = ['\\n', '\\t', ',', '。', ';', ':', \"?\", '、', '!', '《', '》',\n '‘', '’', '“', '”', ' ', '1', '2', '3', '4', '5', '6', '7', '8',\n '9', '0', '.', '*', '-', '—', ',', '——', '……', '(', ')', '…',\n '%', '#', '@', '$', '¥', '~', '`', '~', '·']\n # 删除文本中的字符\n for item in chars:\n s1 = s1.replace(item, '')\n s2 = s2.replace(item, '')\n # jieba分词\n result = jieba.cut(s1, cut_all=True)\n\n # 停用词,使用时解开注释\n # jac = JaccardSimilarity()\n # result = jac.add_stop_words(result)\n\n s1 = ' '.join(list(result))\n #jieba分词\n result = jieba.cut(s2, cut_all=True)\n\n # 停用词,使用时解开注释\n # result = jac.add_stop_words(result)\n\n s2 = ' '.join(list(result))\n # 调用sklearn的CountVectorizer,将文本的词语转换为词频矩阵\n cv = CountVectorizer(tokenizer=lambda s: s.split())\n # 语料库\n corpus = [s1, s2]\n # 使用fit_transform函数计算各个词语出现的次数\n vectors = cv.fit_transform(corpus).toarray()\n # 求交集\n numerator = np.sum(np.min(vectors, axis=0))\n # 求并集\n denominator = np.sum(np.max(vectors, axis=0))\n # 计算杰卡德相似度\n return 1.0 * numerator / denominator\n\n\n\n\n","repo_name":"LinRS1999/text-similarity","sub_path":"jaccard.py","file_name":"jaccard.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30516159502","text":"import shutil\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom mmengine.config import Config\nfrom mmengine.utils import mkdir_or_exist\n\ntry:\n from 
model_archiver.model_packaging import package_model\n from model_archiver.model_packaging_utils import ModelExportUtils\nexcept ImportError:\n raise ImportError('`torch-model-archiver` is required.'\n 'Try: pip install torch-model-archiver')\n\n\ndef mmaction2torchserve(\n config_file: str,\n checkpoint_file: str,\n output_folder: str,\n model_name: str,\n label_file: str,\n model_version: str = '1.0',\n force: bool = False,\n):\n \"\"\"Converts MMAction2 model (config + checkpoint) to TorchServe `.mar`.\n\n Args:\n config_file (str): In MMAction2 config format.\n checkpoint_file (str): In MMAction2 checkpoint format.\n output_folder (str): Folder where `{model_name}.mar` will be created.\n The file created will be in TorchServe archive format.\n label_file (str): A txt file which contains the action category names.\n model_name (str | None): If not None, used for naming the\n `{model_name}.mar` file that will be created under `output_folder`.\n If None, `{Path(checkpoint_file).stem}` will be used.\n model_version (str): Model's version.\n force (bool): If True, if there is an existing `{model_name}.mar` file\n under `output_folder` it will be overwritten.\n \"\"\"\n mkdir_or_exist(output_folder)\n\n config = Config.fromfile(config_file)\n\n with TemporaryDirectory() as tmpdir:\n config.dump(f'{tmpdir}/config.py')\n shutil.copy(label_file, f'{tmpdir}/label_map.txt')\n\n args = Namespace(\n **{\n 'model_file': f'{tmpdir}/config.py',\n 'serialized_file': checkpoint_file,\n 'handler': f'{Path(__file__).parent}/mmaction_handler.py',\n 'model_name': model_name or Path(checkpoint_file).stem,\n 'version': model_version,\n 'export_path': output_folder,\n 'force': force,\n 'requirements_file': None,\n 'extra_files': f'{tmpdir}/label_map.txt',\n 'runtime': 'python',\n 'archive_format': 'default'\n })\n manifest = ModelExportUtils.generate_manifest_json(args)\n package_model(args, manifest)\n\n\ndef parse_args():\n parser = ArgumentParser(\n description='Convert MMAction2 models to TorchServe `.mar` format.')\n parser.add_argument('config', type=str, help='config file path')\n parser.add_argument('checkpoint', type=str, help='checkpoint file path')\n parser.add_argument(\n '--output-folder',\n type=str,\n required=True,\n help='Folder where `{model_name}.mar` will be created.')\n parser.add_argument(\n '--model-name',\n type=str,\n default=None,\n help='If not None, used for naming the `{model_name}.mar`'\n 'file that will be created under `output_folder`.'\n 'If None, `{Path(checkpoint_file).stem}` will be used.')\n parser.add_argument(\n '--label-file',\n type=str,\n default=None,\n help='A txt file which contains the action category names. 
')\n parser.add_argument(\n '--model-version',\n type=str,\n default='1.0',\n help='Number used for versioning.')\n parser.add_argument(\n '-f',\n '--force',\n action='store_true',\n help='overwrite the existing `{model_name}.mar`')\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n mmaction2torchserve(args.config, args.checkpoint, args.output_folder,\n args.model_name, args.label_file, args.model_version,\n args.force)\n","repo_name":"open-mmlab/mmaction2","sub_path":"tools/deployment/mmaction2torchserve.py","file_name":"mmaction2torchserve.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"} +{"seq_id":"41206840886","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef J(theta, X, y):\n try:\n return np.sum((y - X.dot(theta))**2) / len(X)\n except:\n return float('inf')\n\ndef dJ_sgd(theta, x_b_i, y_i):\n return x_b_i.T.dot(x_b_i.dot(theta) - y_i) * 2.\n\ndef sgd(X_b, y, initial_theta, n_iters):\n t0, t1 = 5, 50\n def learning_rate(t):\n return t0 / (t + t1)\n theta = initial_theta\n for cur_iter in range(n_iters):\n rand_i = np.random.randint(len(X_b))\n gradient = dJ_sgd(theta, X_b[rand_i], y[rand_i])\n theta = theta - learning_rate(cur_iter) * gradient\n return theta\n\ndef test():\n m = 100000\n x = np.random.normal(size=m)\n X = x.reshape(-1,1)\n y = 4. * x + 3. + np.random.normal(0,3, size=m)\n\n X_b = np.hstack([np.ones((len(X),1)), X])\n initial_theta = np.zeros(X_b.shape[1])\n theta = sgd(X_b, y, initial_theta, n_iters=m//3)\n\n print(theta)\n\nif __name__ == '__main__':\n test()\n\n","repo_name":"yanmxa/machine-learning","sub_path":"gradient/stochastic_grad.py","file_name":"stochastic_grad.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42603432403","text":"import numpy as np\nimport functools \n\ndef bearing(lat1,lon1, lat2,lon2):\n \"\"\"assumes lat/lon values are in decimal degrees\n Return is in degrees Clockwise from North\"\"\"\n lat1 = lat1*np.pi/180.\n lat2 = lat2*np.pi/180.\n lon1 = lon1*np.pi/180.\n lon2 = lon2*np.pi/180.\n # could probably just use np.radians instead...\n\n X = np.cos(lat2) * np.sin(lon2 - lon1)\n Y = np.cos(lat1) * np.sin(lat2) - np.sin(lat1)*np.cos(lat2)*np.cos(lon2-lon1)\n\n b = np.arctan2(X,Y)\n return(np.degrees(b))\n\n\ndef representation(center_lon, # in radians\n center_lat, # in radians\n instrument_tilt, # in degrees, rotation clockwise\n len_lon=180, # extent in km\n len_lat=185, # extent in km\n R=6371): # \"radius\" of earth\n\n tilt_deg = instrument_tilt * 2 * np.pi / 360\n\n x, y, z = (R * np.cos(center_lat) *\n np.sin(center_lon),\n R * np.cos(center_lat) *\n np.cos(center_lon), R * np.sin(center_lat))\n C = np.array([x,y,z]) # center of scene\n\n dlat, dlon = np.sin(-tilt_deg), np.cos(-tilt_deg)\n dir_lon = np.array([-np.sin(center_lat) * np.sin(center_lon) * dlat +\n np.cos(center_lat) * np.cos(center_lon) * dlon,\n -np.sin(center_lat) * np.cos(center_lon) * dlat -\n np.cos(center_lat) * np.sin(center_lon) * dlon,\n np.cos(center_lat) * dlat])\n dir_lon /= np.linalg.norm(dir_lon)\n\n A = len_lon / 2 / R\n midpt_1 = np.cos(A) * C + R * np.sin(A) * dir_lon\n\n dir_lat = np.cross(midpt_1, dir_lon)\n dir_lat /= np.linalg.norm(dir_lat)\n\n B = len_lat/ 2 / R\n\n corners = [np.cos(B) * midpt_1 + R * np.sin(B) * dir_lat]\n corners.append(np.cos(B) * midpt_1 - R * np.sin(B) * 
dir_lat)\n\n midpt_2 = np.cos(A) * C - R * np.sin(A) * dir_lon\n corners.append(np.cos(B) * midpt_2 + R * np.sin(B) * dir_lat)\n corners.append(np.cos(B) * midpt_2 - R * np.sin(B) * dir_lat)\n corners = np.array(corners)\n\n corners_lon_lat = np.array([(np.arctan2(x_ / R, y_ / R),\n np.arcsin(z_ / R)) for x_, y_, z_ in corners])\n\n # now work out halfspace\n\n # these are the edge segmentsin lon/lat space\n supports = [corners_lon_lat[0]-corners_lon_lat[1],\n corners_lon_lat[0]-corners_lon_lat[2],\n corners_lon_lat[1]-corners_lon_lat[3],\n corners_lon_lat[2]-corners_lon_lat[3]]\n\n # normals to each edge segment\n normals = np.array([(s[1],-s[0]) for s in supports])\n pts = [corners_lon_lat[0], # a point within each edge\n corners_lon_lat[0],\n corners_lon_lat[1],\n corners_lon_lat[3]]\n bdry_values = np.array([np.sum(n * p) for n, p in zip(normals, pts)])\n center_values = [np.sum(n * [center_lon, center_lat]) for n in normals]\n center_signs = np.sign(center_values - bdry_values)\n\n def _check(normals, center_signs, bdry_values, lon_lat_vals):\n normal_mul = np.asarray(lon_lat_vals).dot(normals.T)\n values_ = normal_mul - bdry_values[None,:]\n signs_ = np.sign(values_) * center_signs[None,:]\n return np.squeeze(np.all(signs_ == 1, 1))\n\n _check = functools.partial(_check, normals, center_signs, bdry_values)\n\n return corners_lon_lat, _check, normals, bdry_values, center_signs\n\n","repo_name":"geostacks/GeoStacks","sub_path":"geostacks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"9358599341","text":"#encoding: utf-8\r\nfrom subprocess import call\r\nimport os\r\n\r\n# Ahk2exe.exe /in \"MyScript.ahk\" /icon \"MyIcon.ico\" /pass \"CustomPassword\" /NoDecompile\r\n# CompiledScript.exe /force /ErrorStdOut \"likecider - ghosert\"\r\n\r\ndef readContent(filename):\r\n content = ''\r\n with open(filename) as f:\r\n line = f.readline()\r\n while line:\r\n content = content + line\r\n line = f.readline()\r\n return content\r\n\r\nenum_ahk = readContent('enum.ahk')\r\nenum_js = readContent('enum.js')\r\n\r\nimport re\r\n\r\nnew_enum_js = re.sub(r'\\n', '', enum_js)\r\nnew_enum_ahk = re.sub(r'REPLACE_WITH_ENUM_JS', new_enum_js, enum_ahk)\r\n\r\nwith open('new_enum.ahk', 'w') as f:\r\n f.write(new_enum_ahk)\r\n\r\nconsoleString = '\"C:\\Program Files\\AutoHotkey\\Compiler\\Ahk2exe.exe\" /in \"new_enum.ahk\" /NoDecompile'\r\ncall(consoleString)\r\n\r\nos.system('del new_enum.ahk')\r\n\r\n# run for testing purpose.\r\nconsoleString = u'\"new_enum.exe /force /ErrorStdOut \"likecider - ghosert\" \"代理梦想家80后 - ghosert\"'.encode('cp936')\r\nos.system(consoleString)\r\n\r\n","repo_name":"ghosertEclipse/EclipseProject","sub_path":"python-workspace/anypai/src/to_be_decided/build_run_enum.py","file_name":"build_run_enum.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"20651509860","text":"import redis\nimport pymongo\nimport json\nimport time\nfrom pymongo.server_api import ServerApi\n\n# Conexão com o Redis\nr = redis.Redis(\n host='redis-10412.c262.us-east-1-3.ec2.cloud.redislabs.com',\n port=10412,\n password='ian1902'\n)\n\n# Conexão com o MongoDB\nclient = pymongo.MongoClient(\"mongodb+srv://iznthelindo:190204@mercadoLivre.1cjw9r7.mongodb.net/mercadoLivre?retryWrites=true&w=majority\", server_api=ServerApi('1'))\ndb = client.mercadoLivre\ncollection = 
db[\"usuario\"]\n\n# Função para inserir um novo usuário no Redis\ndef iniciarRedis():\n nome = input(\"Digite o nome: \")\n\n # Buscar o cliente no MongoDB\n client_data = collection.find_one({\"nome\": nome})\n\n if client_data:\n client_json = json.dumps(client_data, default=str)\n\n # Armazenar o cliente no Redis\n r.set('usuario', client_json)\n print(\"Cliente inserido no REDIS com sucesso\")\n\n # Obter os dados do Redis\n redis_data = r.get('usuario')\n\n if redis_data:\n redis_dict = json.loads(redis_data)\n\n print(\"\\nCadastrar novo favorito\")\n\n new_fav_obj = {\n \"id\": str(input(\"Digite um id: \")),\n \"favName\": str(input(\"Digite um nome: \")),\n \"preco\": str(input(\"Digite um preço: \"))\n }\n\n # Adicionar o novo favorito ao dicionário\n redis_dict.setdefault(\"favoritos\", []).append(new_fav_obj)\n\n # Converter o dicionário de volta para JSON\n updated_json = json.dumps(redis_dict, default=str)\n\n # Atualizar os dados no Redis\n r.set('usuario', updated_json)\n\n # Atualizar os dados no MongoDB\n collection.update_one({\"_id\": client_data[\"_id\"]}, {\"$set\": {\"favoritos\": redis_dict[\"favoritos\"]}})\n\n print(\"Novo favorito cadastrado\")\n\n Fast_Buy = input(\"Digite o seu Fast Buy: \")\n collection.update_one({\"_id\": client_data[\"_id\"]}, {\"$set\": {\"Fast_Buy\": Fast_Buy}})\n print(\"Fast Buy adicionado! Você tem 10s para comprar tudo pela metade do preço\")\n\n time.sleep(10)\n\n collection.update_one({\"_id\": client_data[\"_id\"]}, {\"$unset\": {\"Fast_Buy\": \"\"}})\n print(\"Fast Buy removido!\")\n\n\n # Timer de 10 segundos\n print(\"Aguardando 10 segundos...\")\n time.sleep(10)\n print(\"Timer de 10 segundos expirado.\")\n\n else:\n print(\"Erro ao obter os dados do Redis\")\n else:\n print(\"Cliente não encontrado no MongoDB\")\n\n\n\n# Chamar a função iniciarRedis para executar o código\niniciarRedis()\n","repo_name":"itupii/naorelacional","sub_path":"redisController.py","file_name":"redisController.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30277249918","text":"# coding=utf-8\nfrom __future__ import absolute_import, unicode_literals, print_function, division\nfrom fabric.context_managers import cd\nfrom fabric.decorators import task, hosts\nfrom fabric.operations import run\n\n\ndef _fresh_clone_repo(virtual_env_dir, repo_folder_name='devbackend', tag='develop'):\n \"\"\"\n Clone the git repo and checkout the correct tag.\n :param virtual_env_dir: directory path to the virtual env on the server\n :param repo_folder_name: name for the top level folder of the new repo\n :param tag: name of the tag/branch to checkout\n :return: None\n \"\"\"\n with cd(virtual_env_dir):\n run('rm -rf {}/'.format(repo_folder_name))\n run('git clone git@104.236.119.129:root/swizly.git {}'.format(repo_folder_name))\n with cd('{}'.format(repo_folder_name)):\n run('git checkout {0}'.format(tag))\n\n\ndef _git_status(virtual_env_dir, repo_folder_name='devbackend'):\n \"\"\"\n Run 'git status' on the git folder on the server\n :param virtual_env_dir: directory path to the virtual env on the server\n :param repo_folder_name: name for the top level folder of the new repo\n :return: None\n \"\"\"\n with cd('{}/{}'.format(virtual_env_dir, repo_folder_name)):\n run('git status')\n\n\ndef _git_pull(virtual_env_dir, repo_folder_name='devbackend', branch='develop'):\n \"\"\"\n Run 'git pull origin ' on the git folder on the server\n :param virtual_env_dir: 
directory path to the virtual env on the server\n :param repo_folder_name: name for the top level folder of the new repo\n :param branch: The name of the branch to pull\n :return: None\n \"\"\"\n with cd('{}/{}'.format(virtual_env_dir, repo_folder_name)):\n run('git pull origin {}'.format(branch))\n\n\ndef _git_reset_hard(virtual_env_dir, repo_folder_name='devbackend', branch=None):\n \"\"\"\n Run 'git reset --hard' on the git folder on the server\n :param virtual_env_dir: directory path to the virtual env on the server\n :param repo_folder_name: name for the top level folder of the new repo\n :param branch: The name of the branch to reset to\n :return: None\n \"\"\"\n with cd('{}/{}'.format(virtual_env_dir, repo_folder_name)):\n if branch:\n run('git reset --hard {}'.format(branch))\n else:\n run('git reset --hard')\n\n\ndef _git_checkout(virtual_env_dir, repo_folder_name='devbackend', branch='develop', options=None):\n \"\"\"\n Run 'git checkout ' on the git folder on the server\n :param virtual_env_dir: directory path to the virtual env on the server\n :param repo_folder_name: name for the top level folder of the new repo\n :param branch: The name of the branch to checkout\n :param options: options for the checkout command eg. '-f'\n :return: None\n \"\"\"\n with cd('{}/{}'.format(virtual_env_dir, repo_folder_name)):\n if options:\n run('git checkout {} {}'.format(options, branch))\n else:\n run('git checkout {}'.format(branch))\n\n\ndef _git_fetch(virtual_env_dir, repo_folder_name='devbackend', options=None):\n \"\"\"\n Run 'git checkout ' on the git folder on the server\n :param virtual_env_dir: directory path to the virtual env on the server\n :param repo_folder_name: name for the top level folder of the new repo\n :param options: options for the fetch command eg. '--all'\n :return: None\n \"\"\"\n with cd('{}/{}'.format(virtual_env_dir, repo_folder_name)):\n if options:\n run('git fetch {}'.format(options))\n else:\n run('git fetch')\n\n\nS01 = 'root@104.236.35.196'\nS02 = 'root@104.236.47.137'\nS03 = 'root@104.236.17.228'\nS04 = 'root@104.131.108.12'\nS05 = 'root@104.131.166.166'\nS06 = 'root@104.236.107.132'\nRABBIT_MQ = 'root@104.236.86.150'\nVIRTUAL_ENV_DIR = '/opt/apps/Swizly-3.0-env'\n\n\n@task(default=True)\n@hosts(S01, S02, S03, S04, S05, S06, RABBIT_MQ)\ndef setup(branch=None):\n \"\"\"\n Setup the git repo on all servers in the cluster. fab setup | fab setup:branch='release/3.0'\n :param branch: optional argument for the git branch to checkout\n :return: None\n \"\"\"\n if branch:\n _fresh_clone_repo(VIRTUAL_ENV_DIR, tag=branch)\n else:\n _fresh_clone_repo(VIRTUAL_ENV_DIR)\n\n\n@task()\n@hosts(S01, S02, S03, S04, S05, S06, RABBIT_MQ)\ndef git_status():\n \"\"\"\n Does 'git status' for each of the servers in the cluster.\n :return: None\n \"\"\"\n _git_status(VIRTUAL_ENV_DIR)\n\n\n@task()\n@hosts(S01, S02, S03, S04, S05, S06, RABBIT_MQ)\ndef git_pull(branch=None):\n \"\"\"\n Does a 'git pull origin ' for each of the servers in the cluster. fab git_pull:branch='release/3.0'\n :return: None\n \"\"\"\n if branch:\n _git_pull(VIRTUAL_ENV_DIR, branch=branch)\n else:\n _git_pull(VIRTUAL_ENV_DIR)\n\n\n@task()\n@hosts(S01, S02, S03, S04, S05, S06, RABBIT_MQ)\ndef git_checkout(options=None, branch=None):\n \"\"\"\n Does a 'git checkout ' for each of the servers in the cluster. 
fab git_checkout:options='-f',branch='release/3.0'\n :return: None\n \"\"\"\n if branch:\n _git_checkout(VIRTUAL_ENV_DIR, branch=branch, options=options)\n else:\n _git_checkout(VIRTUAL_ENV_DIR, options=options)\n\n\n@task()\n@hosts(S01, S02, S03, S04, S05, S06, RABBIT_MQ)\ndef git_reset(branch=None, hard=True):\n \"\"\"\n Does a 'git reset' for each of the servers in the cluster. fab git_reset:branch='origin/develop',hard='False'\n :return: None\n \"\"\"\n # TODO there is a bug here right now with the translation to boolean. Fix later\n if hard:\n _git_reset_hard(VIRTUAL_ENV_DIR, branch=branch)\n else:\n print('Not supported')\n\n\n@task()\n@hosts(S01, S02, S03, S04, S05, S06, RABBIT_MQ)\ndef git_fetch(options=None):\n \"\"\"\n Does a 'git fetch ' for each of the servers in the cluster. fab git_fetch:options='--all'\n :return: None\n \"\"\"\n _git_fetch(VIRTUAL_ENV_DIR, options=options)\n","repo_name":"devinbarry/Pulsar-cluster","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22704152094","text":"#!/usr/bin/env python3\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv(r'C:\\Users\\User\\Desktop\\Datasets\\Bitcoin\\bitcoin.csv',\n sep=',',\n header=0)\n\nos.makedirs(r'C:\\Users\\User\\Desktop\\abc', exist_ok=True)\n\n# Another useful dataset exploration technique involves comparing multiple columns of the dataset\n# The enumerate functions will generate pairs of indexes elements\nfor col1_idx, column1 in enumerate(df.columns):\n for col2_idx, column2 in enumerate(df.columns):\n if col1_idx < col2_idx:\n print(f'Generating {column1} to {column2} plot')\n fig, axes = plt.subplots(1, 1, figsize=(5, 5))\n axes.scatter(df[column1], df[column2], label=f'{column1} to {column2}', color='green', marker='x')\n axes.set_title(f'{column1} to {column2}')\n axes.set_xlabel(column1)\n axes.set_ylabel(column2)\n axes.legend()\n plt.savefig(f'C:\\\\Users\\\\User\\\\Desktop\\\\abc_{column1}_{column2}_scatter.png', dpi=300)\n plt.close(fig)\n#\n# plt.close()\n","repo_name":"Zarifpayam/viz-homework","sub_path":"Homework7.py","file_name":"Homework7.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19146374233","text":"import requests\r\nfrom ipywidgets import Label, BoundedFloatText, BoundedIntText, Dropdown, Button, Output, VBox\r\n\r\nprescribe_label = Label('Drug prescription prediction for age, gender, bp, cholesterol and \"Na to K\"')\r\nage_text = BoundedIntText(min=16, max=100, value=47, description=\"Age:\", disabled=False)\r\ngender_dropdown = Dropdown(options=['F', 'M'], description='Gender:', disabled=False)\r\nbp_dropdown = Dropdown(options=['HIGH', 'LOW', 'NORMAL'], value=\"LOW\", description='BP:', disabled=False)\r\ncholesterol_dropdown = Dropdown(options=['HIGH', 'NORMAL'], description='Cholesterol:', disabled=False)\r\nna_to_k_text = BoundedFloatText(min=0.0, max=50.0, value=14, description=\"Na to K\", disabled=False)\r\nprescribe_button = Button(description=\"Presribe\")\r\nprescribe_output = Output()\r\n\r\n\r\n\r\n# Button click event handlers ...\r\ndef prescribe_button_on_click(b):\r\n request_url = 
f\"https://itsshaikaslamwebapp22.azurewebsites.net//drug?Age={age_text.value}&Sex={gender_dropdown.value}&BP={bp_dropdown.value}&Cholesterol={cholesterol_dropdown.value}&Na_to_K={na_to_k_text.value}\"\r\n response = requests.get(request_url)\r\n recommended_drug = response.json()[\"recommended_drug\"]\r\n\r\n prescribe_output.clear_output()\r\n with prescribe_output:\r\n\r\n print(f\"The recommended drug is {recommended_drug}\")\r\n \r\nprescribe_button.on_click(prescribe_button_on_click)\r\n\r\nvbox_prescribe = VBox([prescribe_label, age_text, gender_dropdown, bp_dropdown, cholesterol_dropdown, na_to_k_text, prescribe_button, prescribe_output])\r\n\r\n ","repo_name":"itsshaikaslam/E2Emodels","sub_path":"mywebapp.py","file_name":"mywebapp.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42463313067","text":"#!/usr/bin/env python\nfrom typing import List, Tuple\n\nimport sudachipy\n\nimport segmenter_lib\n\n\nclass JaTokenizer():\n\t_MODE = sudachipy.SplitMode.C\n\n\tdef __init__(self) -> None:\n\t\tself._tokenizer = sudachipy.Dictionary().create()\n\n\tdef __call__(self, text: bytes) -> Tuple[List[int], List[int]]:\n\t\tunicode_text = text.decode()\n\t\ttokens = self._tokenizer.tokenize(unicode_text)\n\t\tstarts = []\n\t\tends = []\n\t\tfor token in tokens:\n\t\t\tstarts.append(len(unicode_text[:token.begin()].encode()))\n\t\t\tends.append(len(unicode_text[:token.end()].encode()))\n\t\treturn starts, ends\n\n\nif __name__ == \"__main__\":\n\tsegmenter_lib.Main(JaTokenizer())\n","repo_name":"diasurgical/devilutionX","sub_path":"tools/segmenter/segment_ja.py","file_name":"segment_ja.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":7280,"dataset":"github-code","pt":"3"} +{"seq_id":"14525365897","text":"import os\nimport unittest\nfrom testcase.utils.Constant import Constant\nfrom testcase.utils.CommonSH import CommonSH\nfrom testcase.utils.Logger import Logger\nfrom yat.test import Node\nfrom yat.test import macro\n\n\nPrimary_SH = CommonSH('PrimaryDbUser')\n\n\n@unittest.skipIf(3 != Primary_SH.get_node_num(), '非1+2环境不执行')\nclass Tools(unittest.TestCase):\n def setUp(self):\n self.log = Logger()\n self.log.info(f'-----{os.path.basename(__file__)} start-----')\n self.root_com_pri = CommonSH('PrimaryRoot')\n self.root_node_pri = Node('PrimaryRoot')\n self.root_node_sta1 = Node('Standby1Root')\n self.root_node_sta2 = Node('Standby2Root')\n self.constant = Constant()\n self.ssh_file = '~/.ssh/'\n self.ssh_path_bak = os.path.join(macro.DB_INSTANCE_PATH, 'ssh')\n self.check_res = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8',\n 'A9', 'A10', 'A11', 'A12', 'A13', 'A14']\n text = \"-----step1: 创建root互信; expect: 执行成功-----\"\n self.log.info(text)\n self.host_tuple = (self.root_node_pri.ssh_host,\n self.root_node_sta1.ssh_host,\n self.root_node_sta2.ssh_host)\n self.params = {'-f': 'test_hosts'}\n self.root_com_pri.exec_gs_sshexkey(macro.DB_SCRIPT_PATH,\n *self.host_tuple,\n **self.params)\n\n text = '----step2: 破坏互信(删除互信文件,杀掉root互信进程); expect: 执行成功-----'\n self.log.info(text)\n rm_cmd = f\"rm -rf {self.ssh_file}/* ;\" \\\n \"ps -aux | grep root | grep ssh-agent | grep \" \\\n \"/root/gaussdb_tmp/gauss_socket_tmp | grep -v PID | \" \\\n \"awk '{{print $2}}' | xargs kill -9\"\n self.log.info(rm_cmd)\n rm_msg = self.root_node_pri.sh(rm_cmd).result()\n self.log.info(f'rm_msg = {rm_msg}')\n\n def test_server_tools(self):\n 
self.log.info('----step3: 互信状态不正常时检查操作系统; '\n 'expect: 执行失败----')\n hostname_cmd = f'''hostname; '''\n hostname1 = self.root_node_sta1.sh(hostname_cmd).result()\n self.log.info(hostname1)\n\n checkos_cmd = f'''source {macro.DB_ENV_PATH};\n expect <\")\n directory = input(\"[*] Enter the path to work directory --> \")\n columns = int(input(\"[*] Enter number of columns --> \"))\n table_filename = input(\"[*] Enter file name for table --> \")\n\n doc_table = DocxTable(columns=columns, filename=table_filename)\n table_handler = doc_table.get_table()\n\n count = 0\n fulfil_table(count, \"\", \"\", \"\", table_handler)\n for file in sorted(os.listdir(path=directory), key=lambda x: int(x[11:14])):\n path = directory + '/' + file\n if file.endswith('.pdf'):\n title = PDFParser.get_title_or_description(path)\n description = PDFParser.get_title_or_description(path, description=True)\n elif file.endswith('.docx'):\n title, description = DocxParser.get_title_or_description(path)\n else:\n title = file\n description = f'Unknown type of file ---> {file}'\n count += 1\n fulfil_table(count, file, title, str(description), table_handler)\n\n doc_table.save_table()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jbul4sec/docs_parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34794297841","text":"from __future__ import annotations\nfrom typing import TYPE_CHECKING, Dict, Any, Optional\n\nimport configparser\nimport logging\nimport os\nimport sys\n\nfrom deepmerge import always_merger\n\nif TYPE_CHECKING:\n from configparser import ConfigParser\n\nlogger = logging.getLogger(__file__)\n\n\nLIBDIR = os.path.dirname(os.path.realpath(__file__))\nlogger.debug(\"libdir: %s\", LIBDIR)\n\n\nUSER_STORAGE_DIR = os.path.join(os.path.expanduser(\"~\"), \".orion.d\")\nlogger.debug(\"userdir: %s\", USER_STORAGE_DIR)\n\n\nDEFAULT_CONFIG_FILE = \"orion.cfg\"\nUSER_CONFIG_PATH = os.path.join(USER_STORAGE_DIR, DEFAULT_CONFIG_FILE)\nlogger.debug(\"user config: %s\", USER_CONFIG_PATH)\n\n\nDEFAULT_CONFIG = {\n \"logging\": {\n \"loggers\": \"all\",\n \"debug_logging\": True,\n \"debug_level\": \"info\",\n }\n}\n\n\nclass OrionConfig:\n\n def __init__(\n self,\n options: Optional[Dict[str, Any]] = None,\n config_path: Optional[str] = None,\n ) -> None:\n if options is not None:\n config = always_merger.merge(DEFAULT_CONFIG, options)\n self.cfg = generate_config(config)\n else:\n self.cfg = generate_config()\n\n loggers = self.cfg.get(\"logging\", \"loggers\")\n self.loggers = loggers.replace(\" \", \"\").split(\",\")\n self.debug_logging = self.cfg.getboolean(\"logging\", \"debug_logging\")\n self.debug_level = self.cfg.get(\"logging\", \"debug_level\")\n\n with open(config_path if config_path else USER_CONFIG_PATH, \"w\") as fp:\n self.cfg.write(fp)\n\n\ndef generate_config(options: Optional[Dict[str, Any]] = None) -> ConfigParser:\n cfg = configparser.ConfigParser()\n populate_config(cfg, options if options else DEFAULT_CONFIG)\n return cfg\n\n\ndef populate_config(config: ConfigParser, data: Dict[str, Any]) -> None:\n for k, v in data.items():\n try:\n config.add_section(k)\n except configparser.DuplicateSectionError:\n pass\n for option, value in v.items():\n config.set(k, option, 
str(value))\n","repo_name":"krummja/pygame_orion","sub_path":"pygame_orion/core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21575858415","text":"#!/usr/bin/env python3\n#Testing basic label detection using aws rekognition\n\nimport boto3 as boto\nimport logging\n\n#Setting basic logger config\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s %(threadName)s', filename=\"test.log\", filemode=\"a\")\n\n#Detecting labels from images\ndef detect_labels(bucket, key, max_labels=20, min_confidence=90, region=\"us-east-2\"):\n logging.info(\"Starting rekognition service\")\n rekognition = boto.client(\"rekognition\", region)\n try:\n response = rekognition.detect_labels(\n Image={\n \"S3Object\": {\n \t \"Bucket\": bucket,\n \t \"Name\": key,\n }\n },\n MaxLabels=max_labels,\n MinConfidence=min_confidence)\n logging.info(\"Response received\")\n return response['Labels']\n except Exception as e:\n logging.error(e)\n\n\nif __name__ == \"__main__\":\n labels = detect_labels(\"ubunturekbucket1\", \"test.jpg\")\n print(labels)\n for label in labels:\n logging.warning(\"Output: {Name} - {Confidence}\".format(**label))\n","repo_name":"vedant-jad99/Cloud-Computing-Projects-and-Assignments-CS356","sub_path":"Project Assignment-3/tests/recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23546800063","text":"import RPi.GPIO as GPIO\nimport time\nimport random\n\n\n\n\ndef init_sensor():\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(16, GPIO.OUT)\n GPIO.setup(20, GPIO.IN)\n GPIO.output(16, True)\n print(\"INIT Funktioniert\")\n\n\ndef get_data():\n name = \"Kontakt:\"\n return [name, GPIO.input(20), \"\"]","repo_name":"OE7DIO/rasp_sensor","sub_path":"customTestRaspi.py","file_name":"customTestRaspi.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"6983875576","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nfrom .readiness_probe_views import readiness_probe\n\n\nDJANGO_ADMIN_BASE_URL = 'api/admin-control-center/'\n\n\nurlpatterns = [\n path(DJANGO_ADMIN_BASE_URL, admin.site.urls),\n path('api/readiness-probe//', readiness_probe),\n path('api/accounts/', include('accounts.urls')),\n path('api/categories/', include('categories.urls')),\n path('api/orders/', include('orders.urls')),\n path('api/v/', include('shoutouts.urls')),\n path('api/request-shoutout/', include('request_shoutout.adapters.http.urls')),\n path('api/talents/', include('talents.urls')),\n path('api/wirecard/', include('wirecard.urls')),\n]\n","repo_name":"michel-rodrigues/viggio_backend","sub_path":"app/project_configuration/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12597996873","text":"\"\"\"\nEnrollment track related signals.\n\"\"\"\n\n\nfrom django.dispatch import Signal\n\n# The purely documentational providing_args argument for Signal is deprecated.\n# So we are moving the args to a comment.\n\n# providing_args=['user', 'course_key', 'mode', 'countdown']\nENROLLMENT_TRACK_UPDATED = Signal()\n\n# providing_args=[\"course_enrollment\", 
\"skip_refund\"]\nUNENROLL_DONE = Signal()\n\n# providing_args=[\"event\", \"user\", \"course_id\", \"mode\", \"cost\", \"currency\"]\nENROLL_STATUS_CHANGE = Signal()\n\n# providing_args=[\"course_enrollment\"]\nREFUND_ORDER = Signal()\n\nUSER_EMAIL_CHANGED = Signal()\n","repo_name":"openedx/edx-platform","sub_path":"common/djangoapps/student/signals/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"39852038607","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 8 16:57:02 2021\n\n@author: leonaodole\n\n\n\"\"\"\n#Required Packages \nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import model_selection\nimport math\nimport numpy as np\n\n\n \ndef readInData(filename): \n dataset = pd.read_csv(filename, header = None)\n return dataset\n \n \ndef kNN_Classifier():\n #Load Heart Data\n \n #Build Classifier\n \n #Train on trainSet.txt \n #Use leave one out cv with k = {1,2,...,10}\n return\n\n#Load Heart Data\ntrain_data = readInData(\"./heart_trainSet.txt\")\ntrain_labels = readInData(\"./heart_trainLabels.txt\")\ntrain_labels = train_labels.values.ravel()\nkNN = KNeighborsClassifier(n_neighbors=5)\n\n#Create Cross Validation sets \ncv_splits = model_selection.KFold(n_splits = 5)\ni=0\nfor i_train, i_test in cv_splits.split(train_data,train_labels):\n print(\"Train:\", i_train, \"Test:\", i_test,i)\n scores = model_selection.cross_val_score(kNN, train_data, train_labels, scoring='accuracy' ,cv= cv_splits)\n print('Accuracy: %.3f (%.3f)' % (scores[i], scores[i]))\n i=i+1","repo_name":"eodole/Machine-Learning-Projects","sub_path":"ClassifierML/hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21375257210","text":"import pickle\nimport utils\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nfrom utils import MLP\nfrom utils import CMA_info\n\nCURRENT_MODEL = None\nN_dim = 5\ndsize = 100000\n\nx1_min = -10\nx1_max = 10\n\nx2_min = 20\nx2_max = 40\n\ntest_min = -1\ntest_max= 1\n\nlimits = [x1_min, x1_max, x2_min, x2_max]\ntest_limits = [test_min, test_max]\n\nnum_samples = 50\n\nfor repeat_iteration in range(500):\n trained_models = {}\n attack_output = {}\n ds = utils.make_dataset(dsize, N_dim, limits)\n trained_models[N_dim] = utils.train_model(N_dim, ds, 'dset_size_%s_find_failures'%dsize, disable_progress = True)\n model_to_attack = trained_models[N_dim].cpu()\n attack_output[N_dim] = utils.cma_experiment(model_to_attack, N_dim, test_limits, limits, 10, disable_progress = True)\n attack_rate = len(attack_output[N_dim].in_dist_advs)/len(attack_output[N_dim].starts)\n print(attack_rate)\n if attack_rate < 0.4:\n iter_info = [trained_models, attack_output]\n with open('failed_iter_10.p','wb') as F:\n pickle.dump(iter_info, F)\n print(\"Found, stopping!\")\n break\n","repo_name":"Spandan-Madan/in_distribution_adversarial_examples","sub_path":"training_models/uniform_data_experiments/finding_failures.py","file_name":"finding_failures.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"38582522618","text":"import json\nimport re\nimport plotly\nimport joblib\nimport pandas as pd\nfrom nltk.tokenize import 
word_tokenize\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sqlalchemy import create_engine\nfrom visualization import PlotBuilder\n\napp = Flask(__name__)\n\n# Regex to match URLs\nurl_regex = r\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\"\n\n# Lemmatizer based on the WordNet corpus\nlemmatzr = WordNetLemmatizer()\n\nstopwords = stopwords.words(\"english\")\n\n\ndef tokenize(text):\n \"\"\"\n Cleans, normalizes and converts the text into an array of lemmatized tokens\n :text: Piece to be tokenized\n :return: List of tokens corresponding to the lemmatized words of the text\n \"\"\"\n # Replace URLs with a fixed placeholder\n clean_text = re.sub(url_regex, \"urlplaceholder\", text)\n\n # Make the text all lowercase and clean it of any remaining special characters\n clean_text = re.sub(r\"[^0-9a-zA-Z]\", \" \", clean_text.lower())\n\n # Convert the text into a list of tokens, each corresponding to a word and remove heading and trailing spaces\n word_tokens = [word.strip() for word in word_tokenize(clean_text)]\n\n return [lemmatzr.lemmatize(word) for word in word_tokens if word not in stopwords]\n\n\n# Load data\nengine = create_engine('sqlite:///data/DisasterResponse.db')\ndf = pd.read_sql_table('Message', engine)\n\n# Instantiate a PlotBuilder and set the datasource to generate plots\nplot_builder = PlotBuilder(df)\n\n# Load model\nmodel = joblib.load(\"models/classifier.pkl\")\n\n\n# Index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n # Create visuals by using the PlotBuilder\n graphs = [\n plot_builder.build_genre_totals_bar(),\n plot_builder.build_category_totals_bar(),\n plot_builder.build_message_length_box()\n ]\n \n # Encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # Render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# Web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n # Save user input in query\n query = request.args.get('query', '') \n\n # Use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file. 
\n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"s-londono/disasterresponse","sub_path":"app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72858043922","text":"import torch\n\ndef cross_entropy(outputs, targets):\n # [batch, seq_length, channel, width, height]\n return torch.sum(-targets*torch.log(outputs)-(1-targets)*torch.log(1-outputs))\n\ndef psnr(outputs, targets):\n # [batch, seq_length, channel, width, height]\n num_pixels = outputs.shape[2] * outputs.shape[3] * outputs.shape[4]\n batch_size = outputs.shape[0]\n seq_length = outputs.shape[1]\n psnr = torch.zeros((outputs.shape[0],outputs.shape[1]))\n for i in range(batch_size):\n for j in range(seq_length):\n mse = torch.mean((outputs[i,j,:,:,:] - targets[i,j,:,:,:])**2)\n psnr[i,j] = 20 * torch.log10(torch.max(outputs[i,j,:,:,:])) - 10 * torch.log10(mse)\n return torch.sum(psnr)\n\n","repo_name":"liluxuan1997/video-prediction","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"561535545","text":"import boto3\nimport requests\nimport requests_cache\nimport tempfile\n\nfrom telethon.tl.functions.channels import GetFullChannelRequest, JoinChannelRequest, GetMessagesRequest\nfrom telethon.tl.functions.messages import ImportChatInviteRequest\nfrom telethon.tl.types import UpdateNewChannelMessage, Message, ChatPhotoEmpty, MessageService, MessageActionChatEditPhoto\nfrom textblob import TextBlob\nfrom . 
import db, config\nfrom .connection import client\n\nimport logging\n\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nlog.addHandler(logging.StreamHandler())\n\ns3_client = boto3.session.Session().client(\n 's3',\n region_name=config.S3_REGION_NAME,\n endpoint_url=config.S3_ENDPOINT_URL,\n aws_access_key_id=config.S3_KEY_ID,\n aws_secret_access_key=config.S3_SECRET\n)\n\nrequests_cache.install_cache('s3_cache')\n\n\ndef start():\n client.add_update_handler(_handle_update)\n client.idle()\n client.disconnect()\n\ndef join_chat(reference):\n \"\"\"\n Where reference can either be of the form of\n https://t.me/joinchat/BlIEfhCm--HBjli-lCH0Ew\n or\n https://t.me/hello1290\n \"\"\"\n if '/joinchat/' in reference:\n client(ImportChatInviteRequest(reference.split('/')[-1]))\n else:\n entity = client.get_entity(reference)\n client(JoinChannelRequest(entity))\n\n\ndef _handle_update(update):\n if isinstance(update, UpdateNewChannelMessage):\n _handleNewChannelMessage(update)\n\ndef _handleNewChannelMessage(update):\n log.debug(\"Handling update: %s \" % update)\n if isinstance(update.message, MessageService) and isinstance(update.message.action, MessageActionChatEditPhoto):\n store_profile_photo(update.message.to_id, ignore_cache=True)\n elif isinstance(update.message, Message):\n peer_channel = update.message.to_id\n full_channel = client(GetFullChannelRequest(peer_channel))\n pinnedMessage = None\n if full_channel.full_chat.pinned_msg_id:\n pinnedMessage = client(\n GetMessagesRequest(peer_channel, [full_channel.full_chat.pinned_msg_id])\n ).messages[0].message\n sentiment = TextBlob(update.message.message)\n db.ChatUpdate(\n channel_id=peer_channel.channel_id,\n from_id=update.message.from_id,\n title=full_channel.chats[0].title,\n about=full_channel.full_chat.about,\n pinnedMessage=pinnedMessage,\n sentimentPolarity=sentiment.polarity,\n sentimentSubjectivity=sentiment.subjectivity,\n username=full_channel.chats[0].username,\n participants_count=full_channel.full_chat.participants_count\n ).save()\n store_profile_photo(update.message.to_id)\n\ndef store_profile_photo(channel, ignore_cache=False):\n def fetch(filename, download_big=False):\n url = '%s/%s' % (config.IMAGE_SERVER_URL, filename)\n if ignore_cache or requests.head(url).status_code != 200:\n entity = client.get_entity(channel)\n if isinstance(entity.photo, ChatPhotoEmpty):\n s3_client.upload_file(config.NOAVATAR_PICTURE, config.S3_BUCKET, filename, dict(ACL=\"public-read\"))\n else:\n with tempfile.SpooledTemporaryFile(mode='r+b') as f:\n client.download_profile_photo(entity, f, download_big=download_big)\n f.seek(0)\n s3_client.upload_fileobj(f, config.S3_BUCKET, filename, dict(ACL=\"public-read\"))\n\n fetch('%s.jpeg' % channel.channel_id)\n fetch('%s-big.jpeg' % channel.channel_id, download_big=True)\n","repo_name":"hcv57/coinrankchat-telegram","sub_path":"coinrankchat/telegram/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7235581565","text":"import json\nimport subprocess\nimport os\nimport glob\nimport base64\nimport sys\nimport shlex\nfrom concurrent.futures import ThreadPoolExecutor, wait\n\nclass Clippy:\n config = dict()\n args = [\n \"cargo\",\n \"clippy\",\n \"--message-format=json\",\n \"--verbose\"\n ]\n compiler_output = list()\n compiler_returncode = 0\n github_output = list()\n\n # execute the clippy command\n def exec(self, dir):\n command = 
self.build_command()\n print(\"-- COMMAND: \", command)\n\n try:\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, cwd=dir)\n output = process.stdout.readlines()\n process.wait()\n\n print('-- Return code; ', process.returncode)\n\n # 101 seems to be a bug\n # if (process.returncode != 0) and (process.returncode != 101):\n if process.returncode != 0:\n print('Non-zero exit code; ', process.returncode)\n self.compiler_returncode = process.returncode\n\n return output\n except subprocess.CalledProcessError as e:\n print(\"Error in calling cargo; \", e.output)\n return None\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n print(sys.exc_info())\n return None\n\n # build command with args\n def build_command(self):\n gen_args = []\n\n if 'ssh' in self.config and self.config['ssh']:\n gen_args.append('eval $(ssh-agent -s)')\n gen_args.append('&&')\n gen_args.append('ssh-add /root/.ssh/id_rsa')\n gen_args.append('&&')\n\n if 'github_token' in self.config and self.config['github_token']:\n gen_args.append(\"CARGO_NET_GIT_FETCH_WITH_CLI=true\")\n\n return ' '.join(gen_args + self.args)\n\n # main handler\n def run(self, dir):\n localPaths = []\n\n if 'path_glob' in self.config:\n for path in glob.glob(\"/\".join([dir, self.config['path_glob']])):\n if os.path.exists(\"\".join([path, \"Cargo.toml\"])):\n print('Globbed path, found Cargo at: ', path)\n localPaths.append(path)\n\n print(\"Creating executor, theads of;\", self.config['threads'])\n executor = ThreadPoolExecutor(max_workers=self.config['threads'])\n futures = []\n for path in localPaths:\n print(\"-- THREAD: Creating submission for; \", path)\n futures.append(executor.submit(self.compile, path))\n\n # wait for all clippy's to complete\n wait(futures)\n else:\n self.compile(dir)\n\n for message in self.github_output:\n print(message.replace('\\n', '%0A').replace('\\r', '%0D'))\n\n # if we got a non-zero exit code, we should fail the job\n if self.compiler_returncode != 0:\n exit(1)\n\n # compile the command and output together\n def compile(self, dir):\n output = self.exec(dir)\n\n if output == None:\n print(\"Failed to lint\")\n exit(1)\n\n self.process_output(output, dir)\n self.generate_github_output()\n\n # process clippy output\n def process_output(self, output, dir):\n for line in output:\n try:\n line = line.strip()\n json_line = json.loads(line)\n\n if \"reason\" in json_line:\n if json_line['reason'] == \"compiler-message\":\n # we'll accept this and add it to our compiler output\n self.compiler_output.append({\"json\": json_line, \"path\": dir})\n # not a json line so, we'll skip\n except AttributeError:\n print('Skipping line in output; ', line)\n except ValueError:\n print('Skipping line in output; ', line)\n\n # convert compiler output to github output\n def generate_github_output(self):\n for json_line in self.compiler_output:\n gh_output = self.line_compiler_to_gh(json_line['json'], json_line['path'])\n\n if gh_output != None:\n self.github_output.append(gh_output)\n\n # convert each compiler line to a valid github warning or error\n def line_compiler_to_gh(self, json_line, dir):\n # validate we have spans\n if 'spans' not in json_line['message']:\n return None\n\n level = json_line['message']['level']\n message = json_line['message']['rendered']\n\n # likely a compiler error or dependency issue\n if not json_line['message']['spans'] and json_line['message']['level'] == 'error':\n return f\"::error::{json_line['message']['message']} from {json_line['package_id']}\"\n\n # loop through 
spans for this error\n for span in json_line['message']['spans']:\n # skip any non-primary spans\n if span['is_primary'] is not True:\n continue\n\n # assign initial path\n path = span['file_name']\n\n if 'path_glob' in self.config:\n path = dir.replace(self.config['base_dir'] + \"/\", \"\") + span['file_name']\n\n if level == \"warning\":\n return f\"::warning file={path},line={span['line_start']},col={span['column_start']}::{message}\"\n\n if level == \"error\":\n return f\"::error file={path},line={span['line_start']},col={span['column_start']}::{message}\"\n\n print(\"Line was missing compiler information\")\n return None\n\n # enable SSH key for private cargo repositories\n def enable_ssh(self, arg_git_ssh_key):\n f = open(\"/root/.ssh/id_rsa\", \"wb\")\n f.write(base64.b64decode(arg_git_ssh_key))\n f.close()\n os.chmod(\"/root/.ssh/id_rsa\", 0o600)\n\n subprocess.run('git config --global url.\"git@github.com:\".insteadOf \"https://github.com/\"'.split(\" \"), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n subprocess.run('ssh-keyscan github.com >> /root/.ssh/known_hosts'.split(\" \"), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n self.config['ssh'] = True\n\n # enable github pat token\n def enable_github_token(self, arg_github_token):\n token = base64.b64encode(f'pat:{arg_github_token}'.encode('ascii'))\n subprocess.run(f'git config --global \"http.https://github.com/.extraheader\" \"authorization: Basic {token.decode()}\"', shell=True)\n\n if 'ssh_path_rewrite' in self.config and self.config['ssh_path_rewrite']:\n subprocess.run(f'git config --global --add url.https://github.com/.insteadOf \"git@github.com:\"', shell=True)\n subprocess.run(f'git config --global --add url.https://github.com/.insteadOf \"ssh://git@github.com:\"', shell=True)\n subprocess.run(f'git config --global --add url.https://github.com/.insteadOf \"ssh://git@github.com/\"', shell=True)\n\n # switch to a different verison of rust stable\n def switch_rust_version(self, arg_rust_version):\n subprocess.call(['rustup', 'toolchain', 'install', arg_rust_version, '--profile', 'minimal'])\n subprocess.call(['rustup', 'default', arg_rust_version])\n subprocess.call(f'rustup target add x86_64-unknown-linux-musl', shell=True)\n subprocess.call(['rustup', 'component', 'add', 'clippy'])\n subprocess.call(['cargo', 'clippy', '--version'])\n\n # useful for target issues\n subprocess.run(f'rustup component list --installed', shell=True)\n subprocess.run(f'rustup show', shell=True)\n\n def __init__(self):\n # inputs\n self.config['base_dir'] = '/github/workspace'\n arg_path_glob = os.environ.get('INPUT_PATH_GLOB')\n arg_threads = os.environ.get('INPUT_THREADS')\n arg_clippy_args = os.environ.get('INPUT_CLIPPY_ARGS')\n arg_git_ssh_key = os.environ.get('INPUT_GIT_SSH_KEY')\n arg_rust_version = os.environ.get('INPUT_RUST_VERSION')\n arg_github_pat = os.environ.get('INPUT_GITHUB_TOKEN')\n arg_ssh_path_rewrite = os.environ.get('INPUT_SSH_PATH_REWRITE')\n\n # update os environ\n os.environ['HOME'] = '/root'\n\n # check for valid arguments\n if arg_path_glob != None and len(arg_path_glob) > 0:\n self.config['path_glob'] = arg_path_glob\n\n if arg_clippy_args != None and len(arg_clippy_args) > 0:\n self.args += shlex.split(arg_clippy_args)\n\n if arg_threads != None and arg_threads.isdigit():\n self.config['threads'] = int(arg_threads)\n else:\n self.config['threads'] = 1\n\n if arg_git_ssh_key != None and len(arg_git_ssh_key) > 0:\n self.enable_ssh(arg_git_ssh_key)\n\n if arg_github_pat != None and 
len(arg_github_pat) > 0:\n if arg_ssh_path_rewrite != None and len(arg_ssh_path_rewrite) > 0:\n self.config['ssh_path_rewrite'] = True\n\n self.enable_github_token(arg_github_pat)\n self.config['github_token'] = True\n\n if arg_rust_version != None and len(arg_rust_version) > 0:\n self.switch_rust_version(arg_rust_version)\n\n # run app\n self.run(self.config['base_dir'])\n\nClippy()","repo_name":"actions-marketplace-validations/qernal_github-actions-rust-clippy","sub_path":"src/clippy.py","file_name":"clippy.py","file_ext":"py","file_size_in_byte":9155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42051936746","text":"import os\nimport sys\nimport math\n\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\nsrc_dir = os.path.dirname(os.path.realpath(__file__))\nwhile not src_dir.endswith(\"src\"):\n src_dir = os.path.dirname(src_dir)\nif src_dir not in sys.path:\n sys.path.append(src_dir)\n\nfrom utils.torch_utils import to_cpu, _sigmoid\n\n\ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\n\ndef _transpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = _gather_feat(feat, ind)\n return feat\n\n\ndef _neg_loss(pred, gt, alpha=2, beta=4):\n ''' Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred (batch x c x h x w)\n gt_regr (batch x c x h x w)\n '''\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n\n neg_weights = torch.pow(1 - gt, beta)\n\n loss = 0\n\n '''print(pred.shape)\n print(pred.device)\n\n print(pos_inds.shape)\n print(pos_inds.device)'''\n\n pos_loss = torch.log(pred) * torch.pow(1 - pred, alpha) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, alpha) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\n\nclass FocalLoss(nn.Module):\n '''nn.Module warpper for focal loss'''\n\n def __init__(self):\n super(FocalLoss, self).__init__()\n self.neg_loss = _neg_loss\n\n def forward(self, out, target):\n return self.neg_loss(out, target)\n\n\nclass L1Loss(nn.Module):\n def __init__(self):\n super(L1Loss, self).__init__()\n\n def forward(self, output, mask, ind, target):\n pred = _transpose_and_gather_feat(output, ind)\n mask = mask.unsqueeze(2).expand_as(pred).float()\n loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n loss = loss / (mask.sum() + 1e-4)\n return loss\n\n\nclass L1Loss_Balanced(nn.Module):\n \"\"\"Balanced L1 Loss\n paper: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n Code refer from: https://github.com/OceanPang/Libra_R-CNN\n \"\"\"\n\n def __init__(self, alpha=0.5, gamma=1.5, beta=1.0):\n super(L1Loss_Balanced, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n assert beta > 0\n self.beta = beta\n\n def forward(self, output, mask, ind, target):\n pred = _transpose_and_gather_feat(output, ind)\n mask = mask.unsqueeze(2).expand_as(pred).float()\n loss = self.balanced_l1_loss(pred * mask, target * mask)\n loss = loss.sum() / (mask.sum() + 1e-4)\n\n return loss\n\n def 
balanced_l1_loss(self, pred, target):\n assert pred.size() == target.size() and target.numel() > 0\n\n diff = torch.abs(pred - target)\n b = math.exp(self.gamma / self.alpha) - 1\n loss = torch.where(diff < self.beta,\n self.alpha / b * (b * diff + 1) * torch.log(b * diff / self.beta + 1) - self.alpha * diff,\n self.gamma * diff + self.gamma / b - self.alpha * self.beta)\n\n return loss\n\nclass BinRotLoss(nn.Module):\n def __init__(self):\n super(BinRotLoss, self).__init__()\n\n def forward(self, output, mask, ind, rotbin, rotres):\n pred = _transpose_and_gather_feat(output, ind)\n loss = compute_rot_loss(pred, rotbin, rotres, mask)\n return loss\n\ndef compute_res_loss(output, target):\n return F.smooth_l1_loss(output, target, reduction='elementwise_mean')\n\n# TODO: weight\ndef compute_bin_loss(output, target, mask):\n mask = mask.expand_as(output)\n output = output * mask.float()\n return F.cross_entropy(output, target.long(), reduction='elementwise_mean')\n\ndef compute_rot_loss(output, target_bin, target_res, mask):\n # output: (B, 50, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,\n # bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]\n # target_bin: (B, 50, 2) [bin1_cls, bin2_cls]\n # target_res: (B, 50, 2) [bin1_res, bin2_res]\n # mask: (B, 50, 1)\n # import pdb; pdb.set_trace()\n output = output.view(-1, 8)\n target_bin = target_bin.view(-1, 2)\n target_res = target_res.view(-1, 2)\n mask = mask.view(-1, 1)\n loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)\n loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)\n loss_res = torch.zeros_like(loss_bin1)\n if target_bin[:, 0].nonzero().shape[0] > 0:\n idx1 = target_bin[:, 0].nonzero()[:, 0]\n valid_output1 = torch.index_select(output, 0, idx1.long())\n valid_target_res1 = torch.index_select(target_res, 0, idx1.long())\n loss_sin1 = compute_res_loss(\n valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))\n loss_cos1 = compute_res_loss(\n valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))\n loss_res += loss_sin1 + loss_cos1\n if target_bin[:, 1].nonzero().shape[0] > 0:\n idx2 = target_bin[:, 1].nonzero()[:, 0]\n valid_output2 = torch.index_select(output, 0, idx2.long())\n valid_target_res2 = torch.index_select(target_res, 0, idx2.long())\n loss_sin2 = compute_res_loss(\n valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))\n loss_cos2 = compute_res_loss(\n valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))\n loss_res += loss_sin2 + loss_cos2\n return loss_bin1 + loss_bin2 + loss_res\n\n\nclass Compute_Loss(nn.Module):\n def __init__(self, device):\n super(Compute_Loss, self).__init__()\n self.device = device\n self.focal_loss = FocalLoss()\n self.l1_loss = L1Loss()\n self.l1_loss_balanced = L1Loss_Balanced(alpha=0.5, gamma=1.5, beta=1.0)\n self.weight_hm_cen = 1.\n self.weight_z_coor, self.weight_cenoff, self.weight_dim, self.weight_direction = 1., 1., 1., 1.\n self.rot_loss = BinRotLoss()\n\n def forward(self, outputs, tg):\n # tg: targets\n\n outputs = outputs._asdict()\n outputs['hm_cen'] = _sigmoid(outputs['hm_cen'])\n outputs['cen_offset'] = _sigmoid(outputs['cen_offset'])\n\n l_hm_cen = self.focal_loss(outputs['hm_cen'], tg['hm_cen'])\n l_cen_offset = self.l1_loss(outputs['cen_offset'], tg['obj_mask'], tg['indices_center'], tg['cen_offset'])\n l_direction = self.l1_loss(outputs['direction'], tg['obj_mask'], tg['indices_center'], tg['direction'])\n #l_direction = self.rot_loss(outputs['direction'], tg['obj_mask'], tg['indices_center'], tg['anglebin'], tg['angleoffset'])\n # 
Apply the L1_loss balanced for z coor and dimension regression\n l_z_coor = self.l1_loss_balanced(outputs['z_coor'], tg['obj_mask'], tg['indices_center'], tg['z_coor'])\n l_dim = self.l1_loss_balanced(outputs['dim'], tg['obj_mask'], tg['indices_center'], tg['dim'])\n\n total_loss = l_hm_cen * self.weight_hm_cen + l_cen_offset * self.weight_cenoff + \\\n l_dim * self.weight_dim + l_direction * self.weight_direction + \\\n l_z_coor * self.weight_z_coor\n\n loss_stats = {\n 'total_loss': to_cpu(total_loss).item(),\n 'hm_cen_loss': to_cpu(l_hm_cen).item(),\n 'cen_offset_loss': to_cpu(l_cen_offset).item(),\n 'dim_loss': to_cpu(l_dim).item(),\n 'direction_loss': to_cpu(l_direction).item(),\n 'z_coor_loss': to_cpu(l_z_coor).item(),\n }\n\n return total_loss, loss_stats\n","repo_name":"wangx1996/CenterPillarNet","sub_path":"src/losses/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"} +{"seq_id":"70213587922","text":"with open('input') as f:\n lines = [x.strip() for x in f.readlines()]\n\n# Part one\nstate = lines[0][15:]\nrules = {l[:5]: l[-1] for l in lines[2:]}\nfirst = 0\n\nfor _ in range(20):\n state = '...' + state + '...'\n first -= 1\n new = ''\n for i in range(len(state)-4):\n s = state[i:i+5]\n new += rules[s]\n state = new\nprint(sum(i+first for i, s in enumerate(state) if s == '#'))\n\n# Part two\n# First, realize that the sum above grows with 21000 for each 1000 steps and starts at 480\nprint(50000000000//1000*21000+480)\n","repo_name":"fuglede/adventofcode","sub_path":"2018/day12/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"3"} +{"seq_id":"17322815163","text":"import pytest\n\nfrom trcustoms.awards.specs.dragon_statue import (\n DragonStatueAwardSpec,\n Requirement,\n)\nfrom trcustoms.common.tests.factories import RatingClassFactory\nfrom trcustoms.levels.tests.factories import LevelFactory\nfrom trcustoms.users.tests.factories import UserFactory\n\n\n@pytest.fixture(name=\"spec\")\ndef fixture_spec() -> None:\n spec = DragonStatueAwardSpec()\n spec.requirements = {\n 1: [Requirement(count=1, rating=2), Requirement(count=3, rating=1)],\n 2: [Requirement(count=1, rating=3), Requirement(count=2, rating=2)],\n }\n spec.descriptions = {\n tier: f\"Tier {tier} description\" for tier in spec.requirements.keys()\n }\n return spec\n\n\n@pytest.mark.django_db\n@pytest.mark.parametrize(\n \"level_ratings, expected_tier\",\n [\n ([1], 0),\n ([2], 1),\n ([1, 1], 0),\n ([1, 2], 1),\n ([1, 1, 1], 1),\n ([1, 1, 2], 1),\n ([3], 2),\n ([4], 2),\n ([2, 2], 2),\n ([2, 3], 2),\n ([2, 4], 2),\n ([2, 2, 1], 2),\n ([2, 2, 2], 2),\n ],\n)\ndef test_dragon_statue_award_spec(\n spec: DragonStatueAwardSpec,\n level_ratings: list[int],\n expected_tier: int,\n) -> None:\n user = UserFactory()\n for position in level_ratings:\n rating_class = RatingClassFactory(position=position)\n LevelFactory(authors=[user], rating_class=rating_class)\n assert user.authored_levels.count() == len(level_ratings)\n\n max_eligible_tier = 0\n for tier in spec.supported_tiers:\n if spec.check_eligible(user, tier):\n max_eligible_tier = tier\n\n assert max_eligible_tier == 
expected_tier\n","repo_name":"rr-/TRCustoms","sub_path":"backend/trcustoms/awards/tests/specs/test_dragon_statue.py","file_name":"test_dragon_statue.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"73667317521","text":"from requests.exceptions import ConnectionError, Timeout, TooManyRedirects\nimport json\nimport requests\nfrom app.entity.ticker import Ticker\nfrom app.core.config import (\n BLOCKCHAIN_DOT_COM_BASE_URL\n)\n\n\nclass blockchain_api:\n place_trade_url = \"https://exchange.blockchain.com/\"\n\n def __init__(self):\n pass\n\n async def get_ticker(self, selected_symbols=None):\n url = f\"{BLOCKCHAIN_DOT_COM_BASE_URL}/tickers/\"\n\n try:\n response = requests.get(url)\n data = json.loads(response.text)\n\n return self.__format_resp(data, selected_symbols)\n except (ConnectionError, Timeout, TooManyRedirects) as e:\n print(e)\n\n def __format_resp(self, data, includes=None):\n resp = []\n for ticker in data:\n if includes and ticker[\"symbol\"] not in includes:\n continue\n\n resp.append(Ticker(\n ask=ticker[\"price_24h\"],\n bid=0.0,\n symbol=ticker[\"symbol\"]\n ).__dict__)\n\n return {\n \"exchange\": \"BLOCKCHAIN.COM\",\n \"data\": resp,\n \"url\": self.place_trade_url\n }\n\n\nif __name__ == \"__main__\":\n b = blockchain_api()\n print(b.get_ticker([\"BTC-USD\"]))\n","repo_name":"TheDhejavu/bestside-exchange","sub_path":"app/core/exchanges/blockchain_api.py","file_name":"blockchain_api.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"8322936055","text":"#!/usr/bin/env python \n\nimport roslib\nroslib.load_manifest('sky_rocket')\nimport rospy\nimport tf\nfrom std_msgs.msg import Float64MultiArray\n\nif __name__ == \"__main__\":\n \n rospy.init_node('rocket_tf_listener')\n\n rospy.loginfo(\"TF listener is starting!!\")\n\n listener = tf.TransformListener()\n\n rocket_position = rospy.Publisher(\"/rocket_after_tf\",Float64MultiArray,queue_size=10)\n\n rate = rospy.Rate(10.0)\n\n while not rospy.is_shutdown():\n\n try:\n (trans,rot) = listener.lookupTransform('/world','/rocket',rospy.Time(0))\n\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n\n data = trans + rot\n\n data = Float64MultiArray(data=data)\n\n rocket_position.publish(data)\n\n rate.sleep()\n\n \n","repo_name":"nptttn-pat/HG_internship","sub_path":"rocket/src/sky_rocket/nodes/rocket_tf_listener.py","file_name":"rocket_tf_listener.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26722393479","text":"from Bio import SeqIO\n\ndef subset_fasta(input_file, output_file, max_sequences):\n with open(input_file, \"r\") as infile, open(output_file, \"w\") as outfile:\n for i, record in enumerate(SeqIO.parse(infile, \"fasta\")):\n if i >= max_sequences:\n break\n SeqIO.write(record, outfile, \"fasta\")\n\ninput_fasta = \"../data/raw_data/2mil.fasta\" # Path to your original FASTA file\noutput_fasta = \"../data/raw_data/100k.fasta\" # Path to the output FASTA file\nmax_sequences = 100_000 # Number of sequences to keep\n\nsubset_fasta(input_fasta, output_fasta, 
max_sequences)","repo_name":"eugenechoi2004/COS597N","sub_path":"templates/scratch/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29028941967","text":"import pytest\nfrom mock import patch\n\nfrom plan import settings\nfrom plan.api.idm import actions\nfrom plan.services.models import Service\nfrom plan.roles.models import Role\nfrom common import factories\n\npytestmark = pytest.mark.django_db\n\n\n@patch('plan.idm.adapters.rolenode.RoleNodeManager.create')\ndef test_add_role(create_node):\n service = factories.ServiceFactory()\n role = factories.RoleFactory(service=service)\n\n actions.add_role(role)\n\n assert create_node.call_args[1]['slug'] == str(role.pk)\n assert create_node.call_args[1]['name'] == {'ru': role.name, 'en': role.name_en}\n assert create_node.call_args[1]['parent'] == \\\n '/type/services/services_key/%s/%s_key/*/role/' % (service.slug, service.slug)\n\n\n@patch('plan.idm.adapters.rolenode.RoleNodeManager.delete')\ndef test_delete_role(delete_node):\n service = factories.ServiceFactory()\n role = factories.RoleFactory(service=service)\n\n actions.delete_role(role)\n\n role_node_path = '/type/services/services_key/%s/%s_key/*/role/%s/' % (service.slug, service.slug, role.pk)\n assert delete_node.call_args[1]['node_path'] == role_node_path\n\n\n@pytest.mark.parametrize('review_required', [True, False])\ndef test_set_review_policy_to_service(review_required):\n role_count = 3\n service: Service = factories.ServiceFactory()\n [factories.RoleFactory(service=service) for _ in range(role_count)]\n\n with patch('plan.idm.adapters.rolenode.RoleNodeManager.update') as manager_update_mock:\n actions.set_review_policy_to_service(service, review_required)\n\n assert manager_update_mock.call_count == service.role_set.count() + Role.objects.filter(service=None).count()\n for call in manager_update_mock.call_args_list:\n assert call.kwargs['system'] == settings.ABC_IDM_SYSTEM_SLUG\n assert call.kwargs['review_required'] == review_required\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/unit/idm/test_actions.py","file_name":"test_actions.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43026784674","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport hashlib\n\nfrom collections.abc import Sequence\n\nENCODINGS = [\"identity\", \"gzip\"]\nDEFAULT_ENCODING = \"identity\"\nBUFFER_MAX = 1 * 1024 * 1024 # We'll buffer up to 1MB\n\n\ndef _compressor(request, response):\n # Skip items with a Vary: Cookie/Authorization Header because we don't know\n # if they are safe from the CRIME attack.\n if response.vary is not None and (set(response.vary) & {\"Cookie\", \"Authorization\"}):\n return\n\n # Avoid compression if we've already got a Content-Encoding.\n if \"Content-Encoding\" in response.headers:\n 
return\n\n # Ensure that the Accept-Encoding header gets added to the response.\n vary = set(response.vary if response.vary is not None else [])\n vary.add(\"Accept-Encoding\")\n response.vary = vary\n\n # Negotiate the correct encoding from our request.\n target_encoding = request.accept_encoding.best_match(\n ENCODINGS, default_match=DEFAULT_ENCODING\n )\n\n # If we have a Sequence, we'll assume that we aren't streaming the\n # response because it's probably a list or similar.\n streaming = not isinstance(response.app_iter, Sequence)\n\n # If our streaming content is small enough to easily buffer in memory\n # then we'll just convert it to a non streaming response.\n if (\n streaming\n and response.content_length is not None\n and response.content_length <= BUFFER_MAX\n ):\n response.body\n streaming = False\n\n if streaming:\n response.encode_content(encoding=target_encoding, lazy=True)\n\n # We need to remove the content_length from this response, since\n # we no longer know what the length of the content will be.\n response.content_length = None\n\n # If this has a streaming response, then we need to adjust the ETag\n # header, if it has one, so that it reflects this. We don't just append\n # ;gzip to this because we don't want people to try and use it to infer\n # any information about it.\n if response.etag is not None:\n md5_digest = hashlib.md5(\n (response.etag + \";gzip\").encode(\"utf8\"), usedforsecurity=False\n )\n md5_digest = md5_digest.digest()\n md5_digest = base64.b64encode(md5_digest)\n md5_digest = md5_digest.replace(b\"\\n\", b\"\").decode(\"utf8\")\n response.etag = md5_digest.strip(\"=\")\n else:\n original_length = len(response.body)\n response.encode_content(encoding=target_encoding, lazy=False)\n\n # If the original length is less than our new, compressed length\n # then we'll go back to the original. There is no reason to encode\n # the content if it increases the length of the body.\n if original_length < len(response.body):\n response.decode_content()\n\n # If we've added an encoding to the content, then we'll want to\n # recompute the ETag.\n if response.content_encoding is not None:\n response.md5_etag()\n\n\ndef compression_tween_factory(handler, registry):\n def compression_tween(request):\n response = handler(request)\n\n # We use a response callback here so that it happens after all of the\n # other response callbacks are called. 
This is important because\n # otherwise we won't be able to check Vary headers and such that are\n # set by response callbacks.\n request.add_response_callback(_compressor)\n\n return response\n\n return compression_tween\n","repo_name":"pypi/warehouse","sub_path":"warehouse/utils/compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"} +{"seq_id":"20913811925","text":"#resource paths\nRESOURCE_PATH = \"resources\"\nLIBRARIES_PATH = \"libraries\"\nSAVES_PATH = \"saves\"\nGRAPHICS_PATH = \"graphics\"\nCODE_PATH = \"code\"\nAUDIO_PATH = \"audio\"\nSTRINGS_PATH = \"strings\"\n\n#code resources folders\nGRAPHIC_OBJECTS_DIR = \"graphicobjects\"\n\n#graphics resource folders\nFONT_DIR = \"fonts\"\nSPRITE_DIR = \"sprites\"\nGRAPHIC_MISC_DIR = \"misc\"\nACTOR_GRAPHICS_DIR = \"actors\"\nMAP_TILE_GRAPHICS_DIR = \"maptiles\"\n\n#audio resource folders\nMUSIC_DIR = \"music\"\nSFX_DIR = \"fx\"\n\n#misc\nMISC_DIR = \"misc\"\nSETTINGS_FILE_NAME = \"options\"\nARCHIVE_FILE_NAME = \"resources.dat\"\nTEXT_DIR = \"text\" #deprecated; used only in DoL\nWIDGET_PATH = \"widgets\"\nLOG_PATH = \"logs\"\nLOG_FILENAME_HEADER = \"log\"\nSERVER_LOG_FILENAME_HEADER = \"serverlog\"\n\n#full paths\nFULL_FONT_PATH = [RESOURCE_PATH, GRAPHICS_PATH, FONT_DIR]\nFULL_STRINGS_PATH = [RESOURCE_PATH, STRINGS_PATH]\nFULL_SPRITE_PATH = [RESOURCE_PATH, GRAPHICS_PATH, SPRITE_DIR]\nFULL_GRAPHIC_OBJECTS_PATH = [RESOURCE_PATH, CODE_PATH, GRAPHIC_OBJECTS_DIR]\nFULL_MISC_PATH = [RESOURCE_PATH, MISC_DIR]\nACTOR_GRAPHICS_FULL_PATH = [RESOURCE_PATH, GRAPHICS_PATH, ACTOR_GRAPHICS_DIR]\nFULL_MUSIC_PATH = [RESOURCE_PATH, AUDIO_PATH, MUSIC_DIR]\nFULL_SFX_PATH = [RESOURCE_PATH, AUDIO_PATH, SFX_DIR]\nMAP_TILE_GRAPHICS_FULL_PATH = [RESOURCE_PATH, GRAPHICS_PATH, MAP_TILE_GRAPHICS_DIR]\nFULL_GRAPHIC_PATH = [RESOURCE_PATH, GRAPHICS_PATH]\n#note: save path in settingskeys.py, or overwritten in local dynamic settings\n\n#extensions\nPNG_EXTENSION = \".png\"\nANI_EXTENSION = \".ani\"\nTTF_EXTENSION = \".ttf\"\nPYTHON_EXTENSION = \".py\"\nTEXT_EXTENSION = \".txt\"\nSTRINGS_EXTENSION = \".strings\"\nSTRINGS_METADATA_EXTENSION = \".stringsmeta\"\nGRAPHIC_FONT_EXTENSION = \".gfont\"\nGRAPHIC_FONT_2_EXTENSION = \".gfont2\"\nGRAPHIC_FONT_3_EXTENSION = \".gfont3\"\nACTOR_EXTENSION = \".actor\"\nTILEMAP_SCENE_EXTENSION = \".tilemapscene\"\nGRAPHIC_OBJECT_EXTENSION = \".gfxobj\"\nIMAGE_EXTENSION = PNG_EXTENSION\nLABEL_OBJECT_EXTENSION = \".label\"\nBORDERED_SPRITE_EXTENSION = \".bordered\"\nMULTI_SPRITE_EXTENSION = \".multi\"\nCONFIG_EXTENSION = \".config\"\nSAVE_EXTENSION = \".sav\"\nSOUND_EXTENSION = \".ogg\"\nLOG_EXTENSION = \".log\"\n\n\n#strings filenames\nSTRINGS_META_FILENAME = \"_metadata\" + STRINGS_METADATA_EXTENSION\nSTRINGS_MISC_FILENAME = \"misc\" + STRINGS_EXTENSION\n","repo_name":"kaijyuu2/kaiengine","sub_path":"gconfig/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"21504016594","text":"import os\r\nimport pandas as pd\r\nimport pickle\r\nimport shutil\r\nfrom pypdf import PdfReader\r\nimport re\r\nimport argparse\r\n \r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser(description='Command Line Script')\r\nparser.add_argument('--input', help='the input directory', required=True)\r\nparser.add_argument('--output', help='the output directory', required=True)\r\nargs = 
parser.parse_args()\r\n\r\n\r\nword_vector = pickle.load(open(\"tfidf.pkl\", \"rb\"))\r\nmodel = pickle.load(open(\"clf.pkl\", \"rb\"))\r\n\r\n\r\ndef cleanResume(txt):\r\n cleanText = re.sub('http\\S+\\s', ' ', txt)\r\n cleanText = re.sub('RT|cc', ' ', cleanText)\r\n cleanText = re.sub('#\\S+\\s', ' ', cleanText)\r\n cleanText = re.sub('@\\S+', ' ', cleanText) \r\n cleanText = re.sub('[%s]' % re.escape(\"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\"\"), ' ', cleanText)\r\n cleanText = re.sub(r'[^\\x00-\\x7f]', ' ', cleanText) \r\n cleanText = re.sub('\\s+', ' ', cleanText)\r\n return cleanText\r\ncategory_mapping = {\r\n 15: \"Java Developer\",\r\n 23: \"Testing\",\r\n 8: \"DevOps Engineer\",\r\n 20: \"Python Developer\",\r\n 24: \"Web Designing\",\r\n 12: \"HR\",\r\n 13: \"Hadoop\",\r\n 3: \"Blockchain\",\r\n 10: \"ETL Developer\",\r\n 18: \"Operations Manager\",\r\n 6: \"Data Science\",\r\n 22: \"Sales\",\r\n 16: \"Mechanical Engineer\",\r\n 1: \"Arts\",\r\n 7: \"Database\",\r\n 11: \"Electrical Engineering\",\r\n 14: \"Health and fitness\",\r\n 19: \"PMO\",\r\n 4: \"Business Analyst\",\r\n 9: \"DotNet Developer\",\r\n 2: \"Automation Testing\",\r\n 17: \"Network Security Engineer\",\r\n 21: \"SAP Developer\",\r\n 5: \"Civil Engineer\",\r\n 0: \"Advocate\",\r\n}\r\n# Command line script to categorize resumes\r\ndef categorize_resumes(input_directory, output_directory, output_csv):\r\n if not os.path.exists(input_directory):\r\n os.makedirs(input_directory)\r\n if not os.path.exists(output_directory):\r\n os.makedirs(output_directory)\r\n results = []\r\n if(len(os.listdir(input_directory))==0):\r\n print(\"No File Found..\")\r\n else:\r\n for resume_file in os.listdir(input_directory):\r\n if resume_file.endswith('.pdf'): # Change the extension as needed\r\n resume_path = os.path.join(input_directory, resume_file)\r\n reader = PdfReader(resume_path)\r\n page = reader.pages[0]\r\n text = page.extract_text()\r\n cleaned_resume = cleanResume(text)\r\n\r\n input_features = word_vector.transform([cleaned_resume])\r\n prediction_id = model.predict(input_features)[0]\r\n category_name = category_mapping.get(prediction_id, \"Unknown\")\r\n \r\n category_folder = os.path.join(output_directory, category_name)\r\n \r\n if not os.path.exists(category_folder):\r\n os.makedirs(category_folder)\r\n \r\n target_path = os.path.join(category_folder, resume_file)\r\n shutil.copy(resume_path, target_path)\r\n \r\n results.append({'filename': resume_file, 'category': category_name})\r\n \r\n results_df = pd.DataFrame(results)\r\n results_df.to_csv(output_csv, index=False)\r\n\r\nif __name__ == '__main__':\r\n input_directory = args.input\r\n output_directory = args.output\r\n output_csv = \"categorized_resumes.csv\"\r\n \r\n categorize_resumes(input_directory, output_directory, output_csv)\r\n print(\"Resumes categorization and processing completed.\")\r\n","repo_name":"Chando0185/Resume_Categorization","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"9716046538","text":"class Solution:\n def isNumber(self, s: str) -> bool:\n digitseen,eseen,dotseen=False,False,False\n count=0\n n=len(s)\n for i in range(n):\n char=s[i]\n# checking digits\n if char.isdigit():\n digitseen=True\n \n# checking plus minus condition\n elif char==\"+\" or char==\"-\":\n# if it occur 2 times means invalid\n if count==2:\n return False\n# if plus or minus appear after e and index is also 
greater than 1 means its invalid\n if i>0 and s[i-1]!='e' and s[i-1]!='E':\n return False\n# if it come at last index also invalid\n if i==n-1:\n return False\n# else we have to increment the count\n count+=1\n \n# checking for dot \n elif char==\".\":\n# if we seen E before or dot before means invalid\n if eseen or dotseen:\n return False\n# if it is on last index and till now no digit is seen also invalid\n if i==n-1 and not digitseen:\n return False\n# else make dotseen true\n dotseen=True\n \n# last check e or E\n elif char==\"e\" or char==\"E\":\n# if already we see e or till now no digit is seen or if it found on last index its invalid\n if eseen or not digitseen or i==n-1:\n return False\n# else make them true\n eseen=True\n \n# if the char goes in else part means its not valid number\n else:\n return False\n return True\n ","repo_name":"janvi2002/Leetcode","sub_path":"0065-valid-number/0065-valid-number.py","file_name":"0065-valid-number.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36898894664","text":"# -*-coding:utf-8-*-\n#https://blog.csdn.net/jackxu8/article/details/71159315\n\n# conda uninstall mkl=2018\n# conda install mkl=2017\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pymc3 as pm\nimport pandas as pd\nimport theano\n\ndata = pd.read_csv('radon.csv')\ndata['log_radon'] = data['log_radon'].astype(theano.config.floatX)\ncounty_names = data.county.unique()\ncounty_idx = data.county_code.values\n\nn_counties = len(data.county.unique())\nprint(n_counties)\n\n# 模型需要使用的数据的一小部分\ndata[['county', 'log_radon', 'floor']].head()\n\n\nwith pm.Model() as unpooled_model:\n\n # 每个国家的独立参数\n alpha = pm.Normal('alpha', 0, sd=100, shape=n_counties)\n beta = pm.Normal('beta', 0, sd=100, shape=n_counties)\n\n # 模型误差\n eps = pm.HalfCauchy('eps', 5)\n\n # radon含量的数学模型\n radon_est = alpha[county_idx] + beta[county_idx]*data.floor.values\n\n # Data likelihood\n # 均值就是待预测的radon含量\n # 方差就是测量误差eps\n # 并给定观测值(测量值)\n y = pm.Normal('y', mu=radon_est, sd=eps, observed=data.log_radon)\n\nwith unpooled_model:\n unpooled_trace = pm.sample(5000)\n\npm.traceplot(unpooled_trace)\n\n\nwith pm.Model() as hierarchical_model:\n\n # 超参数\n # HalfCauchy 柯西半连续型\n # Normal 正态型\n mu_alpha = pm.Normal('mu_alpha', mu=0., sd=100**2)\n sigma_alpha = pm.HalfCauchy('sigma_alpha', 5)\n mu_beta = pm.Normal('mu_beta', mu=0., sd=100**2)\n sigma_beta = pm.HalfCauchy('sigma_beta', 5)\n\n # 每个国家的参数均服从同一个正态分布\n alpha = pm.Normal('alpha', mu=mu_alpha, sd=sigma_alpha, shape=n_counties)\n beta = pm.Normal('beta', mu=mu_beta, sd=sigma_beta, shape=n_counties)\n\n # 模型误差\n eps = pm.HalfCauchy('eps', 5)\n\n # radon含量的模型\n radon_est = alpha[county_idx] + beta[county_idx] * data.floor.values\n\n # Data likelihood\n # 均值就是待预测的radon含量\n # 方差就是测量误差eps\n # 并给定观测值(测量值)\n radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)\n\nwith hierarchical_model:\n hierarchical_trace = pm.sample(5000)","repo_name":"curryli/pandasFlow","sub_path":"Bayes/pymc_fenceng.py","file_name":"pymc_fenceng.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"2198497381","text":"from rest_framework import generics, response, status, views\n\nfrom .permissions import IsAuthor\nfrom .serializers import FavouriteSerializer\nfrom .models import Favourite\n\n\nclass FavouriteListAPIView(generics.ListAPIView):\n 
serializer_class = FavouriteSerializer\n\n def get_queryset(self):\n user = self.request.user\n queryset = Favourite.objects.filter(customer=user)\n return queryset\n\n\nclass FavouriteAPIView(views.APIView):\n\n def post(self, request, format=None):\n data = request.data\n product = data.get('product')\n products = Favourite.objects.filter(product=product, customer=request.user)\n if products:\n return response.Response({\"message\": \"You already have\"}, status=status.HTTP_400_BAD_REQUEST)\n serializer = FavouriteSerializer(data=data)\n if serializer.is_valid():\n serializer.save(customer=request.user)\n return response.Response(serializer.data, status=status.HTTP_201_CREATED)\n return response.Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass FavouriteDeleteAPIView(generics.DestroyAPIView):\n queryset = Favourite.objects.all()\n serializer_class = FavouriteSerializer\n permission_classes = [IsAuthor, ]\n","repo_name":"Almaz2312/project_restapi","sub_path":"favourite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29495272382","text":"# -*- coding: utf-8 -*-\n#-------------------------\n# Author: Walber C de Jesus Rocha\n# University: Universidade Federal do Recôncavo da Bahia - UFRB\n# Paper title: Sistema de medição de distância baseado em visão computacional utilizando laser de linha\n# Project: Construção de uma Colônia de Robôs Autônomos para Reconhecimento, Busca e Inspeção\n#-------------------------\n\nimport cv2\nimport numpy\nfrom math import pi, tan, atan2, degrees\n\ndef cut_image(img):\n cut_img = img[240:340, 0:640].copy()\n cut_img = cv2.normalize(cut_img, cut_img, 0, 255, cv2.NORM_MINMAX)\n cut_img = cv2.GaussianBlur(cut_img, (1, 1), 0)\n return cut_img\n\ndef gradient_sobel(cut_img):\n # ------------------------------ Gradient Sobel\n gray = cv2.cvtColor(cut_img, cv2.COLOR_BGR2GRAY)\n sobel_gy = cv2.Sobel(gray, cv2.CV_8U, 0, 1, ksize=3)\n\n # ------------------------------ Thereshold\n ret, threshold = cv2.threshold(numpy.absolute(\n sobel_gy), 50, 255, cv2.THRESH_BINARY)\n kernel = numpy.ones((2, 2), numpy.uint8)\n erosion = cv2.erode(threshold, kernel, iterations=1)\n\n return erosion\n\ndef pixel_aggregation(cut_img):\n\thsv = cv2.cvtColor(cut_img, cv2.COLOR_BGR2HSV) # Color space conversion, BRG to HSV\n\n\tdark_red_a = numpy.array([0, 100, 100]) # Range dark red\n\tdark_red_b = numpy.array([10, 255, 255])\n\t\n\tlight_red_a = numpy.array([160, 200, 100]) # Range light red\n\tlight_red_b = numpy.array([179, 255, 255])\n\t\n\tmask_dark = cv2.inRange(hsv, dark_red_a, dark_red_b) # Mask dark red\n\tmask_light = cv2.inRange(hsv, light_red_a, light_red_b) # Mask light red\n\n\tmask = mask_dark + mask_light\n\n\tstructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1)) # Structuring element\n\topen_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, structure) # Morphological operator\n\tresult = cv2.bitwise_and(cut_img, cut_img, mask=open_mask) # I calculate the pixel-by-pixel conjunction\n\n #------------------------------ Thereshold\n\tgray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)\n\tret, threshold = cv2.threshold(numpy.absolute(gray), 50, 255, cv2.THRESH_BINARY)\n\t\n\treturn threshold\n\n# ------------------------------ HoughLines\ndef filter_image(threshold):\n y = []\n lines = cv2.HoughLinesP(threshold, 1, numpy.pi/2, 20, 30, 15)\n\n if lines is None:\n return None\n else:\n for line in lines:\n for x1, y1, x2, y2 in line:\n 
y.extend([y1, y2])\n\n    return sorted(set(y))\n\n# ------------------------------ Distance calculate\ndef get_distance(y):\n    if(y is None):\n        return None\n    else:\n        yFinal = []\n        distFinal = []\n        yFinal.append(y[0])\n\n        for i in range(1, len(y)):\n            yAux = y[i-1]\n\n            if(abs(y[i] - yAux) >= 2):\n                yFinal.append(y[i])\n\n        for i in yFinal:\n            DPF = abs((i + 240) - (height/2))\n            theta = RPP * DPF + DR\n            dist = laser_height/tan(theta)\n            distFinal.append(round(dist, 3))\n\n        return sorted(distFinal, key=float)\n\ndef laser_RangeFinder(img):\n    cropped_img = cut_image(img)\n    \n    result = gradient_sobel(cropped_img)\n    # result = pixel_aggregation(cropped_img)\n    \n    y_coordinates = filter_image(result)\n    distances = get_distance(y_coordinates)\n\n    return(print(distances))\n\nif __name__ == '__main__':\n\n    RPP = 0.001620713896608 # Radians per pixel\n    DR = -0.024015443752004 # Radial displacement\n    laser_height = 2.8 # laser height (cm)\n\n    # Image path\n    img = cv2.imread('Images_Tests/50cm.jpg')\n    \n    width = numpy.size(img, 1) # get width image\n    height = numpy.size(img, 0) # get height image\n\n    laser_RangeFinder(img)","repo_name":"rwalber/Laser-RangeFinder","sub_path":"rangeFinder.py","file_name":"rangeFinder.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23551854570","text":"class Action(object):\n    def __init__(self,string):\n        self.string = string\nclass ExternalAction(Action): # e.g. move forward\n    def __init__(self,string, vector):\n        Action.__init__(self,string)\n        self.vector = vector\nclass LearningAction(Action): # modify the policy\n    def __init__(self,string):\n        Action.__init__(self,string)\nclass InternalAction(Action): # all internal actions that do not modify the policy e.g. 
observe something or change Instruction Pointer\n    def __init__(self,string):\n        Action.__init__(self,string)\n\nDM_LAB_ACTIONS = [ ExternalAction('look_left',(-20, 0, 0, 0, 0, 0, 0)),\n                   ExternalAction('look_right',(20, 0, 0, 0, 0, 0, 0)),\n                   ExternalAction('look_up',(0, 10, 0, 0, 0, 0, 0)),\n                   ExternalAction('look_down',(0, -10, 0, 0, 0, 0, 0)),\n                   ExternalAction('strafe_left',(0,0,-1, 0, 0, 0, 0)),\n                   ExternalAction('strafe_right',(0, 0, 1, 0, 0, 0, 0)),\n                   ExternalAction('forward',(0, 0, 0, -1, 0, 0, 0)),\n                   ExternalAction('backward',(0, 0, 0, 1, 0, 0, 0)),\n                   ExternalAction('fire',(0, 0, 0, 0, 1, 0, 0)),\n                   ExternalAction('jump',(0, 0, 0, 0, 0, 1, 0)),\n                   ExternalAction('crouch',(0, 0, 0, 0, 0, 0, 1))]\nDM_LAB_ACTIVE_PERCEPTION = [InternalAction('observe')] #only visual\n\nSTANDARD_SSA_INTERNAL_ACTIONS = [\n\n                        InternalAction('prepEval'),\n                        InternalAction('setIP'),\n\n\n                        ]\nSSA_DL_INTERNAL_ACTIONS=[InternalAction('output'), #output --> connect layer[IP] to the output layer and output the action\n                      InternalAction('storeWM'), #store data in working memory (this can be sensory input, or output from any layer !)\n                      InternalAction('fetchWM'), #get data from the working memory\n                      InternalAction('fetchWM'), #get data from the working memory\n                      InternalAction('dropout'), # switch off some units in the current layer\n                      InternalAction('reassign') #switch on again\n                      ]\nSSA_LEARNING_ACTIONS = [LearningAction('incProb'),\n                        LearningAction('incEvolutionParam')]\ndef get_SSA_actions(DL=True,active_perception=False): # Learning actions\n    actions = list(DM_LAB_ACTIONS)\n    if(active_perception):\n        actions+=DM_LAB_ACTIVE_PERCEPTION\n    actions += STANDARD_SSA_INTERNAL_ACTIONS\n    if(DL):\n        actions+=SSA_DL_INTERNAL_ACTIONS\n    else:\n        raise NotImplementedError\n    actions += SSA_LEARNING_ACTIONS\n    return actions\n\n\ndef isPLA(action):\n    return isinstance(action,LearningAction)\ndef executeAction(agent,):\n    if(isinstance(agent.method.chosenAction,ExternalAction)):\n        agent.action=agent.method.chosenAction.vector\n\n    else:\n        SSA_action(agent.method)\ndef SSA_action(ssa):\n    switch={\n        'prepEval': lambda: ssa.prepareEvaluation(ssa.currentInstruction[3]),\n        'setIP': lambda: ssa.setIP(ssa.currentInstruction[1]),\n        'output': ssa.output,\n        'forwardPass': ssa.forwardPass,\n        'backwardPass': ssa.backwardPass,\n        'incProb': ssa.incProb,\n        'incEvolutionParam': ssa.incEvolutionParam,\n    }\n    func = switch.get(ssa.chosenAction)\n    return func()\n\n","repo_name":"bossdm/LifelongRL","sub_path":"Actions/DMActions.py","file_name":"DMActions.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"2337653613","text":"# Config file for MWG walk\n# Author: Edmund Dable-Heath\n\"\"\"\n    Config file for the metropolis within gibbs approach to the quantum walk SVP experiment.\n\"\"\"\n\nimport numpy as np\nimport math\n\n\ndef compute_graph_bounds(basis, t_points, sub_dim):\n    \"\"\"\n        Compute the range that the graph walk should be constrained to for either computational efficiency reasons or to\n        ensure the existence of the shortest vector within the scope.\n    :param basis: lattice basis for computation of the dimension and the integer bounds, int-(m,m)-ndarray\n    :param t_points: the total points to be considered for computational efficiency, int\n    :param sub_dim: the dimension of the current walk being considered, either 2 or 3 dimension, int\n    \"\"\"\n    dim = basis.shape[0]\n    integer_bounds = math.ceil(dim * math.log2(dim) + math.log2(np.linalg.det(basis)))\n    bound = math.floor(t_points ** 
(1/float(sub_dim)) / 2)\n if bound > integer_bounds:\n return integer_bounds\n else:\n return bound\n\n\n# Multiprocessing parameters\ncores = 4\n\n# Lattice parameters ----------------------\ndimension = 5\nlattice_type = 'hnf'\nlattice_num = 0\nlattice_basis = np.genfromtxt(\n 'run_data/lattice.csv',\n delimiter=',',\n dtype=None\n)\n\n# Walk parameters -------------------------\ntotal_points = 31**2\ngraph_bounds_2 = compute_graph_bounds(lattice_basis, total_points, 2)\ngraph_bounds_3 = compute_graph_bounds(lattice_basis, total_points, 3)\ndist_2 = np.genfromtxt(\n 'run_data/dist_2.csv',\n delimiter=',',\n dtype=None\n)\ndist_3 = np.genfromtxt(\n 'run_data/dist_3.csv',\n delimiter=',',\n dtype=None\n)\ncoords_2 = np.genfromtxt(\n 'run_data/coords_2.csv',\n delimiter=',',\n dtype=None\n)\ncoords_3 = np.genfromtxt(\n 'run_data/coords_3.csv',\n delimiter=',',\n dtype=None\n)\n\n# Model Parameter -------------------------\ngamma_mark = 1\nnumber_of_runs = 1\n","repo_name":"eddableheath/QDHMC","sub_path":"mwg_config.py","file_name":"mwg_config.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"6138101824","text":"from io import BytesIO\nfrom flask import Flask, render_template\nfrom flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileRequired\nfrom PIL import Image\nimport numpy as np\nfrom itertools import islice\nimport base64\n\nSENSITIVITY = 30\n\napp = Flask(__name__)\napp.secret_key = b'mytopsecretkey'\n\n\nclass UploadForm(FlaskForm):\n image = FileField(validators=[FileRequired()])\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\n form = UploadForm()\n\n if form.validate_on_submit():\n\n # save the uploaded image in memory\n image_buffer = BytesIO()\n f = form.image.data\n f.save(image_buffer)\n\n # use Pillow and numpy to analyse\n img = Image.open(image_buffer)\n\n img_data = np.asarray(img)\n img_data_agg = img_data // SENSITIVITY * SENSITIVITY # aggregate near colors\n\n # reshape the array to flatten it to pixel and count the unique occurrence\n unique_pixels, counts = np.unique(img_data_agg.reshape(-1, img_data_agg.shape[2]), axis=0, return_counts=True)\n\n colors = []\n for pixel in unique_pixels:\n colors.append(html_color(pixel))\n\n color_dict = dict(zip(colors, counts))\n colors_sorted = dict(sorted(color_dict.items(), key=lambda item: item[1], reverse=True))\n\n main_colors = dict(islice(colors_sorted.items(), 10))\n\n # base64 encode image and decode to string and pass it to the html template\n image_str = base64.b64encode(image_buffer.getvalue()).decode()\n return render_template('index.html', form=form, colors=main_colors, image_str=image_str)\n\n return render_template('index.html', form=form)\n\n\ndef html_color(list_r_g_b):\n \"\"\"Turns a list with RGB value in to html color code \"\"\"\n html_color_code = f'#{list_r_g_b[0]:02x}{list_r_g_b[1]:02x}{list_r_g_b[2]:02x}'\n return html_color_code\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Floenzens/ColorPalleteGenerator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12835499735","text":"import asyncio\nfrom inspect import getmembers, isfunction\n\nfrom src import generic_sites, non_generic_sites, utils\n\n\nasync def main():\n SCRIPTS = [\n i[1]\n for i in getmembers(non_generic_sites, isfunction)\n if i[0] not in 
(\"_get\", \"_post\", \"dataclass\", \"ping_generic_schema\")\n ] + [site.ping for site in generic_sites.SITES]\n\n tasks = tuple(map(lambda s: asyncio.create_task(s()), SCRIPTS))\n responses = await asyncio.gather(*tasks)\n\n utils.Log(responses)\n\n\nasyncio.run(main())\n","repo_name":"Alexey-Klechikov/pyBostad","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18128559025","text":"def recursive_calculate(expression, steps, target=24, operators=(\"+\", \"-\", \"*\", \"/\")):\n if len(expression) == 1:\n if expression[0] is not None and math.isclose(expression[0], target, rel_tol=1e-9):\n return steps\n else:\n return None\n\n for i, num in enumerate(expression[:-1]):\n for op in operators:\n result = apply_operator(op, num, expression[i+1])\n if result is None:\n continue\n new_expression = expression[:i] + (result,) + expression[i+2:]\n new_steps = steps + [(op, num, expression[i+1], result)]\n res_steps = recursive_calculate(new_expression, new_steps, target)\n if res_steps is not None:\n return res_steps\n return None\n\ndef format_expression(steps):\n expression = {}\n for step in steps:\n op, a, b, res = step\n expression[res] = f\"({expression.get(a, a)} {op} {expression.get(b, b)})\"\n return expression[res]\n\n# ... (保留其他代码不变)\n\nif __name__ == \"__main__\":\n # ... (保留输入代码不变)\n \n steps, perm = find_expression(nums)\n if steps is not None:\n print(\"找到一个可以得到 24 的计算式:\")\n expression = format_expression(steps)\n print(f\"{expression} = 24\")\n else:\n print(\"无法通过四则运算得到 24。\")\n","repo_name":"goldengrape/24_points_design_by_GPT4","sub_path":"code10.py","file_name":"code10.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"19964809423","text":"from numpy import shape\nfrom math import log\n\n\nimport pickle\nimport operator\n\ndef filetoDataSet(filename):\n fr = open(filename, 'r', encoding='utf-8')\n all_lines = fr.readlines()\n featname = all_lines[0].strip().split(',')[1:-1]\n dataSet = []\n for line in all_lines[1:]:\n line = line.strip()\n lis = line.split(',')[1:]\n dataSet.append(lis)\n fr.close()\n return dataSet, featname\n\n\n# 计算数据集的权重香农熵\ndef calcShannonEnt(dataSet, weight):\n labelCounts = {}\n i = 0\n # 给所有可能分类创建字典\n for featVec in dataSet:\n currentLabel = featVec[-1]\n if currentLabel not in labelCounts.keys():\n labelCounts[currentLabel] = 0\n labelCounts[currentLabel] += weight[i]\n i += 1\n # 计算香农熵\n shannonEnt = 0.0\n for key in labelCounts:\n prob = float(labelCounts[key] / sum(weight)) \n shannonEnt -= prob * log(prob, 2)\n return shannonEnt\n\n\n# 对离散变量划分数据集,取出该特征取值value的所有样本\ndef splitDataSet(dataSet, weight, axis, value, countMissValue):\n retDataSet = []\n retWeight = []\n i = 0\n for featVec in dataSet:\n if featVec[axis] == '?' and (not countMissValue):\n continue\n if countMissValue and featVec[axis] == '?':\n retVec = featVec[:axis]\n retVec.extend(featVec[axis+1:])\n retDataSet.append(retVec)\n if featVec[axis] == value:\n retVec = featVec[:axis]\n retVec.extend(featVec[axis+1:])\n retDataSet.append(retVec)\n retWeight.append(weight[i])\n i += 1\n return retDataSet, retWeight\n\n#对连续变量划分数据集,direction规定划分的方向,\n#决定是划分出小于value的数据样本还是大于value的数据样本集\ndef splitContinuousDataSet(dataSet, axis, value, direction, countMissValue):\n retDataSet = []\n for featVec in dataSet:\n if featVec[axis] == '?' 
and (not countMissValue):\n continue\n if countMissValue and featVec[axis] == '?':\n retVec = featVec[:axis]\n retVec.extend(featVec[axis+1:])\n retDataSet.append(retVec)\n if (direction and featVec[axis] <= value) or ((not direction) and featVec[axis] > value):\n retVec = featVec[:axis]\n retVec.extend(featVec[axis+1:])\n retDataSet.append(retVec)\n return retDataSet\n\n\ndef getUnmissDataSet(dataSet, weight, axis):\n retDataSet = []\n retWeight = []\n tag = []\n i = 0\n for featVec in dataSet:\n if featVec[axis] == '?':\n tag.append(i)\n else:\n retVec = featVec[:axis]\n retVec.extend(featVec[axis+1:])\n retDataSet.append(retVec)\n i += 1\n for i in range(len(weight)):\n if i not in tag:\n retWeight.append(weight[i])\n return retDataSet, retWeight\n\n\ndef DataSetPredo(dataSet, labels, decreteindex):\n DataSetlen = len(dataSet)\n Entropy = calcShannonEnt(dataSet, [1 for i in range(DataSetlen)])\n for index in decreteindex: # 对每一个是连续值的属性下标\n UnmissDatalen = 0\n for i in range(DataSetlen): # 字符串转浮点数\n if dataSet[i][index] != '?':\n UnmissDatalen += 1\n dataSet[i][index] = float(dataSet[i][index])\n allvalue = [vec[index] for vec in dataSet if vec[index] != '?']\n sortedallvalue = sorted(allvalue)\n T = []\n for i in range(len(allvalue)-1): # 划分点集合\n T.append(float(sortedallvalue[i]+sortedallvalue[i+1]) / 2.0)\n bestGain = 0.0\n bestpt = -1.0\n for pt in T: # 对每个划分点\n nowent = 0.0\n for small in range(2): # 化为正类(1)负类(0)\n Dt = splitContinuousDataSet(dataSet, index, pt, small, False)\n p = len(Dt) / float(UnmissDatalen)\n nowent += p * calcShannonEnt(Dt, [1 for i in range(len(Dt))])\n if Entropy - nowent > bestGain:\n bestGain = Entropy-nowent\n bestpt = pt\n labels[index] = str(labels[index] + \"<=\" + \"%.3f\" % bestpt)\n for i in range(DataSetlen):\n if dataSet[i][index] != '?':\n dataSet[i][index] = \"是\" if dataSet[i][index] <= bestpt else \"否\"\n return dataSet, labels\n\n'''''\n从输入的训练样本集中,计算划分之前的熵,找到当前有多少个特征,遍历每一个特征计算信息增益,找到这些特征中能带来信息增益最大的那一个特征。\n这里用分了两种情况,离散属性和连续属性\n1、离散属性,在遍历特征时,遍历训练样本中该特征所出现过的所有离散值,假设有n种取值,那么对这n种我们分别计算每一种的熵,最后将这些熵加起来\n就是划分之后的信息熵\n2、连续属性,对于连续值就稍微麻烦一点,首先需要确定划分点,用二分的方法确定(连续值取值数-1)个切分点。遍历每种切分情况,对于每种切分,\n计算新的信息熵,从而计算增益,找到最大的增益。\n假设从所有离散和连续属性中已经找到了能带来最大增益的属性划分,这个时候是离散属性很好办,直接用原有训练集中的属性值作为划分的值就行,但是连续\n属性我们只是得到了一个切分点,这是不够的,我们还需要对数据进行二值处理。\n'''\n\n# 选择最优的数据集划分方式\ndef chooseBestFeatureToSplit(dataSet, labels, weight):\n numFeatures = len(dataSet[0]) - 1\n baseEntropy = calcShannonEnt(dataSet, [1 for i in range(len(dataSet))])\n dataSetWeight = sum(weight)\n bestInfoGain = 0.0\n bestFeature = -1\n bestSplitDic = {}\n for i in range(numFeatures):\n # 获取第i个特征所有可能的取值\n featList = [example[i] for example in dataSet]\n # 对连续型特征进行处理\n if type(featList[0]).__name__ == 'float' or type(featList[0]).__name__ == 'int':\n # 产生n-1个候选划分点\n sortfeatList = sorted(featList)\n splitList = []\n for j in range(len(sortfeatList)-1):\n splitList.append((sortfeatList[j] + sortfeatList[j+1]) / 2.0) \n bestSplitEntropy = 10000 # 设定一个很大的熵值(之后用)\n slen = len(splitList)\n # 求用第j个候选划分点时,得到的信息熵,并记录最佳划分点\n for j in range(slen):\n value = splitList[j]\n newEntropy = 0.0\n for direct in range(2):\n subDataSet = splitContinuousDataSet(dataSet, i, value, 0, False)\n probG = len(subDataSet) / float(len(dataSet))\n newEntropy += probG * calcShannonEnt(subDataSet, [1 for i in range(len(dataSet))])\n if newEntropy < bestSplitEntropy:\n bestSplitEntropy = newEntropy\n bestSplit = j\n # 用字典记录当前特征的最佳划分点\n bestSplitDic[labels[i]] = splitList[bestSplit]\n infoGain = baseEntropy - bestSplitEntropy\n # 对离散型特征进行处理\n 
else:\n unMissDataSet, unMissWeight = getUnmissDataSet(dataSet, weight, i)\n entropy = calcShannonEnt(unMissDataSet, unMissWeight)\n unMissSumWeight = sum(unMissWeight)\n lou = unMissSumWeight / dataSetWeight\n uniqueVals = set(featList)\n newEntropy = 0.0\n for value in uniqueVals:\n subDataSet, weightVec_v = splitDataSet(dataSet, unMissWeight, i, value, False)\n # 特征为i的数据集占总数的比例\n prob = len(weightVec_v) / unMissSumWeight\n newEntropy += prob * calcShannonEnt(subDataSet, weightVec_v)\n infoGain = (entropy - newEntropy) * lou\n if infoGain > bestInfoGain:\n bestInfoGain = infoGain\n bestFeature = i\n # 若当前节点的最佳划分特征为连续特征,则将其以之前记录的划分点为界进行二值化处理\n # 即将该特征改为“是否小于等于bestSplitValue”, 例如将“密度”变为“密度<=0.3815”\n # 注意:以下这段直接操作了原dataSet数据, 之前的那些float型的值相应变为0和1\n # 【为何这样做?】在函数createTree()末尾将看到解释\n if type(dataSet[0][bestFeature]).__name__ == 'float' or type(dataSet[0][bestFeature]).__name__ == 'int':\n bestSplitValue = bestSplitDic[labels[bestFeature]]\n labels[bestFeature] = labels[bestFeature] + '<=' + str(bestSplitValue)\n for i in range(shape(dataSet)[0]):\n if dataSet[i][bestFeature] <= bestSplitValue:\n dataSet[i][bestFeature] = 1\n else:\n dataSet[i][bestFeature] = 0\n return bestFeature\n\n\n# 多数表决,剪枝\ndef majorityCnt(classList, weight):\n classCount = {}\n i = 0\n # classList = ['yes', 'yes', 'no', 'no', 'no']\n for vote in classList:\n if vote not in classCount.keys():\n classCount[vote] = 0\n classCount[vote] += weight[i]\n i += 1\n # classCount = {'yes': 2, 'no': 3}\n # 排序\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\n # 返回出现次数最多的\n return sortedClassCount[0][0]\n\n\ndef splitDataSet_adjustWeight(dataSet, weight, axis, value, r_v):\n retDataSet = []\n retWeight = []\n i = 0\n for featVec in dataSet:\n if featVec[axis] == '?':\n retVec = featVec[:axis]\n retVec.extend(featVec[axis+1:])\n retDataSet.append(retVec)\n retWeight.append(weight[i] * r_v)\n elif featVec[axis] == value:\n retVec = featVec[:axis]\n retVec.extend(featVec[axis+1:])\n retDataSet.append(retVec)\n retWeight.append(weight[i])\n i += 1\n return retDataSet, retWeight\n\n\n# 主程序,递归产生决策树\ndef createTree(dataSet, weight, labels):\n classList = [example[-1] for example in dataSet]\n # 当类别与属性完全相同时停止\n if classList.count(classList[0]) == len(classList):\n return classList[0]\n # 遍历完所有特征值时,返回数量最多的\n if len(dataSet[0]) == 1:\n return majorityCnt(classList, weight)\n # 获取最佳划分属性\n bestFeat = chooseBestFeatureToSplit(dataSet, labels, weight)\n bestFeatLabel = labels[bestFeat]\n decisionTree = {bestFeatLabel: {}}\n featValues = [example[bestFeat] for example in dataSet]\n uniqueVals = sorted(list(set(featValues)))\n unMissDataSet, unMissWeight = getUnmissDataSet(dataSet, weight, bestFeat)\n unMissSumWeight = sum(unMissWeight)\n del(labels[bestFeat])\n for value in uniqueVals:\n subLabels = labels[:]\n subDataSetV, weightVec_v = splitDataSet(dataSet, unMissWeight, bestFeat, value, False)\n r_v = sum(weightVec_v) / unMissSumWeight\n subDataSet, subWeight = splitDataSet_adjustWeight(dataSet, weight, bestFeat, value, r_v)\n decisionTree[bestFeatLabel][value] = createTree(subDataSet, subWeight, subLabels)\n return decisionTree\n\n\n# 输入三个变量(决策树,属性特征标签,测试的数据)\ndef classify(inputTree, featLables, testVec):\n classLabel = []\n root = list(inputTree.keys())[0] # 获取树的第一个特征属性\n secondDict = inputTree[root] # 树的分支,子集合Dict\n featIndex = featLables.index(root) # 获取决策树第一层在featLables中的位置\n for key in secondDict.keys():\n if testVec[featIndex] == key:\n if type(secondDict[key]).__name__ == 'dict':\n classLabel 
= classify(secondDict[key], featLables, testVec)\n else:\n classLabel = secondDict[key]\n return classLabel\n\n\n# 存储函数 classifierStorage.txt\ndef storeTree(inputTree, filename):\n fw = open(filename, 'wb') # pickle默认方式是二进制,需要制定'wb'\n pickle.dump(inputTree, fw)\n fw.close()\n\n\n# 读取函数 classifierStorage.txt\ndef grabTree(filename):\n fr = open(filename, 'rb') # 需要制定'rb',以byte形式读取\n return pickle.load(fr)\n\n\n# if __name__ == '__main__':\n# dataset, labels = DataSetPredo('xigua.txt', [0, 1, 2, 3, 4, 5, 6, 7, 8])\n# print(labels)\n# weight = [1.0 for i in range(len(dataset))]\n# myTree = createTree(dataset[:100], weight, labels)\n# # myTree = createTree(dataset[:100], labels, dataset_full, labels_full)\n# print(myTree)\n# createPlot(myTree)\n# i = 1\n# cnt = 0\n# for lis in dataset[100:]:\n# judge = classify(myTree, labels, lis[:-1])\n# target = lis[-1]\n# if judge == target:\n# cnt += 1\n# print(\"Test %d was classified %s, it's class is %s %s\" % (i, judge, target, \"=====\" if judge == target else \"\"))\n# i += 1\n# print(\"The Tree's Accuracy is %.3f\" % (cnt / float(i)))\n\n","repo_name":"DeceiverWu/python-id3-stuOnline","sub_path":"stuOnline/apps/content/id3.py","file_name":"id3.py","file_ext":"py","file_size_in_byte":13104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12781645936","text":"\nfrom ASTListener import ASTListener\nfrom SymbolTable.SymbolTable import *\nfrom SymbolTable.SymbolInfo import *\nfrom AST import Expression, Function, Literals, Program, Statement, Variable\n\n\nclass SemanticValidator(ASTListener):\n def __init__(self):\n self.symbolTable = SymbolTable()\n self.errors = []\n\n def enterProgram(self, node):\n self.symbolTable.newScope()\n\n def exitProgram(self, node):\n self.symbolTable.endScope()\n\n def enterCompound(self, node):\n self.symbolTable.newScope()\n\n def exitCompound(self, node):\n self.symbolTable.endScope()\n\n def enterVariableDecl(self, node):\n declList = node.declList\n for varDeclInit in declList.declInitializeList:\n # Check if new var already exists in current scope\n symbolInfo = self.symbolTable.getSymbolInCurrentScope(varDeclInit.name)\n if symbolInfo is None:\n if type(varDeclInit) is Variable.ArrayInitialize:\n self.symbolTable.addSymbol(varDeclInit.name, ArrayInfo(node.type, varDeclInit.size))\n else:\n self.symbolTable.addSymbol(varDeclInit.name, VarInfo(node.type))\n else:\n self.errors.append(varDeclInit.getPosition() + \": Redefinition of '\" + varDeclInit.name + \"'\")\n\n def enterVarDeclInitialize(self, node):\n symbolInfo = self.symbolTable.getSymbol(node.name)\n if node.expression is not None:\n # if (node.expression is Expression.BinOp and (symbolInfo.type == \"char\" and not hasattr(symbolInfo,type))):\n # self.errors.append(\n # node.getPosition() + \": Type mismatch: expected '\" + symbolInfo.type + \"' but found 'BinOp'\")\n # else:\n # getTypeResult = getType(node.right, symbolInfo.type, self.symbolTable)\n # if symbolInfo.type != getTypeResult[0] and getTypeResult[0] != \"undefined input\":\n # self.errors.append(getTypeResult[1] + \": Type mismatch: expected '\" + symbolInfo.type + \"' but found '\" +getTypeResult[0] + \"'\")\n if (isinstance(node.expression,Expression.BinOp)and (symbolInfo.type == \"char\" and not hasattr(symbolInfo,\"size\"))):\n self.errors.append(node.getPosition() + \": Type mismatch: expected '\" + symbolInfo.type + \"' but found 'BinOp'\")\n else:\n getTypeResult = getType(node.expression, symbolInfo.type, self.symbolTable)\n if 
symbolInfo.type != getTypeResult[0] and getTypeResult[0] != \"undefined input\":\n self.errors.append(getTypeResult[1] + \": Type mismatch for '\" + node.name + \"': expected '\" + symbolInfo.type + \"' but found '\" +getTypeResult[0] + \"'\")\n\n def enterCall(self, node):\n symbolInfo = self.symbolTable.getSymbol(node.funcName)\n if symbolInfo is None or type(symbolInfo) is not FunctionInfo:\n if node.funcName == \"printf\" or node.funcName == \"scanf\":\n if len(node.args) > 0:\n paramType = getType(node.args[0], \"string\", self.symbolTable)\n if paramType[0] != \"string\" and paramType[0] != \"char*\":\n self.errors.append(paramType[1] + \": Wrong parameter type for '\" + node.funcName + \"'! Expected: 'char*' found '\" + paramType[0] + \"'\")\n else:\n self.errors.append(node.getPosition() + \": Wrong amount of parameters for '\" + node.funcName + \"'! Expected at least one argument\")\n\n else:\n self.errors.append(node.getPosition() + \": Undefined reference to '\" + node.funcName + \"'\")\n else:\n symbolInfo.used = True\n if symbolInfo.isDecl:\n self.errors.append(node.getPosition() + \": Undefined reference to '\" + node.funcName + \"'\")\n else:\n if len(node.args) == len(symbolInfo.paramTypes):\n for i in range (0, len(symbolInfo.paramTypes)):\n foundParamType = getType(node.args[i], symbolInfo.paramTypes[i],self.symbolTable)[0]\n if foundParamType != symbolInfo.paramTypes[i]:\n self.errors.append(node.getPosition() + \": Wrong parameter type for '\" + node.funcName + \"'! Expected: '\" + symbolInfo.paramTypes[i] + \"' found '\" + foundParamType + \"'\")\n else:\n self.errors.append(node.getPosition() + \": Wrong amount of parameters for '\" + node.funcName + \"'! Expected: \" + str(len(symbolInfo.paramTypes)) + \" found \" + str(len(node.args)))\n\n def enterMutable(self, node):\n symbolInfo = self.symbolTable.getSymbol(node.name)\n if symbolInfo is None or (type(symbolInfo) is not VarInfo and type(symbolInfo) is not ArrayInfo):\n self.errors.append(node.getPosition() + \": Undefined reference to '\" + node.name + \"'\")\n else:\n symbolInfo.used = True\n\n def enterSubScript(self, node):\n symbolInfo = self.symbolTable.getSymbol(node.mutable.name)\n if symbolInfo is None or type(symbolInfo) is not ArrayInfo:\n self.errors.append(node.getPosition() + \": Subscripted value '\" + node.mutable.name + \"' is not an array\")\n else:\n symbolInfo.used = True\n # if int(symbolInfo.size) < int(node.index._int):\n # self.errors.append(node.index.getPosition() + \": Index out of range for '\" + node.mutable.name + \"'! Max index: '\" + str(\n # int(symbolInfo.size) - 1) + \"' but found '\" + str(node.index._int) + \"'\")\n\n def enterFunctionDef(self, node):\n # Check if new function already exists\n symbolInfo = self.symbolTable.getSymbol(node.name)\n if symbolInfo is None:\n params = node.params\n paramTypes = []\n for param in params.params:\n paramTypes.append(param.type)\n self.symbolTable.addSymbol(param.name, VarInfo(param.type))\n\n self.symbolTable.addSymbol(node.name, FunctionInfo(node.returns, paramTypes))\n self.symbolTable.newScope()\n\n elif type(symbolInfo) is FunctionInfo and symbolInfo.isDecl:\n # Previous declaration => check if definition matches declaration\n if node.returns != symbolInfo.type:\n self.errors.append(node.getPosition() + \": Wrong return type for '\" + node.name + \"'! 
Expected: '\" + symbolInfo.type + \"' found '\" + node.returns + \"'\")\n\n params = node.params\n paramTypes = []\n if len(params.params) == len(symbolInfo.paramTypes):\n for i in range (0, len(symbolInfo.paramTypes)):\n if params.params[i].type != symbolInfo.paramTypes[i]:\n self.errors.append(params.params[i].getPosition() + \": Wrong parameter type for '\" + node.name + \"'! Expected: '\" + symbolInfo.paramTypes[i] + \"' found '\" + params.params[i].type + \"'\")\n paramTypes.append(params.params[i].type)\n self.symbolTable.addSymbol(params.params[i].name, VarInfo(params.params[i].type))\n else:\n self.errors.append(node.getPosition() + \": Wrong amount of parameters for '\" + node.name + \"'! Expected: \" + str(len(symbolInfo.paramTypes)) + \" found \" + str(len(params.params)))\n\n self.symbolTable.addSymbol(node.name, FunctionInfo(node.returns, paramTypes))\n self.symbolTable.newScope()\n\n else:\n self.errors.append(node.getPosition() + \": Redefinition of '\" + node.name + \"'\")\n\n def exitFunctionDef(self, node):\n self.symbolTable.endScope()\n\n def enterFunctionDecl(self, node):\n # Check if new function already exists\n symbolInfo = self.symbolTable.getSymbol(node.name)\n if symbolInfo is None:\n params = node.params\n paramTypes = []\n for param in params.params:\n paramTypes.append(param.type)\n self.symbolTable.addSymbol(node.name, FunctionInfo(node.returns, paramTypes, isDecl=True))\n else:\n self.errors.append(node.getPosition() + \": Redefinition of '\" + node.name + \"'\")\n\n def enterAssign(self, node):\n symbolInfo = None\n if type(node.left) is Expression.SubScript:\n symbolInfo = self.symbolTable.getSymbol(node.left.mutable.name)\n else:\n symbolInfo = self.symbolTable.getSymbol(node.left.name)\n if symbolInfo is not None:\n if (isinstance(node.right,Expression.BinOp) and (symbolInfo.type == \"char\" and not hasattr(symbolInfo,\"size\"))):\n self.errors.append(node.getPosition() + \": Type mismatch: expected '\" + symbolInfo.type + \"' but found 'BinOp'\")\n else:\n getTypeResult = getType(node.right, symbolInfo.type, self.symbolTable)\n if((symbolInfo.type == \"float\" or symbolInfo.type==\"double\") and (getTypeResult[0]!=\"float\" and getTypeResult[0]!=\"double\")):\n self.errors.append(getTypeResult[1] + \": Type mismatch: expected '\" + symbolInfo.type + \"' but found '\" +getTypeResult[0] + \"'\")\n elif ((symbolInfo.type==\"int\" or symbolInfo.type==\"long\" or symbolInfo.type==\"signed\" or symbolInfo.type==\"unsigned\")and(getTypeResult[0]!=\"int\" and getTypeResult[0]!=\"long\" and getTypeResult[0]!=\"signed\" and getTypeResult[0]!=\"unsigned\")):\n self.errors.append(getTypeResult[1] + \": Type mismatch: expected '\" + symbolInfo.type + \"' but found '\" +getTypeResult[0] + \"'\")\n\n def enterBinOp(self, node):\n foundMismatch=False\n if type(node.left) is not Expression.BinOp:\n leftType = getType(node.left,\"\",self.symbolTable)[0]\n else:\n leftTypeResult = checkBinOp(node.left,self.symbolTable)\n\n if (leftTypeResult[0] == \"string\" or leftTypeResult[0] == \"char\"):\n if (leftTypeResult[1] == \"string\" or leftTypeResult[1] == \"char\"):\n leftType = leftTypeResult[0]\n else:\n foundMismatch = True\n error = leftTypeResult[2].getPosition() + \": type mismatch! 
Cannot compare '\" + leftTypeResult[\n 0] + \"' with '\" + leftTypeResult[1] + \"'\"\n if error not in self.errors:\n self.errors.append(error)\n\n else:\n if (leftTypeResult[1] == \"string\" or leftTypeResult[1] == \"char\"):\n foundMismatch = True\n error = leftTypeResult[2].getPosition() + \": type mismatch! Cannot compare '\" + leftTypeResult[\n 0] + \"' with '\" + leftTypeResult[1] + \"'\"\n if error not in self.errors:\n self.errors.append(error)\n\n else:\n leftType = leftTypeResult[0]\n if type(node.right)is not Expression.BinOp:\n rightType = getType(node.right,\"\",self.symbolTable)[0]\n else:\n rightTypeResult = checkBinOp(node.right,self.symbolTable)\n if (rightTypeResult[0] == \"string\" or rightTypeResult[0] == \"char\"):\n if (rightTypeResult[1] == \"string\" or rightTypeResult[1] == \"char\"):\n rightType = rightTypeResult[0]\n else:\n foundMismatch = True\n error = rightTypeResult[2].getPosition() + \": type mismatch! Cannot compare '\" + rightTypeResult[\n 0] + \"' with '\" + rightTypeResult[1] + \"'\"\n if error not in self.errors:\n self.errors.append(error)\n\n else:\n if (rightTypeResult[1] == \"string\" or rightTypeResult[1] == \"char\"):\n foundMismatch = True\n error = rightTypeResult[2].getPosition() + \": type mismatch! Cannot compare '\" + rightTypeResult[\n 0] + \"' with '\" + rightTypeResult[1] + \"'\"\n if error not in self.errors:\n self.errors.append(error)\n\n else:\n rightType = rightTypeResult[0]\n\n if not foundMismatch:\n if(leftType == \"string\" or leftType == \"char\"):\n if(rightType!=\"string\" and rightType!=\"char\"):\n error = node.getPosition() + \": type mismatch! Cannot compare '\" + leftType + \"' with '\" + rightType + \"'\"\n if error not in self.errors:\n self.errors.append(error)\n else:\n if(rightType==\"string\" or rightType==\"char\"):\n error = node.getPosition() + \": type mismatch! 
Cannot compare '\" + leftType + \"' with '\" +rightType+ \"'\"\n if error not in self.errors:\n self.errors.append(error)\n\n\ndef getType(expression,expectedType,symbolTable):\n if type(expression) is Expression.BinOp:\n assignee = \"assignee\"\n if type(expression.right) is Expression.BinOp:\n if expression.right.operator.value == \"<\" or expression.right.operator.value == \">\" or expression.right.operator.value == \"==\":\n return [\"int\", expression.right.getPosition()]\n else:\n childResult = checkBinOp(expression.right, symbolTable)\n return [childResult[1], childResult[2].getPosition()]\n if type(expression.right) is Expression.Mutable:\n assignee = symbolTable.getSymbol(expression.right.name)\n if type(expression.right) is Expression.Call:\n assignee = symbolTable.getSymbol(expression.right.funcName)\n if type(expression.right) is Expression.SubScript:\n assignee = symbolTable.getSymbol(expression.right.mutable.name)\n if assignee is not None: # Opmerking: is toch altijd \"not None\"?\n assigneeType = \"AType\"\n if hasattr(assignee, \"type\"):\n assigneeType = assignee.type\n if type(expression.right) is Literals.Int or assigneeType == \"short\" or assigneeType == \"int\" \\\n or assigneeType == \"signed\" or assigneeType == \"unsigned\":\n if expectedType == \"short\" or expectedType == \"int\" or expectedType == \"signed\"\\\n or expectedType == \"unsigned\" or expectedType == \"float\" or expectedType == \"double\":\n return getType(expression.left, expectedType, symbolTable)\n else:\n return [\"int\", expression.right.getPosition()]\n if type(expression.right) is Literals.Double or assigneeType == \"double\" or assigneeType == \"float\":\n if expectedType == \"float\" or expectedType == \"double\":\n return getType(expression.left, expectedType, symbolTable)\n else:\n return [\"double\", expression.right.getPosition()]\n if type(expression.right) is Literals.String or assigneeType == \"string\" or Literals.Char or assigneeType==\"char\":\n if expectedType == \"char\":\n return getType(expression.left, expectedType, symbolTable)\n else:\n return [\"string\", expression.right.getPosition()]\n\n else:\n return [\"undefined input\", expression.right.getPosition()]\n\n foundType = None\n if type(expression) is Expression.Mutable:\n found = symbolTable.getSymbol(expression.name)\n if found is not None:\n foundType = found.type\n else:\n foundType = \"undefined input\"\n if type(expression) is Expression.Call:\n found = symbolTable.getSymbol(expression.funcName)\n if found is not None:\n foundType = found.type\n else:\n foundType = \"undefined input\"\n if type(expression) is Expression.SubScript:\n found = symbolTable.getSymbol(expression.mutable.name)\n if found is not None:\n foundType = found.type\n else:\n foundType = \"undefined input\"\n\n if foundType is not None:\n if foundType!=\"undefined input\":\n if foundType == \"short\" or foundType == \"int\" or foundType == \"signed\" or foundType == \"unsigned\":\n if expectedType == \"short\" or expectedType == \"int\" or expectedType == \"signed\" or expectedType == \"unsigned\" or expectedType == \"float\" or expectedType == \"double\":\n return [expectedType,expression.getPosition()]\n else:\n return [foundType, expression.getPosition()]\n if foundType == \"double\" or foundType == \"float\":\n if expectedType == \"float\" or expectedType == \"double\":\n return [expectedType, expression.getPosition()]\n else:\n return [foundType, expression.getPosition()]\n if foundType == \"string\":\n if expectedType == \"char\" or 
expectedType==\"string\":\n return [expectedType, expression.getPosition()]\n else:\n return [foundType, expression.getPosition()]\n if foundType ==\"char\":\n if expectedType == \"char\":\n return [expectedType, expression.getPosition()]\n else:\n return [foundType, expression.getPosition()]\n else:\n return [\"undefined input\", expression.getPosition()]\n\n # Literals\n if type(expression) is Literals.Int:\n if expectedType == \"short\" or expectedType == \"int\" or expectedType == \"signed\" or expectedType == \"unsigned\"\\\n or expectedType == \"float\" or expectedType == \"double\":\n return [expectedType, expression.getPosition()]\n else:\n return [\"int\", expression.getPosition()]\n if type(expression) is Literals.Double:\n if expectedType == \"float\" or expectedType == \"double\":\n return [expectedType, expression.getPosition()]\n else:\n return [\"double\", expression.getPosition()]\n if type(expression) is Literals.String:\n if expectedType == \"char\":\n return [expectedType, expression.getPosition()]\n else:\n return [\"string\", expression.getPosition()]\n if type(expression) is Literals.Char:\n if expectedType == \"char\":\n return [expectedType, expression.getPosition()]\n else:\n return [\"char\", expression.getPosition()]\n\n\ndef checkBinOp(expression,symbolTable):\n if type(expression.left) is not Expression.BinOp:\n leftType = getType(expression.left, \"\", symbolTable)[0]\n else:\n leftTypeResult = checkBinOp(expression.left, symbolTable)\n if (leftTypeResult[0] == \"string\" or leftTypeResult[0] == \"char\"):\n if (leftTypeResult[1] == \"string\" or leftTypeResult[1] == \"char\"):\n leftType = leftTypeResult[0]\n else:\n return [leftTypeResult[0], leftTypeResult[1], leftTypeResult[2]]\n else:\n if (leftTypeResult[1] == \"string\" or leftTypeResult[1] == \"char\"):\n return [leftTypeResult[0], leftTypeResult[1], leftTypeResult[2]]\n else:\n leftType = leftTypeResult[0]\n if type(expression.right) is not Expression.BinOp:\n rightType = getType(expression.right, \"\", symbolTable)[0]\n else:\n rightTypeResult = checkBinOp(expression.right, symbolTable)\n if (rightTypeResult[0] == \"string\" or rightTypeResult[0] == \"char\"):\n if (rightTypeResult[1] == \"string\" or rightTypeResult[1] == \"char\"):\n rightType = rightTypeResult[0]\n else:\n return [rightTypeResult[0], rightTypeResult[1], rightTypeResult[2]]\n else:\n if (rightTypeResult[1] == \"string\" or rightTypeResult[1] == \"char\"):\n return [rightTypeResult[0], rightTypeResult[1], rightTypeResult[2]]\n else:\n rightType = rightTypeResult[0]\n return[leftType,rightType,expression]\n","repo_name":"ZhongXiLu/compilers","sub_path":"src/SemanticValidator.py","file_name":"SemanticValidator.py","file_ext":"py","file_size_in_byte":19914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43798865783","text":"# 2668 숫자고르기\n# 31256 KB / 40 ms\n\nimport sys\ninput = sys.stdin.readline\n\ndef dfs(x,now):\n stack = [(x,now)]\n v[x] = True\n while stack:\n x, now = stack.pop()\n now.append(x)\n for j in g[x]:\n # 현재 탐색에 없는 경우\n if j not in now:\n v[j] = True\n stack.append((j, now.copy()))\n else: # 현재 탐색에 있는 경우(cycle 발생)\n result.append(now)\n return\n\nn = int(input())\ng = [[] for _ in range(n+1)]\nv = [False]*(n+1)\nfor i in range(1, n+1):\n a = int(input())\n g[a].append(i)\n\n# 탐색하면서 cycle일 경우 result에 저장\nresult = []\nfor i in range(1, n+1):\n if not v[i]:\n dfs(i,[])\n\n# 합하고 정렬후 len과 요소 출력\nresult = 
sum(result,[])\nresult.sort()\nprint(len(result))\nprint('\\n'.join(map(str,result)))","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week14_230413/2668_숫자고르기/2668_정광배.py","file_name":"2668_정광배.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"30188635933","text":"# coding: utf-8\n\nimport urllib.request\nimport urllib.parse\nfrom lxml import html\nfrom bs4 import BeautifulSoup\nimport textwrap\nfrom mini_templates import Tmp\n\n\nclass MiniReadability:\n def __init__(self, url):\n self._url = url\n\n def get_article_bs(self):\n article_text = ''\n with urllib.request.urlopen(self._url) as f:\n host_name = self._get_host_name(self._url)\n tmp = Tmp()\n template = tmp.get_templates_from_host_name(host_name)\n meta_tags = template['meta_tags']\n text_tags = template['text_tags']\n delete_tags = template['delete_tags']\n html_raw = f.read().decode('utf-8')\n root_bs = BeautifulSoup(html_raw, \"html.parser\")\n # print(html_raw)\n print('site:', host_name, end=' ')\n if len(meta_tags) == 0:\n print('template not find')\n else:\n print('template find')\n text_nodes = self._get_nodes_with_text_bs(root_bs, meta_tags, text_tags, delete_tags)\n # print(text_nodes)\n article_text = self._get_text_from_nodes(text_nodes, [])\n # print(article_text)\n return article_text\n\n\n @staticmethod\n def _get_host_name(url):\n parse_url = urllib.parse.urlparse(url)\n return parse_url.hostname\n\n def _get_nodes_with_text_bs(self, root_bs, meta_tags, text_tags, delete_tags)-> list:\n if len(delete_tags) > 0:\n delete_nodes = root_bs.find_all(delete_tags)\n for delete in delete_nodes:\n delete.extract()\n text_nodes = []\n if len(meta_tags) == 0:\n # сайт был без шаблона, надо найти
с самым длинным текстом\n text_nodes = root_bs.find_all(text_tags)\n fat_text_node = self._get_parent_node_with_longest_text(text_nodes)\n text_nodes = fat_text_node.find_all(text_tags)\n pass\n else:\n # сайт с шаблоном, ищем по шаблону\n meta_nodes = []\n for mt in meta_tags:\n for key, value in mt.items():\n nodes_list = root_bs.find_all(key, value)\n meta_nodes += nodes_list\n for node in meta_nodes:\n list_n = node.find_all(text_tags)\n text_nodes += list_n\n return text_nodes\n\n @staticmethod\n def _get_parent_node_with_longest_text(text_nodes):\n parents = {}\n for node in text_nodes:\n parent = node.parent\n if parent in parents.keys():\n parents[parent] += len(node.text)\n else:\n parents.update({parent: len(node.text)})\n max_len = max(parents.values())\n max_node = [key for key, value in parents.items() if value == max_len]\n return max_node[0]\n\n @staticmethod\n def _get_text_from_nodes(text_nodes, tags_replace_template)-> str:\n text = ''\n for node in text_nodes:\n refs = node.find_all('a')\n for ref in refs:\n ref_str = ref.text + ' [' + ref.get('href') + ']'\n ref.replaceWith(ref_str)\n node_text = node.text\n node_text = ''.join(node_text)\n node_text = node_text.replace('\\n', '')\n node_text = node_text.replace('\\t', '')\n text += '\\n'.join(textwrap.wrap(node_text)) + '\\n\\n'\n return text\n\n def _get_nodes_with_text(self, html_raw, meta_tags, text_tags) ->dict:\n nodes_with_text = {}\n tree = html.fromstring(html_raw)\n div_nodes = tree.xpath('//div')\n\n for node in div_nodes:\n text_tags_count = 0\n for text_t in text_tags:\n text_children = node.xpath('./' + text_t)\n text_tags_count += len(text_children)\n # print(node.attrib)\n # print('children tags with text for node = %d' % text_tags_count)\n # print()\n if text_tags_count > 0:\n nodes_with_text.update({node: text_tags_count})\n return nodes_with_text\n","repo_name":"GoonChoo/mini-readability","sub_path":"MiniReadability.py","file_name":"MiniReadability.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33044342553","text":"import tensorflow as tf\r\n\r\ndef focal_loss(y_true, y_pred, alpha=0.25, gamma=2.0):\r\n # y_true: one-hot encoded ground truth labels\r\n # y_pred: predicted class probabilities\r\n # alpha: balancing parameter for positive and negative examples\r\n # gamma: focusing parameter to down-weight easy examples\r\n\r\n # convert one-hot encoded ground truth labels to class indices\r\n y_true = tf.argmax(y_true, axis=-1)\r\n\r\n # compute cross-entropy loss\r\n ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\r\n\r\n # compute class probabilities from logits\r\n y_pred = tf.nn.softmax(y_pred, axis=-1)\r\n\r\n # compute weights based on the focal loss formula\r\n alpha_factor = tf.ones_like(y_true) * alpha\r\n alpha_factor = tf.where(tf.equal(y_true, 1), alpha_factor, 1 - alpha_factor)\r\n pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)\r\n focal_weight = alpha_factor * tf.pow(1 - pt, gamma)\r\n\r\n # compute the final loss by multiplying the cross-entropy loss with the weights\r\n focal_loss = focal_weight * ce_loss\r\n return tf.reduce_mean(focal_loss)","repo_name":"samuelokpor/modiffed-SSD-with-Attention-Mechanisms","sub_path":"focal_loss.py","file_name":"focal_loss.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} 
+{"seq_id":"33448157277","text":"import json\nimport numpy as np\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn.metrics import make_scorer, mean_squared_error\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import KFold\nfrom sklearn.ensemble import RandomForestRegressor\n\ndata = np.zeros((99999999, 5))\n\nwith open(\"tweets_#patriots.txt\") as f:\n tweets = f.readlines()\n print(len(tweets))\n nums = 0\n for t in tweets:\n tw = json.loads(t)\n if tw['type'] == 'tweet':\n data[nums][0] = tw['author']['followers']\n data[nums][1] = tw['metrics']['impressions']\n data[nums][2] = tw['metrics']['citations']['influential']\n data[nums][3] = int(tw['tweet']['created_at'][11:13])\n data[nums][4] = tw['metrics']['citations']['total']\n nums += 1\n print(nums)\n\n\ndata = data[:nums].astype(np.uint32)\nnp.random.shuffle(data)\n\nX = data[:,:-1]\nY = data[:,-1]\n\n\n#random forest\nc = 0.1\n\nnum_trees = np.arange(1,17) * 3\nnum_features = list(range(1,4))\nnum_depths = list(range(3,7))\n\nmin_rmse_index = []\nplt.figure()\nfor ndepths in num_depths:\n rmse = []\n for ntrees in num_trees: \n print(\"Doing rmse - depths: \", ndepths, \", trees: \", ntrees) \n test_mse =[]\n train_mse =[]\n kf = KFold(n_splits=10, random_state=None, shuffle=False)\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n Y_train, Y_test = Y[train_index], Y[test_index]\n train_size = X_train.shape[0]\n test_size = X_test.shape[0]\n regr = RandomForestRegressor(n_estimators=ntrees, max_depth=ndepths, max_features= 4, n_jobs=-1)\n regr.fit(X_train, Y_train)\n Y_test_predict = regr.predict(X_test)\n test_mse.append(mean_squared_error(Y_test, Y_test_predict))\n rmse.append(np.sqrt(np.mean(test_mse)))\n\n min_rmse_index.append(rmse.index(min(rmse)))\n\n y = rmse\n x = num_trees\n plt.plot(x, y, lw=2, label=\"# of depths = \"+str(ndepths))\n plt.grid(color=str(c), linestyle='--', linewidth=1)\n c = c + 0.1\nplt.xlabel('# of trees')\nplt.ylabel('RMSE')\nplt.legend()\n\n\n#polynomial\nnum_orders = list(range(1,8))\n\nplt.figure()\nrmse = []\nfor order in num_orders:\n print(\"Doing rmse - order: \", order) \n test_mse =[]\n train_mse =[]\n kf = KFold(n_splits=10, random_state=None, shuffle=False)\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n Y_train, Y_test = Y[train_index], Y[test_index]\n train_size = X_train.shape[0]\n test_size = X_test.shape[0]\n poly = PolynomialFeatures(order)\n X_train = poly.fit_transform(X_train)\n X_test = poly.fit_transform(X_test)\n model = linear_model.Ridge(alpha=1)\n model.fit(X_train,Y_train)\n #regr = RandomForestRegressor(n_estimators=ntrees, max_depth=ndepths, max_features= 4, n_jobs=-1)\n #regr.fit(X_train, Y_train)\n Y_test_predict = model.predict(X_test)\n test_mse.append(mean_squared_error(Y_test, Y_test_predict))\n tmp = np.sqrt(np.mean(test_mse))\n print(tmp)\n rmse.append(tmp)\n\ny = rmse\nx = num_orders\nplt.plot(x, y, lw=2)\nplt.grid(linestyle='--', linewidth=1)\nplt.yscale('log') \nplt.xlabel('# of 
order')\nplt.ylabel('RMSE')\nplt.savefig('plot/3-RMSE-poly-superbowl.png')\nplt.clf()\nplt.savefig('plot/3-RMSE-randomforrest-patriots.png')\nplt.clf()","repo_name":"AChin1311/Popularity-Prediction-on-Twitter","sub_path":"Q_3.py","file_name":"Q_3.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13548909253","text":"import os\nimport numpy\n\nfrom ximpol import XIMPOL_CONFIG, XIMPOL_DATA, XIMPOL_DOC\nfrom ximpol.utils.logging_ import logger\nfrom ximpol.core.pipeline import xPipeline\nfrom ximpol.evt.binning import xBinnedModulationCube, xEventBinningBase\nfrom ximpol.evt.event import xEventFile\nfrom ximpol.utils.matplotlib_ import pyplot as plt\nfrom ximpol.utils.matplotlib_ import save_current_figure\nfrom ximpol.config.crab_pulsar import pol_degree_spline, pol_angle_spline,\\\n pl_index_spline, pl_normalization_spline\n\n\n\"\"\"Script-wide simulation and analysis settings.\n\"\"\"\nCFG_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'crab_pulsar.py')\n#OUT_FILE_PATH_BASE = os.path.join(XIMPOL_DATA, 'crab_pulsar_varGain_singleExp')\nOUT_FILE_PATH_BASE = os.path.join(XIMPOL_DATA, 'crab_pulsar')\nEVT_FILE_PATH = '%s.fits' % OUT_FILE_PATH_BASE\nANALYSIS_FILE_PATH = '%s_analysis.txt' % OUT_FILE_PATH_BASE\nSIM_DURATION = 100000.\nDETECTOR = 'IXPE'\nNUM_PHASE_BINS = 25\nEQP_BINNING = False\nPHASE_BINNING = None\nE_BINNING = [1., 10.]\nOUTPUT_FOLDER = os.path.join(XIMPOL_DOC, 'figures', 'showcase')\n\n\n\"\"\"Main pipeline object.\n\"\"\"\nPIPELINE = xPipeline(clobber=False)\n\n\ndef _sel_file_path(i):\n \"\"\"Return the path to the i-th xpselct output file.\n \"\"\"\n return '%s_phase%04d.fits' % (OUT_FILE_PATH_BASE, i)\n\ndef _mcube_file_path(i):\n \"\"\"Return the path to the i-th xpbin MCUBE output file.\n \"\"\"\n return '%s_phase%04d_mcube.fits' % (OUT_FILE_PATH_BASE, i)\n\ndef _pha1_file_path(i):\n \"\"\"Return the path to the i-th xpbin PHA1 output file.\n \"\"\"\n return '%s_phase%04d_pha1.fits' % (OUT_FILE_PATH_BASE, i)\n\ndef _phase_binning():\n \"\"\"Read the input event file and create an equipopulated binning in the\n pulsar phase.\n \"\"\"\n if EQP_BINNING:\n evt_file = xEventFile(EVT_FILE_PATH)\n phase = evt_file.event_data['PHASE']\n return xEventBinningBase.equipopulated_binning(NUM_PHASE_BINS, phase,\n 0., 1.)\n else:\n return numpy.linspace(0., 1., NUM_PHASE_BINS)\n\n\ndef generate():\n \"\"\"Generate the events.\n \"\"\"\n PIPELINE.xpobssim(configfile=CFG_FILE_PATH, duration=SIM_DURATION,\n outfile=EVT_FILE_PATH)\n\ndef prepare():\n \"\"\"Prepare the event data for the actual analysis.\n \"\"\"\n for i, (_min, _max) in enumerate(zip(PHASE_BINNING[:-1],\n PHASE_BINNING[1:])):\n PIPELINE.xpselect(EVT_FILE_PATH, phasemin=_min, phasemax=_max,\n outfile=_sel_file_path(i))\n PIPELINE.xpbin(_sel_file_path(i), algorithm='MCUBE', ebinalg='LIST',\n ebinning=E_BINNING, outfile=_mcube_file_path(i))\n PIPELINE.xpbin(_sel_file_path(i), algorithm='PHA1',\n outfile=_pha1_file_path(i))\n\ndef analyze():\n \"\"\"Analyze the data.\n \"\"\"\n logger.info('Opening output file %s...' 
% ANALYSIS_FILE_PATH)\n analysis_file = open(ANALYSIS_FILE_PATH, 'w')\n for i, (_min, _max) in enumerate(zip(PHASE_BINNING[:-1],\n PHASE_BINNING[1:])):\n _mcube = xBinnedModulationCube(_mcube_file_path(i))\n _mcube.fit()\n _fit_results = _mcube.fit_results[0]\n _phase = 0.5*(_min + _max)\n _phase_err = 0.5*(_max - _min)\n _pol_deg = _fit_results.polarization_degree\n _pol_deg_err = _fit_results.polarization_degree_error\n _pol_angle = _fit_results.phase\n _pol_angle_err = _fit_results.phase_error\n _spec_fitter = PIPELINE.xpxspec(_pha1_file_path(i), plot=False)\n (_index, _index_err), (_norm, _norm_err) = _spec_fitter.fit_parameters()\n # The division by the phase interval is a workaround and we should\n # keep track of that in xpselect.\n _norm /= (_max - _min)\n _norm_err /= (_max - _min)\n _data = (_phase, _phase_err, _pol_deg, _pol_deg_err, _pol_angle,\n _pol_angle_err, _index, _index_err, _norm, _norm_err)\n _fmt = ('%.4e ' * len(_data)).strip()\n _fmt = '%s\\n' % _fmt\n _line = _fmt % _data\n analysis_file.write(_line)\n analysis_file.close()\n\ndef plot(save=False):\n \"\"\"Plot the stuff in the analysis file.\n \"\"\"\n sim_label = '%s %s ks' % (DETECTOR,SIM_DURATION/1000.)\n mod_label = 'Input model'\n lc_label = 'Light curve'\n _phase, _phase_err, _pol_deg, _pol_deg_err, _pol_angle,\\\n _pol_angle_err, _index, _index_err, _norm,\\\n _norm_err = numpy.loadtxt(ANALYSIS_FILE_PATH, unpack=True)\n plt.figure('Polarization degree')\n pl_normalization_spline.plot(scale=0.12, show=False, color='lightgray',\n label=lc_label)\n plt.errorbar(_phase, _pol_deg, xerr=_phase_err, yerr=_pol_deg_err, fmt='o',\n label=sim_label)\n pol_degree_spline.plot(show=False, label=mod_label)\n plt.axis([0., 1., 0., 0.5])\n plt.legend(bbox_to_anchor=(0.45, 0.95))\n if save:\n save_current_figure('crab_polarization_degree', OUTPUT_FOLDER, False)\n plt.figure('Polarization angle')\n pl_normalization_spline.plot(scale=0.4, offset=1.25, show=False,\n color='lightgray', label=lc_label)\n plt.errorbar(_phase, _pol_angle, xerr=_phase_err, yerr=_pol_angle_err,\n fmt='o', label=sim_label)\n pol_angle_spline.plot(show=False, label=mod_label)\n plt.axis([0., 1., 1.25, 3.])\n plt.legend(bbox_to_anchor=(0.45, 0.95))\n if save:\n save_current_figure('crab_polarization_angle', OUTPUT_FOLDER, False)\n plt.figure('PL normalization')\n plt.errorbar(_phase, _norm, xerr=_phase_err, yerr=_norm_err, fmt='o',\n label=sim_label)\n pl_normalization_spline.plot(show=False, label=mod_label)\n plt.axis([0., 1., None, None])\n plt.legend(bbox_to_anchor=(0.45, 0.95))\n if save:\n save_current_figure('crab_pl_norm', OUTPUT_FOLDER, False)\n plt.figure('PL index')\n pl_normalization_spline.plot(scale=0.18, offset=1.3, show=False,\n color='lightgray', label=lc_label)\n plt.errorbar(_phase, _index, xerr=_phase_err, yerr=_index_err, fmt='o',\n label=sim_label)\n pl_index_spline.plot(show=False, label=mod_label)\n plt.axis([0., 1., 1.3, 2.1])\n plt.legend(bbox_to_anchor=(0.45, 0.95))\n if save:\n save_current_figure('crab_pl_index', OUTPUT_FOLDER, False)\n plt.show()\n\ndef run(save_plots=False):\n \"\"\"Run all the tasks.\n \"\"\"\n if os.path.exists(ANALYSIS_FILE_PATH):\n logger.info('%s exists, delete it if you want to recreate it.' 
%\\\n ANALYSIS_FILE_PATH)\n else:\n generate()\n global PHASE_BINNING\n PHASE_BINNING = _phase_binning()\n prepare()\n analyze()\n plot(save_plots)\n\n\nif __name__ == '__main__':\n run(save_plots=True)\n","repo_name":"lucabaldini/ximpol","sub_path":"ximpol/examples/crab_pulsar.py","file_name":"crab_pulsar.py","file_ext":"py","file_size_in_byte":6800,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"33850772267","text":"from __future__ import division, unicode_literals\n\n\nVERSION = '0.19.1'\n__version__ = VERSION\n\n# Used for 'User-Agent' in HTTP and 'Creator' in PDF\nVERSION_STRING = 'WeasyPrint %s (http://weasyprint.org/)' % VERSION\n\n__all__ = ['HTML', 'CSS', 'Document', 'Page', 'default_url_fetcher',\n 'VERSION']\n\n\nimport lxml.etree\n\nfrom .urls import (default_url_fetcher, wrap_url_fetcher,\n path2url, ensure_url, url_is_absolute)\nfrom .logger import LOGGER\n# Some import are at the end of the file (after the CSS class) is defined\n# to work around circular imports.\n\n\nclass HTML(object):\n \"\"\"Represents an HTML document parsed by `lxml `_.\n\n You can just create an instance with a positional argument:\n ``doc = HTML(something)``\n The class will try to guess if the input is a filename, an absolute URL,\n or a file-like object.\n\n Alternatively, use **one** named argument so that no guessing is involved:\n\n :param filename: A filename, relative to the current directory or absolute.\n :param url: An absolute, fully qualified URL.\n :param file_obj: a file-like: any object with a :meth:`~file.read` method.\n :param string: a string of HTML source. (This argument must be named.)\n :param tree: a parsed lxml tree. (This argument must be named.)\n\n Specifying multiple inputs is an error: ``HTML(filename=foo, url=bar)``\n will raise.\n\n You can also pass optional named arguments:\n\n :param encoding: Force the source character encoding.\n :param base_url: The base used to resolve relative URLs\n (eg. in ````). If not provided, try to use\n the input filename, URL, or ``name`` attribute of file-like objects.\n :param url_fetcher: a function or other callable\n with the same signature as :func:`default_url_fetcher` called to\n fetch external resources such as stylesheets and images.\n (See :ref:`url-fetchers`.)\n :param media_type: The media type to use for ``@media``.\n Defaults to ``'print'``. 
**Note:** In some cases like\n ``HTML(string=foo)`` relative URLs will be invalid if ``base_url``\n is not provided.\n\n \"\"\"\n def __init__(self, guess=None, filename=None, url=None, file_obj=None,\n string=None, tree=None, encoding=None, base_url=None,\n url_fetcher=default_url_fetcher, media_type='print'):\n url_fetcher = wrap_url_fetcher(url_fetcher)\n\n source_type, source, base_url, protocol_encoding = _select_source(\n guess, filename, url, file_obj, string, tree, base_url,\n url_fetcher)\n\n if source_type == 'tree':\n result = source\n else:\n if source_type == 'string':\n parse = lxml.etree.fromstring\n else:\n parse = lxml.etree.parse\n if not encoding:\n encoding = protocol_encoding\n parser = lxml.etree.HTMLParser(encoding=encoding)\n result = parse(source, parser=parser)\n if result is None:\n raise ValueError('Error while parsing HTML')\n base_url = find_base_url(result, base_url)\n if hasattr(result, 'getroot'):\n result.docinfo.URL = base_url\n result = result.getroot()\n else:\n result.getroottree().docinfo.URL = base_url\n self.root_element = result\n self.base_url = base_url\n self.url_fetcher = url_fetcher\n self.media_type = media_type\n\n def _ua_stylesheets(self):\n return [HTML5_UA_STYLESHEET]\n\n def render(self, stylesheets=None, enable_hinting=False):\n \"\"\"Lay out and paginate the document, but do not (yet) export it\n to PDF or another format.\n\n This returns a :class:`~document.Document` object which provides\n access to individual pages and various meta-data.\n See :meth:`write_pdf` to get a PDF directly.\n\n .. versionadded:: 0.15\n\n :param stylesheets:\n An optional list of user stylesheets. (See\n :ref:`stylesheet-origins`\\.) List elements are :class:`CSS`\n objects, filenames, URLs, or file-like objects.\n :type enable_hinting: bool\n :param enable_hinting:\n Whether text, borders and background should be *hinted* to fall\n at device pixel boundaries. Should be enabled for pixel-based\n output (like PNG) but not vector based output (like PDF).\n :returns: A :class:`~document.Document` object.\n\n \"\"\"\n return Document._render(self, stylesheets, enable_hinting)\n\n def write_pdf(self, target=None, stylesheets=None, zoom=1):\n \"\"\"Render the document to a PDF file.\n\n This is a shortcut for calling :meth:`render`, then\n :meth:`Document.write_pdf() `.\n\n :param target:\n A filename, file-like object, or :obj:`None`.\n :param stylesheets:\n An optional list of user stylesheets. (See\n :ref:`stylesheet-origins`\\.) 
The list’s elements are\n :class:`CSS` objects, filenames, URLs, or file-like objects.\n :type zoom: float\n :param zoom:\n The zoom factor in PDF units per CSS units.\n **Warning**: All CSS units (even physical, like ``cm``)\n are affected.\n For values other than 1, physical CSS units will thus be “wrong”.\n Page size declarations are affected too, even with keyword values\n like ``@page { size: A3 landscape; }``\n :returns:\n The PDF as byte string if :obj:`target` is not provided or\n :obj:`None`, otherwise :obj:`None` (the PDF is written to\n :obj:`target`.)\n\n \"\"\"\n return self.render(stylesheets).write_pdf(target, zoom)\n\n def write_image_surface(self, stylesheets=None, resolution=96):\n surface, _width, _height = (\n self.render(stylesheets, enable_hinting=True)\n .write_image_surface(resolution))\n return surface\n\n def write_png(self, target=None, stylesheets=None, resolution=96):\n \"\"\"Paint the pages vertically to a single PNG image.\n\n There is no decoration around pages other than those specified in CSS\n with ``@page`` rules. The final image is as wide as the widest page.\n Each page is below the previous one, centered horizontally.\n\n This is a shortcut for calling :meth:`render`, then\n :meth:`Document.write_png() `.\n\n :param target:\n A filename, file-like object, or :obj:`None`.\n :param stylesheets:\n An optional list of user stylesheets. (See\n :ref:`stylesheet-origins`\\.) The list’s elements are\n :class:`CSS` objects, filenames, URLs, or file-like objects.\n :type resolution: float\n :param resolution:\n The output resolution in PNG pixels per CSS inch. At 96 dpi\n (the default), PNG pixels match the CSS ``px`` unit.\n :returns:\n The image as byte string if :obj:`target` is not provided or\n :obj:`None`, otherwise :obj:`None` (the image is written to\n :obj:`target`.)\n\n \"\"\"\n png_bytes, _width, _height = (\n self.render(stylesheets, enable_hinting=True)\n .write_png(target, resolution))\n return png_bytes\n\n\nclass CSS(object):\n \"\"\"Represents a CSS stylesheet parsed by tinycss.\n\n An instance is created in the same way as :class:`HTML`, except that\n the ``tree`` parameter is not available. All other parameters are the same.\n\n ``CSS`` objects have no public attribute or method. 
They are only meant to\n be used in the :meth:`~HTML.write_pdf`, :meth:`~HTML.write_png` and\n :meth:`~HTML.render` methods of :class:`HTML` objects.\n\n \"\"\"\n def __init__(self, guess=None, filename=None, url=None, file_obj=None,\n string=None, encoding=None, base_url=None,\n url_fetcher=default_url_fetcher, _check_mime_type=False,\n media_type='print'):\n url_fetcher = wrap_url_fetcher(url_fetcher)\n\n source_type, source, base_url, protocol_encoding = _select_source(\n guess, filename, url, file_obj, string, tree=None,\n base_url=base_url, url_fetcher=url_fetcher,\n check_css_mime_type=_check_mime_type,)\n\n kwargs = dict(linking_encoding=encoding,\n protocol_encoding=protocol_encoding)\n if source_type == 'string':\n if isinstance(source, bytes):\n method = 'parse_stylesheet_bytes'\n else:\n # unicode, no encoding\n method = 'parse_stylesheet'\n kwargs.clear()\n else:\n # file_obj or filename\n method = 'parse_stylesheet_file'\n # TODO: do not keep this?\n self.stylesheet = getattr(PARSER, method)(source, **kwargs)\n self.base_url = base_url\n self.rules = list(preprocess_stylesheet(\n media_type, base_url, self.stylesheet.rules, url_fetcher))\n for error in self.stylesheet.errors:\n LOGGER.warn(error)\n\n\ndef _select_source(guess=None, filename=None, url=None, file_obj=None,\n string=None, tree=None, base_url=None,\n url_fetcher=default_url_fetcher, check_css_mime_type=False):\n \"\"\"\n Check that only one input is not None, and return it with the\n normalized ``base_url``.\n\n \"\"\"\n if base_url is not None:\n base_url = ensure_url(base_url)\n\n nones = [guess is None, filename is None, url is None,\n file_obj is None, string is None, tree is None]\n if nones == [False, True, True, True, True, True]:\n if hasattr(guess, 'read'):\n type_ = 'file_obj'\n elif url_is_absolute(guess):\n type_ = 'url'\n else:\n type_ = 'filename'\n return _select_source(\n base_url=base_url, url_fetcher=url_fetcher,\n check_css_mime_type=check_css_mime_type,\n **{type_: guess})\n if nones == [True, False, True, True, True, True]:\n if base_url is None:\n base_url = path2url(filename)\n return 'filename', filename, base_url, None\n if nones == [True, True, False, True, True, True]:\n result = url_fetcher(url)\n if check_css_mime_type and result['mime_type'] != 'text/css':\n LOGGER.warn(\n 'Unsupported stylesheet type %s for %s',\n result['mime_type'], result['redirected_url'])\n return 'string', '', base_url, None\n protocol_encoding = result.get('encoding')\n if base_url is None:\n base_url = result.get('redirected_url', url)\n if 'string' in result:\n return 'string', result['string'], base_url, protocol_encoding\n else:\n return 'file_obj', result['file_obj'], base_url, protocol_encoding\n if nones == [True, True, True, False, True, True]:\n if base_url is None:\n # filesystem file-like objects have a 'name' attribute.\n name = getattr(file_obj, 'name', None)\n # Some streams have a .name like '', not a filename.\n if name and not name.startswith('<'):\n base_url = ensure_url(name)\n return 'file_obj', file_obj, base_url, None\n if nones == [True, True, True, True, False, True]:\n return 'string', string, base_url, None\n if nones == [True, True, True, True, True, False]:\n return 'tree', tree, base_url, None\n\n raise TypeError('Expected exactly one source, got ' + (\n ', '.join(name for i, name in enumerate(\n 'guess filename url file_obj string tree'.split()) if not nones[i]\n ) or 'nothing'))\n\n\n# Work around circular imports.\nfrom .css import PARSER, preprocess_stylesheet\nfrom .html 
import find_base_url, HTML5_UA_STYLESHEET\nfrom .document import Document, Page\n","repo_name":"DongHuaLu/mdcom","sub_path":"python27/Lib/site-packages/weasyprint/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21948931826","text":"from tkinter import *\r\nfrom PIL import Image, ImageTk\r\n\r\nrootWidget = Tk()\r\nrootWidget.title(\"Tkinter\")\r\n# # rootWidget.iconbitmap('Siddhnat Joshi.jpg')\r\nimageWidget = ImageTk.PhotoImage(Image.open(\"Siddhnat Joshi.jpg\"))\r\nlabelWidget = Label(image = imageWidget)\r\nlabelWidget.grid(row= 0, column= 0)\r\nlabelWidget2 = Label(rootWidget, text = \"It's Me !!\", bd=1, relief=SUNKEN)\r\nlabelWidget2.grid(row=1, column=0, sticky = W+E)\r\nrootWidget.mainloop()\r\n\r\n","repo_name":"siddhantjoshi/Thinker","sub_path":"tkinker/Code/icon.py","file_name":"icon.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7537090046","text":"from _01_02_frequent_words import window\nfrom _01_03_reverse_dna import reverse_dna\n\n\ndef find_all(sub, string):\n # http://stackoverflow.com/a/3874760\n \"\"\"\n >>> text = \"Allowed Hello Hollow\"\n >>> tuple(findall('ll', text))\n (1, 10, 16)\n \"\"\"\n index = 0 - len(sub)\n try:\n while True:\n index = string.index(sub, index + len(sub))\n yield index\n except ValueError:\n pass\n\n\ndef get_kmers(s, k):\n return list(set([\"\".join(x) for x in window(s, k)]))\n\n\nif __name__ == '__main__':\n with open('in.txt', 'r') as f:\n k = int(f.readline())\n s1 = f.readline().strip()\n s2 = f.readline().strip()\n possible_kmers = get_kmers(s1, k)\n result = list()\n for kmer in possible_kmers:\n for first_pos in find_all(kmer, s1):\n for second_pos in find_all(kmer, s2):\n result.append((first_pos, second_pos))\n for second_pos in find_all(reverse_dna(kmer), s2):\n result.append((first_pos, second_pos))\n\n with open('out.txt', 'w') as f:\n f.write(str('\\n'.join(map(str, result))))\n","repo_name":"okainov/bioinf-algo-2015","sub_path":"_06_05_shared_kmers.py","file_name":"_06_05_shared_kmers.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"15237683274","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport xgboost as xgb\n# from catboost import CatBoostClassifier\nfrom lightgbm import LGBMClassifier\nfrom sklearn.metrics import log_loss, accuracy_score, f1_score, make_scorer\nfrom sklearn.model_selection import train_test_split\nimport catboost as cb\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, KFold\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.preprocessing import LabelEncoder\nimport os\nimport optuna\nfrom optuna import Trial\nfrom optuna.samplers import TPESampler\n\ndef get_dict(data):\n dic_x = {}\n for i, x in enumerate((data), 
start=0):\n dic_x[x] = i\n return dic_x\n\ndef check_bool(temp1, temp2):\n diff = sum(temp1['class'] != temp2['class'])\n li = []\n for i in range(len(temp1)):\n if temp1['class'][i] != temp2['class'][i]:\n li.append(i)\n print('개수', diff, 'list', li)\n return diff, li\n\nos.getcwd()\nos.chdir(\"C://data_minsung/dacon\")\ntrain_df = pd.read_csv('./train.csv')\ntest_df = pd.read_csv('./test.csv')\n\n\n# 데이터 전처리\nsnp_col = train_df.columns[5:-1]\nfor col in snp_col:\n dic = get_dict(list(set(train_df[col])))\n for i in range(len(train_df)):\n train_df[col][i] = dic[train_df[col][i]]\n for i in range(len(test_df)):\n test_df[col][i] = dic[test_df[col][i]]\n\n\ntrain_df = train_df.drop(['id','father','mother', 'gender'], axis=1)\ntest_df = test_df.drop(['id','father','mother', 'gender'], axis=1)\n\nle = LabelEncoder() # from sklearn.preprocessing\ntemp = train_df['class'].copy()\nlabel_dic = {'A': 0, 'B': 1, 'C': 2}\n\n\ntrain_y = le.fit_transform(train_df['class'].copy())\ntrain_x = train_df.drop('class', axis=1)\n\nSEEDS = [42, 1028, 1234, 0, 24]\n\ndef RF_objective(trial):\n max_depth = trial.suggest_int('max_depth', 1, 10)\n max_leaf_nodes = trial.suggest_int('max_leaf_nodes', 2, 1000)\n n_estimators = trial.suggest_int('n_estimators', 100, 500)\n model = RandomForestClassifier(max_depth=max_depth, max_leaf_nodes=max_leaf_nodes, n_estimators=n_estimators,\n n_jobs=2, random_state=seed)\n model.fit(train_x, train_y)\n\n kfold = StratifiedKFold(n_splits=10)\n\n score = cross_val_score(model, train_x, train_y, cv=kfold, scoring=make_scorer(f1_score,average='micro'))\n f1_mean = score.mean()\n return f1_mean\n# Execute optuna and set hyperparameters\nparameter = []\nfor seed in SEEDS:\n sampler = TPESampler(seed=seed)\n RF_study = optuna.create_study(direction='maximize', sampler=sampler)\n RF_study.optimize(RF_objective, n_trials=100)\n print(\"Best Score:\", RF_study.best_value)\n print(\"Best trial:\", RF_study.best_trial.params)\n parameter.append(RF_study.best_trial.params)\n\n\nrfc1 = RandomForestClassifier(**parameter[0], random_state=SEEDS[0])\nrfc2 = RandomForestClassifier(**parameter[1], random_state=SEEDS[1])\nrfc3 = RandomForestClassifier(**parameter[2], random_state=SEEDS[2])\nrfc4 = RandomForestClassifier(**parameter[3], random_state=SEEDS[3])\nrfc5 = RandomForestClassifier(**parameter[4], random_state=SEEDS[4])\n\n\n# optuna\n# rfc = RandomForestClassifier(max_depth=6, max_leaf_nodes=268, n_estimators=286,\n# random_state=42) #rfc1=97.1917\n# rfc = RandomForestClassifier(max_depth=5, max_leaf_nodes=364, n_estimators=335, random_state=42) # 100번 튜닝, rfc2 = rfc1\n\n# grid\n# rfc = RandomForestClassifier(bootstrap=False, max_features=1, min_samples_leaf=2,\n# min_samples_split=7, n_estimators=500, random_state=47)\n\n\n\n# rfc ensemble\n\nvotingC = VotingClassifier(estimators=[('1', rfc1), ('2', rfc2),('3', rfc3), ('4', rfc4),('5', rfc5)], voting='soft', n_jobs=4) #temp8 = 96\n\n\n# # 단일모델\n# rfc.fit(train_x, train_y)\n# pred = rfc.predict(test_df)\n\n\n# 앙상블 모델\nvotingC = votingC.fit(train_x.astype(int), train_y)\npred = votingC.predict(test_df.astype(int))\n\nlabel_rev_dic = {}\nfor i, x in enumerate(label_dic):\n label_rev_dic[i] = x\n\nresult = []\nfor x in pred:\n result.append(label_rev_dic[x])\n\ntemp = pd.read_csv('./test.csv')\n\nresult_df = pd.DataFrame(result, index=temp['id'], columns=['class'])\nresult_df.to_csv(\"./result/model_result_temp_rfc_ensemble.csv\")\n\n# rfc(1개) > xgb(temp5와 비교 3개) > lgbm(4개)\n# rfc 4개, xgb 26개, lgbm, lgbm 4개\ntemp1 = 
pd.read_csv(\"./result/model_result_temp_rfc_ensemble.csv\")\ntemp2 = pd.read_csv(\"./result/model_result_temp_rfc.csv\")\ntemp3 = pd.read_csv(\"./result/model_result_14.csv\")\n#\ncheck_bool(temp1, temp2)\ncheck_bool(temp2, temp3)\ncheck_bool(temp1, temp3)\n\n\n\n","repo_name":"Minsung-Jeong/anomaly","sub_path":"PycharmProjects/project22/dacon/genetic/rfc_seed_ensemble.py","file_name":"rfc_seed_ensemble.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30690689387","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MinValueValidator\nfrom django.urls import reverse\nfrom django.shortcuts import get_object_or_404\n\nfrom .levels import Levels\nfrom combat_app.combat.hero.basic import HEROES_CLASSES, HEROES_MODEL_CHOICES\nfrom combat_app.combat.unit.basic import UNIT_CLASSES\n\n\nclass Hero(models.Model):\n \"\"\"Hero model, describes main characteristics of user's hero.\"\"\"\n name = models.CharField(max_length=24, default='Странник')\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='heroes', blank=True)\n attack = models.IntegerField(default=0)\n defense = models.IntegerField(default=0)\n mana = models.IntegerField(default=0)\n spell_power = models.IntegerField(default=0)\n initiative = models.FloatField(default=0)\n in_battle = models.IntegerField(null=True, default=-1)\n default = models.CharField(max_length=30, choices=HEROES_MODEL_CHOICES)\n army = models.JSONField(default=dict, blank=True)\n level = models.IntegerField(default=1)\n exp = models.IntegerField(default=0)\n free_point = models.BooleanField(default=False)\n\n # Spells as self model, with foreignkey.\n\n def __str__(self):\n return f'{self.user.username}-{self.name}'\n\n @classmethod\n def _create_hero(cls, user, default_hero, hero_name, army: dict = None):\n default = HEROES_CLASSES[default_hero]\n if not army:\n army = {}\n hero = Hero.objects.create(\n user=user,\n name=hero_name,\n attack=default.attack,\n defense=default.defense,\n mana=default.mana,\n spell_power=default.spell_power,\n initiative=default.initiative,\n default=default_hero,\n army=army\n )\n hero.save()\n return hero\n\n @classmethod\n def create(cls, user, hero_name, hero_class, army=None):\n assert user.heroes.count() <= 3, 'User can\\'t have more then 3 heroes'\n assert hero_class in HEROES_CLASSES, f'Invalid hero class - {hero_class}'\n hero = cls._create_hero(user, hero_class, hero_name, army)\n return hero\n\n def add_attack(self, value: int):\n self.attack += value\n self.save(update_fields=['attack'])\n\n def set_attack(self, value: int):\n self.attack = value\n self.save(update_fields=['attack'])\n\n def add_defense(self, value: int):\n self.defense += value\n self.save(update_fields=['defense'])\n\n def set_defense(self, value: int):\n self.defense = value\n self.save(update_fields=['defense'])\n\n def add_mana(self, value: int):\n self.mana += value\n self.save(update_fields=['mana'])\n\n def set_mana(self, value: int):\n self.mana = value\n self.save(update_fields=['mana'])\n\n def add_spell_power(self, value: int):\n self.spell_power += value\n self.save(update_fields=['spell_power'])\n\n def set_spell_power(self, value: int):\n self.spell_power = value\n self.save(update_fields=['spell_power'])\n\n def add_initiative(self, value):\n self.initiative += value\n self.save(update_fields=['initiative'])\n\n def set_initiative(self, value):\n 
self.initiative = value\n self.save(update_fields=['initiative'])\n\n def add_spell(self, pk):\n spell = Spell.objects.get(pk=pk)\n self.spells.add(spell)\n self.save(update_fields=['spells'])\n\n def remove_spell(self, pk):\n spell = Spell.objects.get(pk=pk)\n self.spells.remove(spell)\n self.save(update_fields=['spells'])\n\n def clear_spells(self):\n self.spells.clear()\n self.save(update_fields=['spells'])\n\n def set_unit_in_army(self, unit, count):\n assert unit in UNIT_CLASSES, 'Invalid unit class!'\n assert self._unit_can_be_added(unit, count), 'You can\\'t add this count of units in army.'\n if count == 0:\n self._del_unit_from_army(unit)\n else:\n self.army[unit] = count\n self.save()\n\n def gain_exp(self, exp):\n self.exp += exp\n self.save(update_fields=['exp'])\n\n def push_hero_in_battle(self, combat):\n combat.add_to_random_team(self)\n\n def release_hero_from_battle(self):\n \"\"\"Reset in_battle for current hero to -1.\"\"\"\n self.in_battle = -1\n self.save(update_fields=['in_battle'])\n\n def _set_unit_in_army(self, army, unit, count):\n assert unit in UNIT_CLASSES, 'Invalid unit class!'\n if count == 0:\n self._del_unit_from_army(unit)\n else:\n army[unit] = count\n\n def _calculate_army_power(self, army):\n power = 0\n for unit, count in army.items():\n power += UNIT_CLASSES[unit].army_cost * count\n return power\n\n def _unit_can_be_added(self, unit, count):\n temp_army = self.army.copy()\n self._set_unit_in_army(temp_army, unit, count)\n print(Levels.data['levels'][self.level]['army_power'], self._calculate_army_power(temp_army))\n return True if Levels.data['levels'][self.level]['army_power'] >= self._calculate_army_power(temp_army) else False\n\n def _del_unit_from_army(self, unit):\n if unit in self.army:\n del self.army[unit]\n @property\n def available_stacks(self):\n return HEROES_CLASSES[self.default].get_available_stacks(self.level)\n\n @property\n def level_info(self):\n return Levels.data['levels'][self.level]\n\n @property\n def url(self):\n return reverse('user:user_page', args=[self.user.username])\n\nclass SpellTome(models.Model):\n name = models.CharField(max_length=30)\n\n def __str__(self):\n return self.name\n\n\nclass Spell(models.Model):\n tome = models.ForeignKey(SpellTome, related_name='spells', on_delete=models.SET_NULL, null=True)\n hero = models.ManyToManyField(Hero, related_name='spells', blank=True)\n name = models.CharField(max_length=30, unique=True)\n short_name = models.CharField(max_length=16, unique=True)\n description = models.TextField()\n damage_per_tail = models.IntegerField()\n SCHEME_CHOICES = [\n ('CROSS', 'Across'),\n ('RECTAN', 'Rectangle'),\n ('CF', 'Circumference'),\n ]\n scheme = models.CharField(max_length=10, choices=SCHEME_CHOICES)\n height = models.IntegerField(validators=[MinValueValidator(1)])\n width = models.IntegerField(validators=[MinValueValidator(1)])\n\n def save(self, *args, **kwargs):\n if self.scheme in ('CROSS', 'CF'):\n if self.height % 2 == 0:\n self.height -= 1\n if self.width % 2 == 0:\n self.width -= 1\n if self.scheme == 'CF':\n if self.width != self.height:\n if self.height > self.width:\n self.width = self.height\n else:\n self.height = self.width\n return super().save(*args, **kwargs)\n","repo_name":"ArtjomKotkov/game_ham","sub_path":"hero_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25844732476","text":"import numpy as np\nimport math\nimport 
scipy.stats as stats\nfrom abc import ABCMeta, abstractmethod\nimport distributions \nimport utils_math\nfrom problems import ABC_problems\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport time\n\n\nclass Gaussian_Copula_Problem(ABC_problems.ABC_Problem):\n\n '''\n A problem where data is generated by a 2D Gaussian copula where: marginal 1 = beta, marginal 2 = MoG\n '''\n\n def __init__(self, N=100, n=50):\n self.N = N # number of posterior samples\n self.n = n # number of i.i.d data x_i ~ p(x|theta)\n\n self.prior = [distributions.uniform, distributions.uniform, distributions.uniform]\n self.prior_args = np.array([[0.5, 12.5], [0, 1.0], [0.4, 0.8]]) # uniform prior\n self.simulator_args = ['alpha', 'coeff', 'cov'] # just for information\n self.K = 3 # number of parameters\n self.stat = 'raw'\n \n self.true_cov = np.array([[1, 0.60], [0.60, 1]])\n self.true_coeff = 0.50\n self.true_alpha = 6.0\n self.true_beta = 2.0\n self.true_mean1 = 1.0\n self.true_mean2 = 4.0\n self.true_sigma1 = 1.0\n self.true_sigma2 = 0.5\n\n self.MoG_u2x_mappings = distributions.MoG2.learn_u2x_mappings(self.true_mean1, self.true_sigma1, self.true_mean2, self.true_sigma2)\n\n def get_true_theta(self):\n return np.array([self.true_alpha, self.true_coeff, self.true_cov[0, 1]])\n\n \n # A. (normalized) marginal quantiles\n def _ss_quantiles(self, X, n_quantiles):\n dim = X.shape[1]\n prob = np.linspace(0.025, 0.975, n_quantiles)\n stat = np.zeros([1, n_quantiles*dim])\n for k in range(dim):\n quantiles = stats.mstats.mquantiles(X[:, k], prob)\n stat_k = quantiles\n stat[0, k*n_quantiles : (k+1)*n_quantiles] = np.array(stat_k)\n return stat\n\n # B. correlation between latent\n def _ss_corr(self, Z):\n V = np.mat(Z).T * np.mat(Z) / Z.shape[0]\n (d,d) = V.shape\n upper_tri_elements = V[np.triu_indices(d, k=1)]\n stat = np.array(upper_tri_elements)\n return stat\n \n def statistics(self, data, theta=None):\n if self.stat == 'raw':\n # (marginal quantiles) + (latent correlation) as summary statistics\n stat_A = self._ss_quantiles(data, n_quantiles=20)\n stat_B = self._ss_corr(self.X2Z(data, theta[0], theta[1]))\n stat = np.hstack((stat_A, stat_B))\n return stat\n else:\n # (marginal quantiles) + (latent correlation) as summary statistics\n stat_A = self._ss_quantiles(data, n_quantiles=5)\n stat_B = self._ss_corr(self.X2Z(data, theta[0], theta[1]))\n stat = np.hstack((stat_A, stat_B))\n return stat\n \n def simulator(self, theta):\n # some preparation\n alpha = theta[0]\n coeff = theta[1]\n V = np.array([[1, theta[2]], [theta[2], 1]])\n\n # sample z ~ N(0, V)\n Z = distributions.normal_nd.draw_samples([0, 0], V, self.n)\n\n # convert z to x\n X = self.Z2X(Z, alpha, coeff)\n return X\n\n def log_likelihood(self, theta):\n\n # calculate L(theta; x_o) = p(theta|x_o)\n \n alpha = theta[0]\n coeff = theta[1]\n V = np.array([[1, theta[2]], [theta[2], 1]])\n\n # compute the copula density\n Z = self.X2Z(self.data_obs, alpha, coeff)\n c = distributions.copula.copula_density(Z, V)\n\n # compute the marginal pdf\n p1 = distributions.beta.pdf(self.data_obs[:, 0], alpha, self.true_beta)\n p2 = distributions.MoG2.pdf(self.data_obs[:, 1], coeff, self.true_mean1, self.true_sigma1, self.true_mean2, self.true_sigma2)\n\n # likelihood = copula density * marginal pdf\n ll = (np.log(c) + np.log(p1) + np.log(p2)).sum()\n return ll\n \n def log_pdf(self, data, theta):\n\n # calculate p(x|theta) \n\n alpha = theta[0]\n coeff = theta[1]\n V = np.array([[1, theta[2]], [theta[2], 1]])\n\n # compute the copula 
density\n Z = self.X2Z(data, alpha, coeff)\n c = distributions.copula.copula_density(Z, V)\n\n # compute the marginal pdf\n p1 = distributions.beta.pdf(data[:, 0], alpha, self.true_beta)\n p2 = distributions.MoG2.pdf(data[:, 1], coeff, self.true_mean1, self.true_sigma1, self.true_mean2, self.true_sigma2)\n\n # copula density * marginal pdf = pdf\n return np.log(c)+np.log(p1)+np.log(p2)\n\n def sample_from_prior(self):\n sample_alpha = self.prior[0].draw_samples(self.prior_args[0, 0], self.prior_args[0, 1], 1)[0]\n sample_coeff = self.prior[1].draw_samples(self.prior_args[1, 0], self.prior_args[1, 1], 1)[0]\n sample_cov = self.prior[2].draw_samples(self.prior_args[2, 0], self.prior_args[2, 1], 1)[0]\n return np.array([sample_alpha, sample_coeff, sample_cov])\n\n def X2Z(self, X, alpha, coeff):\n # get u = beta-CDF(x), MoG-CDF(x)\n U = np.zeros(X.shape)\n U[:, 0] = distributions.beta.cdf(X[:, 0], alpha, self.true_beta)\n U[:, 1] = distributions.MoG2.cdf(X[:, 1], coeff, self.true_mean1, self.true_sigma1, self.true_mean2, self.true_sigma2)\n\n # get z = inverse-Phi(u)\n Z = distributions.normal.invcdf(U, mu=0, sigma=1)\n return Z\n\n def Z2X(self, Z, alpha, coeff):\n # get u = Phi(z)\n U = np.zeros(Z.shape)\n X = np.zeros(Z.shape)\n for k in range(2):\n U[:, k] = distributions.normal.cdf(Z[:, k], 0, 1)\n\n # get x = inverse-CDF(u)\n X[:, 0] = distributions.beta.invcdf(U[:, 0], alpha, self.true_beta)\n X[:, 1] = distributions.MoG2.invcdf(U[:, 1], coeff, self.MoG_u2x_mappings)\n return X\n\n def visualize(self):\n print('visualizing p(x|theta)')\n \n # preparation\n samples = self.data_obs\n [m, dim] = samples.shape\n min_values = samples.min(axis=0)\n max_values = samples.max(axis=0)\n\n # likelihood values \n N_grid = 300\n ranges = []\n for k in range(dim):\n r = np.array(np.linspace(min_values[k], max_values[k], N_grid))\n ranges.append(r)\n X, Y = np.meshgrid(*ranges)\n R = np.array(np.meshgrid(*ranges)).T.reshape(-1, dim)\n\n pdf = np.exp(self.log_pdf(R, self.get_true_theta()))\n Z = pdf.reshape(X.shape)\n\n # plot the contour\n plt.figure(figsize=(5, 5))\n plt.contour(X, Y, Z, 10, cmap='jet', linewidths=0.75)\n plt.xlabel(r'$y_1$')\n plt.ylabel(r'$y_2$')\n plt.xlim((0.3, 1.1))\n plt.ylim((-1, 6))\n plt.show()\n ","repo_name":"cyz-ai/neural-approx-ss-lfi","sub_path":"problems/problem_GC.py","file_name":"problem_GC.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"30038017456","text":"from flask import Blueprint, jsonify, request, g\r\nfrom app.db import get_db\r\n\r\nbp = Blueprint(\"courses\", __name__, url_prefix=\"/api/courses\")\r\n\r\n@bp.route(\"\", methods=[\"GET\"])\r\ndef get_courses():\r\n db = get_db()\r\n courses = db.execute(\"SELECT * FROM courses\").fetchall()\r\n return jsonify([dict(course) for course in courses])\r\n\r\n@bp.route(\"\", methods=[\"POST\"])\r\ndef create_course():\r\n db = get_db()\r\n data = request.get_json()\r\n name = data.get(\"name\")\r\n teacher_id = data.get(\"teacher_id\")\r\n if name is None:\r\n return jsonify({\"error\": \"Name is required\"}), 400\r\n if teacher_id is None:\r\n return jsonify({\"error\": \"Teacher ID is required\"}), 400\r\n db.execute(\"INSERT INTO courses (name, teacher_id) VALUES (?, ?)\", (name, teacher_id))\r\n db.commit()\r\n return jsonify({\"message\": \"Course created successfully\"})\r\n\r\n@bp.route(\"/\", methods=[\"GET\"])\r\ndef get_course(id):\r\n db = get_db()\r\n course = db.execute(\"SELECT * FROM courses 
WHERE id = ?\", (id,)).fetchone()\r\n if course is None:\r\n return jsonify({\"error\": \"Course not found\"}), 404\r\n return jsonify(dict(course))\r\n\r\n@bp.route(\"/\", methods=[\"PUT\"])\r\ndef update_course(id):\r\n db = get_db()\r\n data = request.get_json()\r\n name = data.get(\"name\")\r\n teacher_id = data.get(\"teacher_id\")\r\n if name is None:\r\n return jsonify({\"error\": \"Name is required\"}), 400\r\n if teacher_id is None:\r\n return jsonify({\"error\": \"Teacher ID is required\"}), 400\r\n db.execute(\"UPDATE courses SET name = ?, teacher_id = ? WHERE id = ?\", (name, teacher_id, id))\r\n db.commit()\r\n return jsonify({\"message\": \"Course updated successfully\"})\r\n\r\n@bp.route(\"/\", methods=[\"DELETE\"])\r\ndef delete_course(id):\r\n db = get_db()\r\n db.execute(\"DELETE FROM courses WHERE id = ?\", (id,))\r\n db.commit()\r\n return jsonify({\"message\": \"Course deleted successfully\"})\r\n\r\n@bp.route(\"//students\", methods=[\"GET\"])\r\ndef get_course_students(id):\r\n db = get_db()\r\n course = db.execute(\"SELECT * FROM courses WHERE id = ?\", (id,)).fetchone()\r\n if course is None:\r\n return jsonify({\"error\": \"Course not found\"}), 404\r\n students = db.execute(\"SELECT * FROM students WHERE id IN (SELECT student_id FROM course_students WHERE course_id = ?)\", (id,)).fetchall()\r\n return jsonify([dict(student) for student in students])\r\n\r\n@bp.route(\"//grades\", methods=[\"GET\"])\r\ndef get_course_grades(id):\r\n db = get_db()\r\n course = db.execute(\"SELECT * FROM courses WHERE id = ?\", (id,)).fetchone()\r\n if course is None:\r\n return jsonify({\"error\": \"Course not found\"}), 404\r\n grades = db.execute(\"SELECT * FROM grades WHERE course_id = ?\", (id,)).fetchall()\r\n return jsonify([dict(grade) for grade in grades])\r\n\r\n \r\n","repo_name":"adekunle8032/Altschool-third-semester-project2","sub_path":"app/students_api/routes/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2855984407","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.load(\"Configuration.StandardSequences.GeometryDB_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.GeometryDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.RawToDigi_cff')\nprocess.load('Configuration.StandardSequences.Reconstruction_cff')\n\n## process.GlobalTag.globaltag=\"94X_mc2017_realistic_v10\"\n#process.GlobalTag.globaltag = \"120X_mcRun3_2021_realistic_v6\" ## Updating global tag since we are using run 3 2021 mc\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, '126X_mcRun3_2023_forPU65_v1', '')\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) ) #-1 = tutti (numero edi eventi)\n\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root',' with the source file you want to use\n fileNames = 
cms.untracked.vstring(\n\t\t'root://cms-xrd-global.cern.ch//store/group/phys_tracking/Soohyuny/DeepCoreNtuplizer/TT_TuneCP5_13p6TeV_powheg-pythia8/DeepCoreNtuplizerInput'\n# 'file:/uscms_data/d3/hichemb/princeton/project2/CMSSW_12_0_0_pre4/src/RecoTracker/DeepCoreTraining/test/Ntuplizer_step1_test.root'\n #'file:/eos/uscms/store/user/hichemb/RelValQCD_Pt_1800_2400_14/DeepCoreNtuplizerInput/211013_194728/0000/output/Ntuplizer_output1.root'\n ## 'file:/uscms_data/d3/hichemb/princeton/project2/CMSSW_12_0_0_pre4/src/RecoTracker/DeepCoreTraining/test/step3.root'\n ## 'root://cms-xrd-global.cern.ch//store/user/arizzi/TrainJetCore/QCD_Pt_1800to2400_TuneCUETP8M1_13TeV_pythia8/TrainJetCoreAll/181026_130638/0005/step3_5435.root' #barrel example\n # 'root://cms-xrd-global.cern.ch//store/user/vbertacc/DeepCoreTrainingSampleEC_signelCore_2k/UBGGun_E-1000to7000_Eta-1p2to2p1_13TeV_pythia8/DeepCoreTrainingSampleEC_all/200509_143853/0000/step3_10.root'#endcap example\n ),\n)\n\nprocess.options = cms.untracked.PSet(\n allowUnscheduled = cms.untracked.bool(True),\n numberOfThreads = cms.untracked.uint32(8),\n numberOfStreams = cms.untracked.uint32(8),\n wantSummary = cms.untracked.bool(True)\n)\n\nprocess.DeepCoreNtuplizerTest = cms.EDProducer('DeepCoreNtuplizer' ,\n ptMin = cms.double(500), #500 used for barrel training, 1000 used for endcap training\n pMin=cms.double(0),\n deltaR = cms.double(0.1),\n barrelTrain =cms.bool(True),\n endcapTrain =cms.bool(False),\n fullTrain =cms.bool(False),\n \n vertices = cms.InputTag(\"offlinePrimaryVertices\"),\n pixelClusters=cms.InputTag(\"siPixelClustersPreSplitting\"),\n cores = cms.InputTag(\"ak4CaloJets\"),\n centralMIPCharge = cms.double(18000.0),\n chargeFractionMin = cms.double(2),\n simTracks= cms.InputTag(\"g4SimHits\"),\n simHit= cms.InputTag(\"g4SimHits\",\"TrackerHitsPixelBarrelLowTof\"),\n simHitEC= cms.InputTag(\"g4SimHits\",\"TrackerHitsPixelEndcapLowTof\"),\n pixelCPE = cms.string( \"PixelCPEGeneric\" )\n)\n\nprocess.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\",\n ignoreTotal = cms.untracked.int32(1),\n oncePerEventMode = cms.untracked.bool(True)\n)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"DeepCoreTrainingSample_test.root\"),\n closeFileFast = cms.untracked.bool(True)\n )\n \nprocess.MessageLogger.cerr.threshold = \"Info\"\nprocess.MessageLogger.debugModules = [\"DeepCoreNtuplizerTest\"]\n\nprocess.p = cms.Path(process.DeepCoreNtuplizerTest\n\n) \n# 500 is the goodone (for barel training) #1000 is the tested one, with p cut insted of pt\n","repo_name":"soohyuny/DeepCore","sub_path":"Ntuplizer/DeepCoreTraining/test/test_DeepCoreNtuplizer.py","file_name":"test_DeepCoreNtuplizer.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39931468924","text":"from collections import defaultdict\nimport uuid\nimport time\nimport numpy as np\nimport base64\nimport cv2\nimport torch\nfrom ts.torch_handler.base_handler import BaseHandler\nfrom ilock import ILock\nimport warnings\nfrom torchvision.transforms.functional import adjust_gamma\n\nfrom methods.constants import (\n CUDA_ENABLED,\n TRIANGLE_MODEL_PATH, \n MAP_LOCATION,\n VOD_TRIANGLE_PATH,\n VOD_TRIANGLE_BATCHES\n)\n\nfrom predictors.triangle.infer_standalone import infer, load_model\nfrom predictors.vod_triangles.src.batch_inference import (\n infer_batch as vod_triangle_inference,\n fetch_model as vod_triangle_fetch_model\n)\nfrom methods.misc import (\n 
fetch_triangles_translators_batches,\n convert_coords_list2dicts,\n convert_yolo_output2dict,\n broaden_yolo_output,\n filter_yolo_output,\n)\nfrom ts.metrics.dimension import Dimension\n\nclass TriangleHandler(BaseHandler):\n \n def initialize(self, context):\n \"\"\"\n Args:\n context: representing information about the available GPU\n\n Processes:\n initiates table detection model, cell detection model, easyocr model\n Returns:\n None\n \"\"\"\n properties = context.system_properties\n self.context=context\n self.map_location = MAP_LOCATION\n self.device = MAP_LOCATION\n # assert self.device == \"cuda\", \"GPU ISNT RECOGNIZED\"\n print(\"DEVICE\", self.device)\n self.triangle_model = load_model(TRIANGLE_MODEL_PATH, map_location=self.device)\n print(VOD_TRIANGLE_PATH)\n self.vod_triangle_model, self.vod_triangle_normalize = vod_triangle_fetch_model(ckpt=VOD_TRIANGLE_PATH, device=MAP_LOCATION)\n print(\"loaded yolo\")\n self.triangle_count_dm = Dimension('level','trianglescount')\n # warm up triangle model\n if self.device != 'cpu':\n self.triangle_model(torch.zeros(1, 3, 640, 640).to(self.device).type_as(next(self.triangle_model.parameters())))\n print(\"loaded vod_triangles\")\n return self\n\n def preprocess(self, data):\n \"\"\"\n Args:\n data: list of binary images\n Returns:\n list of cv2 images\n \"\"\"\n \n images = []\n for row in data:\n image = row.get('data') or row.get('body')\n\n if isinstance(image, str):\n image = base64.b64decode(image)\n\n img = np.frombuffer(image, dtype=np.uint8).reshape((640, 640, 3))\n if img.shape != (640, 640, 3):\n warnings.warn(f\"img.shape isnt (640,640,3)!={img.shape} undefined behaviour and wrong results can be returned\")\n \n img = img.transpose((2, 0, 1)) # h, w, c --> c, h, w\n images.append(img)\n\n images = np.ascontiguousarray(np.array(images))\n images = torch.from_numpy(images)\n return images\n\n def inference_triangles(self, data, *args, **kwargs):\n \"\"\"\n Args:\n data: list of cv2 images\n Returns:\n\n \"\"\"\n tic = time.time()\n results = infer(data, model=self.triangle_model)\n \n # print(f\"BatchSize.Batches:{len(data)}\") \n idx = str(uuid.uuid4())\n self.context.metrics.add_time(\n 'YoloInferenceTriangleTimeForBatch',\n (time.time() - tic) * 1000,\n idx, 'ms'\n )\n\n return results\n\n def inference_vertices(self, imgs, triangles_bboxes, *args, **kwargs):\n \"\"\"\n Args:\n data: list of cv2 images\n Returns:\n \n \"\"\"\n\n tic = time.time()\n results = defaultdict(dict)\n for img_index in range(len(imgs)):\n results[img_index] = {\n \"img_index\": img_index,\n \"triangles\": []\n } # has to be int because of sorting]\n triangles_cnt_ = 0\n for ind, (triangles, translators) in enumerate(fetch_triangles_translators_batches(\n yolo_output=triangles_bboxes, \n imgs=imgs,\n n_batch=VOD_TRIANGLE_BATCHES,\n device=self.device\n )):\n tic_justinf = time.time()\n vertices = vod_triangle_inference(triangles, self.vod_triangle_model, self.vod_triangle_normalize, device=self.device)\n idx = str(uuid.uuid4())\n self.context.metrics.add_time(\n 'TrianglesInternalInferenceTimeForBatch',\n (time.time() - tic_justinf) * 1000,\n idx, 'ms'\n )\n for translator, triangle_vertices in zip(translators, vertices):\n tr_vert = triangle_vertices.tolist()\n bbox = triangles_bboxes[translator.img_index][translator.triangle_index]\n vert_dicts = convert_coords_list2dicts(tr_vert, translator)\n bbox, confidence = convert_yolo_output2dict(bbox)\n img_dict = results[translator.img_index]\n \n img_dict[\"triangles\"].append(\n {\n \"num\" : 
translator.triangle_index,\n \"vertices\" : vert_dicts,\n \"yolo_confidence\" : confidence,\n \"bbox\" : bbox\n }\n )\n results = list((results.items()))\n results.sort(key=lambda a: a[0])\n idx = str(uuid.uuid4())\n self.context.metrics.add_time(\n 'TrianglesInternalTotalTimeForBatch',\n (time.time() - tic) * 1000, \n idx, 'ms'\n )\n self.context.metrics.add_counter('Stage2TotalTrianglesNumber', triangles_cnt_, idx)\n return list(map(lambda x: x[1], results))\n\n def handle(self, data, context):\n \"\"\"Entry point for default handler. It takes the data from the input request and returns\n the predicted outcome for the input.\n Args:\n data (list): The input data that needs to be made a prediction request on.\n context (Context): It is a JSON Object containing information pertaining to\n the model artefacts parameters.\n Returns:\n list : Returns a list of dictionary with the predicted response.\n \"\"\"\n start_time = time.time()\n self.context = context\n \n metrics = self.context.metrics\n idx = str(uuid.uuid4())\n metrics.add_counter('BatchSize', len(data), idx)\n\n tic = time.time()\n data_preprocess = self.preprocess(data)\n metrics.add_time('PreprocessingTimeForBatch', (time.time() - tic) * 1000, idx, 'ms')\n tic = time.time()\n if CUDA_ENABLED:\n # with ILock(\"torchserve-mutex\"): # dont use if models are slow\n data_preprocess = data_preprocess.to(self.device)\n data_preprocess = adjust_gamma(data_preprocess, 0.5)\n if not self._is_explain():\n yolo_output = self.inference_triangles(data_preprocess)\n # filter_yolo_output(yolo_output)\n broaden_yolo_output(yolo_output, 0.15) # force yolo to get mode context to help stage2\n vod_vertices = self.inference_vertices(data_preprocess, triangles_bboxes=yolo_output)\n else:\n vod_vertices = self.explain_handle(data_preprocess, data)\n torch.cuda.empty_cache()\n else:\n data_preprocess = data_preprocess.to(self.device)\n data_preprocess = adjust_gamma(data_preprocess, 0.5)\n if not self._is_explain():\n yolo_output = self.inference_triangles(data_preprocess)\n # filter_yolo_output(yolo_output)\n broaden_yolo_output(yolo_output, 0.15) # force yolo to get mode context to help stage2\n vod_vertices = self.inference_vertices(data_preprocess, triangles_bboxes=yolo_output)\n else:\n vod_vertices = self.explain_handle(data_preprocess, data)\n \n metrics.add_time('InferenceTotalTimeForBatch', (time.time() - tic) * 1000, idx, 'ms')\n\n stop_time = time.time()\n metrics.add_time('HandlerTime', round((stop_time - start_time) * 1000, 2), idx, 'ms')\n \n return vod_vertices\n\n\nif __name__ == '__main__':\n import os\n class Metrics:\n def add_time(self, *args, **kwargs):\n print(str(args))\n\n def add_metric(self, *args, **kwargs):\n print(str(args))\n def add_counter(self, *args, **kwargs):\n print(str(args))\n\n class Temp:\n def __init__(self, *args, **kwargs) -> None:\n self.system_properties = {\"gpu_id\": \"0\"}\n self.metrics = Metrics()\n def __str__(self):\n return (\"here\")\n\n def get_request_header(self, *args):\n return False\n print(\"tmp\", Temp())\n\n # fname = \"./predictors/triangle/fname_810.png\"\n fname = \"./predictors/triangle/fname_810.png\"\n img = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (640, 640)).astype(np.uint8)\n img_bytes = img.tobytes()\n open(\"../tests/triangles_heavy_img.png\", \"wb\").write(img_bytes)\n\n tic = time.time()\n handler = TriangleHandler().initialize(Temp())\n #exit(0)\n print(\"Time for init\", time.time() - tic)\n for _ in range(100):\n tic = time.time()\n results 
= handler.handle([{\"data\" : img_bytes} for _ in range(20)], Temp())\n toc = time.time()\n print(\"Handling Time\", toc - tic)\n #print(f\"first results: {results[0]}\")\n\n # for res in results[0][\"triangles\"]:\n # import matplotlib.pyplot as plt\n\n # xs, ys = [], []\n # for vert in res[\"bbox\"]:\n # x_ = vert[\"x\"]\n # y_ = vert[\"y\"]\n # xs.append(x_)\n # ys.append(y_)\n\n # cv2.rectangle(img, [xs[0], ys[0]], [xs[1], ys[1]], (0, 255, 0), thickness=2)\n # plt.imshow(img)\n # xs, ys = [], []\n\n # for vert in res[\"vertices\"]:\n # x_ = vert[\"x\"]\n # y_ = vert[\"y\"]\n # xs.append(x_)\n # ys.append(y_)\n # print(res)\n # plt.scatter(xs,ys)\n # dirname = \"deleteme\"\n # if not os.path.isdir(dirname):\n # os.mkdir(dirname)\n # plt.savefig(f\"./{dirname}/giff.png\")","repo_name":"khlin216/torchserve-streamer","sub_path":"torchserve/coordinators/triangles_coordinator.py","file_name":"triangles_coordinator.py","file_ext":"py","file_size_in_byte":10278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17357688416","text":"# /usr/bin/env python3.6\r\n\r\n# https://leetcode.com/problems/longest-consecutive-sequence/\r\n\r\n\r\nclass Leet128(object):\r\n def longest_consecutive(self, nums):\r\n \"\"\"\r\n Finds the length of the longest consecutive sequence.\r\n\r\n Args:\r\n An array of numbers.\r\n\r\n Returns:\r\n The length of the longest consecutive sequence.\r\n \"\"\"\r\n\r\n # Example: [100,4,200,1,3,2]\r\n # 100 -> while condition fails\r\n # 4 -> if condition fails\r\n # 200 -> while condition fails\r\n # 1 -> 2 -> 3 -> 4 (max_length = 4)\r\n # 3 -> if condition fails\r\n # 2 -> if condition fails\r\n # Time: O(n) Only checks each element once.\r\n\r\n # Key idea: convert an input array to a set to make search O(1)\r\n nums = set(nums)\r\n max_length = 0\r\n\r\n for num in nums:\r\n if num-1 not in nums: # This prevents duplicate work\r\n current_length = 1\r\n\r\n while num+1 in nums:\r\n current_length += 1\r\n num += 1\r\n\r\n max_length = max(max_length, current_length)\r\n\r\n return max_length\r\n","repo_name":"riehseun/software-engineering","sub_path":"archive/algorithms-leetcode/leet128.py","file_name":"leet128.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43791643138","text":"import pygame\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nfrom os import walk, listdir, rmdir\r\nimport os.path as op\r\n\r\n\r\nclass SpriteCollection():\r\n\tpygame.init()\r\n\tdef __init__(self):\t\t\r\n\t\t\r\n\t\tself.master = tk.Tk()\r\n\t\tself.master.geometry(\"384x128\")\r\n\t\tself.master.title(\"Sprite reducer by Ashardalon78\")\r\n\r\n\t\ttk.Label(self.master,text=\"Red:\").grid(row=0,column=0)\r\n\t\tself.T1 = tk.Text(self.master,height=1,width=5)\r\n\t\tself.T1.grid(row=0,column=1)\r\n\t\tself.T1.insert(tk.END,255)\r\n\t\ttk.Label(self.master,text=\"Green:\").grid(row=1,column=0)\r\n\t\tself.T2 = tk.Text(self.master,height=1,width=5)\r\n\t\tself.T2.grid(row=1,column=1)\r\n\t\tself.T2.insert(tk.END,255)\r\n\t\ttk.Label(self.master,text=\"Blue:\").grid(row=2,column=0)\r\n\t\tself.T3 = tk.Text(self.master,height=1,width=5)\r\n\t\tself.T3.grid(row=2,column=1)\r\n\t\tself.T3.insert(tk.END,255)\r\n\t\t\t\t\r\n\t\ttk.Button(self.master, text=\"Start\", command=self.read_files).grid(row=3, column=0)\t\t\r\n\r\n\t\tself.master.mainloop()\t\t\t\t\r\n\t\t\r\n\tdef read_files(self):\r\n\t\tself.red = 
int(self.T1.get('1.0',tk.END))\r\n\t\tself.green = int(self.T2.get('1.0',tk.END))\r\n\t\tself.blue = int(self.T3.get('1.0',tk.END))\r\n\t\t\r\n\t\tpath = filedialog.askdirectory()\r\n\t\tif not path:\r\n\t\t\treturn\r\n\t\r\n\t\tfor root, subdirs, files in walk(path):\t\t\t\r\n\t\t\tfor name in files:\r\n\t\t\t\tif name.endswith(\".png\"):\t\r\n\t\t\t\t\tprint(\"Prosessing: \" + str(op.join(root,name)))\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\tsprite = pygame.image.load(op.join(root,name))\r\n\t\t\t\t\tsprite_red = self.reduce_sprite(sprite)\r\n\t\t\t\t\tpygame.image.save(sprite_red, op.join(root,name))\r\n\t\t\t\t\t\r\n\tdef reduce_sprite(self, img):\r\n\t\twidth = img.get_width()\r\n\t\theight = img.get_height()\r\n\t\tbreakouter = False\r\n\t\tfor i in range(width):\r\n\t\t\tfor j in range(height):\r\n\t\t\t\tcolor = img.get_at((i, j))\t\t\t\t\t\r\n\t\t\t\tif color[0] != self.red or color[1] != self.green or color[2] != self.blue:\r\n\t\t\t\t\tlim_left = i\r\n\t\t\t\t\tbreakouter = True\r\n\t\t\t\t\tbreak\r\n\t\t\tif breakouter: break\r\n\t\tbreakouter = False\r\n\t\tfor i in range(width - 1, -1, -1):\r\n\t\t\tfor j in range(height):\r\n\t\t\t\tcolor = img.get_at((i, j))\r\n\t\t\t\tif color[0] != self.red or color[1] != self.green or color[2] != self.blue:\r\n\t\t\t\t\tlim_right = i\r\n\t\t\t\t\tbreakouter = True\r\n\t\t\t\t\tbreak\r\n\t\t\tif breakouter: break\r\n\t\tbreakouter = False\r\n\t\tfor j in range(height):\r\n\t\t\tfor i in range(width):\r\n\t\t\t\tcolor = img.get_at((i, j))\r\n\t\t\t\tif color[0] != self.red or color[1] != self.green or color[2] != self.blue:\r\n\t\t\t\t\tlim_top = j\r\n\t\t\t\t\tbreakouter = True\r\n\t\t\t\t\tbreak\r\n\t\t\tif breakouter: break\r\n\t\tbreakouter = False\r\n\t\tfor j in range(height - 1, -1, -1):\r\n\t\t\tfor i in range(width):\r\n\t\t\t\tcolor = img.get_at((i, j))\r\n\t\t\t\tif color[0] != self.red or color[1] != self.green or color[2] != self.blue:\r\n\t\t\t\t\tlim_bottom = j\r\n\t\t\t\t\tbreakouter = True\r\n\t\t\t\t\tbreak\r\n\t\t\tif breakouter: break\r\n\t\t\r\n\t\t#return img.subsurface((lim_right, lim_bottom, lim_left - lim_right, lim_top - lim_bottom))\r\n\t\treturn img.subsurface((lim_left, lim_top, lim_right - lim_left + 1, lim_bottom - lim_top + 1))\r\n\t\r\nAC = SpriteCollection()","repo_name":"Ashardalon78/spriteSizeReducer","sub_path":"spriteSizeReducer.py","file_name":"spriteSizeReducer.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34064350199","text":"import math\nimport random\nimport numpy as np\nimport torch\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\ndef data2cluster(x, c):\n if isinstance(x, list):\n x = torch.from_numpy(np.array(x))\n elif isinstance(x, np.ndarray): \n x = torch.from_numpy(x) \n x = x.to(c.device)\n return data2cluster_euc(x, c)\n \ndef data2cluster_euc(x, c):\n temp = x[:, None, :] - c[None, :, :]\n temp = temp ** 2\n temp = temp.sum(-1)\n value, idx = temp.min(dim=1)\n return idx, value.sum()\n \n \nclass ScheduledOptim:\n \n def __init__(self, optimizer, init_lr, n_warmup_steps=0, \n n_current_steps=0, final_steps=0):\n self._optimizer = optimizer\n self.init_lr = init_lr\n self.n_warmup_steps = n_warmup_steps\n self.n_current_steps = n_current_steps\n self.final_steps = final_steps\n self.lr = 0\n\n def step_and_update_lr(self):\n self._update_learning_rate()\n self._optimizer.step()\n\n def zero_grad(self):\n 
self._optimizer.zero_grad(set_to_none=True)\n \n def _update_learning_rate(self): \n if self.n_warmup_steps == 0: return\n \n self.n_current_steps += 1\n if self.n_current_steps < self.n_warmup_steps:\n lr_mult = float(self.n_current_steps) / float(max(1, self.n_warmup_steps))\n else:\n progress = float(self.n_current_steps - self.n_warmup_steps) / float(max(1, self.final_steps - self.n_warmup_steps))\n lr_mult = max(0.01, 0.5 * (1.0 + math.cos(math.pi * progress)))\n \n self.lr = self.init_lr * lr_mult \n for param_group in self._optimizer.param_groups:\n param_group['lr'] = self.lr #param_group['lr'] * lr_mult\n \n\n\n","repo_name":"qjchen1972/dmean","sub_path":"kmean/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38578944924","text":"from os import path, curdir, access, R_OK, W_OK\nimport os\nimport yaml\n\n\nclass ConfigCreator:\n cfg_name: str = 'image-scraper-config.yaml'\n cfg_path = path.join(curdir, cfg_name)\n\n cfg_exists: bool = False\n cfg_isfile: bool = False\n\n read_permission: bool = False\n write_permission: bool = False\n\n scraper_names: list\n config_data: dict = {}\n\n def __init__(self, scraper_names: list = []) -> None:\n self.scraper_names = scraper_names\n\n self.test_permissions()\n\n # configure\n if self.cfg_exists:\n self.read_config()\n else:\n self.config_input()\n self.write_config()\n\n if 'saved_images' not in self.config_data:\n self.config_data['saved_images'] = []\n\n if self.config_data.get('remove_saved', False):\n self.remove_saved()\n\n def test_permissions(self) -> None:\n if access(curdir, R_OK):\n self.read_permission = True\n if access(curdir, W_OK):\n self.write_permission = True\n\n if path.exists(self.cfg_path):\n self.cfg_exists = True\n\n if path.isfile(self.cfg_path):\n self.cfg_isfile = True\n\n def read_config(self) -> None:\n if self.cfg_isfile:\n if self.read_permission:\n with open(self.cfg_path, 'rt') as f:\n self.config_data = yaml.load(stream=f, Loader=yaml.FullLoader)\n else:\n print(f\"Insufficient permissions to read config '{self.cfg_name}' from '{curdir}'.\")\n else:\n print(f\"'{self.cfg_name}' already exists at path '{curdir}' but is not a file...\")\n\n def write_config(self) -> None:\n if self.write_permission:\n with open(self.cfg_path, 'wt') as f:\n yaml.dump(self.config_data, stream=f)\n else:\n print(f\"Insufficient permissions to write config file '{self.cfg_name}' at '{curdir}'.\")\n\n def update_saved(self, added_saved_images: list = []) -> None:\n self.config_data['saved_images'] = list(set(self.config_data['saved_images'] + added_saved_images))\n self.write_config()\n\n def config_input(self) -> None:\n # TODO setup scheduler\n # TODO set background image of desktop in os specific way\n\n while True: # set save path for images\n try:\n _save_path: str = input(\"Save path for the images: \")\n _path_ok: bool = self.check_save_path(_save_path)\n\n if _path_ok:\n break\n except Exception:\n not_valid_msg()\n\n while True: # remove saved images?\n try:\n _remove_saved: str = input(\"Remove saved images (yes/no): \")[0].lower()\n\n if _remove_saved == 'y':\n self.config_data['remove_saved'] = True\n elif _remove_saved == 'n':\n self.config_data['remove_saved'] = False\n else:\n continue\n break\n except Exception:\n not_valid_msg()\n\n self.add_scraper_counts()\n\n print()\n\n def add_scraper(self, scraper: dict):\n \"\"\"\n scraper scheme:\n\n scraper = {\n 'name': 
'your_scraper_name',\n 'long_name': 'your long name' # is optional\n }\n \"\"\"\n self.scraper_names.append(scraper)\n\n def add_scraper_counts(self):\n for scraper in self.scraper_names:\n while True: # count\n try:\n count: int = int(input(f\"Image count for '{scraper.get('long_name', scraper.get('name'))}': \"))\n\n if count > 0:\n self.config_data[f\"count_{scraper.get('name')}\"] = count\n else:\n self.config_data[f\"count_{scraper.get('name')}\"] = 0\n break\n except ValueError:\n print(\"Your input was not a number. Try again.\\n\")\n except Exception:\n not_valid_msg()\n\n def remove_saved(self) -> None:\n save_path = self.config_data.get('save_path')\n saved_images = set(self.config_data.get('saved_images'))\n\n for img in saved_images:\n try:\n os.remove(path.join(save_path, img))\n except PermissionError:\n print(f\"Failed to remove '{img}' at '{save_path}'.\")\n except FileNotFoundError:\n print(f\"File '{img}' not found at '{save_path}'.\")\n except Exception as e:\n print(f\"An unknown error occurred trying to remove '{img}' from '{save_path}'...\")\n print(f\"Details:\\n{e}\")\n\n self.config_data['saved_images'].remove(img)\n self.update_saved()\n\n def check_save_path(self, save_path) -> bool:\n if path.exists(save_path):\n if path.isdir(save_path):\n self.config_data['save_path'] = str(save_path)\n else:\n print('Path is not a directory...')\n return False\n else:\n try:\n if len(save_path) > 0:\n os.mkdir(save_path)\n self.config_data['save_path'] = str(save_path)\n else:\n _save_path = path.join(curdir, 'images')\n if not path.exists(_save_path):\n os.mkdir(_save_path)\n\n self.config_data['save_path'] = str(_save_path)\n except PermissionError:\n print(f\"Insufficient permissions for creating directory '{str(save_path)}'...\")\n return False\n except Exception as e:\n print(f\"Error creating directory '{str(save_path)}'...\")\n print(f\"Details:\\n{e}\")\n return False\n return True\n\n # call needed info for scraper\n def get_save_path(self) -> str:\n _save_path = path.join(curdir, 'images')\n save_path = self.config_data.get('save_path', _save_path)\n\n if self.check_save_path(save_path):\n return save_path\n return _save_path\n\n def get_scraper_count(self, name: str):\n count: int = self.config_data.get(f\"count_{name}\", 0)\n\n if count > 0:\n return count\n\n self.config_data[f\"count_{name}\"] = 0\n return 0\n\n\ndef not_valid_msg() -> None:\n print(\"Your input was not valid. 
Try again.\\n\")\n","repo_name":"nobkd/ImageScraper","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33362149981","text":"from flask import Flask, render_template, redirect, url_for, request\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired\nimport requests\nimport os\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\nBootstrap(app)\n\n\n## CREATE DATABASE ------------------------------------------------------\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///movie-collection.db\"\ndb = SQLAlchemy(app)\nclass Movie(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(250), nullable=False)\n year = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(250), nullable=False)\n rating = db.Column(db.Float, nullable=False)\n ranking = db.Column(db.Integer, nullable=False)\n review = db.Column(db.String(250), nullable=False)\n img_url = db.Column(db.String(250), nullable=False)\n \n #Optional: this will allow each movie object to be identified by its title when printed.\n def __repr__(self):\n return f''\n \n# Create the tables\nwith app.app_context():\n db.create_all()\n \n# # Create initial entry\n# new_movie = Movie(\n# title=\"Phone Booth\",\n# year=2002,\n# description=\"Publicist Stuart Shepard finds himself trapped in a phone booth, pinned down by an extortionist's sniper rifle. Unable to leave or receive outside help, Stuart's negotiation with the caller leads to a jaw-dropping climax.\",\n# rating=7.3,\n# ranking=10,\n# review=\"My favourite character was the caller.\",\n# img_url=\"https://image.tmdb.org/t/p/w500/tjrX2oWRCM3Tvarz38zlZM7Uc10.jpg\"\n# )\n# with app.app_context():\n# db.session.add(new_movie)\n# db.session.commit() \n\n\n## FORMS ----------------------------------------\n\nclass EditForm(FlaskForm):\n # Have a look at the basic fields of WTForms: https://wtforms.readthedocs.io/en/2.3.x/fields/#basic-fields\n new_rating = StringField(label='Your Rating Out of 10 e.g. 
7.5', validators=[DataRequired()])\n new_review = StringField(label='Your Review', validators=[DataRequired()])\n submit = SubmitField('Update')\n\nclass AddForm(FlaskForm):\n # Have a look at the basic fields of WTForms: https://wtforms.readthedocs.io/en/2.3.x/fields/#basic-fields\n new_movie_title = StringField(label='Movie Title', validators=[DataRequired()])\n submit = SubmitField('Add Movie')\n \n## PAGE ROUTING ------------------------------------------------------\n\n@app.route(\"/\")\ndef home():\n # Sort all movies in descending order\n all_movies = Movie.query.order_by(Movie.rating.desc()).all()\n \n # Update their ratings\n for movie in all_movies:\n movie.ranking = all_movies.index(movie) + 1\n db.session.commit()\n \n return render_template(\"index.html\", movies=all_movies)\n\n@app.route(\"/add\", methods=['POST', 'GET'])\ndef add():\n add_form = AddForm()\n \n if add_form.validate_on_submit():\n # --------- Code to search API for movie details goes here 👇 -----------\n query = add_form.new_movie_title.data\n parameters = {\n \"api_key\": os.environ.get('TMDB_API'),\n \"query\": query\n }\n\n response = requests.get(\"https://api.themoviedb.org/3/search/movie\", params=parameters, verify=False)\n response.raise_for_status() # Raise exceptions\n data = response.json()\n movies = data['results']\n return render_template(\"select.html\", movies=movies) \n \n return render_template(\"add.html\", form=add_form)\n\n@app.route(\"/find\")\ndef find_movie():\n \n # The movie argument is a dictionary passed as string through the URL -> needs to be converted to dict\n movie = eval(request.args.get('movie')) \n\n if movie:\n # Add the movie to the database\n new_movie = Movie(\n title=movie['title'],\n year=movie['release_date'][:4],\n description=movie['overview'],\n rating=0,\n ranking=0,\n review=\"\",\n img_url=f\"https://image.tmdb.org/t/p/w500{movie['poster_path']}\"\n )\n db.session.add(new_movie)\n db.session.commit()\n \n # Redirect to edit page after getting the new movie ID\n # db_movie = Movie.query.filter_by(title=movie['title']).first()\n # movie_id = db_movie.id\n # print(\"================================================\")\n # print(type(movie_id), movie_id)\n # print(\"================================================\")\n \n return redirect(url_for('edit', id=new_movie.id))\n\n@app.route(\"/edit\", methods=['POST', 'GET'])\ndef edit():\n \n edit_form = EditForm()\n movie_id = request.args.get('id')\n \n if edit_form.validate_on_submit():\n \n # get the movie from the database and update values from the form\n movie_to_update = Movie.query.get(movie_id)\n movie_to_update.rating = edit_form.new_rating.data\n movie_to_update.review = edit_form.new_review.data\n \n # Commit Changes\n db.session.commit()\n \n return redirect(url_for('home')) \n \n return render_template(\"edit.html\", form=edit_form)\n\n\n@app.route(\"/delete\", methods=['GET'])\ndef delete():\n \n movie_id = request.args.get('id')\n movie_to_delete = Movie.query.get(movie_id) \n db.session.delete(movie_to_delete)\n db.session.commit()\n return redirect(url_for('home'))\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"heySanj/100-PYTHON","sub_path":"capstones/movie-list/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35591804995","text":"import dataclasses\nimport copy\nimport enum\nimport sys\nimport copy\nimport functools\nimport pprint\nimport 
wrapt\nimport traceback\nimport jsondiff\nimport json\nfrom datetime import datetime\nfrom collections import ChainMap\nfrom typing import (\n Callable,\n Union,\n Dict,\n List,\n Optional,\n Iterable,\n Any,\n Type,\n TypeVar,\n Tuple,\n)\n\nfrom storage import EntityStorage\nfrom loggers import get_logger\nfrom model import (\n Entity,\n EntityClass,\n World,\n Scope,\n Version,\n Registrar,\n Serialized,\n Identity,\n EntityRef,\n Permission,\n MissingEntityException,\n CompiledJson,\n Common,\n)\nimport scopes.movement as movement\n\nlog = get_logger(\"dimsum\")\nPyRefKey = \"py/ref\"\nPyObjectKey = \"py/object\"\nPyTypeKey = \"py/type\"\n\n\n# From stackoverflow\ndef full_class_name(klass):\n module = klass.__module__\n return module + \".\" + klass.__qualname__\n\n\nclass Identities(enum.Enum):\n PRIVATE = 1\n PUBLIC = 2\n HIDDEN = 3\n\n\nclass EntityProxy(wrapt.ObjectProxy):\n def __init__(self, ref: EntityRef):\n super().__init__(ref)\n self._self_ref = ref\n\n def __getattr__(self, *arg):\n if self.__wrapped__ is None:\n log.info(\"self.None __getattr__: %s %s\", arg, self._self_ref)\n return super().__getattr__(*arg)\n\n def __deepcopy__(self, memo):\n return copy.deepcopy(self.__wrapped__, memo)\n\n def __repr__(self) -> str:\n assert self.__wrapped__\n return str(self.__wrapped__)\n\n def __str__(self) -> str:\n assert self.__wrapped__\n return str(self.__wrapped__)\n\n\nclass SerializationException(Exception):\n pass\n\n\n@dataclasses.dataclass(frozen=True)\nclass RestoreContext:\n classes: List[Type] = dataclasses.field(default_factory=list)\n lookup: Dict[str, Type] = dataclasses.field(default_factory=dict)\n add_reference: Optional[Callable] = None\n depth: int = 0\n\n @functools.cached_property\n def classes_map(self):\n return {self._importable_name(c): c for c in self.classes or []}\n\n def increase(self) -> \"RestoreContext\":\n return RestoreContext(\n classes=self.classes, lookup=self.lookup, depth=self.depth + 1\n )\n\n def load_class(self, module_and_name: str):\n \"\"\"\n Loads the module and returns the class.\n >>> cls = loadclass('datetime.datetime')\n >>> cls.__name__\n 'datetime'\n >>> loadclass('does.not.exist')\n >>> loadclass('builtins.int')()\n 0\n\n This is copied from jsonpickle.\n \"\"\"\n # Check if the class exists in a caller-provided scope\n if self.classes_map:\n try:\n return self.classes_map[module_and_name]\n except KeyError:\n pass\n # Otherwise, load classes from globally-accessible imports\n names = module_and_name.split(\".\")\n # First assume that everything up to the last dot is the module name,\n # then try other splits to handle classes that are defined within\n # classes\n for up_to in range(len(names) - 1, 0, -1):\n module = \".\".join(names[:up_to])\n try:\n __import__(module)\n obj = sys.modules[module]\n for class_name in names[up_to:]:\n obj = getattr(obj, class_name)\n return obj\n except (AttributeError, ImportError, ValueError):\n continue\n return None\n\n def _importable_name(self, cls):\n \"\"\"\n >>> class Example(object):\n ... 
pass\n >>> ex = Example()\n >>> importable_name(ex.__class__) == 'jsonpickle.util.Example'\n True\n >>> importable_name(type(25)) == 'builtins.int'\n True\n >>> importable_name(None.__class__) == 'builtins.NoneType'\n True\n >>> importable_name(False.__class__) == 'builtins.bool'\n True\n >>> importable_name(AttributeError) == 'builtins.AttributeError'\n True\n \"\"\"\n # Use the fully-qualified name if available (Python >= 3.3)\n name = getattr(cls, \"__qualname__\", cls.__name__)\n return \"{}.{}\".format(cls.__module__, name)\n\n\n@functools.singledispatch\ndef _restore_value(value, ctx: RestoreContext) -> Any:\n return value\n\n\n@_restore_value.register\ndef _restore_value_list(value: list, ctx: RestoreContext) -> Any:\n return [_restore_value(v, ctx) for v in value]\n\n\ndef _restore_value_obj_version(value: dict, ctx: RestoreContext):\n return Version(i=value[\"i\"])\n\n\ndef _restore_value_obj_entity_ref(value: dict, ctx: RestoreContext):\n try:\n pyObject = value[PyRefKey]\n del value[PyRefKey]\n ref = EntityRef.new(pyObject=pyObject, **value)\n assert ctx.add_reference\n return ctx.add_reference(ref)\n except:\n log.exception(\"error:entity-ref value=%s\", value, exc_info=True)\n raise\n\n\ndef _restore_value_obj_datetime(value: dict, ctx: RestoreContext):\n return datetime.fromisoformat(value[\"time\"])\n\n\n_handlers = {\n \"model.entity.Version\": _restore_value_obj_version,\n \"model.entity.EntityRef\": _restore_value_obj_entity_ref,\n \"datetime.datetime\": _restore_value_obj_datetime,\n}\n\n\n@_restore_value.register\ndef _restore_value_dict(value: dict, ctx: RestoreContext) -> Any:\n def restore_all():\n try:\n return {key: _restore_value(v, ctx) for key, v in value.items()}\n except:\n log.error(\"error:restore-all value=%s\", value)\n raise\n\n def restore_children():\n try:\n return {\n key: _restore_value(v, ctx)\n for key, v in value.items()\n if key not in [PyObjectKey]\n }\n except:\n log.error(\"error:restore-children value=%s\", value)\n raise\n\n if PyObjectKey in value:\n try:\n class_name = value[PyObjectKey]\n if class_name in _handlers:\n try:\n return _handlers[class_name](restore_children(), ctx)\n except:\n log.error(\n \"error:handler class-name=%s handler=%s\",\n class_name,\n _handlers[class_name],\n )\n raise\n ctor = ctx.load_class(class_name)\n if ctor:\n restored = restore_children()\n try:\n return ctor(**restored)\n except:\n log.error(\n \"error: class-name=%s ctor=%s restored=%s\",\n class_name,\n ctor,\n restored,\n exc_info=True,\n )\n raise\n except:\n log.error(\"error:object value=%s\", value)\n raise\n if PyTypeKey in value:\n return ctx.load_class(value[PyTypeKey])\n return restore_all()\n\n\n@dataclasses.dataclass(frozen=True)\nclass FlattenContext:\n identities: Identities = Identities.PUBLIC\n depth: int = 1\n\n def decrease(self) -> \"FlattenContext\":\n assert self.depth > 0\n return FlattenContext(identities=self.identities, depth=self.depth - 1)\n\n def path(self, key: str) -> \"FlattenContext\":\n return self\n\n\nclass NoFlattenerException(Exception):\n pass\n\n\n@functools.singledispatch\ndef _flatten_value(value, ctx: FlattenContext) -> Any:\n raise NoFlattenerException(f\"no flattener: {value}\")\n\n\ndef _include_key(key: str) -> bool:\n return not key.startswith(\"_\")\n\n\n@_flatten_value.register\ndef _flatten_value_dict(value: dict, ctx: FlattenContext) -> Any:\n return {\n key: _flatten_value(v, ctx.path(key))\n for key, v in value.items()\n if _include_key(key)\n }\n\n\ndef _py_object(obj: Any, **kwargs) -> Dict[str, 
Any]:\n return {\n **{\"py/object\": full_class_name(obj.__class__)},\n **kwargs,\n }\n\n\n@_flatten_value.register\ndef _flatten_value_string(value: str, ctx: FlattenContext) -> Any:\n return value\n\n\n@_flatten_value.register\ndef _flatten_value_integer(value: int, ctx: FlattenContext) -> Any:\n return value\n\n\n@_flatten_value.register\ndef _flatten_value_float(value: float, ctx: FlattenContext) -> Any:\n return value\n\n\n@_flatten_value.register\ndef _flatten_value_list(value: list, ctx: FlattenContext) -> Any:\n return [_flatten_value(v, ctx) for v in value]\n\n\n@_flatten_value.register\ndef _flatten_value_tuple(value: tuple, ctx: FlattenContext) -> Any:\n return [_flatten_value(v, ctx) for v in value]\n\n\n@_flatten_value.register\ndef _flatten_value_none(value: None, ctx: FlattenContext) -> Any:\n return None\n\n\n@_flatten_value.register\ndef _flatten_value_object(value: object, ctx: FlattenContext) -> Any:\n # I wish there was a way to get singledisapatch to respect this,\n # but there's simply no way that I can tell.\n if isinstance(value, type):\n return {\"py/type\": full_class_name(value)}\n return _py_object(value, **_flatten_value_dict(value.__dict__, ctx))\n\n\n@_flatten_value.register\ndef _flatten_value_enum(value: enum.Enum, ctx: FlattenContext) -> Any:\n return _py_object(value, value=str(value))\n\n\n@_flatten_value.register\ndef _flatten_value_datetime(value: datetime, ctx: FlattenContext) -> Any:\n return {\n \"py/object\": \"datetime.datetime\",\n \"time\": value.isoformat(),\n }\n\n\n@_flatten_value.register\ndef _flatten_value_entity(value: Entity, ctx: FlattenContext) -> Any:\n if ctx.depth > 0:\n return _py_object(value, **_flatten_value_dict(value.__dict__, ctx.decrease()))\n else:\n assert isinstance(value, Entity)\n assert value.props.name\n ref = EntityRef(\n key=value.key,\n klass=value.klass.__name__,\n name=value.props.name,\n pyObject=full_class_name(value.__class__),\n )\n return _flatten_value(ref, ctx)\n\n\n@_flatten_value.register\ndef _flatten_value_entity_ref(value: EntityRef, ctx: FlattenContext) -> Any:\n return {\n \"py/object\": full_class_name(EntityRef),\n \"py/ref\": value.pyObject,\n \"key\": value.key,\n \"klass\": value.klass,\n \"name\": value.name,\n }\n\n\n@_flatten_value.register\ndef _flatten_value_version(value: Version, ctx: FlattenContext) -> Any:\n return _py_object(value, i=value.i)\n\n\n@_flatten_value.register\ndef _flatten_value_identity(value: Identity, ctx: FlattenContext) -> Any:\n if ctx.identities == Identities.HIDDEN:\n return _py_object(\n value,\n **{\n \"public\": \"\",\n \"signature\": \"\",\n \"private\": \"\",\n },\n )\n\n if ctx.identities == Identities.PRIVATE:\n return _py_object(\n value,\n **{\n \"public\": value.public,\n \"signature\": value.signature,\n \"private\": value.private,\n },\n )\n\n return _py_object(\n value,\n **{\n \"public\": value.public,\n \"signature\": value.signature,\n },\n )\n\n\n@_flatten_value.register\ndef _flatten_value_direction(value: movement.Direction, ctx: FlattenContext) -> Any:\n return _py_object(value, compass=value.name)\n\n\nclass ScopeNotSerializableException(Exception):\n pass\n\n\n@_flatten_value.register\ndef _flatten_value_scope(value: Scope, ctx: FlattenContext) -> Any:\n raise ScopeNotSerializableException()\n\n\ndef _flatten(value, unpicklable=True, identities=Identities.PUBLIC):\n return _flatten_value(value, FlattenContext(identities=identities))\n\n\ndef serialize(\n value, indent=None, unpicklable=True, identities=Identities.PUBLIC\n) -> Optional[str]:\n 
if value is None:\n return value\n\n try:\n flattened = _flatten(value, unpicklable=unpicklable, identities=identities)\n try:\n return json.dumps(flattened, indent=indent)\n except:\n log.error(\"flattened: %s\", flattened)\n raise\n except ScopeNotSerializableException as e:\n log.error(\"open entity scope value=%s\", value)\n log.error(\"open entity scope scopes=%s\", value.scopes)\n raise e\n\n\ndef _deserialize(compiled: CompiledJson, lookup):\n return _restore_value(\n compiled.compiled, RestoreContext(classes=[Entity, World], add_reference=lookup)\n )\n\n\ndef deserialize_non_entity(\n value: Union[str, Dict[str, Any]], classes: Optional[List[Type]] = None\n):\n if isinstance(value, str):\n value = json.loads(value)\n return _restore_value(value, RestoreContext(classes=classes or []))\n\n\n@dataclasses.dataclass()\nclass Materialized:\n entities: List[Entity]\n\n def empty(self) -> bool:\n return len(self.entities) == 0\n\n def maybe_one(self) -> Optional[Entity]:\n if len(self.entities) == 1:\n return self.entities[0]\n return None\n\n def one(self) -> Entity:\n if len(self.entities) == 1:\n return self.entities[0]\n raise MissingEntityException()\n\n def all(self) -> List[Entity]:\n return self.entities\n\n\nasync def materialize(\n registrar: Optional[Registrar] = None,\n store: Optional[EntityStorage] = None,\n key: Optional[str] = None,\n gid: Optional[int] = None,\n json: Optional[List[Serialized]] = None,\n reach=None,\n depth: int = 0,\n cache: Optional[Dict[str, List[Serialized]]] = None,\n proxy_factory: Optional[Callable] = None,\n refresh: bool = False,\n migrate: Optional[Callable] = None,\n) -> Materialized:\n assert registrar\n assert store\n\n def _noop_migration(v):\n return False, v\n\n migrate = migrate or _noop_migration\n single_entity = json is None\n cache = cache or {}\n found = None\n if key is not None:\n if not refresh:\n log.debug(\"[%d] materialize key=%s\", depth, key)\n found = registrar.find_by_key(key)\n if found:\n return Materialized([found])\n\n if key in cache:\n json = cache[key]\n else:\n json = await store.load_by_key(key)\n if len(json) == 0:\n log.info(\"[%d] %s missing key=%s\", depth, store, key)\n return Materialized([])\n\n if gid is not None:\n if not refresh:\n log.debug(\"[%d] materialize gid=%d\", depth, gid)\n found = registrar.find_by_gid(gid)\n if found:\n return Materialized([found])\n\n json = await store.load_by_gid(gid)\n if len(json) == 0:\n log.info(\"[%d] %s missing gid=%d\", depth, store, gid)\n return Materialized([])\n\n log.debug(\"json: %s\", json)\n\n refs: Dict[str, EntityProxy] = {}\n depths: Dict[str, int] = {}\n\n def reference(ref: EntityRef):\n if ref.key not in refs:\n refs[ref.key] = EntityProxy(ref)\n depths[ref.key] = depth\n return refs[ref.key]\n\n if not json or len(json) == 0:\n raise SerializationException(\"no json for {0}\".format({\"key\": key, \"gid\": gid}))\n\n cache.update(**{se.key: [se] for se in json})\n\n serialized = json[0].serialized # TODO why not all json?\n compiled = CompiledJson.compile(serialized)\n migrated, after_migration = migrate(compiled)\n deserialized = _deserialize(after_migration, reference)\n proxied = proxy_factory(deserialized) if proxy_factory else deserialized\n loaded = proxied\n\n deeper = True\n choice = 0\n if reach:\n choice = reach(loaded, depth)\n if choice < 0:\n log.debug(\"reach! reach! 
reach!\")\n deeper = False\n choice = 0\n elif choice > 0:\n depths[loaded.key] = depths.setdefault(loaded.key, 0) + choice\n log.debug(\n \"depth-change: %s choice=%d depth[loaded]=%d\",\n loaded.klass,\n choice,\n depths[loaded.key],\n )\n\n loaded.__post_init__()\n\n registrar.register(loaded, compiled=compiled, depth=depth + choice)\n\n if deeper:\n for referenced_key, proxy in refs.items():\n log.debug(\"materialize: %s -> %s\", loaded, referenced_key)\n linked = await materialize(\n registrar=registrar,\n store=store,\n key=referenced_key,\n reach=reach,\n depth=depths[referenced_key] + choice,\n proxy_factory=proxy_factory,\n cache=cache,\n migrate=migrate,\n )\n proxy.__wrapped__ = linked.one()\n\n loaded.validate()\n\n if migrated:\n loaded.touch()\n\n if single_entity:\n return Materialized([loaded])\n\n return Materialized(\n [v for v in [registrar.find_by_key(se.key) for se in json] if v]\n )\n\n\ndef for_update(\n entities: Iterable[Entity], everything: bool = True, **kwargs\n) -> Dict[str, CompiledJson]:\n def _assert(s: Optional[str]) -> str:\n assert s\n return s\n\n return {\n e.key: CompiledJson.compile(\n _assert(serialize(e, identities=Identities.PRIVATE, **kwargs))\n )\n for e in entities\n if everything or e.modified\n }\n","repo_name":"jlewallen/dimsum","sub_path":"src/dimsum/serializing.py","file_name":"serializing.py","file_ext":"py","file_size_in_byte":17147,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"30827605171","text":"from cryptography.fernet import Fernet\n\nSECRET_KEY = \"hkBxrbZ9Td4QEwgRewV6gZSVH4q78vBia4GBYuqd09SsiMsIjH\"\nFERNET_KEY = b'Jbx7Zr2pQ3YgKei404YLNqS_fx_mmUPHd-ryjDGg2wM='\n\n\ndef encrypt(plain_text):\n cipher_suite = Fernet(FERNET_KEY)\n encrypted_text = cipher_suite.encrypt(str.encode(plain_text))\n return encrypted_text.decode('utf-8')\n#\n#\n# def decrypt(encrypted_text):\n# cipher_suite = Fernet(FERNET_KEY)\n# return cipher_suite.decrypt(str.encode(encrypted_text)).decode('utf-8')\n","repo_name":"sm4544/Carian-Drexel","sub_path":"Backend/hospitalManagementDjango/backend/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"359107661","text":"import os,sys\n# 파이썬 디버깅\n\n# TEST_CODE_PATH = \"test.py\"\n# TEST_CASE_PATH = \"testcase.txt\"\n\n# command = f'python3 {TEST_CODE_PATH} < {TEST_CASE_PATH}'\n\n# os.system(command)\n\n\n# 프로그래머스 디버깅\ncommands = [\n 'echo \"\" > tmp_carrage.txt',\n 'cat testcase.txt tmp_carrage.txt test.py > tmp_test.py',\n # 'python3 tmp_test.py'\n # 'rm -rf tmp_test.py'\n]\nfor command in commands:\n os.system(command)\n\n\n\n","repo_name":"leejuhanKr/Algorithm","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"13083832554","text":"courses = [\"Java-разработчик с нуля\", \"Fullstack-разработчик на Python\", \"Python-разработчик с нуля\",\n \"Frontend-разработчик с нуля\"]\nmentors = [\n [\"Филипп Воронов\", \"Анна Юшина\", \"Иван Бочаров\", \"Анатолий Корсаков\", \"Юрий Пеньков\", \"Илья Сухачев\",\n \"Иван Маркитан\", \"Ринат Бибиков\", \"Вадим Ерошевичев\", \"Тимур Сейсембаев\", \"Максим Батырев\", \"Никита Шумский\",\n \"Алексей Степанов\", \"Денис Коротков\", \"Антон Глушков\", \"Сергей Индюков\", \"Максим Воронцов\", \"Евгений Грязнов\",\n \"Константин Виролайнен\", 
\"Сергей Сердюк\", \"Павел Дерендяев\"],\n [\"Евгений Шмаргунов\", \"Олег Булыгин\", \"Александр Бардин\", \"Александр Иванов\", \"Кирилл Табельский\",\n \"Александр Ульянцев\", \"Роман Гордиенко\", \"Адилет Асканжоев\", \"Александр Шлейко\", \"Алена Батицкая\", \"Денис Ежков\",\n \"Владимир Чебукин\", \"Эдгар Нуруллин\", \"Евгений Шек\", \"Максим Филипенко\", \"Елена Никитина\"],\n [\"Евгений Шмаргунов\", \"Олег Булыгин\", \"Дмитрий Демидов\", \"Кирилл Табельский\", \"Александр Ульянцев\",\n \"Александр Бардин\", \"Александр Иванов\", \"Антон Солонилин\", \"Максим Филипенко\", \"Елена Никитина\", \"Азамат Искаков\",\n \"Роман Гордиенко\"],\n [\"Владимир Чебукин\", \"Эдгар Нуруллин\", \"Евгений Шек\", \"Валерий Хаслер\", \"Татьяна Тен\", \"Александр Фитискин\",\n \"Александр Шлейко\", \"Алена Батицкая\", \"Александр Беспоясов\", \"Денис Ежков\", \"Николай Лопин\", \"Михаил Ларченко\"]\n]\ndurations = [14, 20, 12, 20]\n\n\ndef get_min_max_duration_cource(courses, durations):\n courses_list = []\n # Допишите код, который генерирует словарь-курс с тремя ключами: \"title\", \"mentors\", \"duration\"\n for title, duration in zip(courses, durations):\n course_dict = {\"title\": title, \"duration\": duration}\n courses_list.append(course_dict)\n\n # min_dur = min(courses_list, key=lambda x: x[\"duration\"])[\"duration\"]\n # max_dur = max(courses_list, key=lambda x: x[\"duration\"])[\"duration\"]\n\n min_dur = min(durations)\n max_dur = max(durations)\n\n maxes = []\n minis = []\n for i, _ in enumerate(durations):\n if _ == max_dur:\n maxes.append(i)\n elif _ == min_dur:\n minis.append(i)\n\n courses_min = []\n courses_max = []\n for i in minis:\n courses_min.append(courses_list[i][\"title\"])\n for i in maxes:\n courses_max.append(courses_list[i][\"title\"])\n\n return courses_min, min_dur, courses_max, max_dur\n\n\nif __name__ == \"__main__\":\n courses_min, min_dur, courses_max, max_dur = get_min_max_duration_cource(courses, durations)\n\n # Допишите конструкцию вывода результата. 
Можете использовать string.join()\n print(f'Самый короткий курс(ы): {\", \".join(courses_min)} - {min_dur} месяца(ев)')\n print(f'Самый длинный курс(ы): {\", \".join(courses_max)} - {max_dur} месяца(ев)')","repo_name":"TheKex/pytest_basic_use","sub_path":"my_collections/min_max.py","file_name":"min_max.py","file_ext":"py","file_size_in_byte":3779,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36886130682","text":"from flask import (\n Blueprint,\n current_app,\n request\n)\n\n# メインから読み込むBlueprintの定義\nbp = Blueprint(\"endpoint\", __name__)\n\n\n# LINE Messaging APIからのWebhookのリクエストハンドラー\n@bp.route(\"/line/endpoint\", methods=[\"POST\"])\ndef line_endpoint():\n current_app.line_adapter.handle_http_request(request.data, request.headers)\n return \"ok\"\n","repo_name":"uezo/linebot-project-template","sub_path":"controllers/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"ja","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"19247913587","text":"__author__ = 'jonathan'\n\nimport test.nova._fixtures as models\nfrom lib.rome.core.orm.query import Query\n\nfrom lib.rome.core.orm.query import Query as RomeQuery\nfrom lib.rome.core.session.session import Session as RomeSession\nimport six\n# from oslo.utils import timeutils\nfrom lib.rome.core.utils import timeutils\nfrom test.nova.methods.test_ensure_default_secgroup import _security_group_ensure_default, _security_group_get_query\n\nimport logging\nimport uuid\nfrom lib.rome.core.orm.query import or_\nfrom lib.rome.core.orm.query import and_\nfrom sqlalchemy.sql import null\nimport collections\n\nfrom sqlalchemy.sql.expression import asc\nfrom sqlalchemy.sql.expression import desc\nfrom nova.compute import vm_states\n\nLOG = logging.getLogger()\n\n# List of fields that can be joined in DB layer.\n_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',\n 'info_cache', 'security_groups',\n 'pci_devices']\n# These are fields that are optional but don't translate to db columns\n_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'numa_topology',\n 'pci_requests']\n\n# These are fields that can be specified as expected_attrs\nINSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +\n _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS)\n\ndef get_session(use_slave=False, **kwargs):\n # return FakeSession()\n return RomeSession()\n # return OldRomeSession()\n\n\ndef model_query(context, *args, **kwargs):\n # base_model = kwargs[\"base_model\"]\n # models = args\n return RomeQuery(*args, **kwargs)\n\n\ndef _network_get_query(context, session=None):\n return model_query(context, models.Network, session=session,\n read_deleted=\"no\")\n\ndef _instance_get_all_query(context, project_only=False,\n joins=None, use_slave=False):\n if joins is None:\n joins = ['info_cache', 'security_groups']\n\n query = model_query(context,\n models.Instance,\n project_only=project_only,\n use_slave=use_slave)\n # for join in joins:\n # query = query.options(joinedload(join))\n return query\n\ndef _instance_pcidevs_get_multi(context, instance_uuids, session=None):\n return model_query(context, models.PciDevice, session=session).\\\n filter_by(status='allocated').\\\n filter(models.PciDevice.instance_uuid.in_(instance_uuids))\n\ndef _instances_fill_metadata(context, instances,\n manual_joins=None, use_slave=False):\n \"\"\"Selectively fill instances with manually-joined metadata. 
Note that\n instance will be converted to a dict.\n :param context: security context\n :param instances: list of instances to fill\n :param manual_joins: list of tables to manually join (can be any\n combination of 'metadata' and 'system_metadata' or\n None to take the default of both)\n \"\"\"\n\n def flatten(l):\n return [item for sublist in l for item in sublist]\n\n uuids = [inst['uuid'] for inst in instances]\n\n if manual_joins is None:\n manual_joins = ['metadata', 'system_metadata']\n\n meta = collections.defaultdict(list)\n if 'system_metadata' in manual_joins:\n for instance in instances:\n for metadata in instance.metadata:\n meta[instance.uuid].append(metadata)\n\n sys_meta = collections.defaultdict(list)\n if 'system_metadata' in manual_joins:\n for instance in instances:\n for system_metadata in instance.system_metadata:\n sys_meta[instance.uuid].append(system_metadata)\n\n pcidevs = collections.defaultdict(list)\n if 'pci_devices' in manual_joins:\n for row in _instance_pcidevs_get_multi(context, uuids):\n pcidevs[row['instance_uuid']].append(row)\n\n filled_instances = []\n for inst in instances:\n inst = dict(inst.iteritems())\n # inst['system_metadata'] = sys_meta[inst['uuid']]\n inst['metadata'] = meta[inst['uuid']]\n if 'pci_devices' in manual_joins:\n inst['pci_devices'] = pcidevs[inst['uuid']]\n filled_instances.append(inst)\n\n return filled_instances\n\ndef _manual_join_columns(columns_to_join):\n manual_joins = []\n for column in ('metadata', 'system_metadata', 'pci_devices'):\n if column in columns_to_join:\n columns_to_join.remove(column)\n manual_joins.append(column)\n return manual_joins, columns_to_join\n\ndef instance_get_all_by_filters(context, filters, sort_key, sort_dir,\n limit=None, marker=None, columns_to_join=None,\n use_slave=False):\n \"\"\"Return instances that match all filters. Deleted instances\n will be returned by default, unless there's a filter that says\n otherwise.\n\n Depending on the name of a filter, matching for that filter is\n performed using either exact matching or as regular expression\n matching. 
Exact matching is applied for the following filters::\n\n | ['project_id', 'user_id', 'image_ref',\n | 'vm_state', 'instance_type_id', 'uuid',\n | 'metadata', 'host', 'system_metadata']\n\n\n A third type of filter (also using exact matching), filters\n based on instance metadata tags when supplied under a special\n key named 'filter'::\n\n | filters = {\n | 'filter': [\n | {'name': 'tag-key', 'value': ''},\n | {'name': 'tag-value', 'value': ''},\n | {'name': 'tag:', 'value': ''}\n | ]\n | }\n\n Special keys are used to tweek the query further::\n\n | 'changes-since' - only return instances updated after\n | 'deleted' - only return (or exclude) deleted instances\n | 'soft_deleted' - modify behavior of 'deleted' to either\n | include or exclude instances whose\n | vm_state is SOFT_DELETED.\n\n \"\"\"\n # NOTE(mriedem): If the limit is 0 there is no point in even going\n # to the database since nothing is going to be returned anyway.\n if limit == 0:\n return []\n\n sort_fn = {'desc': desc, 'asc': asc}\n\n # if CONF.database.slave_connection == '':\n # use_slave = False\n\n session = get_session(use_slave=use_slave)\n\n if columns_to_join is None:\n columns_to_join = ['info_cache', 'security_groups']\n manual_joins = ['metadata', 'system_metadata']\n else:\n manual_joins, columns_to_join = _manual_join_columns(columns_to_join)\n\n query_prefix = session.query(models.Instance)\n # for column in columns_to_join:\n # query_prefix = query_prefix.options(joinedload(column))\n\n query_prefix = query_prefix.order_by(sort_fn[sort_dir](\n getattr(models.Instance, sort_key)))\n\n # Make a copy of the filters dictionary to use going forward, as we'll\n # be modifying it and we shouldn't affect the caller's use of it.\n filters = filters.copy()\n filters_ = {}\n\n query_prefix = session.query(models.Instance)\n if 'changes-since' in filters:\n filters.pop('changes_since')\n changes_since = timeutils.normalize_time(filters['changes-since'])\n query_prefix = query_prefix.\\\n filter(models.Instance.updated_at >= changes_since)\n\n if 'deleted' in filters:\n # Instances can be soft or hard deleted and the query needs to\n # include or exclude both\n if filters.pop('deleted'):\n if filters.pop('soft_deleted', True):\n deleted = or_(\n models.Instance.deleted == models.Instance.id,\n models.Instance.vm_state == vm_states.SOFT_DELETED\n )\n query_prefix = query_prefix.\\\n filter(deleted)\n else:\n query_prefix = query_prefix.\\\n filter(models.Instance.deleted == models.Instance.id)\n else:\n query_prefix = query_prefix.\\\n filter_by(deleted=0)\n if not filters.pop('soft_deleted', False):\n # It would be better to have vm_state not be nullable\n # but until then we test it explicitly as a workaround.\n not_soft_deleted = or_(\n models.Instance.vm_state != vm_states.SOFT_DELETED,\n models.Instance.vm_state == null()\n )\n query_prefix = query_prefix.filter(not_soft_deleted)\n\n # if 'cleaned' in filters:\n # if filters.pop('cleaned'):\n # query_prefix = query_prefix.filter(models.Instance.cleaned == 1)\n # else:\n # query_prefix = query_prefix.filter(models.Instance.cleaned == 0)\n\n # if not context.is_admin:\n # # If we're not admin context, add appropriate filter..\n # if context.project_id:\n # filters['project_id'] = context.project_id\n # else:\n # filters['user_id'] = context.user_id\n\n # # Filters for exact matches that we can do along with the SQL query...\n # # For other filters that don't match this, we will do regexp matching\n # exact_match_filter_names = ['project_id', 'user_id', 
'image_ref',\n # 'vm_state', 'instance_type_id', 'uuid',\n # 'metadata', 'host', 'task_state',\n # 'system_metadata']\n\n # # Filter the query\n # query_prefix = exact_filter(query_prefix, models.Instance,\n # filters, exact_match_filter_names)\n\n # query_prefix = regex_filter(query_prefix, models.Instance, filters)\n # query_prefix = tag_filter(context, query_prefix, models.Instance,\n # models.InstanceMetadata,\n # models.InstanceMetadata.instance_uuid,\n # filters)\n\n # paginate query\n # if marker is not None:\n # try:\n # marker = _instance_get_by_uuid(context, marker, session=session)\n # except exception.InstanceNotFound:\n # raise exception.MarkerNotFound(marker)\n # TODO: following cannot yet work with the RIAK DB implementation!\n # query_prefix = sqlalchemyutils.paginate_query(query_prefix,\n # models.Instance, limit,\n # [sort_key, 'created_at', 'id'],\n # marker=marker,\n # sort_dir=sort_dir)\n # print(\"filters: %s\" % (filters))\n # query_prefix = RomeQuery(models.Instance).filter_dict(filters_)\n # query_prefix = RomeQuery(models.Instance)\n return query_prefix.all()\n # return _instances_fill_metadata(context, query_prefix.all(), manual_joins)\n\n\nclass Context(object):\n def __init__(self, project_id, user_id):\n self.project_id = project_id\n self.user_id = user_id\n\n\nif __name__ == '__main__':\n\n logging.getLogger().setLevel(logging.DEBUG)\n\n context = Context(\"admin\", \"admin\")\n\n host = \"jonathan-VirtualBox\"\n # host = \"edel-17\"\n\n filters = {'deleted': True,\n 'soft_deleted': False,\n 'host': host,\n 'cleaned': False}\n\n # filters = {'deleted': False, 'project_id': u'6bcb3e3fcf2e4d238e22be73215dc394'}\n\n sort_key='created_at'\n sort_dir='desc'\n\n attrs = ['info_cache', 'security_groups', 'system_metadata']\n # with utils.temporary_mutation(context, read_deleted='yes'):\n instances = instance_get_all_by_filters(context, filters, sort_key, sort_dir)\n LOG.debug('There are %d instances to clean', len(instances))\n\n for instance in instances:\n print(instance)","repo_name":"BeyondTheClouds/rome","sub_path":"test/nova/methods/test_instance_get_all_by_filters.py","file_name":"test_instance_get_all_by_filters.py","file_ext":"py","file_size_in_byte":11732,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"29254544197","text":"import os\nimport sys\n\n\ndef main():\n if len(sys.argv) == 1:\n sys.stdout.write(\"Hello, world!\")\n sys.stderr.write(\"Hello, errors!\")\n\n elif len(sys.argv) == 2:\n if sys.argv[1] == \"check environment\":\n if os.environ[\"var_name\"] == \"good value\":\n sys.stdout.write(\"good environment\")\n else:\n sys.stderr.write(\"bad environment `{}`\".format(os.environ[\"var_name\"]))\n exit(1)\n\n elif sys.argv[1] == \"check stdin\":\n data = sys.stdin.readline()\n sys.stdout.write(data)\n\n elif sys.argv[1] == \"check argument\":\n sys.stdout.write(\"good argument\")\n\n else:\n sys.stderr.write(\"bad argument `{}`\".format(sys.argv[1]))\n exit(1)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26114044516","text":"# Entrar o nome, total de partidas e quantidade de gols por partida\n# De quantos jogadores o usuario desejar\n# Ao final retornar uma tabela, contendo o codígo, nome, os gols por partida, e o total de gols\n# Dar a opção do usuario procurar por 
mais detalhes usando o codigo, do jogardor\n\n\nCadDic = {'nome':'joelson','partidas': 5,'golsPartida': [1,3,0,0,0]} , {'nome':'messi','partidas': 4,'golsPartida': [0,1,1,0]}\nJogInf = dict()\n\nwhile True:\n JogInf['nome'] = str(input('Jogador nome: ').strip().lower())\n JogInf['partidas'] = int(input('Quantidade de partidas: '))\n\n\n JogInf['golsPartida']","repo_name":"Bobonimo111/Python-treinos-","sub_path":"Aula 19 Dicionarios/Desafio095.py","file_name":"Desafio095.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72110582800","text":"from __future__ import with_statement\nimport nuke\nimport IECore\nfrom .StringUtil import nukeFileSequence, ieCoreFileSequence\n\n__parameterKnobConverters = []\n\n## Registers knob converter functions for IECore data types.\n# @param paramClass Parameter class type used on the following converters\n# @param knobCreator Function that creates a knob. Function parameters: knobHolder, parameter, knobName, knobLabel\n# @param toKnobConverter Function that sets the value of knobs for a given parameter. Function parameters: knobHolder, parameter, knobName\n# @param fromKnobConverter Function that sets a Parameter value from it's knobs. Function parameters: knobHolder, parameter, knobName\ndef registerParameterKnobConverters( paramClass, knobCreator, toKnobConverter, fromKnobConverter ) :\n\t__parameterKnobConverters.append( ( paramClass, knobCreator, toKnobConverter, fromKnobConverter ) )\n\n## Create new knobs on a given knob holder for a given parameter.\n# @param knobHolder The knob holder must support only two methods: knobs() and addKnob()\n# @param parameter That's the IECore.Parameter object which you want a knob for.\n# @param knobName Specifies the knob name for the given parameter object. Child parameters will have knobs named with that same prefix.\ndef createKnobsFromParameter( knobHolder, parameter, knobName = \"parm\" ) :\n\n\tif knobName:\n\t\tknobLabel = IECore.CamelCase.toSpaced( parameter.name )\n\n\tif parameter.presetsOnly :\n\n\t\tknob = nuke.Enumeration_Knob( knobName, knobLabel, [] )\n\t\tknob.setValues( parameter.presetNames() )\n\t\tknob.setValue( parameter.getCurrentPresetName() )\n\t\tknobHolder.addKnob( knob )\n\n\telse :\n\n\t\tfor (paramClass,knobCreator,toKnobConverter,fromKnobConverter) in __parameterKnobConverters :\n\n\t\t\tif isinstance( parameter, paramClass ) :\n\t\t\t\tknob = knobCreator( knobHolder, parameter, knobName, knobLabel )\n\t\t\t\tif knob :\n\t\t\t\t\tknobHolder.addKnob( knob )\n\t\t\t\t\ttoKnobConverter( knobHolder, parameter, knobName )\n\t\t\t\tbreak\n\t\telse :\n\n\t\t\tknob = nuke.Text_Knob( knobName, \"Not implemented!\" )\n\t\t\tknobHolder.addKnob( knob )\n\n## Set knob values on a given knob holder for a given parameter.\n# @param knobHolder The knob holder must support only two methods: knobs() and addKnob()\n# @param parameter That's the IECore.Parameter object that contains the value to be set on the knob.\n# @param knobName Specifies the knob name for the given parameter object. 
Child parameters will have knobs named with that same prefix.\ndef setKnobsFromParameter( knobHolder, parameter, knobName = \"parm\" ) :\n\n\tif parameter.presetsOnly :\n\n\t\tknobHolder.knobs()[ knobName ].setValue( parameter.getCurrentPresetName() )\n\n\telse :\n\n\t\tfor (paramClass,knobCreator,toKnobConverter,fromKnobConverter) in __parameterKnobConverters :\n\n\t\t\tif isinstance( parameter, paramClass ) :\n\t\t\t\ttoKnobConverter( knobHolder, parameter, knobName )\n\t\t\t\tbreak\n\n\n## Set parameter values from knobs on a given knob holder.\n# @param knobHolder The knob holder must support only two methods: knobs() and addKnob()\n# @param parameter That's the IECore.Parameter object that will get the knob value.\n# @param knobName Specifies the knob name for the given parameter object. Child parameters will have knobs named with that same prefix.\ndef setParameterFromKnobs( knobHolder, parameter, knobName = \"parm\" ) :\n\n\tif parameter.presetsOnly :\n\t\tparameter.setValue( knobHolder.knobs()[ knobName ].value() )\n\n\telse :\n\n\t\tfor (paramClass,knobCreator,toKnobConverter,fromKnobConverter) in __parameterKnobConverters :\n\n\t\t\tif isinstance( parameter, paramClass ) :\n\t\t\t\tfromKnobConverter( knobHolder, parameter, knobName )\n\t\t\t\tbreak\n\ndef __createCompoundParameterKnob( knobHolder, parameter, knobName, knobLabel ) :\n\n\tif knobLabel:\n\t\tknobHolder.addKnob( nuke.Tab_Knob( knobName, knobLabel, nuke.TABBEGINGROUP) )\n\n\tfor childName, child in parameter.items() :\n\t\tcreateKnobsFromParameter( knobHolder, child, knobName + \"_\" + childName )\n\n\tif knobLabel:\n\t\tknobHolder.addKnob( nuke.Tab_Knob( knobName, knobLabel, nuke.TABENDGROUP) )\n\n\treturn None\n\ndef __compoundParameterToKnob( knobHolder, parameter, knobName ) :\n\n\twith IECore.IgnoredExceptions( KeyError ):\n\t\tcollapsed = parameter.userData()[\"UI\"][\"collapsed\"]\n\t\tknob = knobHolder.knobs()[ knobName ]\n\t\tknob.setValue( collapsed )\n\n\tfor childName, child in parameter.items() :\n\t\tsetKnobsFromParameter( knobHolder, child, knobName + \"_\" + childName )\n\ndef __compoundParameterFromKnob( knobHolder, parameter, knobName ) :\n\n\twith IECore.IgnoredExceptions( KeyError ):\n\t\tknob = knobHolder.knobs()[ knobName ]\n\t\tcollapsed = bool(knob.getValue())\n\t\tif not \"UI\" in parameter.userData() :\n\t\t\tparameter.userData()[\"UI\"] = IECore.CompoundData()\n\t\tparameter.userData()[\"UI\"][\"collapsed\"] = IECore.BoolData(collapsed)\n\n\tfor childName, child in parameter.items() :\n\t\tsetParameterFromKnobs( knobHolder, child, knobName + \"_\" + childName )\n\ndef __createPathParameterKnob( knobHolder, parameter, knobName, knobLabel ) :\n\n\treturn nuke.File_Knob( knobName, knobLabel )\n\ndef __fileSequenceParameterToKnob( knobHolder, parameter, knobName ) :\n\n\tknob = knobHolder.knobs()[ knobName ]\n\tseqPath = nukeFileSequence( parameter.getTypedValue() )\n\tknob.setValue( seqPath )\n\ndef __fileSequenceParameterFromKnob( knobHolder, parameter, knobName ) :\n\n\tseqPath = ieCoreFileSequence( knobHolder.knobs()[knobName].value() )\n\tparameter.setTypedValue( seqPath )\n\ndef __pathParameterToKnob( knobHolder, parameter, knobName ) :\n\n\tknob = knobHolder.knobs()[ knobName ]\n\tknob.setValue( parameter.getTypedValue() )\n\ndef __typedParameterFromKnob( knobHolder, parameter, knobName ) :\n\n\tparameter.setTypedValue( knobHolder.knobs()[knobName].value() )\n\ndef __typedParameterToKnob( knobHolder, parameter, knobName ) :\n\n\tknob = knobHolder.knobs()[ knobName ]\n\tknob.setValue( 
parameter.getTypedValue() )\n\ndef __numericParameterFromKnob( knobHolder, parameter, knobName ) :\n\n\tparameter.setNumericValue( knobHolder.knobs()[knobName].value() )\n\ndef __numericParameterToKnob( knobHolder, parameter, knobName ) :\n\n\tknob = knobHolder.knobs()[ knobName ]\n\tknob.setValue( parameter.getNumericValue() )\n\ndef __createStringParameterKnob( knobHolder, parameter, knobName, knobLabel ) :\n\n\treturn nuke.String_Knob( knobName, knobLabel )\n\ndef __stringParameterFromKnob( knobHolder, parameter, knobName ) :\n\n\tparameter.setTypedValue( knobHolder.knobs()[knobName].getText() )\n\ndef __createIntParameterKnob( knobHolder, parameter, knobName, knobLabel ) :\n\n\treturn nuke.Int_Knob( knobName, knobLabel )\n\ndef __createBoolParameterKnob( knobHolder, parameter, knobName, knobLabel ) :\n\n\tknob = nuke.Boolean_Knob( knobName, knobLabel )\n\tknob.setFlag( nuke.STARTLINE )\n\treturn knob\n\ndef __createDoubleParameterKnob( knobHolder, parameter, knobName, knobLabel ) :\n\n\tknob = nuke.Double_Knob( knobName, knobLabel )\n\tknob.setValue( parameter.getNumericValue() )\n\treturn knob\n\ndef __createStringVectorParameterKnob( knobHolder, parameter, knobName, knobLabel ):\n\n\treturn nuke.Multiline_Eval_String_Knob( knobName, knobLabel )\n\ndef __stringVectorParameterToKnob( knobHolder, parameter, knobName ):\n\n\tknob = knobHolder.knobs()[knobName]\n\tknob.setValue( \"\\n\".join( parameter.getValue() ) )\n\ndef __stringVectorParameterFromKnob( knobHolder, parameter, knobName ) :\n\ttxt = knobHolder.knobs()[knobName].getText()\n\tif len(txt) :\n\t\tvalues = txt.split(\"\\n\")\n\telse:\n\t\tvalues = []\n\tparameter.setValue( IECore.StringVectorData( values ) )\n\ndef __createNumericVectorParameterKnob( knobHolder, parameter, knobName, knobLabel ):\n\n\tknob = nuke.String_Knob( knobName, knobLabel + ' (space separated)' )\n\treturn knob\n\ndef __numericVectorParameterToKnob( knobHolder, parameter, knobName ):\n\n\tknob = knobHolder.knobs()[knobName]\n\tknob.setValue( \" \".join( [str(v) for v in parameter.getValue()] ) )\n\ndef __numericVectorParameterFromKnob( knobHolder, parameter, knobName ) :\n\n\tdataVectorType = type( parameter.getValue() )\n\tdataType = IECore.DataTraits.valueTypeFromSequenceType( dataVectorType )\n\tvalues = knobHolder.knobs()[knobName].getText().strip()\n\tif len(values) :\n\t\tdataValues = [dataType(v) for v in values.split()]\n\t\tparameter.setValue( dataVectorType( dataValues ) )\n\telse :\n\t\tparameter.setValue( dataVectorType() )\n\n# \\todo: Implement data types V2f, V3f, Color...\n# \\todo: Implement vector data types: V2f, V3f...\nregisterParameterKnobConverters( IECore.CompoundParameter, __createCompoundParameterKnob, __compoundParameterToKnob, __compoundParameterFromKnob )\nregisterParameterKnobConverters( IECore.FileSequenceParameter, __createPathParameterKnob, __fileSequenceParameterToKnob, __fileSequenceParameterFromKnob )\nregisterParameterKnobConverters( IECore.FileNameParameter, __createPathParameterKnob, __pathParameterToKnob, __stringParameterFromKnob )\nregisterParameterKnobConverters( IECore.DirNameParameter, __createPathParameterKnob, __pathParameterToKnob, __stringParameterFromKnob )\nregisterParameterKnobConverters( IECore.StringParameter, __createStringParameterKnob, __typedParameterToKnob, __stringParameterFromKnob )\nregisterParameterKnobConverters( IECore.BoolParameter, __createBoolParameterKnob, __typedParameterToKnob, __typedParameterFromKnob )\nregisterParameterKnobConverters( IECore.IntParameter, __createIntParameterKnob, 
__numericParameterToKnob, __numericParameterFromKnob )\nregisterParameterKnobConverters( IECore.FloatParameter, __createDoubleParameterKnob, __numericParameterToKnob, __numericParameterFromKnob )\nregisterParameterKnobConverters( IECore.DoubleParameter, __createDoubleParameterKnob, __numericParameterToKnob, __numericParameterFromKnob )\nregisterParameterKnobConverters( IECore.StringVectorParameter, __createStringVectorParameterKnob, __stringVectorParameterToKnob, __stringVectorParameterFromKnob )\nregisterParameterKnobConverters( IECore.IntVectorParameter, __createNumericVectorParameterKnob, __numericVectorParameterToKnob, __numericVectorParameterFromKnob )\nregisterParameterKnobConverters( IECore.FloatVectorParameter, __createNumericVectorParameterKnob, __numericVectorParameterToKnob, __numericVectorParameterFromKnob )\nregisterParameterKnobConverters( IECore.DoubleVectorParameter, __createNumericVectorParameterKnob, __numericVectorParameterToKnob, __numericVectorParameterFromKnob )\n","repo_name":"ImageEngine/cortex","sub_path":"python/IECoreNuke/KnobConverters.py","file_name":"KnobConverters.py","file_ext":"py","file_size_in_byte":10148,"program_lang":"python","lang":"en","doc_type":"code","stars":510,"dataset":"github-code","pt":"3"} +{"seq_id":"38264381696","text":"import face_recognition\r\nimport picamera\r\nimport numpy as np\r\nimport RPi.GPIO as GPIO\r\nimport csv\r\nimport time\r\nfrom datetime import datetime\r\nimport cv2\r\nimport io\r\nimport os\r\n\r\nknown_face_encodings = []\r\nnames = []\r\ncamera = picamera.PiCamera()\r\nface_locations = []\r\nface_encodings = []\r\ns1 = 15\r\ns2 = 37\r\nGPIO.setmode(GPIO.BOARD)\r\nGPIO.setup(s1, GPIO.OUT)\r\nGPIO.setup(s2, GPIO.OUT)\r\nGPIO.setup(16, GPIO.OUT)\r\nservo1 = GPIO.PWM(s1, 50)\r\nservo2 = GPIO.PWM(s2, 50)\r\nservo1.start(0)\r\nservo2.start(0)\r\ncamera.resolution = (320, 240)\r\noutput = np.empty((240, 320, 3), dtype=np.uint8)\r\n\r\ndef load_face_encoding(name, file_name):\r\n image = face_recognition.load_image_file(file_name)\r\n face_encoding = face_recognition.face_encodings(image)[0]\r\n known_face_encodings.append(face_encoding)\r\n names.append(name)\r\n\r\ndef load_faces():\r\n with open(\"capture.csv\") as file:\r\n print(\"Loading faces\")\r\n readFile = csv.reader(file, delimiter=\",\")\r\n for each in readFile:\r\n print(each)\r\n load_face_encoding(each[0], \"images/\"+each[1])\r\n\r\ndef getImageList():\r\n with open(\"capture.csv\") as file:\r\n readFile = csv.reader(file, delimiter=\",\")\r\n for each in readFile:\r\n print(each[1])\r\n lastImage = os.path.splitext(each[1])[0]\r\n return lastImage\r\n\r\ndef change_status(status):\r\n with open('status.csv', 'w') as writeFile:\r\n writer = csv.writer(writeFile)\r\n currentDT = datetime.now()\r\n writer.writerow((status, currentDT.strftime(\"%Y-%m-%d %H:%M:%S\")))\r\n\r\ndef status():\r\n with open(\"status.csv\") as file:\r\n readFile = csv.reader(file)\r\n for each in readFile:\r\n eachData = each\r\n print(eachData[0])\r\n return eachData[0]\r\n\r\ndef addImageDB(imageName):\r\n with open(\"capture.csv\", \"a\") as writeFile:\r\n writer = csv.writer(writeFile)\r\n writer.writerow((\"user\", imageName))\r\n\r\ndef addUser(channel):\r\n stream = io.BytesIO()\r\n camera.capture(stream, format='jpeg')\r\n buff = np.fromstring(stream.getvalue(), dtype=np.uint8)\r\n image = cv2.imdecode(buff, 1)\r\n face_cascade = cv2.CascadeClassifier('/home/pi/Desktop/fyp/haarcascade_frontalface_alt.xml')\r\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n faces = 
face_cascade.detectMultiScale(gray, 1.1, 5)\r\n print(len(faces))\r\n if len(faces) == 1:\r\n lastImage = getImageList()\r\n newImage = str(int(lastImage)+1)+\".jpg\"\r\n cv2.imwrite(\"images/\"+newImage, image)\r\n addImageDB(newImage)\r\n GPIO.output(16,GPIO.HIGH)\r\n time.sleep(2)\r\n GPIO.output(16,GPIO.LOW)\r\n\r\ndef recognize(channel):\r\n last_status = status()\r\n output = np.empty((240, 320, 3), dtype=np.uint8)\r\n if last_status == \"LOCK\":\r\n load_faces()\r\n flag = \"True\"\r\n while flag == \"True\":\r\n print(\"Capturing image.\")\r\n camera.capture(output, format=\"rgb\")\r\n face_locations = face_recognition.face_locations(output)\r\n face_encodings = face_recognition.face_encodings(output, face_locations)\r\n for face_encoding in face_encodings:\r\n matches = face_recognition.face_distance(known_face_encodings, face_encoding)\r\n min_distance = min(matches)\r\n if min_distance < 0.6:\r\n change_status(\"UNLOCK\")\r\n print(\"Opening the gates\")\r\n try:\r\n servo1.start(0)\r\n servo2.start(0)\r\n GPIO.output(s1, True)\r\n GPIO.output(s2, True)\r\n servo2.ChangeDutyCycle(7.5)\r\n time.sleep(2)\r\n print(\"servo 1\")\r\n servo1.ChangeDutyCycle(7.5)\r\n time.sleep(2)\r\n print(\"servo 2\")\r\n GPIO.output(s1, False)\r\n GPIO.output(s2, False)\r\n except:\r\n servo1.stop()\r\n servo2.stop()\r\n flag = \"False\"\r\n\r\ndef lockDoor(channel):\r\n last_status = status()\r\n if last_status == \"UNLOCK\":\r\n print(\"Locking Now\")\r\n try:\r\n servo1.start(0)\r\n servo2.start(0)\r\n GPIO.output(s1, True)\r\n GPIO.output(s2, True)\r\n servo2.ChangeDutyCycle(2.5)\r\n time.sleep(2)\r\n print(\"servo 1\")\r\n servo1.ChangeDutyCycle(12.5)\r\n time.sleep(2)\r\n print(\"servo 2\")\r\n GPIO.output(s1, False)\r\n GPIO.output(s2, False)\r\n change_status(\"LOCK\")\r\n except:\r\n servo1.stop()\r\n servo2.stop()\r\n\r\nGPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\r\nGPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\r\nGPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\r\nGPIO.add_event_detect(10,GPIO.RISING,callback=recognize)\r\nGPIO.add_event_detect(12,GPIO.RISING,callback=addUser)\r\nGPIO.add_event_detect(18,GPIO.RISING,callback=lockDoor)\r\n\r\n\r\nmessage = input(\"\")\r\nGPIO.cleanup()\r\n","repo_name":"prayas26/Door-Security-System","sub_path":"code1.py","file_name":"code1.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22741946517","text":"# -*- coding:utf-8 -*-\nimport pymongo\nimport requests as r\nfrom bs4 import BeautifulSoup\nimport json\n\nprint('Start get my data!!!')\n\n### 建立連線MongoDB\nclient = pymongo.MongoClient(\"x.x.x.x\", 27017)\ndb = client['spadeAce']\ncollection = db.awsmongoEttoday\n\n## eToday單一標題新聞擷取網址\n\n### 預設用來比對 \"歷史\" 的裝網址陣列\ndataList = []\n\n### 預設用來比對 \"當天自己\" 的裝網址陣列\ncheckList = []\n\n### 預設用來容納通過檢核1 & 檢合2 的new news\nwholeContent = []\n\n### 宣告全域變數\ndata = []\n\n## Step1 建立空陣列 導入歷史新聞的url\n\n\nwith open('baseEttoday.json', 'r',encoding= 'utf-8') as f:\n try:\n data = json.load(f)\n print(type(data))\n for i in data:\n ### 將每一筆歷史網址放進陣列中,後面要比對\n dataList.append(i['Link']) \n except:\n ### 此腳本第一次執行會有空值所以exception,或著是重複執行但沒有東西\n print('no data')\n\n\n## Method:獲取網頁新聞網址\ndef eTodayNewsUrl(number):\n \n\n for page in range(1,number+1):\n ## get web infomation\n res = r.get('http://sports.ettoday.net/news-list/%E6%A3%92%E7%90%83/MLB/{}'.format(page))\n pageInfomation = 
'http://sports.ettoday.net/news-list/%E6%A3%92%E7%90%83/MLB/{}'.format(page)\n soup = BeautifulSoup(res.text, 'lxml')\n ## 格式讀入\n soupPage = soup.select('.block_content')\n ## 主要網頁 後面要搭配標題網頁\n home = 'http://sports.ettoday.net'\n ## 因為網頁使用類別.block_countent 有數個範圍,故soupPage會是list\n for listNum in range(len(soupPage)):\n ## 每個範圍中因為同一個tag組成一list 所以要再用迴圈一一讀出\n item = soupPage[listNum].select('div > h3 > a')\n for i in item:\n href = i.get('href')\n ## 只有href中開頭為/news/才是需要的單一新聞標題&網址\n if href.startswith('/news/'):\n record =i.text.replace('\\u3000',' ')\n ## 此判定非常非常非常重要,如果新聞的資訊已經在陣列中就不添加進字典or陣列中\n if collection.find_one({'Link':home+href}) == None:\n if home+href not in dataList and home+href not in checkList:\n checkList.append(home+href)\n get_content(home+href)\n\n ## 存字典 {標題:網址}\n\n\n## Method:獲取新聞資訊加進陣列 wholeContent\ndef get_content(url):\n \n NewsContent = {}\n res = r.get(url)\n soup = BeautifulSoup(res.text, 'lxml')\n ## 抓tag資料\n article = soup.select('.story > p')\n title = soup.select('.title')\n titleT = title[0].text.strip()\n date = soup.select('.date')\n dateTime = date[0].text.split('時間:')[1].strip()\n content = ''\n for tag in article:\n content+=tag.text\n ## 裝資料\n NewsContent['Title']=titleT\n NewsContent['Date']=dateTime\n NewsContent['Content']=content\n NewsContent['Link']=url\n wholeContent.append(NewsContent)\n\n print(\"========================================================\")\n print(titleT)\n print(dateTime)\n print(content)\n print(\"========================================================\")\n\n\n### ---------------------程式的起點------------------------------\n\n## 輸入想要搜尋的總頁數\npageTotal = 15 ##input('Page Total: ')\neTodayNewsUrl(pageTotal) \n\n\n## 將文章寫進檔案中 \nprint('New_Record_Count: ' + str(len(wholeContent)))\n### 新舊資料合併\n \nfinalAll = data + wholeContent \n\n## 將文章寫進檔案中 \nwith open('baseEttoday.json', 'w+', encoding = 'utf-8') as f:\n f.write(json.dumps(finalAll, ensure_ascii=False))\n \n \n## 將新資料新增進系統 並關閉mongoDB連線\ntry:\n collection.insert(wholeContent)\nexcept:\n print('cannot do an empty bulk write')\nclient.close()\n","repo_name":"spiraleyeld/Ettoday_Crawler-multi-threading","sub_path":"autoEttodayCrawler.py","file_name":"autoEttodayCrawler.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"28137688404","text":"import logging\nimport os\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom autotrader.datasource.database.app_schema import BASE\n\n\nclass StockDataBaseApp:\n \"\"\"\n Autotrader database client\n \"\"\"\n\n url = 'sqlite:///app.db'\n\n def __init__(self, logger: logging.Logger):\n self.engine = None\n self.session = None\n self.db_session = None\n self.logger = logger\n\n def connect(self):\n \"\"\"\n Connect to the database\n :param database: name of database to connect\n :return: nothing\n \"\"\"\n if not self.session:\n self.engine = create_engine(StockDataBaseApp.url)\n self.db_session = sessionmaker(bind=self.engine)\n self.session = self.db_session()\n\n def is_connected(self):\n \"\"\"\n Checks if database connection is established\n :return: true if connection to database is established otherwise false\n \"\"\"\n return self.session is not None\n\n def create(self):\n \"\"\"\n Creates a database by given name\n :return:\n \"\"\"\n if self.engine:\n if os.path.exists('app.db'):\n os.remove('app.db')\n BASE.metadata.create_all(self.engine)\n return True\n return False\n\n def close(self):\n \"\"\"\n Closes 
current session\n :return:\n \"\"\"\n if self.session:\n self.session.close()\n","repo_name":"SlashGordon/autotrader","sub_path":"autotrader/datasource/database/app_database.py","file_name":"app_database.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6162734245","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCalculates statistics on disease trajectories. In particular, runs Cox\nregression by calling R functions. Also calculates relative risks, errors, and\nrestricted mean survival time.\n\n@author: Chris Hayward\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport pdb\nfrom scipy import stats\n \nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.conversion import localconverter\n\nfrom pipeline_hes.params import params\nfrom pipeline_hes import plot_utils\n\n# IS_PATIENT=True for MI cohort\n# IS_PATIENT=False for Control cohort\n\nHAZARD_COLUMN = 'IS_PATIENT'\n\ndef relative_risk(eP,nP,eC,nC,alpha,numTraces):\n \"\"\"Calculate relative risk and the standard error / CI.\"\"\"\n ratio_p = eP / nP\n ratio_c = eC / nC\n rr = ratio_p / ratio_c\n\n # Sampling distribution of the log(rr)\n se_logrr = np.sqrt((nC-eC)/(nC*eC) + (nP-eP)/(nP*eP))\n \n # Confidence interval\n nrm_z_score = stats.norm.ppf(1-(alpha/2))\n rr_ci = np.exp([np.log(rr)-se_logrr*nrm_z_score,\n np.log(rr)+se_logrr*nrm_z_score])\n \n # Confidence interval (bonferroni corrected)\n nrm_z_score_strict = stats.norm.ppf(1-((alpha/numTraces)/2))\n rr_ci_corrected = np.exp([np.log(rr)-se_logrr*nrm_z_score_strict,\n np.log(rr)+se_logrr*nrm_z_score_strict])\n \n return rr, rr_ci, rr_ci_corrected\n\n\ndef calc_rr(variants_controls, variants_patients, common_traces,\n nC, nP, ctl_name, pat_name):\n \"\"\"Append relative risks to the trajectory dataframe.\"\"\"\n\n # Adding 'pat' and 'ctl' suffixes allows me to RELIABLY figure out which\n # Columns to display when plotting later on\n cols = ['TOTALpat_{}'.format(pat_name), '#{}'.format(pat_name),'%{}'.format(pat_name), \n 'TOTALctl_{}'.format(ctl_name), '#{}'.format(ctl_name), '%{}'.format(ctl_name),\n 'RR','RR_CIl','RR_CIu','RR_CIl_corrected','RR_CIu_corrected']\n # Relative risk for traces in patients\n df_rr = pd.DataFrame(columns=cols).astype(float)\n for i in range(common_traces.shape[0]):\n \n v = common_traces.iloc[i].name\n\n if v in variants_patients.index:\n eP = variants_patients.loc[v]['count']\n df_rr.loc[v,['TOTALpat_{}'.format(pat_name),\n '#{}'.format(pat_name),\n '%{}'.format(pat_name)]] = \\\n [nP, int(eP), 100*eP/nP]\n \n if v in variants_controls.index:\n eC = variants_controls.loc[v]['count']\n df_rr.loc[v,['TOTALctl_{}'.format(ctl_name),\n '#{}'.format(ctl_name),\n '%{}'.format(ctl_name)]] = \\\n [nC, int(eC), 100*eC/nC]\n \n # if this sequence post AMI is also a control sequence, compute RR\n if v in variants_patients.index and v in variants_controls.index:\n rr,rr_ci,rr_ci_corrected = relative_risk(eP,nP,eC,nC,0.05,common_traces.shape[0])\n df_rr.loc[v,['RR','RR_CIl','RR_CIu',\n 'RR_CIl_corrected','RR_CIu_corrected']] = [rr, rr_ci[0], rr_ci[1],\n rr_ci_corrected[0],\n rr_ci_corrected[1]]\n print(df_rr)\n return df_rr\n\n \ndef callR_restricted_mean_surv_time(r_from_pd_df):\n \"\"\"For all subjects following a single trajectory, calculate the RMST.\"\"\"\n\n robjects.r('suppressPackageStartupMessages(library(survRM2))')\n robjects.globalenv['x'] = r_from_pd_df\n\n # tau (truncation time point) defaults to 
min(max(DUR_pat,DUR_ctl))\n formula = \"\"\"res<-rmst2(\n time=x$DUR,\n status=x$Mortality,\n arm=x${},)\"\"\".format(HAZARD_COLUMN)\n try:\n robjects.r(formula)\n except Exception as ex:\n print(ex)\n return None\n\n robjects.r('print(res)')\n r_tmp = robjects.globalenv['res']\n r_dict = dict(zip(r_tmp.names, r_tmp))\n \n # Sanity checks\n if not (np.array(list(r_dict.keys())) == \\\n np.array(['tau', 'note', 'RMST.arm1', 'RMST.arm0', 'unadjusted.result'])).all():\n raise Exception('Unexpected RMST names')\n\n exp_names = np.array(['result', 'rmst', 'rmtl', 'tau', 'rmst.var', 'fit'])\n if not (np.array(r_dict['RMST.arm0'].names) == exp_names).all():\n raise Exception('Unexpected RMST names')\n if not (np.array(r_dict['RMST.arm1'].names) == exp_names).all():\n raise Exception('Unexpected RMST names') \n\n exp_names = np.array(['Est.', 'se', 'lower .95', 'upper .95'])\n if not (np.array(r_dict['RMST.arm0'][1].names) == exp_names).all():\n raise Exception('Unexpected RMST names')\n if not (np.array(r_dict['RMST.arm1'][1].names) == exp_names).all():\n raise Exception('Unexpected RMST names')\n\n # matrix of values:\n if not (np.array(r_dict['unadjusted.result'].names[0]) == \\\n np.array(['RMST (arm=1)-(arm=0)', 'RMST (arm=1)/(arm=0)','RMTL (arm=1)/(arm=0)'], dtype='(0.05):\n return None\n\n print('\\n!!!\\nProportional Hazards Assumption does NOT hold.\\n!!!\\n')\n\n # Fix short durations\n # Causes problems with the flexsurv... \n with localconverter(robjects.default_converter + pandas2ri.converter):\n tmp_dat = hes_part.copy()\n\n # move them all slightly \n tmp_dat['DUR'] = tmp_dat['DUR'] + np.linspace(1e-5,1e-4,tmp_dat.shape[0])\n \n r_from_pd_df = robjects.conversion.py2rpy(tmp_dat.copy())\n \n # FLEXSURV Loop\n flexsurv_hr_ci_min_BIC = callR_flexsurvspline_grid(hes_part,r_from_pd_df,\n covariates)\n\n return flexsurv_hr_ci_min_BIC\n\n\ndef callR_cox(r_from_pd_df,covariates):\n \"\"\"Run a Cox regression model for a single trajectory (trace).\"\"\"\n \n robjects.r('suppressPackageStartupMessages(library(coxme))')\n robjects.r('suppressPackageStartupMessages(library(flexsurv))')\n robjects.r('suppressPackageStartupMessages(library(muhaz))')\n\n robjects.globalenv['x'] = r_from_pd_df\n\n\n # build and run formula\n formula = 'res<-coxme(Surv(DUR, Mortality) ~ {} + (1 | PROCODE), data=x)'.\\\n format(' + '.join(covariates))\n print('Hazard, running formula: {}'.format(formula))\n\n try:\n robjects.r(formula)\n except Exception:\n print('Failed to execute R-Formula: {}'.format(formula))\n return None\n \n robjects.r('summary(res)')\n \n r_tmp = robjects.globalenv['res']\n r_dict = dict(zip(r_tmp.names, r_tmp))\n \n coeffs = r_dict['coefficients']\n # Get the standard error\n robjects.r('se<-sqrt(diag(vcov(res)))')\n coeffs_se = np.array(robjects.globalenv['se'])\n \n # convert SE to CI (95%)\n alpha=0.05\n coeffs_ci = coeffs_se * stats.norm.ppf(1-(alpha/2))\n\n # bonferroni correction\n coeffs_ci_corrected = [np.nan]# coeffs_se * stats.norm.ppf(1-(alpha_strict/2))\n\n return pd.Series(\n ([np.exp(coeffs[0]),\n np.exp(coeffs[0] - coeffs_ci[0]),\n np.exp(coeffs[0] + coeffs_ci[0]),\n np.exp(coeffs[0] - coeffs_ci_corrected[0]),\n np.exp(coeffs[0] + coeffs_ci_corrected[0]),\n np.nan]),\n ('HR', 'HR_CIl', 'HR_CIu',\n 'HR_CIl_corrected', 'HR_CIu_corrected',\n 'HR_old',))\n\n \ndef callR_hazard_ratio(hes_part_DEBUG,\n r_from_pd_df,\n covariates):\n \"\"\"Wrapper function, running a Cox model, and if the PHA fails, instead\n runs a flexible parametric model instead to obtain the hazard ratio for\n 
MI versus matched controls.\"\"\"\n\n # Sanity:\n # ! hazard column MUST be first item\n # .. this means that the output values related to the hazard column will be in position ZERO\n covariates = np.append([HAZARD_COLUMN], covariates[covariates!=HAZARD_COLUMN])\n\n # change to factors\n covariates[covariates==HAZARD_COLUMN] = 'as.factor({})'.format(HAZARD_COLUMN)\n covariates[covariates=='SEX'] = 'as.factor(SEX)'\n\n # COX model\n hr_dat = callR_cox(r_from_pd_df,covariates)\n\n # Return empty if fail\n if hr_dat is None:\n return pd.Series(np.repeat(np.nan,6),\n ('HR', 'HR_CIl', 'HR_CIu',\n 'HR_CIl_corrected', 'HR_CIu_corrected',\n 'HR_old',))\n \n # Check PHA - replace with flexsurv HR value if PHA does not hold\n if params.CHECK_PROP_HAZ_ASSUM:\n flexsurv_hr_cl = callR_check_pha(hes_part_DEBUG,covariates)\n \n # replace Cox HR with FLEXSURV HR if PHA did not hold\n if not (flexsurv_hr_cl is None):\n hr_old = hr_dat['HR'].copy() # for debugging really\n hr_dat = flexsurv_hr_cl\n hr_dat['HR_old'] = hr_old\n \n return hr_dat\n\n\n\ndef select_trace_rows(variants_controls_per_subject,\n variants_patients_per_subject,\n thisVariant):\n \"\"\"From the list of all trajectories (aka, variants) and their stats,\n get the subset of rows for a particular trajectory from both cohorts.\"\"\"\n \n # Get all controls for this variant\n ctl_part = variants_controls_per_subject.loc[\n variants_controls_per_subject['variant'] == thisVariant]\n\n # Get all patients for this variant\n pat_part = variants_patients_per_subject.loc[\n variants_patients_per_subject['variant'] == thisVariant]\n \n # if a sequence doesnt exist, skip\n if ctl_part.shape[0]==0 or pat_part.shape[0]==0:\n return None\n\n # Concat \n hes_part = pd.concat([ctl_part,pat_part]).reset_index(drop=True)\n \n return hes_part\n\n\ndef init_covariates(variants_controls_per_subject,\n variants_patients_per_subject):\n \"\"\"The covariates to be used in the models used to calculate the hazard\n ratio.\"\"\"\n \n covariates_ORIG = np.array([HAZARD_COLUMN,\n 'SEX',\n 'INIT_AGE',\n 'IMD04',\n 'MATCHED_DATE'], dtype=object)\n \n # Remove SEX if only looking at males OR females\n if pd.concat([variants_controls_per_subject['SEX'],\n variants_patients_per_subject['SEX']]).drop_duplicates().shape[0]==1:\n covariates_ORIG = covariates_ORIG[covariates_ORIG!='SEX']\n \n return covariates_ORIG\n\n\ndef enough_variants_have_resulted_in_death(v):\n \"\"\"Check that a sufficient number of individuals have died, enabling a\n calculation of the hazard ratio (events per variable).\"\"\"\n \n num_explanatory_vars = 5 if not (params.ONLY_ONE_SEX in ['M','F']) else 4\n n_limit = 20\n # check that enough subjects have died\n if np.sum(v['Mortality']==1) < (num_explanatory_vars * n_limit):\n return False\n return True\n\n\n# For each non-rare sequence for patients, calc HR\ndef hazard_ratio_per_trace(variants_controls_per_subject,\n variants_patients_per_subject,\n common_traces):\n \"\"\"Calculations the hazard ratio and RMST for each trajectory.\"\"\"\n \n covariates_ORIG = init_covariates(variants_controls_per_subject,\n variants_patients_per_subject)\n\n # Trace crossed with [hr,se_lower,se_upper]\n df_cox = pd.DataFrame(\n index=pd.MultiIndex.from_product([common_traces.index,\n ('HR',\n 'HR_CIl', 'HR_CIu',\n 'HR_CIl_corrected', 'HR_CIu_corrected',\n 'HR_old',\n 'RMST0',\n 'RMST1',\n 'RMST0_CIl',\n 'RMST0_CIu',\n 'RMST1_CIl',\n 'RMST1_CIu',\n 'tau',\n 'p_diff_unadj',)],\n names=('variant', 'stat')),\n dtype=float,\n columns=[HAZARD_COLUMN])\n \n # to save R 
plots\n plot_utils.create_fig_folder()\n \n n_traces = common_traces.shape[0]\n \n for i in range(n_traces): \n thisVariant = common_traces.iloc[i].name\n print('{}\\nHazard for: {}'.format('-'*50,thisVariant))\n \n # Get the rows for this trace\n variants_part = select_trace_rows(variants_controls_per_subject,\n variants_patients_per_subject,\n thisVariant)\n \n if variants_part is None:\n continue\n \n n_subs_for_trace = variants_part.shape[0]\n \n print('number of subjects: {}'.format(n_subs_for_trace))\n\n # Handle the coefficients\n covariates = covariates_ORIG.copy()\n \n # If only females OR males are present, then drop SEX from fomula\n # (some N codes are sex specific - e.g. male genitals related N40-N53)\n if variants_part['SEX'].drop_duplicates().shape[0] == 1:\n covariates = covariates[covariates!='SEX']\n \n # Select columns\n variants_part = variants_part[np.append(['DUR','Mortality','PROCODE'],covariates)]\n \n # Convert init date to year only\n variants_part['MATCHED_DATE'] = variants_part['MATCHED_DATE'].dt.year - 2000\n \n # Ensure that there are enough FACTOR choices\n # - Prop-hazard assumption check will fail if not enough events\n if not enough_variants_have_resulted_in_death(variants_part):\n print('!!! Insufficient data size. Skipping (fill with NaN).')\n continue\n\n # convert to R dataframe\n with localconverter(robjects.default_converter + pandas2ri.converter):\n r_from_pd_df = robjects.conversion.py2rpy(variants_part)\n\n # Run COX-Mixed-effects model (with PROCODE as the random effect)\n # also pass in the variants_part df (just for DEUBUGGING only)\n series_hr = callR_hazard_ratio(variants_part,\n r_from_pd_df, covariates)\n \n df_cox.loc[pd.MultiIndex.from_product(\n [(thisVariant,),series_hr.index]),HAZARD_COLUMN] = series_hr.values\n\n # Get RMST\n series_rmst = callR_restricted_mean_surv_time(r_from_pd_df)\n \n if series_rmst is not None:\n df_cox.loc[pd.MultiIndex.from_product(\n [(thisVariant,),series_rmst.index]),HAZARD_COLUMN] = series_rmst.values\n \n #%%\n robjects.r('warnings()')\n\n # Just get the PATIENT Hazard ratio (hazard for AMI)\n # - unstack does a pivot operation (moves the CI...etc multi-index level to the columns)\n # - droplevel(axis=1) removes 'IS_PATIENT' level from MI in columns\n hr = (df_cox.loc[:,[HAZARD_COLUMN,]]).unstack(level=1).droplevel(0,axis=1)\n \n return hr\n\n","repo_name":"multimorbidity-research-leeds/postMI-process-mining-hes","sub_path":"pipeline_hes/trace_stats.py","file_name":"trace_stats.py","file_ext":"py","file_size_in_byte":19935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37389964915","text":"from os.path import dirname, realpath\nfrom sys import argv, exit, path, version_info\n\n# path fun times\npath.insert(0, dirname(realpath(__file__)))\n\nfrom main import exit_print_types, mount\nif len(argv) < 2:\n exit_print_types()\n\nif argv[1] == '--install-desktop-entry':\n from main import create_desktop_entry\n prefix = None if len(argv) < 3 else argv[2]\n create_desktop_entry(prefix)\n exit(0)\n\nif argv[1] in {'-V', '--version'}:\n # this kinda feels wrong...\n from __init__ import __version__\n pyver = '{0[0]}.{0[1]}.{0[2]}'.format(version_info)\n if version_info[3] != 'final':\n pyver += '{0[3][0]}{0[4]}'.format(version_info)\n # this should stay as str.format so it runs on older versions\n print('ninfs v{0} on Python {1} - https://github.com/ihaveamac/ninfs'.format(__version__, pyver))\n 
exit(0)\n\nexit(mount(argv.pop(1).lower()))\n","repo_name":"sgnls/ninfs","sub_path":"ninfs/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"72611338960","text":"import re\nimport sys\n\n# 정수랑 문자열 조심\nT = int(input())\n\nfor _ in range(T):\n sentence = sys.stdin.readline().split()\n reverse = sentence[0][::-1]\n for word in sentence[1:]:\n reverse += (' ' + word[::-1])\n print(reverse)\n","repo_name":"junwork123/How-To-Code","sub_path":"Algorithm/9093_WordReversal/9093_WordReversal.py","file_name":"9093_WordReversal.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"43799347293","text":"# 프로그래머스 주식가격\n# https://school.programmers.co.kr/learn/courses/30/lessons/42584\n\ndef solution(prices):\n answer = []\n \n for i in range(len(prices)-1):\n sec = 0\n for j in range(i+1, len(prices)):\n if prices[i] > prices[j]:\n answer.append(sec+1)\n break\n else:\n sec += 1\n \n else:\n answer.append(sec)\n \n answer.append(0)\n \n return answer","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week23_230629/pg42584_주식가격/pg42584_최은비.py","file_name":"pg42584_최은비.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"10968906043","text":"import pyttsx3\nfrom pathlib import Path\nfrom datetime import datetime, timedelta\n\nfrom src.config import ConfigReader, CONFIG_PATH\nfrom src.greeting import greet\nfrom src.interface import initial_setup\n\n\ndef main():\n path_to_config = Path(__file__).parent.joinpath(CONFIG_PATH).resolve()\n\n engine = pyttsx3.init()\n config = ConfigReader(path_to_config, engine)\n\n if config.is_primary_start:\n initial_setup(engine, config)\n config.is_primary_start = False\n else:\n engine.setProperty(\"voice\", config.voice)\n engine.setProperty(\"volume\", config.volume)\n engine.setProperty(\"rate\", config.rate)\n\n now = datetime.now()\n if config.last_time == datetime.min:\n delta_time = timedelta.min\n else:\n delta_time = now - config.last_time\n\n if delta_time == timedelta.min or delta_time > timedelta(hours=4):\n greet(engine, config, delta_time)\n\n config.last_time = now\n \n config.save()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"jaka2005/good-morning-vietnam","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19279034365","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('goals', '0003_goal_creator'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='goal',\n name='category',\n field=models.CharField(max_length=3, null=True, choices=[(b'FIN', b'Finance'), (b'FIT', b'Fitness'), (b'LEA', b'Learning'), (b'PER', b'Personal')]),\n ),\n ]\n","repo_name":"rachelleahklein/woohoo-list","sub_path":"goals/migrations/0004_goal_category.py","file_name":"0004_goal_category.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37402486947","text":"import math\nimport os\n\nimport numpy as np\nfrom PIL import 
Image\n\nnp.random.seed(404)\n\n\nclass Neuron:\n def __init__(self, img):\n self.img = img\n self.w = np.random.rand(900)\n\n def out(self):\n return self.activation(sum(self.img * self.w))\n\n def activation(self, out, threshold=0):\n if out > threshold:\n return 1\n else:\n return 0\n\n\nclass Layer:\n def __init__(self, n=33, l=0.1):\n self.neurons = []\n self.n = n\n self.l = l\n for i in range(n):\n self.neurons.append(Neuron(np.zeros(900)))\n\n def fit(self, img):\n for i in range(self.n):\n self.neurons[i].img = img\n\n def update_w(self, error):\n for i in range(len(self.neurons)):\n for j in range(len(self.neurons[i].w)):\n self.neurons[i].w[j] += self.l * error[i] * self.neurons[i].img[j]\n\n def predict(self):\n result = []\n for n in self.neurons:\n result.append(n.out())\n return np.asarray(result)\n\n\nnetwork = Layer()\nfor epoch in range(10):\n print(epoch)\n for img in os.listdir(\"Name/\"):\n image = np.array(Image.open(\"Name/\" + img).convert(\"L\"))\n image[image < 255] = 1\n image[image == 255] = 0\n network.fit(image.flatten())\n pred = network.predict()\n target = np.zeros(33)\n target[ord(img[0]) - 1040] = 1\n print()\n print(\"target= \" + chr(target.argmax() + 1040))\n print(target)\n print(len(target))\n print(\"prediction= \" + chr(pred.argmax() + 1040))\n print(pred)\n print(len(pred))\n error = target - pred\n network.update_w(error)\n\nprint()\nimage = np.array(Image.open(\"Test.png\").convert(\"L\"))\nimage[image < 255] = 1\nimage[image == 255] = 0\nnetwork.fit(image.flatten())\np = network.predict()\nprint(\"prediction= \" + chr(p.argmax() + 1040))\n","repo_name":"SudoDobryak/AI_LR","sub_path":"LR2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43970302101","text":"class Calculator:\n num = 100 # class variables\n\n def __init__(self,a,b):\n self.a = a\n self.b = b\n print(\"I am called automatically when an object is created\")\n\n def getData(self):\n print(\"I am executing methods inside class\")\n\n def Summation(self):\n return self.a + self.b + self.num\n\nobj = Calculator(2,3)\nobj.getData()\nprint(obj.Summation())\n\nobj1 = Calculator(4,5)\nobj1.getData()\nprint(obj1.Summation())","repo_name":"ter031/GitDemo","sub_path":"OopsDemo.py","file_name":"OopsDemo.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39829579716","text":"# reuturns the maximum num of fish \n# 0 means no travel, others mean yes\n# whenever we see a nonzero num, we dfs; have a boolean value\nclass Solution:\n def findMaxFish(self, grid: List[List[int]]) -> int:\n result = 0\n m, n = len(grid), len(grid[0])\n # visit = ([False]*n for i in range(m))\n for i in range(m): \n for j in range(n): \n if grid[i][j]!=0: \n result = max(result, self.dfs(i, j, grid))\n return result\n \n \n\n def dfs(self, i: int, j: int, grid: List[List[int]]) -> int:\n # base case\n if i<0 or i>=len(grid) or j<0 or j>=len(grid[0]) or grid[i][j]==0: \n return 0\n total = grid[i][j] \n grid[i][j] = 0\n total += self.dfs(i-1, j, grid) + self.dfs(i+1, j, grid) + self.dfs(i, j-1, grid) + self.dfs(i, j+1, grid)\n return total\n","repo_name":"MaTasty/Grind","sub_path":"findMaxFish.py","file_name":"findMaxFish.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"39759820028","text":"import pandas as pd\n\ndef columnselector(csvpath, outputpath):\n # Open Data File\n try:\n data = pd.read_csv(csvpath)\n print(\"Data Loaded!\")\n df = pd.DataFrame({'id': data['id'], 'answer_count': data['answer_count'],\n 'comment_count': data['comment_count'], 'creation_date': data['creation_date'],\n 'last_activity_date': data['last_activity_date'], 'last_edit_date': data['last_edit_date'],\n 'view_count': data['view_count'], 'accepted_answer_id': data['accepted_answer_id'],\n 'score': data['score']})\n try:\n df.to_csv(outputpath, index=False)\n except PermissionError:\n print(\"Output File Permission Error\")\n except PermissionError:\n print(\"Input File Permission Error.\")\n\n\nfor i in range(9):\n columnselector(r\"G:\\References\\MS1\\Fall2017\\CSE6242\\Project\\Pogo\\TagsGraph\\InputCSVFraction\\questions\"+str(i)+\".csv\"\n , r\"DesiredColumnsOnly\\output\"+str(i)+\".csv\")\n print(\"{}th file done!\".format(i))\n","repo_name":"maneetgoyal/pogo-a-virtual-guide","sub_path":"LDAPreProcessor/columnselector.py","file_name":"columnselector.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42542014319","text":"import numpy as np\nfrom sklearn.metrics import accuracy_score\ndef get_one_hot(arr,num_classes):\n res = np.eye(num_classes)[arr]\n return res\n\n# independent validation\ndef independent_test(model,path,x_train,x_test,y_train,y_test,batch_size):\n model.load_weights(path)\n y_test = get_one_hot(y_test,5)\n y_train = get_one_hot(y_train,5)\n model.evaluate(x_test,y_test,batch_size=batch_size)\n model.evaluate(x_train,y_train,batch_size=batch_size)\n\n# classification_report\nfrom sklearn.metrics import classification_report\ndef classification_reports(model,x_test,y_test):\n y_pred = model.predict_classes(x_test)\n y_true = y_test\n labels = [0,1,2,3,4]\n target_names = ['Cytoplasm','Endoplasmic_reticulum','Extracellular_region','Mitochondria','Nucleus']\n print(classification_report(y_true,y_pred,labels=labels,target_names = target_names,digits=3))\n\n# confusion matrix\nfrom sklearn.metrics import confusion_matrix,accuracy_score\ndef confusion_mmatrix(model,x_test,y_test):\n y_pred = model.predict_classes(x_test)\n y_true = y_test\n print(np.array(y_true).shape)\n print(np.array(y_pred).shape)\n print('confusion_matrix:'+'\\n'+str(confusion_matrix(y_true,y_pred)))\n\n# calculate accuracy\ndef calculate_accuracy(model,x_test,y_test):\n target_names = ['Cytoplasm','Endoplasmic_reticulum','Extracellular_region','Mitochondria','Nucleus']\n y_pred = model.predict_classes(x_test)\n y_true = y_test\n label_0 = 0\n label_true_0 = 0\n label_1 = 0\n label_true_1 = 0\n label_2 = 0\n label_true_2 = 0\n label_3 = 0\n label_true_3 = 0\n label_4 = 0\n label_true_4 = 0\n for i in range(len(y_true)):\n if y_true[i] == 0:\n label_0 += 1\n if y_true[i] == 0 and y_pred[i] == 0:\n label_true_0 += 1\n\n if y_true[i] == 1:\n label_1 += 1\n if y_true[i] == 1 and y_pred[i] == 1:\n label_true_1 += 1\n\n if y_true[i] == 2:\n label_2 += 1\n if y_true[i] == 2 and y_pred[i] == 2:\n label_true_2 += 1\n\n if y_true[i] == 3:\n label_3 += 1\n if y_true[i] == 3 and y_pred[i] == 3:\n label_true_3 += 1\n\n if y_true[i] == 4:\n label_4 += 1\n if y_true[i] == 4 and y_pred[i] == 4:\n label_true_4 += 1\n\n print(target_names[0]+':'+str(label_true_0/label_0))\n print(target_names[1]+':'+str(label_true_1/label_1))\n print(target_names[2]+':'+str(label_true_2/label_2))\n 
print(target_names[3]+':'+str(label_true_3/label_3))\n print(target_names[4]+':'+str(label_true_4/label_4))\n print('accuracy_all: '+str(accuracy_score(y_true,y_pred)))","repo_name":"Thales-research-institute/DeepmRNALoc","sub_path":"utils/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"4258206013","text":"\nimport pandas as pd\nimport os \nimport matplotlib.pyplot as plt\n\npath = 'CTA ridership/'\nfile = 'CTA_-_System_Information_-_List_of__L__Stops.csv'\noutfile = 'GIS/CTA_stop.csv'\ndf= pd.read_csv(path+file)\n\ndf.columns\n'''\nIndex(['STOP_ID', 'DIRECTION_ID', 'STOP_NAME', 'STATION_NAME',\n 'STATION_DESCRIPTIVE_NAME', 'MAP_ID', 'ADA', 'RED', 'BLUE', 'G', 'BRN',\n 'P', 'Pexp', 'Y', 'Pnk', 'O', 'Location', 'Historical Wards 2003-2015',\n 'Zip Codes', 'Community Areas', 'Census Tracts', 'Wards'],\n dtype='object')\n'''\n\ndef latitude(x= df['Location'][1]):\n str_tup = x\n tup = eval(str_tup)\n y = tup[0]\n return y\n\n\ndef longitude(x= df['Location'][1]):\n str_tup = x\n tup = eval(str_tup)\n y = tup[1]\n return y\n\n\ndf['latitude'] =df['Location'].apply(latitude)\ndf['longitude'] =df['Location'].apply(longitude)\n\ndf.to_csv(outfile)","repo_name":"jinsanity07git/ridership_analysis","sub_path":"code/stop_locate.py","file_name":"stop_locate.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14723519200","text":"class Item:\n\n def __init__(self, name, is_beverage=False):\n self.name = name\n self.is_beverage = is_beverage\n\n\nclass Store:\n\n def __init__(self, name, city):\n self.name = name\n self.city = city\n self.city.add_city_store(self)\n self.menu = []\n\n class _StoreItem:\n\n def __init__(self, item, price, stock):\n self.item = item\n self.price = price\n self.stock = stock\n self.sold = 0\n\n def unit_sold(self, units):\n if units <= self.stock_left:\n self.sold += units\n return\n print(f\"{self.item.name} stock not left to sold this many units\")\n\n @property\n def stock_left(self):\n return self.stock - self.sold\n\n def add_item_to_menu(self, item, price, stock):\n self.menu.append(self._StoreItem(item, price, stock))\n\n def sell_item(self, item, sold):\n for menu_item in self.menu:\n if item == menu_item.item.name:\n menu_item.unit_sold(sold)\n\n def get_items_sold(self):\n sold = 0\n for store_item in self.menu:\n sold += store_item.sold\n return sold\n\n\nclass City:\n\n def __init__(self, name, state):\n self.name = name\n self.state = state\n self.state.add_state_city(self)\n self.__stores = []\n\n def add_city_store(self, store):\n self.__stores.append(store)\n\n def get_items_sold(self):\n sold = 0\n for store in self.__stores:\n sold += store.get_items_sold()\n return sold\n\n\nclass State:\n\n def __init__(self, name):\n self.name = name\n self.__cities = []\n\n def add_state_city(self, store):\n self.__cities.append(store)\n\n def get_items_sold(self):\n sold = 0\n for city in self.__cities:\n sold += city.get_items_sold()\n return sold\n\n\ndef driver():\n\n # state\n maharashtra = State('Maharashtra')\n\n # city\n mumbai = City('Mumbai', maharashtra)\n nagpur = City('Nagpur', maharashtra)\n\n # store\n s1_m = Store('s1', mumbai)\n s2_m = Store('s2', mumbai)\n\n s1_n = Store('s1', nagpur)\n s2_n = Store('s2', nagpur)\n\n # items\n sandwich = Item('sandwich')\n poha = Item('poha')\n tea = Item('tea', True)\n\n # 
adding item to store\n s1_m.add_item_to_menu(sandwich, 100, 10)\n s2_m.add_item_to_menu(sandwich, 90, 8)\n\n s1_m.add_item_to_menu(poha, 50, 20)\n s2_m.add_item_to_menu(poha, 40, 30)\n\n s1_n.add_item_to_menu(poha, 50, 20)\n s2_n.add_item_to_menu(poha, 40, 30)\n\n s1_n.add_item_to_menu(tea, 10, 40)\n s2_n.add_item_to_menu(tea, 8, 50)\n\n # selling\n s1_m.sell_item('sandwich', 8)\n s2_m.sell_item('sandwich', 8)\n\n s1_m.sell_item('poha', 20)\n s2_m.sell_item('poha', 23)\n\n s1_n.sell_item('poha', 20)\n s2_n.sell_item('poha', 23)\n\n s1_n.sell_item('tea', 30)\n s2_n.sell_item('tea', 37)\n\n units_sold = maharashtra.get_items_sold()\n print(f\"{maharashtra.name}: {units_sold}\")\n\n units_sold = mumbai.get_items_sold()\n print(f\"{mumbai.name}: {units_sold}\")\n\n units_sold = nagpur.get_items_sold()\n print(f\"{nagpur.name}: {units_sold}\")\n\n\ndriver()\n","repo_name":"akash-codes93/ADT-PYTHON","sub_path":"LLD/Systems/salesManagement/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7245087861","text":"import scrython\nimport pandas as pd\n\n# returns a list of all known sets\nsets = scrython.sets.Sets()\ntemp = {}\nyear = 1993\nindex = 0\ncolumnName = []\n# defaultList = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\ndf = pd.DataFrame()\n# prints a list of all sets. Filters out sets that do not provide black border cards or are online exclusive\n# for set in reversed(sets.data()):\n# if set['set_type'] != 'token' and set['set_type'] != 'minigame' and set['set_type'] != 'memorabilia' and set['set_type'] != 'treasure_chest' and set['set_type'] !='alchemy':\n# setCode = set['code']\n# setYear = int(set['released_at'][0:4])\n# if setYear != year:\n# # set:pust() is used so that a card always returns no matter the set. 
Prevents errors from returning empty searches\n# temp['Earl of Squirrel'] = 2 # Do this last\n# sortedTemp = sorted(temp.items(), key=lambda x:x[1], reverse=True)\n# tempDict = dict(sortedTemp)\n# temp = pd.DataFrame.from_dict(tempDict, orient='index', columns=['{}'.format(year)])\n# #df.insert(temp)\n# #df.to_csv('results/{}.csv'.format(year))\n# print('{} CSV Created'.format(year))\n# year += 1\n# print(setCode, setYear)\n# setCards = scrython.cards.Search(q='set:{},pust -is:onlyprint'.format(setCode))\n# if setCards.data is not None:\n# for card in setCards.data():\n# if card['name'] not in temp:\n# temp[card['name']] = 1\n# else:\n# temp[card['name']] += 1\n\nfor set in reversed(sets.data()):\n if set['set_type'] != 'token' and set['set_type'] != 'minigame' and set['set_type'] != 'memorabilia' and set['set_type'] != 'treasure_chest' and set['set_type'] !='alchemy':\n setCode = set['code']\n setYear = int(set['released_at'][0:4])\n if setYear != year:\n columnName.append(str(year))\n index += 1\n year += 1\n for i in temp:\n temp[i][index] = temp[i][index-1]\n print('Advanced to {}'.format(year))\n print(setCode, setYear)\n setCards = scrython.cards.Search(q='set:{},pust -is:onlyprint'.format(setCode))\n if setCards.data is not None:\n for card in setCards.data():\n if card['name'] not in temp:\n temp[card['name']] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n temp[card['name']][index] = 1\n else:\n temp[card['name']][index] += 1\ncolumnName.append('2023')\ndf = pd.DataFrame.from_dict(temp, orient='index', columns=columnName)\ndf.rename()\n#df.insert(df)\ndf.to_csv('results/results.csv')","repo_name":"sisenberg219/Most-Reprinted-Card","sub_path":"mostreprinted.py","file_name":"mostreprinted.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74676530002","text":"from data.utils import load_data_to_dataframe\nfrom visualization.exploration import get_correlation_plot, get_outliers_plot, get_distribution_plot, get_scatter_plot\n\n\ndef main():\n try:\n dataframe = load_data_to_dataframe(\n data_path=\"../data/raw/kc_house_data.csv\")\n except:\n print(\"It was not possible to read the provided .csv file\")\n exit(0)\n\n # Generates plots \n get_correlation_plot(dataframe)\n get_outliers_plot(dataframe)\n get_distribution_plot(dataframe)\n get_scatter_plot(dataframe, target_variable=dataframe[\"price\"])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JoaoPicolo/Portfolio-HousePrices","sub_path":"src/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71623830800","text":"# I. 
Is it Palindrome?\nn = int(input())\ns = input()\ni = 0\nj = n-1\nwhile i < j:\n if s[i] != s[j]:\n print(\"NO\")\n break\n i = i+1\n j = j-1\nelse:\n print(\"YES\")","repo_name":"M-Sayed939/ProblemSolvinng","sub_path":"Sheet2/ProblemI.py","file_name":"ProblemI.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"45074504222","text":"from django.utils import timezone\nfrom django.db import models\nfrom rest_framework import status\nfrom .camera_utils.camera_enums import PROTOCOL_CHOICES\nfrom ..data_source.models import AIServices\nfrom ..users.models import User, UserRoles\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\n\n# Create your models here.\n\n\nclass Camera(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"camera_user\")\n camera_name = models.CharField(null=False, max_length=25, default=\"Camera\")\n ip = models.CharField(null=False, max_length=25)\n container_id = models.CharField(null=True, max_length=250, default=None)\n port = models.IntegerField(null=False)\n stream_url = models.CharField(max_length=250, default=None, null=True)\n camera_feed_url = models.CharField(max_length=250, default=None, null=True)\n user_role = models.ForeignKey(UserRoles, on_delete=models.SET_NULL, related_name=\"camera_user_role\", null=True)\n service_type = models.ForeignKey(AIServices, on_delete=models.SET_NULL, related_name=\"service_type\", null=True)\n protocol = models.CharField(max_length=4, choices=PROTOCOL_CHOICES, null=True)\n threats = models.JSONField(null=True)\n lat = models.DecimalField(max_digits=20, decimal_places=17, default=0.0)\n lng = models.DecimalField(max_digits=20, decimal_places=17, default=0.0)\n additional_parameters = models.CharField(max_length=500, null=True, blank=True)\n out_port = models.IntegerField(\n unique=True,\n default=None,\n null=True,\n validators=[MaxValueValidator(9999), MinValueValidator(8888)]\n )\n streaming_port = models.IntegerField(\n unique=True,\n null=True,\n default=None,\n validators=[MaxValueValidator(9999), MinValueValidator(8888)]\n )\n ai_streaming_port = models.IntegerField(\n unique=True,\n null=True,\n default=None,\n validators=[MaxValueValidator(9999), MinValueValidator(8888)]\n )\n docker_port_ml = models.IntegerField(\n unique=True,\n null=True,\n default=None,\n validators=[MaxValueValidator(5500), MinValueValidator(5400)]\n )\n is_drone_camera = models.BooleanField(null=True, default=False)\n active_status = models.BooleanField(null=True, default=True)\n is_stream = models.BooleanField(null=True, default=False)\n is_live_stream = models.BooleanField(default=False)\n username = models.CharField(max_length=30,null=True,blank=True)\n password = models.CharField(max_length=128,null=True,blank=True)\n location = models.CharField(max_length=228, null=True)\n created_at = models.DateTimeField(auto_created=True, auto_now_add=True, null=False)\n updated_at = models.DateTimeField(auto_now_add=True, null=False)\n is_deleted = models.BooleanField(default=False)\n\n @classmethod\n def update_cam_urls(cls, ip, stream_url, container_id , camera_feed_url):\n \"\"\"\n\n :param container_id:\n :param ip:\n :param stream_url:\n :return:\n \"\"\"\n update_status = False\n exception_message = \"\"\n camera_object = \"\"\n try:\n obj = Camera.objects.filter(ip=ip).first()\n obj.stream_url = stream_url\n obj.camera_feed_url = camera_feed_url\n obj.container_id = container_id\n 
obj.save()\n camera_object = obj\n update_status = True\n except Exception as e:\n exception_message = str(e)\n finally:\n return update_status, exception_message, camera_object\n\n @classmethod\n def update_cam_status(cls, ip, cam_status):\n \"\"\"\n\n :param ip:\n :param cam_status:\n :return:\n \"\"\"\n update_status = False\n exception_message = \"\"\n camera_object = \"\"\n aix_response = dict(statusMessage=\"\", data=[], errorStatus=False, statusCode=200)\n try:\n obj = Camera.objects.filter(ip=ip).first()\n if obj:\n if cam_status:\n obj.active_status = True\n elif not cam_status:\n obj.active_status = False\n\n obj.is_stream = False\n obj.is_live_stream = False\n obj.updated_at = timezone.now()\n obj.save()\n camera_object = obj\n update_status = True\n else:\n aix_response.update(\n {\n \"statusMessage\": \"Record not found\",\n \"data\": [],\n \"statusCode\": status.HTTP_400_BAD_REQUEST,\n \"errorStatus\": True,\n }\n )\n\n except Exception as e:\n exception_message = str(e)\n finally:\n return update_status, exception_message, camera_object\n\n @classmethod\n def update_cam_detail(cls, ip, data):\n \"\"\"\n\n :param ip:\n :param data:\n :return:\n \"\"\"\n update_status = False\n exception_message = \"\"\n camera_object = \"\"\n try:\n data[\"service_type\"] = AIServices.objects.filter(pk=data.get(\"service_type\")).first()\n Camera.objects.filter(ip=ip).update(**data)\n camera_object = Camera.objects.get(ip=ip)\n update_status = True\n except Exception as e:\n exception_message = str(e)\n finally:\n return update_status, exception_message, camera_object\n\n @classmethod\n def delete_cam(cls, ip):\n \"\"\"\n FOr deleting cam\n :param ip:\n :return:\n \"\"\"\n delete_status = False\n exception_message = \"\"\n cam_obj = None\n aix_response = dict(statusMessage=\"\", data=[], errorStatus=False, statusCode=200)\n try:\n obj = Camera.objects.filter(ip=ip).first()\n if obj:\n cam_obj = obj\n obj.delete()\n delete_status = True\n else:\n aix_response.update(\n {\n \"statusMessage\": \"Camera record not found\",\n \"errorStatus\": False,\n \"data\": [],\n \"statusCode\": status.HTTP_200_OK,\n }\n )\n except Exception as e:\n exception_message = str(e)\n finally:\n return delete_status, exception_message, cam_obj\n\n @classmethod\n def get_camera_uri(cls, ip, feed_type):\n \"\"\"\n Getter for camera feed URI\n :param feed_type:\n :param ip:\n :return:\n \"\"\"\n fetch_status = False\n exception_message = \"\"\n stream_uri = \"\"\n aix_response = dict(statusMessage=\"\", data=[], errorStatus=False, statusCode=200)\n try:\n obj = Camera.objects.filter(ip=ip).first()\n if obj:\n stream_uri = obj.stream_url\n fetch_status = True\n else:\n aix_response.update(\n {\n \"statusMessage\": \"Camera record not found\",\n \"errorStatus\": False,\n \"data\": [],\n \"statusCode\": status.HTTP_200_OK,\n }\n )\n except Exception as e:\n exception_message = str(e)\n finally:\n return fetch_status, exception_message, stream_uri\n\n\nclass CameraResponse(models.Model):\n camera = models.ForeignKey(\n Camera, on_delete=models.CASCADE, related_name=\"cameraresponse_camera\"\n )\n detections = models.JSONField(null=False)\n created_at = models.DateTimeField(auto_created=True, auto_now_add=True, null=False)\n updated_at = models.DateTimeField(auto_now_add=True, null=False)\n is_deleted = models.BooleanField(default=False)\n\n# class Protocol(models.Model):\n# \"\"\"\n# For defining services\n# \"\"\"\n#\n# id = models.IntegerField(primary_key=True)\n# protocol_key = 
models.PositiveSmallIntegerField(blank=True, null=True)\n# protocol_name = models.CharField(blank=True, null=True, max_length=40)\n#\n# @classmethod\n# def get_protocol(cls, protocol_key=None):\n# return get_object_or_404(Protocol, protocol_key=protocol_key)\n","repo_name":"AliCheema77/aix_project","sub_path":"apps/camera/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5354106704","text":"# in Python the type is connected to the object, it is not possible to change it\n# It is not possible to convert a variable or object from a type to an other\n# In Python the conversion doesn't modify the original object but create new objects starting from objects \n\nmylist = [1, 2, 3, 2, 1] # define a list\nmyset = set(mylist) # create a set from the list created\nmyset #{1, 2, 3} # unique values, no copies\nmylist #[1, 2, 3, 2, 1] # the list is still here\n\nmystr = '3.14' # create a string\nmyfloat = float(mystr) # transform str in float\nmyfloat #3.14\nmystr #'3.14' # mystr is still here\n\nmylist = [('a', 1), ('b', 2), ('c', 3)] # new list\ntype(mylist) #\nmydict = dict(mylist) # create a dictionary from a list\nmydict #{'a': 1, 'b': 2, 'c': 3}\ntype(mydict) #\n\nint('un milione') # the string 'un milione' is not convertible --> error\n#Traceback (most recent call last):\n# File \"\", line 1, in \n#ValueError: invalid literal for int() with base 10: 'un milione'\nint([1, 2, 3]) # list is not a valid type\n#Traceback (most recent call last):\n# File \"\", line 1, in \n#TypeError: int() argument must be a string, a bytes-like object or a real number, not 'list'\n\nmylist1 = [1, 2, 3] # create a list\nmylist2 = list(mylist1) # new list which is a copy of the first list\nmylist2 #[1, 2, 3]\nmylist1.append(4) # add one element to mylist1\nmylist1 #[1, 2, 3, 4]\nmylist2 #[1, 2, 3] # mylist2 is still the same\n\n\n\n","repo_name":"danilonastasi/python_tutorial","sub_path":"Py_code_convert_types.py","file_name":"Py_code_convert_types.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10713199949","text":"from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n\n#the path of images to apply augmentation on them\nimages_path='train'\n#create an instance of ImageDataGenerator\ndatagen = ImageDataGenerator(width_shift_range=0.2,\n height_shift_range=0.2, featurewise_std_normalization=True)\ndatagen.flow_from_directory(directory=images_path, target_size=(480,752),color_mode='grayscale', class_mode=None, save_to_dir='saved',save_prefix='keras_')\nimg = load_img('train/images/photon10.png')\ndatagen.fit(img)\nx = img_to_array(img)\nx = x.reshape((1,) + x.shape)\ni = 0\nfor batch in datagen.flow(x, batch_size=32,\n save_to_dir='saved', save_prefix='tut', save_format='png'):\n i += 1\n if i > 20:\n break\n","repo_name":"SarahMestiri/computervision","sub_path":"CNN/data_augment.py","file_name":"data_augment.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42291228473","text":"'''\nKMP algorithm matches a pattern inside a text. In normal brute force algorithm the complexity is O(n*m), where n is\nthe length of the text and m is the length of the pattern. 
But KMP algorithm can do this in O(n+m). How? KMP algorithm\nkeeps tracks of the matched suffix and prefix by using a lps array. Now what is a lps array? A lps array keeps tracks\nof the number of matched suffix and prefix found up to the current index.\n\nVideo Link: https://www.youtube.com/watch?v=sMARZCTPyNI\n\n'''\n\n\n\n\ndef lps(pattern):\n i=0;j=1;l=len(pattern)\n lps=[0 for i in range(l)]\n \n while(j} proxy for\n\tremote servers. This works similar to the xmlrpc proxy, but uses Perspective\n\tBroker and therefore sucks less.\n\t\n\tB{NOTE:} Most of the time, you can use the L{Network} API\n\tfor calling other nodes on the cluster. See\n\tL{network.I{call_remote}}, \t\n\tL{network.I{call_nodes}}, \t\n\tL{network.I{call_broker}}, and\n\tL{network.I{call_all}}\n\t\n\tIf needed however, you can use this class in standalone modules.\n\t\n\tExample::\n\t\t\n\t\thostname = 'cicero'\n\t\tpb_port = 5053\n\t\tproxy = SimplePBProxy(hostname, pb_port)\n\t\tdeferred_object = proxy.callRemote('admin.status')\n\t\n\tB{NOTE:} Unlike the default Perspective Broker behavior, this doesn't wig out\n\tif the connection is lost. Deferred objects simple won't be returned until\n\tthe remote server comes back up. Likewise, if the remote server isn't yet up,\n\tthe deferred will simply be held open until the remote box is up.\n\t\"\"\"\n\tdef __init__(self, host, port, app=None):\n\t\t\"\"\"\n\t\t@param host: Host to connect to.\n\t\t@rtype host: String\n\t\t\n\t\t@param port: Port PB is on.\n\t\t@rtype port: Integer\n\t\t\n\t\t@param app: Application instance, if any. This would be used to make local\n\t\tcalls locally, without going over the network.\n\t\t@rtype app: Instance\n\t\t\"\"\"\n\t\tself.app = app\n\t\tself.host = host\n\t\tself.port = port\n\t\tself.pending_calls = []\n\n\t\t#\n\t\t# See if this is a local connection\n\t\tif app and host == self.app.host and port == self.app.pb_port:\n\t\t\tself.local = True\n\t\t\tself.rootobj = self.app.api\n\t\telse:\n\t\t\tself.local = False\n\t\t\tself.rootobj = None\n\t\tself.connect()\n\n\tdef connect(self):\n\t\t\"\"\"\n\t\tUsed internally. You can safely ignore this.\n\t\t\"\"\"\n\t\tdef handle_error(failure):\n\t\t\treactor.callLater(1, self.connect)\n\t\tfactory = pb.PBClientFactory()\n\t\tfactory.unsafeTracebacks = 1\n\t\tip = aztk_config.setup.get(\"interfaces\", self.host)\n\t\treactor.connectTCP(ip, self.port, factory)\n\t\td = factory.getRootObject()\n\t\td.addCallback(self._set_root_object)\n\t\td.addErrback(handle_error)\n\t\treturn d\n\n\tdef _set_root_object(self, rootobj):\n\t\tself.rootobj = rootobj\n\t\t\n\t\tdef pending_act(data, deferred):\n\t\t\tdeferred.callback(data)\n\t\tdef pending_err(failure, deferred):\n\t\t\tdeferred.errback(failure)\n\n\t\tfor deferred, method, args, kwargs in self.pending_calls:\n\t\t\td = self.callRemote(method, *args, **kwargs)\n\t\t\td.addCallback(pending_act, deferred)\n\t\t\td.addErrback(pending_err, deferred)\n\t\tself.pending_calls = []\n\n\tdef callRemote(self, method, *args, **kwargs):\n\t\t\"\"\"\n\t\tCall method on remote API. Method is a string and may include a package\n\t\tname, such as 'admin.uptime'. 
Any additional arguments and keyword arguments\n\t\tare passed to that method as arguments and keyword arguments.\n\t\t\"\"\"\n\t\tif not self.rootobj:\n\t\t\td = Deferred()\n\t\t\tself.pending_calls.append((d, method, args, kwargs))\n\t\t\tself.connect()\n\t\t\treturn d\n\n\t\tapi, method_name = method.split('.')\n\t\tapi = api.lower()\n\t\tif self.local:\n\t\t\treturn maybeDeferred(getattr(getattr(self.app.api, api), method_name), *args, **kwargs)\n\t\telse:\n\t\t\ttry:\n\t\t\t\td = self.rootobj.callRemote('api', method, *args, **kwargs)\n\t\t\t\td.addErrback(self._error_back, method, args, kwargs)\n\t\t\t\treturn d\n\t\t\texcept pb.DeadReferenceError:\n\t\t\t\tself.rootobj = None\n\t\t\t\td = Deferred()\n\t\t\t\tself.pending_calls.append((d, method, args, kwargs))\n\t\t\t\tself.connect()\n\t\t\t\treturn d\n\t\n\tdef _error_back(self, failure, method, args, kwargs):\n\t\tif failure.type is twisted.spread.pb.PBConnectionLost and 'BananaError' not in failure.getErrorMessage():\n\t\t\tself.rootobj = None\n\t\t\td = Deferred()\n\t\t\tself.pending_calls.append((d, method, args, kwargs))\n\t\t\tself.connect()\n\t\t\treturn d\n\t\telse:\n\t\t\treturn failure\n","repo_name":"kordless/zoto-server","sub_path":"aztk/lib/SimplePBProxy.py","file_name":"SimplePBProxy.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"13857530189","text":"import sys\n\ndef LinearSpaceAlignment(top, bottom, left, right):\n global protein1, protein2\n global prevScores, currScores\n global align1, align2\n \n if left == right:\n align1 += protein1[top:bottom]\n align2 += ['-']*(bottom - top)\n #align1.extend(list(protein1[top:bottom]))\n #align2.extend(['-']*(bottom - top))\n return -sigma * (bottom - top)\n\n if top == bottom:\n align1.extend(['-']*(right-left))\n align2.extend(list(protein2[left,right]))\n return -sigma * (right - left)\n\n middle = int((left+right)/2)\n midEdge = MiddleEdge(top, bottom, left, right)\n midNode = midEdge[0]\n linearSpaceAlignment(top, midNode, left, middle)\n\n if midEdge[0] == midEdge[2] and midEdge[1] + 1 == midEdge[3]:\n align1 += '-'\n align2 += protein2[midEdge[1]]\n if midEdge[0] + 1 == midEdge[2] and midEdge[1] == midEdge[3]:\n align1 += protein1[midEdge[0]]\n align2 += '-'\n else:\n align1 += protein1[midEdge[0]]\n align2 += protein2[midEdge[1]]\n\n linearSpaceAlignment(middleEdge[2], bottom, middleEdge[3], right)\n return middleEdge\n\ndef Score(v, w):\n global sigma\n global score\n\n n = len(v) + 1\n m = len(w) + 1\n\n prevScores = [0] * (len1 + 1)\n currScores = [0] * (len1 + 1)\n currScores[0] = -sigma\n\n for j in range(m):\n prevScores[j] = -sigma * j\n\n for i in range(1, n):\n currScores[0] = -sigma * i\n for j in range(1, m):\n p = score[order[v[i-1]]][order[w[j-1]]]\n topScore = currScores[j-1] - sigma\n leftScore = prevScores[j] - sigma\n diagScore = prevScores[j-1] + p\n currScores[j] = max(topScore, leftScore, diagScore)\n prevScores = currScores[:]\n\n return prevScores\n\ndef MiddleEdge(v, w):\n middle = int(len2/2)\n i = 0\n j = middle\n k = 0\n l = middle + 1\n\n v1 = v[:j]\n w1 = w[:]\n v2 = v[j+1::-1]\n w2 = w[::-1]\n\n fromSource = Score(v1,w1)\n toSink = Score(v2,w2)\n toSink = toSink[::-1]\n\n score1 = [fromSource[i] + toSink[i] - sigma for i in range(len2)]\n diagnal = [score[order[v[j]]][order[w[i-1]]] for i in range(1, len2)]\n score2 = [fromSource[i-1] + toSink[i] + diagnal[i-1] for i in range(1, len2)]\n\n max1 = max(score1)\n max2 = max(score2)\n 
maxS = max(max1, max2)\n if maxS == max1:\n i = score1.index(maxS)\n k = score1.index(maxS)\n else:\n i = score2.index(maxS) - 1\n k = score2.index(maxS)\n\n return (i, j, k, l)\n\nif __name__ == \"__main__\":\n\n file = open(sys.argv[1], 'r')\n matrix = open(sys.argv[2], 'r')\n\n protein1 = file.readline().strip()\n protein2 = file.readline().strip()\n\n sigma = 5\n\n order = {}\n row = matrix.readline().strip().split()\n for i in range(len(row)):\n order[row[i]] = i\n\n score = []\n matrix = matrix.readlines()\n matrix = [row.strip().split() for row in matrix]\n for row in matrix:\n score.append([int(a) for a in row[1:]])\n\n len1 = len(protein1)\n len2 = len(protein2)\n\n prevScores = [0] * (len1 + 1)\n cureScores = [0] * (len1 + 1)\n\n #align1 = []\n #align2 = []\n align1 = ''\n align2 = ''\n LinearSpaceAlignment(0, len1, 0, len2)","repo_name":"Shenmolu/rosalind","sub_path":"LinearSpaceAlignment.py","file_name":"LinearSpaceAlignment.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"3848147791","text":"#!/usr/bin/env python2.7\n\nfrom pyshell.runtools import rcdat\nfrom pyshell.emissions import PreprocessedEmissions\nfrom pyshell.optimizer import Congrad\nfrom pyshell.model import RunTM5\nfrom pandas import Timestamp\nimport os\nimport shutil\nimport xarray as xr\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef forward(dconf, **kwargs):\n \"\"\"\n Run a forward TM5 simulation\n :param dconf: omegaconf.DictConfig or dictionary containing basic settings\n More advanced/stable settings are stored in the TM5 rc-file, accessed under the run.rcfile key\n \"\"\"\n\n emclasses = {'CO2': PreprocessedEmissions}\n obs = load_observations(dconf)\n run = setup_tm5(dconf, **kwargs)\n run.SetupObservations(obs)\n run.SetupEmissions(emclasses)\n run.RunForward()\n return run\n\n\ndef optim(dconf, **kwargs):\n \"\"\"\n Do an inversion with TM5\n :param dconf: omegaconf.DictConfig or dictionary containing basic settings\n More advanced/stable settings are stored in the TM5 rc-file, accessed under the run.rcfile key\n \"\"\"\n \n emclasses = {'CO2': PreprocessedEmissions}\n obs = load_observations(dconf)\n run = setup_tm5(dconf, **kwargs)\n run.SetupObservations(obs)\n opt = Congrad(run)\n opt.SetupOptimizer(restart=False, optimized_prior_state=False, emclasses=emclasses)\n opt.Var4D()\n\n return opt\n\n\ndef load_observations(dconf):\n # Load observations\n obs = xr.open_mfdataset(dconf['observations']['filename'])\n obs.load()\n errmin = float(dconf['observations'].get('err_min', 0.5))\n err = obs.err_obs.values\n err[err < errmin] = errmin\n err *= dconf['observations'].get('error_factor', 1.)\n obs.err_obs.values[:] = err\n return obs\n\n\ndef setup_tm5(dconf, **kwargs):\n rcf = load_rcf(dconf, **kwargs)\n return RunTM5(rcf, dconf)\n\n\ndef load_rcf(dconf, **kwargs):\n \"\"\"\n Load the TM5 rc-file\n :param rc: omegaconf.DictConfig or dictionary\n :return:\n \"\"\"\n rcf = rcdat()\n\n rcf.setkey('my.project', dconf['run']['project'])\n # rcf.setup_meteo_coarsening(rc.meteo.coarsen)\n rcf.readfile(dconf['run']['rcfile'])\n rcf.ti = Timestamp(dconf['run']['start'])\n rcf.tf = Timestamp(dconf['run']['end'])\n rcf.filename = dconf.run.rcfile\n\n # Keys under the \"tm5\" group are needed by TM5 itself (not just pyshell!)\n if 'tm5' in dconf:\n for k, v in dconf.tm5.items():\n logger.info('Set key %s to value %s' % (k, str(v)))\n rcf.setkey(k, v)\n \n # add or overwrite keys with 
command line arguments, passed through --setkey\n for k, v in kwargs.items():\n logger.info('Set key %s to value %s' % (k, str(v)))\n rcf.setkey(k, v)\n \n rcf.substituteTimes()\n rcf.setkey('jobstep.timerange.start', rcf.ti)\n rcf.setkey('jobstep.timerange.end', rcf.tf)\n rcf.setkey('my.run.dir', os.path.abspath(dconf.run.paths.output))\n \n setup_environment(dconf)\n setup_output(dconf, rcf)\n \n if not os.path.exists(dconf.run.paths.output):\n os.makedirs(dconf.run.paths.output)\n rcf = setup_emissions(dconf, rcf)\n\n return rcf\n\n\ndef setup_environment(dconf):\n os.environ['pyshell.rc'] = dconf.run.rcfile\n if dconf['environment'].get('openmp'):\n nthreads = dconf.environment.openmp.nthreads\n os.environ['omp_num_threads'] = '%s'%nthreads\n os.environ['omp_num_threads'.upper()] = '%s'%nthreads\n os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n\n\ndef setup_output(dconf, rcf):\n \"\"\"\n write the output.* keys:\n \"\"\"\n if not dconf.get('output'):\n return\n output_mix = dconf['output'].get('mix')\n if output_mix:\n rcf.setkey('output.mix', True)\n rcf.setkey('output.mix.tstep', output_mix['tstep'])\n rcf.setkey('output.mix.filename.prefix', output_mix['filename_prefix'])\n\n\ndef setup_emissions(dconf, rcf):\n for tracer in dconf.run.tracers :\n for region in dconf.run.regions :\n rcf.setkey('emission.%s.%s.categories' % (tracer, region), len(dconf.emissions[tracer][region]))\n for icat, cat in enumerate(dconf.emissions[tracer][region]):\n # If we don't optimize the category, then it can be specified as just a single key, specifying the temporal resolution\n # (needed for the daily_cycle files). So fill-in dummy values for the rest\n catinfo = dconf.emissions[tracer][region][cat]\n if isinstance(catinfo, str):\n rcf.setkey('emission.%s.%s.category%i'%(tracer, region, icat + 1), \"%s ; 0.0 ; 0000.0-e ; 0.0-e-%s ; 0 ; def-def-0\"%(cat, catinfo))\n\n # Else, the correlation lengths should also be specified:\n else :\n rcf.setkey('emission.%s.%s.category%i'%(tracer, region, icat + 1), \"%s ; %.1f ; %8s ; %s ; %i ; %s\"%(\n cat,\n catinfo.uncertainty,\n catinfo.spatial_correlation,\n catinfo.temporal_correlation,\n catinfo.optimize,\n catinfo.type\n ))\n if dconf.emissions[tracer].get('dailycycle'):\n rcf.setkey(\"%s.dailycycle.type\" % tracer, dconf.emissions[tracer].dailycycle.type)\n rcf.setkey(\"%s.emission.dailycycle\" % tracer, 'T')\n rcf.setkey('%s.dailycycle.prefix' % tracer, dconf.emissions[tracer].dailycycle.prefix)\n\n for cat in dconf.emissions[tracer].categories :\n catinfo = dconf.emissions[tracer].categories[cat]\n #if isinstance(catinfo, str):\n # rcf.setkey('%s.%s.routine' % (tracer, cat), catinfo)\n # else :\n # rcf.setkey('%s.%s.routine' % (tracer, cat), dconf.emissions[tracer].categories[cat].routine)\n if catinfo is not None:\n if 'dailycycle' in catinfo:\n rcf.setkey('%s.%s.dailycycle' % (tracer, cat), 'T')\n\n if 'filename' in dconf['emissions']:\n # Copy the emission file to the run directory:\n shutil.copy(dconf.emissions.filename, rcf.get('PyShell.em.filename'))\n shutil.rmtree(rcf.get('dailycycle.folder'), ignore_errors=True)\n shutil.copytree(dconf.emissions.dailycycle_folder, rcf.get('dailycycle.folder'))\n\n return rcf\n","repo_name":"gmonteil/tm5","sub_path":"pyshell/pyshell/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6967245909","text":"\"\"\"\nExamples of logical operators\n\"\"\"\n\n# Example 1 - \"and\" operator\nex1 = 4 < 
5 and 10 >= 9 # True\nex2 = 4 < 5 and 10 < 9 # False\nex3 = 4 < 5 and 10 >= 9 and 5 != 3 # True\n\n\n# Example 2 - \"or\" operator\nex4 = 4 < 5 or 10 < 9 # True\nex5 = \"Jonas\" == \"Jonas\" or \"Jonas\" == \"Mogens\"\n\nif 4 == 4 or \"The meaning of life\" == 42:\n print(\"Only one is true ..\")\n\n\n# Example 3 - \"not\" operator\nex6 = not True # False\nex7 = not 5 != 4 # True\n\nif not 5 != 4 and not 5 == 4 or not \"Jonas\" == \"Mogens\":\n print(\"God damn .. are all true?\")","repo_name":"jbakchr/aof-lar-at-kode-med-python","sub_path":"01-lectures/04-aften/05-logical-operators/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25487282582","text":"import re\nimport shutil\nroute = r\"C:\\Users\\sofig\\Desktop\\info\\Informatica\\txtmanip.txt\"\nnewRoute = r\"C:\\Users\\sofig\\Desktop\\info\\Informatica\\txtsaltos.txt\"\nwith open(route,\"r\") as txt:\n originalFile = txt.read()\nwith open(newRoute,\"w\") as newFile:\n shutil.copyfile(route,newRoute)\nwith open(newRoute,\"r+\") as alteredFile:\n #alteredFile.write(re.sub(\"^\\n\", \" \", alteredFile.read()))\n for line in alteredFile:\n line = line.replace(\"\\n\", \"\")","repo_name":"sgramuglia/Informatica","sub_path":"PRACTICA_4_MANIPULACION_DE_ARCHIVOS/ej6.py","file_name":"ej6.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"44025135138","text":"\"\"\"\nABC is a right triangle, 90 at B. Therefore, [1,2,...,x]\n '''\n return [[a for a in range(1, x+1)] for x in L] \n\n#1.7.3\ndef my_function_composition(f,g):\n '''\n input: two functions f and g represented as dictionaries, such that g(f(x)) exists\n output: dictionary that represents the function g(f(x))\n '''\n return {x: g[f[x]] for x in f.keys()}\n\n#1.7.4\n\n\n\n\n\n\n\n\n\n\n","repo_name":"microncomputer/codingthematrix","sub_path":"01_TheField/ch1_winter2021.py","file_name":"ch1_winter2021.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25988951536","text":"import torch\nimport random\nfrom torch.utils.data import Dataset, DataLoader\nfrom abc import ABC\nfrom models.base_model import Model\nfrom torch.utils.tensorboard import SummaryWriter\nfrom typing import List\n\n\nclass BaseDataset(Dataset, ABC):\n\tname = 'base'\n\n\tdef __init__(self, config: dict, mode: str = 'train'):\n\t\tself.config = config\n\t\tself.mode = mode\n\t\tself.device = config['device']\n\t\tself.data_dim = config['data_dim']\n\t\tself.summary_name = self.name\n\n\t'''\n\tNote that dataset's __getitem__() returns (x_coord, x_feat, y_coord, y_feat, name)\n\tBut the collated batch returns type of (SparseTensorWrapper, SparseTensorWrapper)\n\t'''\n\tdef __getitem__(self, idx) \\\n\t\t\t-> (torch.tensor, torch.tensor, torch.tensor, torch.tensor, List[str]):\n\t\t# sparse tensor and tensor should have equal size\n\t\traise NotImplemented\n\n\tdef __iter__(self):\n\t\twhile True:\n\t\t\tidx = random.randint(0, len(self) - 1)\n\t\t\tyield self[idx]\n\n\tdef collate_fn(self, batch: List) -> dict:\n\t\t# convert list of dict to dict of list\n\t\tbatch = {k: [d[k] for d in batch] for k in batch[0]}\n\t\treturn batch\n\n\tdef evaluate(self, model: Model, writer: SummaryWriter, step):\n\t\ttraining = model.training\n\t\tmodel.eval()\n\t\tdata_loader = 
DataLoader(\n\t\t\tself,\n\t\t\tbatch_size=self.config['eval_batch_size'],\n\t\t\tnum_workers=self.config['num_workers'],\n\t\t\tcollate_fn=self.collate_fn,\n\t\t\tdrop_last=False,\n\t\t)\n\n\t\tprint('')\n\t\teval_losses = []\n\t\tfor eval_step, data in enumerate(data_loader):\n\t\t\tmode = self.mode\n\t\t\tif len(self.config['eval_datasets']) != 1:\n\t\t\t\tmode += '_' + self.summary_name\n\t\t\teval_loss = model.evaluate(data, step, mode)\n\t\t\teval_losses.append(eval_loss)\n\n\t\t\tprint('\\r[Evaluating, Step {:7}, Loss {:5}]'.format(\n\t\t\t\teval_step, '%.3f' % eval_loss), end=''\n\t\t\t)\n\n\t\tprint('')\n\t\tmodel.write_dict_summaries(step)\n\t\tmodel.train(training)\n\n\tdef test(self, model: Model, writer: SummaryWriter, step):\n\t\traise NotImplementedError()\n\n\tdef visualize(self, model: Model, options: List, step):\n\t\ttraining = model.training\n\t\tmodel.eval()\n\n\t\t# fix vis_indices\n\t\tvis_indices = self.config['vis']['indices']\n\t\tif isinstance(vis_indices, int):\n\t\t\t# sample data points from n data points with equal interval\n\t\t\tn = len(self)\n\t\t\tvis_indices = torch.linspace(0, n - 1, vis_indices).int().tolist()\n\n\t\t# override to the index when in overfitting debug mode\n\t\tif isinstance(self.config['overfit_one_ex'], int):\n\t\t\tvis_indices = torch.tensor([self.config['overfit_one_ex']])\n\n\t\tfor option in options:\n\t\t\t# calls the visualizing function\n\t\t\tif hasattr(model, option):\n\t\t\t\tgetattr(model, option)(self, vis_indices, step)\n\t\t\telse:\n\t\t\t\traise ValueError(\n\t\t\t\t\t'model {} has no method {}'.format(\n\t\t\t\t\t\tmodel.__class__.__name__, option\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tmodel.train(training)\n\n\tdef visualize_test(self, model: Model, writer: SummaryWriter, step):\n\t\ttraining = model.training\n\t\tmodel.eval()\n\n\t\t# fix vis_indices\n\t\tvis_indices = self.config['vis']['indices']\n\t\tif isinstance(vis_indices, int):\n\t\t\t# sample data points from n data points with equal interval\n\t\t\tvis_indices = torch.linspace(0, len(self) - 1, vis_indices).int().tolist()\n\n\t\t# override to the index when in overfitting debug mode\n\t\tif isinstance(self.config['overfit_one_ex'], int):\n\t\t\tvis_indices = torch.tensor([self.config['overfit_one_ex']])\n\n\t\tmodel.visualize_test(self, vis_indices, step)\n\t\tmodel.train(training)\n\n","repo_name":"96lives/gca","sub_path":"datasets/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"3"} +{"seq_id":"46216968578","text":"class Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n res, curMax = nums[0], 0\n \n for n in nums:\n curMax = max(n, n + curMax)\n res = max(res, curMax)\n \n return res\n# O(n)","repo_name":"ycleo/ds-algo-practice","sub_path":"blind-75/15-greedy/max-contiguous-subarray.py","file_name":"max-contiguous-subarray.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21198548262","text":"import torch\nimport torch.nn.functional as f\nimport torch.nn as nn\nimport numpy as np\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport random\n\n\nclass memory:\n def __init__(self,size):\n self.memory = np.array([])\n self.size = size\n\n def push(self,event):\n if(len(self.memory) > self.size):\n np.delete(self.memory,0)\n self.memory = np.append(self.memory,[event],axis=0)\n\n def 
sample(self,batch_size):\n st = np.random.randint(0,self.memory.shape[0]-batch_size)\n sam = self.memory[st:st+batch_size,:]\n return sam\n\nclass Network(nn.Module):\n\n def __init__(self,input_size,actions):\n super(Network,self).__init__()\n self.input_size = input_size\n self.actions = actions\n self.fc1 = nn.Linear(input_size,30)\n self.fc2 = nn.Linear(30,actions)\n\n def forward(self, input_batch):\n x = self.fc1(input_batch)\n x = f.relu(x)\n batch_output = self.fc2(x)\n return batch_output\n\nclass Dqn:\n def __init__(self):\n self.model = Network(3,4)\n self.optim = optim.Adam(self.model.parameters(),lr=0.001)\n self.gamma = 0.01\n self.Memory = memory(10000)\n\n def forward(self,input_batch):\n return self.model.forward(input_batch)\n\n def step(self,current_states,next_states,rewards,actions):\n output = self.forward(current_states)\n current_Q = output.gather(1,actions)\n Next_Q = self.forward(next_states)\n Next_Qmax = Next_Q.max(1)[0]\n targets = rewards + self.gamma*Next_Qmax\n td_loss = f.smooth_l1_loss(current_Q,targets)\n self.optim.zero_grad()\n td_loss.backwards(retain_vaiables=True)\n self.optim.step()\n\n \n\n","repo_name":"thelogical/Deep-Q","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36906300302","text":"\"\"\"liveledger URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nimport app.views\n\nurlpatterns = [\n path('', app.views.RedirectHomeView.as_view()),\n path('home/', app.views.HomeView.as_view(), name='home'),\n path('index/', app.views.LedgerListView.as_view(), name='ledger-list'),\n path('create/', app.views.LedgerCreateView.as_view(), name='ledger-create'),\n path('delete//', app.views.LedgerDeleteView.as_view(), name='ledger-delete'),\n path('share//', app.views.LedgerShareView.as_view(), name='ledger-share'),\n path('share//removeself/', app.views.LedgerSharedRemoveMeView.as_view(), name='ledger-share-remove-me'),\n path('share//removeuser//', app.views.LedgerSharedUserRemoveView.as_view(), name='ledger-share-removeuser'),\n path('ledger//index/', app.views.LedgerItemListview.as_view(), name='ledgeritem-list'),\n path('ledger//create/', app.views.LedgerItemCreateView.as_view(), name='ledgeritem-create'),\n path('ledgeritem//update/', app.views.LedgerItemUpdateView.as_view(), name='ledgeritem-update'),\n path('ledgeritem//delete/', app.views.LedgerItemDeleteView.as_view(), name='ledgeritem-delete'),\n path('ledgeritem//inquire/', app.views.LedgerItemInquireView.as_view(), name='ledgeritem-inquire'),\n path('forbidden/', app.views.ForbiddenView.as_view(), name='forbidden'),\n]\n","repo_name":"nunchuckBoP/liveledger","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70583053522","text":"from os import path\nfrom sys import exit\n\nimport pygame\n\nBLACK = pygame.Color(0, 0, 0)\nWHITE = pygame.Color(255, 255, 255)\nSCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = 800, 600\nTITLE = \"Pendulum Simulator\"\nFPS = 60\n\nclass Pendulum:\n def __init__(self):\n pygame.display.init()\n self.surface = pygame.display.set_mode(SCREEN_SIZE)\n\n self.clock = pygame.time.Clock()\n\n pygame.font.init()\n self.dir, self.file = path.split(__file__)\n font_file = path.join(self.dir, \"data\", \"font.otf\")\n self.font = pygame.font.Font(font_file, 12)\n self.bigfont = pygame.font.Font(font_file, 20)\n\n self.active = False\n\n self.mouse = self.mousex, self.mousey = 0, 0\n self.mouseb = (False, False, False)\n\n self.pivot_pos = SCREEN_WIDTH >> 1, SCREEN_HEIGHT >> 2\n self.pivot_radius = 12\n\n self.bob_pos = SCREEN_WIDTH >> 1, SCREEN_HEIGHT - (SCREEN_HEIGHT >> 2)\n self.bob_radius = 27\n\n self.rod_width = 3\n\n self.selected_rect = pygame.Rect(0, 0, 0, 0)\n self.selected = None\n\n self.grabbed = False\n\n def set_window_properties(self):\n pygame.display.set_caption(TITLE)\n\n icon_file = path.join(self.dir, \"data\", \"logo.png\")\n icon = pygame.image.load(icon_file)\n pygame.display.set_icon(icon)\n\n def print_info(self):\n print(f\"{TITLE} https://github.com/jacob-thompson/pendulum-simulator\")\n\n def tick(self):\n self.clock.tick(FPS)\n\n def handle_event(self, event):\n if event.type == pygame.QUIT: exit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE: exit()\n else: self.active = True\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.active = True\n\n def handle_mouse(self):\n self.mouse = self.mousex, self.mousey = pygame.mouse.get_pos()\n self.mouseb = pygame.mouse.get_pressed()\n\n pivot = pygame.Rect(0, 0, self.pivot_radius << 1, self.pivot_radius << 1)\n pivot.center = self.pivot_pos\n\n bob = pygame.Rect(0, 0, self.bob_radius << 1, self.bob_radius << 
1)\n bob.center = self.bob_pos\n\n rod = pygame.draw.line(self.surface, BLACK, self.pivot_pos, self.bob_pos, self.rod_width << 1)\n\n #button == 1 is mouse1; left click\n #button == 2 is scroll wheel; middle click\n #button == 3 is mouse2; right click\n\n if pivot.collidepoint(self.mouse) and self.mouseb[2]:\n self.selected_rect = pivot.copy()\n self.selected = \"frictionless pivot\"\n elif bob.collidepoint(self.mouse) and (self.mouseb[0] or self.mouseb[2]):\n self.selected_rect = bob.copy()\n self.selected = \"massive bob\"\n elif rod.collidepoint(self.mouse) and self.mouseb[2]:\n self.selected_rect = rod.copy()\n self.selected = \"massless rod\"\n elif self.mouseb[2]: self.selected = None\n\n if bob.collidepoint(self.mouse) and self.mouseb[0]: self.grabbed = True\n elif self.mouseb[0]: self.selected = None\n else: self.grabbed = False\n\n #print(self.grabbed)\n\n def draw_background(self):\n self.surface.fill(WHITE)\n\n def draw_pivot(self):\n pygame.draw.circle(self.surface, BLACK, self.pivot_pos, self.pivot_radius)\n\n def draw_bob(self):\n pygame.draw.circle(self.surface, BLACK, self.bob_pos, self.bob_radius + 1)\n pygame.draw.circle(self.surface, WHITE, self.bob_pos, self.bob_radius)\n\n def draw_rod(self):\n pygame.draw.line(self.surface, BLACK, self.pivot_pos, self.bob_pos, self.rod_width)\n\n def draw_selected(self):\n if self.selected == None: pass\n elif self.selected == \"frictionless pivot\":\n position = f\"position: {self.selected_rect.center}\"\n position_surface = self.bigfont.render(position, 1, BLACK)\n position_pos = SCREEN_WIDTH - 3, SCREEN_HEIGHT\n position_rect = position_surface.get_rect(bottomright = position_pos)\n self.surface.blit(position_surface, position_rect)\n\n name_surface = self.bigfont.render(self.selected, 1, BLACK)\n name_pos = position_rect.topright\n name_rect = name_surface.get_rect(bottomright = name_pos)\n self.surface.blit(name_surface, name_rect)\n elif self.selected == \"massive bob\":\n position = f\"position: {self.selected_rect.center}\"\n position_surface = self.bigfont.render(position, 1, BLACK)\n position_pos = SCREEN_WIDTH - 3, SCREEN_HEIGHT\n position_rect = position_surface.get_rect(bottomright = position_pos)\n self.surface.blit(position_surface, position_rect)\n\n name_surface = self.bigfont.render(self.selected, 1, BLACK)\n name_pos = position_rect.topright\n name_rect = name_surface.get_rect(bottomright = name_pos)\n self.surface.blit(name_surface, name_rect)\n elif self.selected == \"massless rod\":\n end = f\"end point: {self.selected_rect.midbottom}\"\n end_surface = self.bigfont.render(end, 1, BLACK)\n end_pos = SCREEN_WIDTH - 3, SCREEN_HEIGHT\n end_rect = end_surface.get_rect(bottomright = end_pos)\n self.surface.blit(end_surface, end_rect)\n\n start = f\"start point: {self.selected_rect.midtop}\"\n start_surface = self.bigfont.render(start, 1, BLACK)\n start_pos = end_rect.topright\n start_rect = start_surface.get_rect(bottomright = start_pos)\n self.surface.blit(start_surface, start_rect)\n\n name_surface = self.bigfont.render(self.selected, 1, BLACK)\n name_pos = start_rect.topright\n name_rect = name_surface.get_rect(bottomright = name_pos)\n self.surface.blit(name_surface, name_rect)\n\n def draw_pendulum(self):\n self.draw_pivot()\n self.draw_rod()\n self.draw_bob() # must be drawn after rod\n\n def draw_license_disclaimer(self):\n disclaimer = \"MIT License Copyright (c) 2023 Jacob Alexander Thompson\"\n surface = self.font.render(disclaimer, 1, BLACK)\n pos = 3, SCREEN_HEIGHT\n rect = surface.get_rect(bottomleft = 
pos)\n self.surface.blit(surface, rect)\n\n def draw_frame(self):\n self.draw_background()\n\n self.draw_selected()\n\n self.draw_pendulum()\n\n if not self.active:\n self.draw_license_disclaimer()\n\n def update_frame(self):\n pygame.display.flip()","repo_name":"jacob-thompson/pendulum-simulator","sub_path":"src/pendulum_simulator/pendulum.py","file_name":"pendulum.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36608129867","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 26 01:19:11 2021\n\n@author: leen8\n\"\"\"\ndef get_line(fname,parno,lineno):\n f = open(fname, encoding='utf8').read()\n plist = f.split(\"\\n\\n\")\n para = plist[parno]\n llist = para.split(\"\\n\")\n line = llist[lineno]\n return line\nfname = input(\"Please enter the file number ==> \")\nparno = input(\"Please enter the paragraph number ==> \")\nparno = int(parno)\nlineno = input(\"Please enter the line number ==> \")\nlineno = int(lineno)\n\nfname = fname + \".txt\"\nparno -= 1\nlineno -=1\nprint(get_line(fname, parno, lineno))\n","repo_name":"niccolesgit/Python-Projects","sub_path":"labs/Lab 07/check2.py","file_name":"check2.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28432052408","text":"# Optional task 2\n\n# Requests and save user input\ntemperature = input(\"Is the temperature above 20 degrees? \")\nweekend = input(\"Is today the weekend? \")\nsunny = input(\"Is it sunny? \")\n\n# Atributes true or false to temperature depending on user answer\nif temperature.lower() == \"yes\":\n temperature = True\nelse:\n temperature = False\n\n# Atributes true or false to weekend depending on user answer\nif weekend.lower() == \"yes\":\n weekend = True\nelse:\n weekend = False\n\n# Atributes true or false to sunny depending on user answer\nif sunny.lower() == \"yes\":\n sunny = True\nelse:\n sunny = False\n\n# Prints suggestion of shirt based on temperature value\nif temperature:\n print(\"You should wear a short-sleeved shirt, \", end='')\nelse:\n print(\"You should wear a long-sleeved shirt, \", end='')\n\n# Prints suggestion of bottoms based on weekend value\nif weekend:\n print(\"shorts, \", end='')\nelse:\n print(\"jeans, \", end='')\n\n# Prints suggestion of accessories based on sunny value\nif sunny:\n print(\"and a cap.\")\nelse:\n print(\"and a raincoat.\") \n","repo_name":"RochaJacqueline/HyperionDevCourse","sub_path":"T08/optional_task2.py","file_name":"optional_task2.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16212950845","text":"from qiskit import QuantumCircuit, transpile, assemble, Aer, execute\r\n\r\ncircuit = QuantumCircuit(2)\r\n\r\ncircuit.h(0)\r\n\r\nwnumber = 0\r\n\r\nqchoice = 0\r\n\r\ndef entanglement():\r\n global wnumber\r\n global qchoice\r\n\r\n circuit.cx(0, 1)\r\n\r\n circuit.measure_all()\r\n\r\n simulator = Aer.get_backend('qasm_simulator')\r\n\r\n job = execute(circuit, simulator, shots=1)\r\n\r\n result = job.result()\r\n\r\n count = result.get_counts()\r\n\r\n print(count)\r\n\r\n for key in count.keys():\r\n print(key, type(key))\r\n wnumber = int(key[0])\r\n qchoice = int(key[0])\r\n\r\n \r\n\r\n\r\n\r\n\r\ni = int(input('Chose a number between 0 and 1: '))\r\n\r\nentanglement()\r\n\r\nprint('The number: ', wnumber, 'Quantum Computer choice: ', qchoice, 
'Your choice: ', i)","repo_name":"cardinaldust/qiskit_g","sub_path":"qgame.py","file_name":"qgame.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29456501414","text":"#!/usr/bin/python\n\nimport base\n\n\nclass RawWriter(base.FileBase):\n def create(self, fn, size):\n self.f = open(fn, \"w\")\n self.f.truncate(size)\n\n def write_sector(self, sec, data):\n if data is None:\n return\n\n if len(data) != 1 << 9:\n raise Exception()\n\n self.pwrite(sec << 9, data)\n","repo_name":"jim-minter/vmdisktool","sub_path":"raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43813316463","text":"# 설탕 배달\n\nimport sys\nsys.stdin = open('input.txt','r')\n\nnum = int(input())\ncnt = 0\nwhile 1: \n if num == 4:\n print(-1)\n break\n if num % 5 == 0:\n cnt += num//5\n print(cnt)\n break\n num -= 3\n cnt += 1","repo_name":"KDT2-Algorithm-study/Algorithm-study","sub_path":"백준/2839/2839_김용진.py","file_name":"2839_김용진.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"22574695845","text":"from flask import render_template, url_for,flash,redirect,request,abort,Blueprint\n\n\n\n\n\n\n\nreport =Blueprint('report',__name__)\n\n\n\n@report.route('/insertreport')\ndef reportcard():\n form=reportForm()\n if form.validate_on_submit():\n term=form.term.data#run queries\n student=form.student.data\n subject=form.subject.data\n mark=form.mark.data\n grade=form.grade.data\n year=form.year.data\n classz=form.classz.data\n report_card=form#get you some report data\n buses=reportdata(reportcard=reportcard.id,subject_id=subject.id,mark=mark,grade=grade)\n db.session.add(buses)\n db.session.commit()\n return redirect(url_for('users.dashboard'))\n return render_template('fabien-ui/location.html',legend=\"login\",form=form)\n\n\n","repo_name":"EbotFabien/Busmanagement","sub_path":"app/entity/report/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39917306074","text":"import tkinter as tk\nfrom adventurer import Adventurer\nfrom tkinter import messagebox\n# from dungeonadventure import DungeonAdventure\n\n\"\"\"Controller class which provides the main logic to facilitate communication between the view code in Dungeon Adventure\nand the Dungeon model class. 
Most actions initiated from the Dungeon Adventure view are defined here\"\"\"\nclass DungeonController:\n\n def __init__(self, dungeon, adventure):\n self.__dungeon = dungeon\n self.__view = adventure\n # self.__dungeon.get_hero() = Adventurer(self.__dungeon.get_map().start_point.get_y(), self.__dungeon.get_map().start_point.get_x())\n # self.__dungeon.__dungeon_adventure = dungeon_adventure\n\n def get_dungeon(self):\n return self.__dungeon\n\n def draw_hero(self):\n hero_move = self.__dungeon.get_movement_list()[-1]\n pre_move = self.__dungeon.get_movement_list()[-2]\n self.hero_move(self.__dungeon.get_movement_list())\n print(self.__dungeon.get_hero().get_health())\n self.__dungeon.draw_cell(hero_move.get_y(), hero_move.get_x(), \"#232323\")\n if pre_move == self.__dungeon.get_map().start_point:\n self.__dungeon.draw_cell(pre_move.get_y(), pre_move.get_x(), \"#eee83f\")\n elif pre_move == self.__dungeon.get_map().destination:\n self.__dungeon.draw_cell(pre_move.get_y(), pre_move.get_x(), \"#cf52eb\")\n elif pre_move != self.__dungeon.get_map().start_point and pre_move != self.__dungeon.get_map().destination:\n if pre_move.get_value() == 4 or pre_move.get_value() == 5:\n self.__dungeon.draw_cell(pre_move.get_y(), pre_move.get_x(), \"#ee3f4d\")\n else:\n self.__dungeon.draw_cell(pre_move.get_y(), pre_move.get_x(), \"#F2F2F2\")\n self.check_reach()\n\n def check_reach(self):\n if self.__dungeon.get_movement_list()[-1] == self.__dungeon.get_map().destination:\n if self.__dungeon.get_hero().get_pillars():\n print(\"Congratulations!\")\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()}\\n\"\n f\"Congratulations!!!\\n\"\n f\"You have found all the pillars and reach the end!\\n\"\n f\"You Win!!!\\n\")\n exit()\n else:\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()}\\n\"\n f\"You have not found all the pillars yet!\\n\")\n print(\"not enough pillars\")\n elif self.__dungeon.get_hero().get_health() <= 0:\n print(\"hero is dead!\")\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()}\\n\"\n f\"Game Over!Your health points have fallen below 0.\\n\")\n exit()\n\n def move_west(self):\n x = self.__dungeon.get_hero().get_x()\n y = self.__dungeon.get_hero().get_y()\n print(\"hero(y,x)\", y, x)\n room = self.__dungeon.get_map().get_room(y, x - 1)\n if self.__dungeon.get_map().is_movable(room):\n self.__dungeon.get_hero().set_x(x - 1)\n self.__dungeon.get_movement_list().append(room)\n self.room_info()\n self.draw_hero()\n print(\"west\", \"y\", y, \"x\", x, room.get_value())\n else:\n return\n\n def move_east(self):\n x = self.__dungeon.get_hero().get_x()\n y = self.__dungeon.get_hero().get_y()\n print(\"hero(y,x)\", y, x)\n room = self.__dungeon.get_map().get_room(y, x + 1)\n if self.__dungeon.get_map().is_movable(room):\n self.__dungeon.get_hero().set_x(x + 1)\n self.__dungeon.get_movement_list().append(room)\n self.room_info()\n self.draw_hero()\n print(\"east\", \"y\", y, \"x\", x, room.get_value())\n else:\n return\n\n def move_south(self):\n x = self.__dungeon.get_hero().get_x()\n y = self.__dungeon.get_hero().get_y()\n print(\"hero(y,x)\", y, x)\n room = self.__dungeon.get_map().get_room(y + 1, x)\n if self.__dungeon.get_map().is_movable(room):\n self.__dungeon.get_hero().set_y(y + 1)\n self.__dungeon.get_movement_list().append(room)\n self.room_info()\n self.draw_hero()\n print(\"south\", \"y\", y, \"x\", x, room.get_value())\n else:\n return\n\n def move_north(self):\n x = self.__dungeon.get_hero().get_x()\n y 
= self.__dungeon.get_hero().get_y()\n print(\"hero(y,x)\", y, x)\n room = self.__dungeon.get_map().get_room(y - 1, x)\n if self.__dungeon.get_map().is_movable(room):\n self.__dungeon.get_hero().set_y(y - 1)\n self.__dungeon.get_movement_list().append(room)\n print(\"north\", \"y\", y, \"x\", x, room.get_value())\n self.room_info()\n self.draw_hero()\n else:\n return\n\n def hero_move(self, moves):\n hero_move = moves[-1]\n if hero_move.get_value() == 6:\n self.__dungeon.get_hero().add_pillar(\"abstraction\")\n hero_move.set_value(0)\n if hero_move.get_value() == 7:\n self.__dungeon.get_hero().add_pillar(\"encapsulation\")\n hero_move.set_value(0)\n if hero_move.get_value() == 8:\n self.__dungeon.get_hero().add_pillar(\"inheritance\")\n hero_move.set_value(0)\n if hero_move.get_value() == 9:\n self.__dungeon.get_hero().add_pillar(\"polymorphism\")\n hero_move.set_value(0)\n if hero_move.get_value() == 4:\n self.__dungeon.get_hero().add_vaccine()\n if hero_move.get_value() == 5:\n self.__dungeon.get_hero().min_health()\n if self.__dungeon.get_hero().get_health() <= 0:\n self.__view.show_dead()\n\n # method to get room info\n def room_info(self):\n #currently not showing there's a potion there after you enter the room - hero already scooped it and then the display is showing no potion\n current_room = self.__dungeon.get_map().get_room(self.__dungeon.get_hero().get_y(), self.__dungeon.get_hero().get_x())\n if current_room.get_value() == 0:\n return None\n # room_description = \"Nothing\"\n elif current_room.get_value() == 2:\n room_description = \"Entrance/Start\"\n return None\n elif current_room.get_value() == 3:\n room_description = \"Exit\"\n elif current_room.get_value() == 4:\n room_description = \"Vaccine\"\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()} This Room Has:\\n\"\n f\"{room_description}\\n\"\n f\"your can use it to increase your health.\")\n elif current_room.get_value() == 5:\n room_description = \"Pit of People\"\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()} This Room Has:\\n\"\n f\"{room_description}\\n\"\n f\"your health will decrease by 50.\")\n if self.__dungeon.get_hero().get_health() <= 0:\n messagebox.showinfo(\"Game Over!\", f\"Your health points have fallen below 0.\\n\"\n f\"You have died. 
Please exit out of the game!\")\n self.check_reach()\n elif current_room.get_value() == 6:\n room_description = \"Pillar: Abstraction\"\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()} This Room Has:\\n\"\n f\"{room_description}\")\n elif current_room.get_value() == 7:\n room_description = \"Pillar: Encapsulation\"\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()} This Room Has:\\n\"\n f\"{room_description}\")\n elif current_room.get_value() == 8:\n room_description = \"Pillar: Inheritance\"\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()} This Room Has:\\n\"\n f\"{room_description}\")\n elif current_room.get_value() == 9:\n room_description = \"Pillar: Polymorphism\"\n messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()} This Room Has:\\n\"\n f\"{room_description}\")\n\n # messagebox.showinfo(\"Room Info\", f\"{self.__dungeon.get_hero().get_name()} Your Room Has:\\n\"\n # f\"{room_description}\")\n #\n # if current_room.get_value() >= 4:\n # messagebox.showinfo(\"There is something in this room!\", f\"You use/pick up this item.\\n\"\n # f\"If it is a pit, you fall in and lose 50 health points.\")\n #\n # if current_room.get_value() == 5:\n # if self.__dungeon.get_hero().get_health() <= 0:\n # messagebox.showinfo(\"Game Over!\", f\"Your health points have fallen below 0.\\n\"\n # f\"You have died. Please exit out of the game!\")\n\n\n\n # def hero_stats(self):\n # messagebox.showinfo(\"Hero Stats\", f\"{self.__dungeon.get_hero().get_name()} Your Hero Stats are:\\nHealth Points: {self.__dungeon.get_hero().get_health()}\\n\"\n # f\"Number of Vaccines: {self.__dungeon.get_hero().get_number_vaccine()}\\n\"\n # f\"Number of Vision Potions: {self.__dungeon.get_hero().get_number_vision_potion()}\\n\"\n # f\"Pillars of OO Collected: {self.__dungeon.get_hero().get_number_pillars()}\")\n #\n # def yes_callback(self):\n #\n # self.__dungeon.get_hero().add_health()\n # messagebox.showinfo(\"Taking Vaccine\",\n # f\"{self.__dungeon.get_hero().get_name()} Your Health Points are: {self.__dungeon.get_hero().get_health()} \")\n #\n # def no_callback(self):\n #\n # messagebox.showinfo(\"Not taking vaccine\", \"Show User Health Points (unaltered)\")\n\n # def vaccine_buttons(self):\n #\n # top2 = tk.Toplevel(windows)\n # top2.geometry(\"500x500\")\n # top2.title(\"Would you like to take the vaccine? \")\n # top2_frame = tk.Frame(top2)\n # top2_frame.pack()\n # canvas2 = tk.Canvas(top2, width=400, height=400)\n # canvas2.pack()\n #\n # tk.Label(canvas2, text='Would you like to take the vaccine ? 
').pack()\n #\n # vaccine_image = Image.open(\"vaccine.gif\")\n # tk_image2 = ImageTk.PhotoImage(vaccine_image)\n # label2 = tk.Label(top2_frame, image=tk_image2)\n # label2.image = tk_image2\n # label2.grid(row=0, column=0)\n #\n # number = random.randint(1, 100)\n #\n # bottom2_frame = Frame(top2)\n # bottom2_frame.pack(side=BOTTOM)\n #\n # green_button = Button(bottom2_frame, text=\"Yes\", fg=\"green\", command=dc.yes_callback())\n # green_button.pack(side=LEFT)\n #\n # red_button = Button(bottom2_frame, text=\"No\", fg=\"red\", command=dc.no_callback())\n # red_button.pack(side=RIGHT)\n #\n # def get_input(self, entry):\n # self.__dungeon.get_hero().set_name(entry.get())\n # label2 = tk.Label(top_frame,\n # text=f'Hello,{adventurer_name}, Adventurer!\\n '\n # f'Please use the control panel to navigate.\\n '\n # f'Your mission is to collect all 4 pillars of OO and stay alive!\\n'\n # )\n # label2.grid(row=3, column=0)\n # #start_canvas.create_window(200, 230, window=label2)\n\n","repo_name":"michaelweinberg/assignment5","sub_path":"dungeon_controller.py","file_name":"dungeon_controller.py","file_ext":"py","file_size_in_byte":11979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17700953770","text":"#!/usr/bin/env python3\n\nimport cgi, collections, json, subprocess, sys\nimport http.server as server\nimport urllib.request as request\n\nfrom http import HTTPStatus\nfrom os import path\nfrom string import Template\n\nME = path.abspath(__file__)\nME_DIR = path.dirname(ME)\nPIA_CLIENT_ID = \"\"\nTMISSION_CFG = \"/etc/transmission-daemon/settings.json\"\nTMISSION_DAEMON_NAME = \"transmission-daemon\"\nVPN_DAEMON_NAME = \"pia-vpn\"\n\n\ndef start_vpn():\n subprocess.check_call(\n [\n \"openvpn\",\n \"--config\", \"CA Toronto.ovpn\",\n \"--auth-user-pass\", \"pia-passwd\",\n \"--dev\", \"pia-tun\",\n \"--dev-type\", \"tun\",\n \"--daemon\", VPN_DAEMON_NAME,\n \"--script-security\", \"2\",\n \"--up\", ME + \" \\\"up\\\"\",\n \"--down\", ME + \" \\\"down\\\"\",\n \"--up-restart\"\n ],\n cwd=path.join(ME_DIR, \"pia\")\n )\n\n\ndef stop_vpn():\n subprocess.call([\"pkill\", \"-SIGTERM\", \"-f\", VPN_DAEMON_NAME])\n\n\ndef request_pia_fw_port():\n api_url = \"http://209.222.18.222:2000/?client_id=\" + PIA_CLIENT_ID\n with request.urlopen(api_url, timeout=5) as resp:\n resp_charset = resp.info().get_param(\"charset\") or \"ascii\"\n resp_str = resp.read().decode(resp_charset)\n return int(json.loads(resp_str)[\"port\"])\n\n\ndef exec_tmission_cmd(cmd):\n subprocess.call(\n [\"/etc/init.d/\" + TMISSION_DAEMON_NAME, cmd],\n stdout=subprocess.DEVNULL\n )\n\n\ndef exec_pia_cmd(cmd):\n if cmd == \"start\":\n start_vpn()\n elif cmd == \"stop\":\n stop_vpn()\n\n\ndef update_tmission_settings(bind_addr, bind_port):\n with open(TMISSION_CFG + \".tpl\") as settings_tpl:\n settings = json.load(settings_tpl)\n settings.update({\n \"bind-address-ipv4\": bind_addr,\n \"peer-port\": bind_port\n })\n with open(TMISSION_CFG, \"w\") as settings_file:\n json.dump(settings, settings_file)\n\n\ndef process_openvpn_evt(cmd, bind_addr, evt):\n if cmd == \"up\":\n if evt == \"init\":\n fw_port = request_pia_fw_port()\n update_tmission_settings(bind_addr, fw_port)\n exec_tmission_cmd(\"restart\")\n elif cmd == \"down\":\n exec_tmission_cmd(\"stop\")\n else:\n print(\"Unknown command\")\n\n\ndef start_web_server():\n with open(path.join(ME_DIR, \"pia.html\")) as html_file:\n html_template = Template(html_file.read())\n\n class 
RequestHandler(server.BaseHTTPRequestHandler):\n def log_message(*args):\n pass\n\n def do_HEAD(self):\n self.send_response(HTTPStatus.OK)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n\n def do_GET(self):\n tpl_params = collections.ChainMap(\n self._get_process_status(TMISSION_DAEMON_NAME, \"tmission\"),\n self._get_process_status(VPN_DAEMON_NAME, \"pia\"),\n {\"path\": self.path}\n )\n html = html_template.safe_substitute(**tpl_params)\n self.do_HEAD()\n self.wfile.write(bytes(html, \"utf-8\"))\n\n def do_POST(self):\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={\"REQUEST_METHOD\": self.command}\n )\n service = form.getvalue(\"service\")\n cmd = form.getvalue(\"cmd\")\n\n if service == \"tmission\":\n exec_tmission_cmd(cmd)\n elif service == \"pia\":\n exec_pia_cmd(cmd)\n\n self.send_response(HTTPStatus.MOVED_PERMANENTLY)\n self.send_header(\"Location\", self.path)\n self.end_headers()\n\n @staticmethod\n def _get_process_status(process_name, key_prefix):\n proc_returncode = subprocess.call(\n [\"pgrep\", \"-f\", process_name],\n stdout=subprocess.DEVNULL\n )\n if proc_returncode:\n iconcolor, icon, status = \"d43939\", \"emoji-sad\", \"not running\"\n else:\n iconcolor, icon, status = \"28bd14\", \"emoji-happy\", \"running\"\n return {\n key_prefix + \"icon\": icon,\n key_prefix + \"iconcolor\": iconcolor,\n key_prefix + \"status\": status\n }\n\n httpd = server.HTTPServer((\"\", 8888), RequestHandler)\n httpd.serve_forever()\n\n\ndef main():\n argc = len(sys.argv)\n if argc == 1:\n start_web_server()\n elif argc == 2:\n exec_pia_cmd(sys.argv[1])\n elif argc == 4:\n process_openvpn_evt(*sys.argv[1:])\n elif argc == 8:\n # OpenVPN doesn't allow packets to go through a tunnel until the script\n # returns with 0 exit code - self-spawn the sript to request a fw port\n cmd, _, _, _, bind_addr, _, evt = sys.argv[1:]\n subprocess.Popen([ME, cmd, bind_addr, evt])\n else:\n print(\"Illegal arguments specified\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vonZeppelin/pia-vpn-manager","sub_path":"pia.py","file_name":"pia.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8380970028","text":"import _plotly_utils.basevalidators\n\n\nclass TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):\n def __init__(self, plotly_name=\"tickwidth\", parent_name=\"ohlc\", **kwargs):\n super(TickwidthValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n edit_type=kwargs.pop(\"edit_type\", \"calc\"),\n max=kwargs.pop(\"max\", 0.5),\n min=kwargs.pop(\"min\", 0),\n **kwargs,\n )\n","repo_name":"plotly/plotly.py","sub_path":"packages/python/plotly/plotly/validators/ohlc/_tickwidth.py","file_name":"_tickwidth.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":14438,"dataset":"github-code","pt":"3"} +{"seq_id":"27488602156","text":"#Blake Reneau 1/06/2023\n#a fork of my store script\nimport os\nfrom colorama import Fore, Back, Style\n\nos.system('clear')\n\nA = 1.50\nB = 2.50\nC = 2.00\nD = 9.50\nE = 2000000.00\n\nprint(Fore.LIGHTRED_EX + \"Price list\" + Style.RESET_ALL)\nprint(\"A. \" + Fore.YELLOW + \"dog food\" + Fore.WHITE + \" - $1.50\")\nprint(\"B. \" + Fore.BLUE + \"cat food\" + Fore.WHITE + \" - $2.50\")\nprint(\"C. \" + Fore.MAGENTA + \"hamster food\" + Fore.WHITE + \" - $2.00\")\nprint(\"D. 
\" + Fore.RED + \"horse food\" + Fore.WHITE + \" - $9.50\")\nprint(\"E. \" + Fore.GREEN + \"Thermonuclear Reactor\" + Fore.WHITE + \" - $2,000,000\")\nprint(\"input X to exit (or crash the program)\")\nprint(\"\")\nmoney = float(input(\"how much do you have to spend? \"))\n\nwhile money > 0:\n item = input(\"wha'd'ya want? (input the letter) \")\n if item == \"X\":\n print(\"hah, broke\")\n break\n else:\n quantity = int(input(\"how many? \"))\n cost = 0\n if item == \"A\":\n cost = A * quantity\n elif item == \"B\":\n cost = B * quantity\n elif item == \"C\":\n cost = C * quantity\n elif item == \"D\":\n cost = D * quantity\n elif item == \"E\":\n cost = E * quantity\n \n if money >= cost:\n money = money - cost\n break\n else:\n print(\"ha, broke\")\n\nprint(\"money left:\" + str(money))\nprint(\"bye\")\n","repo_name":"nulla-git/school_stuff","sub_path":"python/store_with_color/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"677192001","text":"import json\n\n# 167. Two Sum II - Input array is sorted - # Easy\n# Given a 1-indexed array of integers numbers that is already sorted in non-decreasing order, \n# find two numbers such that they add up to a specific target number. \n# Let these two numbers be numbers[index1] and numbers[index2] \n# where 1 <= first < second <= numbers.length. Return the indices of the two numbers, \n# index1 and index2, as an integer array [index1, index2] of length 2.\n# The tests are generated such that there is exactly one solution. \n# You may not use the same element twice.\n#\n# https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/\n\nclass Solution:\n def twoSum(self, numbers: list[int], target: int) -> list[int]:\n # num_in = {num: [pos] if num not in num_in else num_in[num].append(pos) for pos, num in enumerate(numbers)}\n num_in = dict()\n for pos, num in enumerate(numbers):\n if num not in num_in:\n num_in[num] = [pos]\n else:\n num_in[num].append(pos)\n\n for i in range(len(numbers)):\n if target-numbers[i] in num_in:\n for k in num_in[target-numbers[i]]:\n if i != k:\n return [i+1, k+1]\n \n return [None, None] \n \n\nif __name__ == '__main__': \n with open('OUTPUT/IN', 'r') as f_in, open('OUTPUT/OUT', \"w\") as f_out:\n while True:\n numbers_line = f_in.readline().rstrip()\n target_line = f_in.readline().rstrip()\n if not (numbers_line and target_line):\n break\n numbers = json.loads(numbers_line)\n\n exec = Solution()\n res = exec.twoSum(numbers, int(target_line)) \n\n f_out.write(json.dumps(res) + '\\n')\n","repo_name":"yuvSid/interviewPrepare","sub_path":"python/path_sum_II.py","file_name":"path_sum_II.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"72956997841","text":"import pandas as pd\nimport sys\n\ndef drop_columns(df, cols):\n for col in cols:\n df = df.drop(col,1)\n return df\n\ndef main(filename, remove_rows, remove_columns): \n ext = filename[-4:]\n if ext == '.csv':\n df = pd.read_csv(filename)\n df = drop_columns(df, remove_columns)\n df = df.iloc[remove_rows:, :]\n print(df)\n elif ext == '.xls':\n df = pd.read_excel(filename)\n df = drop_columns(df, remove_columns)\n df = df.iloc[remove_rows:, :]\n print(df)\n else:\n print(\"Could not load data from file\")\n print(\"Use .csv or .xls format\")\n print(\"Program completed\")\n\n# usage: python print_data.py iris_mod.csv 100 col1 col2 
col3 ...\n# python print_data.py iris_mod.xls 500 col1 col2 col3 ...\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n remove_rows = int(sys.argv[2])\n remove_columns = sys.argv[3:]\n main(filename, remove_rows, remove_columns)","repo_name":"hermanwh/datadrevet_ML","sub_path":"ml/print_data.py","file_name":"print_data.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18338677609","text":"# -------------------------------------------------------------------------\n# pyCGNS - Python package for CFD General Notation System - \n# See license.txt file in the root directory of this Python module source \n# -------------------------------------------------------------------------\n#\nimport os\nimport sys\nimport subprocess\n\nimport CGNS.PAT.cgnsutils as CGU\nimport CGNS.PAT.cgnskeywords as CGK\nimport CGNS.MAP as CGM\nimport CGNS.version\n\n\nclass Context:\n def __init__(self):\n self.converter = None\n\n\ndef openFile(filename):\n flags = CGM.S2P_DEFAULTS | CGM.S2P_NODATA\n (t, l, p) = CGM.load(filename, flags=flags, maxdata=33, lksearch=['.'])\n return (t, l, p, filename)\n\n\ndef parseFile(filename, P, C, L):\n C.depth += 1\n R = []\n T = openFile(filename)\n LK = T[1]\n if (C.translate):\n LK = transLinks(filename, LK, P, C)\n searchLinks(LK, C, R)\n for p in R:\n L.append((filename, p))\n if (C.path):\n P.append('%s' % (p[3],))\n else:\n P.append('%s:%s' % (T[3], p[3]))\n for l in LK:\n if (l[0] == ''):\n FH = l[1]\n else:\n FH = \"%s/%s\" % (l[0], l[1])\n parseFile(FH, P, C, L)\n C.depth -= 1\n return P, L\n\n\ndef linkErrorAsString(code):\n s = \"\"\n if (code & CGM.S2P_LKOK):\n s += 'Link ok'\n if (code & CGM.S2P_LKFAIL):\n s += 'Link Failed:'\n if (code & CGM.S2P_LKBADSYNTAX):\n s += 'Link bad syntax'\n if (code & CGM.S2P_LKNOFILE):\n s += 'Linked-to file not found'\n if (code & CGM.S2P_LKFILENOREAD):\n s += 'Linked-to file not readable'\n if (code & CGM.S2P_LKNONODE):\n s += 'Linked-to node not found in file'\n if (code & CGM.S2P_LKLOOP):\n s += 'Link loop detected'\n if (code & CGM.S2P_LKIGNORED):\n s += 'Link ignored'\n return s\n\n\ndef checkString(variable, targetlist, re):\n if (variable is None):\n return False\n if (not re):\n return (variable in targetlist)\n else:\n for t in targetlist:\n if (variable.search(t) is not None):\n return True\n return False\n\n\ndef searchLinks(L, C, R):\n for l in L:\n add = True\n if (add or C.linklist):\n R.append(l)\n\n\ndef asHDFname(FA, C):\n return os.path.splitext(FA)[0] + C.exthdf\n\n\ndef convertInPlace(FA, FH, C):\n if not os.path.isfile(FA):\n if C.verbose:\n print(' ' * C.depth + \" Error: Unreachable file: %s\" % FA)\n return False\n elif not CGM.probe(FA):\n subprocess.check_output([C.converter, \"-h\", FA, FH])\n return True\n else:\n if C.verbose:\n print(' ' * C.depth + \" Error: Mixing links to ADF and HDF files...\")\n return False\n\n\ndef transLinks(filename, L, P, C):\n LH = []\n for l in L:\n LN = asHDFname(l[1], C)\n if (l[0] == ''):\n FA = l[1]\n else:\n FA = \"%s/%s\" % (l[0], l[1])\n if (C.verbose):\n print(' ' * C.depth, '->', FA)\n FH = asHDFname(FA, C)\n if (convertInPlace(FA, FH, C)):\n LH.append([l[0], LN, l[2], l[3]])\n (t, l, p) = CGM.load(filename)\n CGM.save(filename, t, links=LH)\n return LH\n\n# --- last 
line\n","repo_name":"pyCGNS/pyCGNS","sub_path":"CGNS/APP/lib/cg_com.py","file_name":"cg_com.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"3"} +{"seq_id":"14437669116","text":"# Слияние файлов Excel в один. Файлы должны лежать в одной директории и иметь одинаковую структуру данных.\nimport os\nimport pandas as pd\nfrom tkinter import filedialog\nimport pyodbc\nimport datetime\nimport sqlite3\nimport requests\n\n\ndatefrom = datetime.datetime(2022, 11, 1)\ndateto = datetime.datetime(2022, 11, 30) # берется диапазон включая крайние даты\n\nclass files_to_one():\n \"\"\" Открывает по очереди файлы из указанной директории, соединяет таблицы вместе из этих файлов и записывает\n результирующую таблицу в один. Принимает параметры\n filetype: str - типы файлов (расширение) без точки\n filename: str - результирующий файл\"\"\"\n\n # filetype: str\n # filename: str\n all_data = pd.DataFrame()\n table_header = [\"Дата\", \"Время\", \"ID оплаты\", \"Референс операции\", \"Номер терминала\", \"Имя магазина\", \"Карта или счет\",\n \"Банк\", \"Платежная система\", \"Сумма транзакции\", \"Тип операции\", \"Код авторизации\", \"Комиссия банка\",\n \"Сумма перевода\", \"Валюта\"]\n\n\n def __init__(self, filetype: str, filename: str, skp_row: int = 0, head_new: bool = False):\n \"\"\" выбор директории и формирование списка файлов\"\"\"\n self.filetype = (\".\" + filetype).lower()\n self.filename = filename.lower()\n self.skp_row = skp_row\n self.head_new = head_new\n dirname = filedialog.askdirectory(initialdir=\"d:\\\\OneDrive\\\\Рабочие документы\\\\Эквайринг\\\\Альфа\", title=f\"Выбор кталога с файлами {filetype}\").replace(\"/\", chr(92)) # initialdir=os.getcwd()\n self.dirname = dirname\n all_dir = os.listdir(dirname)\n filename = self.filename + \".xlsx\"\n if self.filetype == \".xlsx\" and filename in all_dir:\n os.remove(dirname + chr(92) + filename)\n print(f\"file {filename} deleted\")\n all_dir.remove(filename)\n self.filesnames = [dirname + chr(92) + f for f in all_dir if os.path.isfile(dirname + chr(92) + f) and f[-len(self.filetype):].lower() == self.filetype.lower()]\n self.tables_in_one()\n\n\n def read_one_e_file(self, file_name):\n \"\"\" читает файл ексель (первую страницу) и возвращает датафрейм \"\"\"\n if self.filetype in [\".xls\", \".xlsx\", \".xlsm\", \".xlsb\", \".odf\", \".ods\", \".odt\"]:\n return pd.read_excel(pd.ExcelFile(file_name))\n elif self.filetype == \".csv\":\n return pd.read_csv(file_name, sep=\";\", encoding=\"cp1251\", encoding_errors=\"replace\", skiprows=(None if self.skp_row==0 else self.skp_row),\n header=(None if self.head_new else \"infer\"), names=(self.table_header if self.head_new else None),\n dtype=({'Код авторизации': 'str'} if self.head_new else None ))\n\n def tables_in_one(self):\n \"\"\"таблицы в одну из списка файлов\"\"\"\n for file_name in self.filesnames:\n self.all_data = pd.concat([self.all_data, self.read_one_e_file(file_name)], ignore_index=True)\n print(f\"считали файл: {file_name}\")\n\n def wrile_data(self):\n \"\"\"записывает собранную таблицу в один файл Excel\"\"\"\n self.all_data.to_excel(self.dirname + chr(92) + self.filename + \".xlsx\", sheet_name=\"Sheet0\", index=False)\n\nclass Access_baza():\n def __init__(self):\n \"\"\" получение из файла access табличку по терминалам и магазинам\"\"\"\n fn = \"D:\\\\Работа\\\\baza\\\\kosmbase.mdb\"\n conn_str = \"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)}; DBQ=\" + fn + \";\"\n cnxn = 
pyodbc.connect(conn_str)\n crsr = cnxn.cursor()\n sql = \"\"\"SELECT terminals.posnumber, Magazin.magname, Magazin.nomer, Kompanii.Namesm, Kompanii.INN\n FROM Kompanii INNER JOIN (Magazin INNER JOIN terminals ON Magazin.magkey = terminals.Магазин) ON Kompanii.orgkey = Magazin.magkomp\n ORDER BY Magazin.magname;\"\"\"\n crsr.execute(sql)\n self.datatable = pd.DataFrame(list(map(list, crsr.fetchall())), columns=[name[0] for name in crsr.description])\n crsr.close()\n cnxn.close()\n\n# def vopros(info: str = \" \"):\n# process = input(info + \" 1 - да, 2 пропустить :\")\n# if process == \"1\":\n# return True\n# else:\n# return False\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n pos = Access_baza()\n full_data = pd.DataFrame()\n\n print(\"собираем файлы CSV эквайринг ***************\")\n eqv = files_to_one(\"csv\", \"All_month_eqv\", 6, True)\n eqv.all_data = eqv.all_data[eqv.all_data[\"Дата\"].notnull()]\n eqv.all_data = eqv.all_data.fillna(\"\")\n eqv.all_data[\"Дата\"] = pd.to_datetime(eqv.all_data[\"Дата\"], format='%d.%m.%Y')\n eqv.all_data = eqv.all_data[(eqv.all_data[\"Дата\"] >= datefrom) & (eqv.all_data[\"Дата\"] <= dateto)]\n eqv.all_data[[\"Номер терминала\", \"ID оплаты\", \"Референс операции\", \"Банк\", \"Валюта\"]] = eqv.all_data[[\"Номер терминала\", \"ID оплаты\", \"Референс операции\", \"Банк\", \"Валюта\"]].astype(\"int64\").astype(\"str\")\n eqv.all_data = eqv.all_data.drop_duplicates(subset=['Дата', 'Время', 'Карта или счет', 'Номер терминала', \"Код авторизации\",'Сумма транзакции'])\n eqv.all_data[\"Система\"] = \"эквайринг\"\n print(\"**************** эквайринг записываем общий файл\")\n eqv.wrile_data()\n full_data = pd.concat([full_data, eqv.all_data], ignore_index=True)\n print(\"**************** эквайринг собрали\")\n\n print(\"собираем файлы CSV системы быстрых платежей **************\")\n sbp = files_to_one(\"csv\", \"All_month_sbp\")\n sbp.all_data = sbp.all_data.fillna(\"\")\n new_header = ['Дата', 'Время', 'ID СБП', 'ID QR', 'Наименование ЮЛ', 'ID ТСП', 'Карта или счет', 'Тип QR',\n 'Банк', 'Сумма транзакции', 'Тип операции', 'Номер терминала', 'Комиссия банка', 'Сумма перевода',\n 'Валюта', 'Назначение платежа', 'Имя магазина', 'ID магазина', 'ID оплаты', 'Референс операции']\n sbp.all_data.columns = new_header\n sbp.all_data[\"Дата\"] = pd.to_datetime(sbp.all_data[\"Дата\"], format='%d.%m.%Y')\n sbp.all_data = sbp.all_data[(sbp.all_data[\"Дата\"] >= datefrom) & (sbp.all_data[\"Дата\"] <= dateto)]\n sbp.all_data[[\"Номер терминала\", \"Банк\", \"ID оплаты\"]] = sbp.all_data[[\"Номер терминала\", \"Банк\", \"ID оплаты\"]].astype(\"str\")\n sbp.all_data[\"Система\"] = \"СБП\"\n sbp.all_data = sbp.all_data.drop_duplicates(subset=['Дата', 'Время', 'ID СБП', 'ID QR', 'Карта или счет', 'Номер терминала', 'Сумма транзакции'])\n print(\"********************************* СБП записываем общий файл\")\n sbp.wrile_data()\n full_data = pd.concat([full_data, sbp.all_data], ignore_index=True)\n print(\"********************************* СБП собрали\")\n\n # не забыть удалить объединенный файл из каталога, если он уже есть\n\n print(\"собираем файлы Excel выгрузки из БК за месяц *************************\")\n exl = files_to_one(\"xlsx\", \"All_BK_month\")\n exl.all_data = exl.all_data.fillna(\"\")\n tmp_data = pd.to_datetime(exl.all_data[\"Дата\"], format='%d/%m/%Y %H:%M:%S')\n exl.all_data[\"Дата\"] = pd.to_datetime(tmp_data.dt.date)\n exl.all_data[\"Время\"] = tmp_data.dt.time\n exl.all_data = 
exl.all_data[(exl.all_data[\"Дата\"] >= datefrom) & (exl.all_data[\"Дата\"] <= dateto)]\n new_header = [\"Дата\", \"Платежная система\", \"Карта или счет\", \"Имя магазина\", \"Номер терминала\", \"Банк\",\n \"Код авторизации\", \"Тип операции\", \"Комиссия банка\", \"Валюта комиссии\", \"Сумма транзакции\",\n \"Валюта\", \"Статус\", \"Время\"]\n exl.all_data.columns = new_header\n exl.all_data.drop([\"Валюта комиссии\", \"Статус\"], axis= 1, inplace=True )\n new_header = [\"Дата\", \"Время\", \"Платежная система\", \"Карта или счет\", \"Имя магазина\", \"Номер терминала\", \"Банк\",\n \"Код авторизации\", \"Тип операции\", \"Комиссия банка\", \"Сумма транзакции\", \"Валюта\"]\n exl.all_data = exl.all_data.reindex(columns=new_header)\n exl.all_data[\"Система\"] = \"эквайринг\"\n exl.all_data[[\"Номер терминала\", \"Код авторизации\"]] = exl.all_data[[\"Номер терминала\", \"Код авторизации\"]].astype(\"str\")\n exl.all_data[\"Комиссия банка\"] = exl.all_data[\"Комиссия банка\"] * (-1)\n\n def chg_time(time_wrong: str):\n time_delt = 3\n time_wrong = time_wrong.strftime(\"%H:%M:%S\")\n time_new = int(time_wrong[:2]) + time_delt\n if time_new >= 24:\n raise (\"ошибка конвертации времени\")\n return f\"{time_new:02}{time_wrong[-6:]}\"\n\n exl.all_data['Время'] = exl.all_data['Время'].apply(chg_time)\n print(\"**************************************** excel записываем общий файл\")\n exl.wrile_data()\n print(\"**************************************** excel собрали\")\n\n def chg_operation(per: str):\n per_up = per.upper()\n if per_up == \"КРЕДИТ\":\n return \"Покупка\"\n elif per_up == \"ДЕБЕТ\":\n return \"Возврат\"\n else:\n return per\n\n print(\"Обрабатываем все операции ********************************\")\n full_data = pd.concat([full_data, exl.all_data], ignore_index=True)\n # проверка списка терминалов, чтобы все из отчета были в таблице из базы\n term_baza = pos.datatable[\"posnumber\"].unique() # получаем список терминалов из базы\n for term_otchet in full_data[\"Номер терминала\"].unique(): # ищем терминалы из отчета в списке терминалов в базе\n if term_otchet not in term_baza:\n print(f\"************************ незарегистрированный терминал №{term_otchet} . 
Требуется сначала внести его в базу\")\n exit(\"найден неопознанный терминал\")\n full_data['Тип операции'] = full_data['Тип операции'].apply(chg_operation)\n\n db = sqlite3.connect(\"c:\\\\Vrem\\\\Python10\\\\bd_bin_code.db\")\n cur = db.cursor()\n\n\n def convert_bin(val):\n if type(val) != str:\n return \"Другой\"\n bin_code = val[0:6]\n sql_str = f\"SELECT * FROM bincode WHERE BIN = '{bin_code}';\"\n cur.execute(sql_str)\n rezult = cur.fetchall()\n col_name = [el[0] for el in cur.description]\n if len(rezult) == 1:\n rez = dict(zip(col_name, rezult[0]))\n if rez['Банк-эмитент'] != \"\":\n return rez['Банк-эмитент']\n else:\n return \"Другой\"\n else:\n return \"Другой\"\n\n\n def convert_bic_code(val):\n if type(val) != str:\n return \"Другой\"\n bic_code = val\n sql_str = f\"SELECT * FROM bic_code WHERE BIC LIKE '%{bic_code}';\"\n cur.execute(sql_str)\n rezult = cur.fetchall()\n col_name = [el[0] for el in cur.description]\n if len(rezult) == 1:\n rez = dict(zip(col_name, rezult[0]))\n if rez['Name_org'] != \"\":\n return rez['Name_org']\n else:\n return \"Другой\"\n else:\n return \"Другой\"\n\n\n full_data.insert(full_data.columns.get_loc('Банк') + 1, 'Наименование банка', \"\")\n count = 1\n flag = False\n rez = \"\"\n new_bin = []\n bin_not_found = []\n chk_calc = []\n for i in range(0, len(full_data)):\n # Печать счетчика\n procent = int(i/len(full_data)*100)\n if procent >= count:\n if flag:\n print(\"\\b\" * len(rez), end=\"\", flush=True)\n rez = f\"Обрабатываем операции: {procent}%\"\n print(rez, end =\"\")\n count += 1\n flag = True\n\n if full_data.loc[i, 'Тип операции'] == \"Возврат\" and full_data.loc[i, 'Сумма транзакции'] > 0:\n full_data.loc[i, 'Сумма транзакции'] = full_data.loc[i, 'Сумма транзакции'] * -1\n if full_data.loc[i, \"Система\"] == \"СБП\":\n full_data.loc[i, \"Сумма перевода\"] = full_data.loc[i, \"Сумма перевода\"] * -1\n if (pd.isna(full_data.loc[i, \"Код авторизации\"]) or full_data.loc[i, \"Код авторизации\"] == \"\") and full_data.loc[i, \"Система\"] == \"эквайринг\":\n full_data.loc[i, \"Код авторизации\"] = \"000000\"\n if full_data.loc[i, \"Комиссия банка\"] > 0:\n full_data.loc[i, 'Сумма транзакции'] = full_data.loc[i, 'Сумма транзакции'] * -1\n if pd.isna(full_data.loc[i, \"Сумма перевода\"]):\n full_data.loc[i, \"Сумма перевода\"] = full_data.loc[i, 'Сумма транзакции'] + full_data.loc[i, 'Комиссия банка']\n if len(full_data.loc[i, \"Тип операции\"]) == \"Покупка\":\n if full_data.loc[i, \"Система\"] == \"СБП\" and round(full_data.loc[i, \"Сумма транзакции\"] * 0.007, 2) != abs(\n full_data.loc[i, \"Комиссия банка\"]):\n chk_calc.append(\n f'{full_data.loc[i, \"Дата\"]}, {full_data.loc[i, \"Время\"]}, {full_data.loc[i, \"Номер терминала\"]}, {full_data.loc[i, \"Сумма транзакции\"]}')\n elif round(full_data.loc[i, \"Сумма транзакции\"] * 0.012, 2) != abs(full_data.loc[i, \"Комиссия банка\"]):\n chk_calc.append(\n f'{full_data.loc[i, \"Дата\"]}, {full_data.loc[i, \"Время\"]}, {full_data.loc[i, \"Номер терминала\"]}, {full_data.loc[i, \"Сумма транзакции\"]}')\n if len(full_data.loc[i, \"Банк\"]) > 7 and full_data.loc[i, \"Система\"] == \"СБП\":\n full_data.loc[i, \"Наименование банка\"] = convert_bic_code(full_data.loc[i, \"Банк\"])\n else:\n full_data.loc[i, \"Наименование банка\"] = convert_bin(full_data.loc[i, \"Карта или счет\"])\n if full_data.loc[i, \"Наименование банка\"].upper() == \"ДРУГОЙ\":\n if full_data.loc[i, \"Банк\"] == \"0\":\n sql_str = f'INSERT OR REPLACE INTO bincode (\"BIN\", \"Платежная система\", \"Страна\", \"Банк-эмитент\", 
\"Адрес сайта банка\")' \\\n f' VALUES (\"{full_data.loc[i, \"Карта или счет\"][0:6]}\", \"{full_data.loc[i, \"Платежная система\"].split(\" \")[0]}\",' \\\n f' \"{\"Россия\"}\", \"{\"Alfa-Bank, Альфа-Банк\"}\", \"{\"alfabank.ru\"}\");'\n cur.execute(sql_str)\n db.commit()\n new_bin.append(f'добавили бин {full_data.loc[i, \"Карта или счет\"][0:6]} Альфабанка')\n full_data.loc[i, \"Наименование банка\"] = convert_bin(full_data.loc[i, \"Карта или счет\"])\n else:\n if full_data.loc[i, 'Карта или счет'][0:6] not in bin_not_found:\n url = \"https://bin-ip-checker.p.rapidapi.com/\"\n querystring = {\"bin\": f\"{full_data.loc[i, 'Карта или счет'][0:6]}\"}\n payload = {\"bin\": f\"{full_data.loc[i, 'Карта или счет'][0:6]}\"}\n headers = {\"content-type\": \"application/json\",\n \"X-RapidAPI-Key\": \"\",\n \"X-RapidAPI-Host\": \"bin-ip-checker.p.rapidapi.com\"}\n response = requests.request(\"POST\", url, json=payload, headers=headers, params=querystring)\n if response.status_code != 404:\n answ_text = response.json()\n if answ_text[\"success\"] and answ_text[\"code\"] == 200:\n if answ_text[\"BIN\"][\"valid\"] and answ_text[\"BIN\"][\"issuer\"][\"name\"] != \"\":\n sql_str = f'INSERT OR REPLACE INTO bincode (\"BIN\", \"Платежная система\", \"Страна\", \"Банк-эмитент\", \"Тип карты\", \"Категория карты\", \"Адрес сайта банка\")' \\\n f' VALUES (\"{answ_text[\"BIN\"][\"number\"]}\", \"{full_data.loc[i, \"Платежная система\"].split(\" \")[0]}\",' \\\n f' \"{answ_text[\"BIN\"][\"country\"][\"country\"]}\", \"{answ_text[\"BIN\"][\"issuer\"][\"name\"]}\",' \\\n f' \"{answ_text[\"BIN\"][\"type\"]}\", \"{answ_text[\"BIN\"][\"level\"]}\", \"{answ_text[\"BIN\"][\"issuer\"][\"website\"]}\");'\n cur.execute(sql_str)\n db.commit()\n new_bin.append(\n f'добавили бин {full_data.loc[i, \"Карта или счет\"][0:6]} , банк {answ_text[\"BIN\"][\"issuer\"][\"name\"]} система найдена {answ_text[\"BIN\"][\"brand\"]} система добавлена {full_data.loc[i, \"Платежная система\"].split(\" \")[0]} поиск {len(new_bin)+1}')\n full_data.loc[i, \"Наименование банка\"] = convert_bin(full_data.loc[i, \"Карта или счет\"])\n else:\n new_bin.append(f'бин {full_data.loc[i, \"Карта или счет\"][0:6]} не найден, поиск {len(new_bin)+1}')\n bin_not_found.append(full_data.loc[i, \"Карта или счет\"][0:6])\n else:\n bin_not_found.append(full_data.loc[i, \"Карта или счет\"][0:6])\n\n # тут можно еще разных обработок наделать, например банка\n\n print(\"\\b\" * len(rez), end=\"\", flush=True)\n print(\"Обработано 100% операций\")\n print(\"произведен поиск следующих бинов:\")\n print(*new_bin, sep=\"\\n\")\n print(\"ошибки начисления комиссии:\")\n if len(chk_calc) == 0:\n print(\"не найдены\")\n else:\n print(*chk_calc, sep=\"\\n\")\n\n full_data = full_data.drop_duplicates(subset=['Дата', 'Время', 'Карта или счет', 'Номер терминала', \"Код авторизации\",'Сумма транзакции'])\n full_data = full_data.merge(pos.datatable, how=\"left\", left_on=\"Номер терминала\", right_on=\"posnumber\", suffixes=('_alfa', '_pos'))\n full_data.drop([\"Имя магазина\", \"posnumber\", \"Наименование ЮЛ\"], axis=1, inplace=True)\n full_data.rename(columns={\"magname\": \"Магазин\", \"nomer\": \"Номер магазина\", \"Namesm\": \"Юрлицо\"}, inplace=True)\n full_data.sort_values(by=[\"Юрлицо\", \"Номер магазина\", \"Номер терминала\", 'Дата', 'Время'], inplace=True)\n full_data[\"Номер магазина\"] = full_data[\"Номер магазина\"].astype(\"str\")\n\n def okrugl(a):\n return round(a,2)\n\n # загружаем данные банка по операциям\n print(\"Загрузка данных из банка 
********************************\")\n filename = filedialog.askopenfilename(initialdir=\"d:\\\\OneDrive\\\\Рабочие документы\\\\Выписки Альфа\", title=\"Выбрать файл с выписками из банка\")\n bank_operation = pd.read_excel(pd.ExcelFile(filename), \"Вся выписка\")\n bank_operation = bank_operation[[\"СекцияДокумент\", \"Номер\", \"Дата\", \"Сумма\", \"Плательщик\", \"Получатель\", \"НазначениеПлатежа\"]]\n list_inn = list(map(str, full_data[\"INN\"].unique()))\n bank_operation = bank_operation[bank_operation['НазначениеПлатежа'].str.contains('|'.join(list_inn))]\n bank_operation = bank_operation[(bank_operation[\"Дата\"] >= datefrom) & (bank_operation[\"Дата\"] <= (dateto + datetime.timedelta(days=1)))]\n bank_operation[\"Найдено\"] = False\n\n # собираем эквайринг по дням\n print(\"Собираем и проверяем суммы по дням ********************************\")\n data_by_day = full_data[[\"INN\", \"Юрлицо\", \"Дата\", \"Система\", 'Сумма транзакции', 'Комиссия банка',\n 'Сумма перевода']].groupby([\"INN\", \"Юрлицо\", \"Дата\", \"Система\"], as_index=False).sum()\n data_by_day[\"Найдено\"] = False\n for i in range(0, len(data_by_day)):\n if data_by_day.loc[i, 'Система'] == \"эквайринг\":\n find_operation = bank_operation[(bank_operation[\"Дата\"] == (data_by_day.loc[i, \"Дата\"] + datetime.timedelta(days=1))) &\n (bank_operation[\"Сумма\"] == round(data_by_day.loc[i, \"Сумма перевода\"],2)) &\n (bank_operation['НазначениеПлатежа'].str.contains(data_by_day.loc[i, \"INN\"])) &\n (bank_operation[\"Найдено\"] == False)]\n if len(find_operation) == 1:\n data_by_day.loc[i, 'Найдено'] = True\n bank_operation.loc[find_operation.index[0], \"Найдено\"] = True\n\n # bank_operation.loc[(bank_operation[\"Дата\"] == (data_by_day.loc[i, \"Дата\"] + datetime.timedelta(days=1))) &\n # (bank_operation[\"Сумма\"] == round(data_by_day.loc[i, \"Сумма перевода\"],2)) &\n # (bank_operation['НазначениеПлатежа'].str.contains(data_by_day.loc[i, \"INN\"])) &\n # (bank_operation[\"Найдено\"] == False), \"Найдено\"] = True\n\n elif data_by_day.loc[i, 'Система'] == \"СБП\":\n find_operation = full_data[(full_data[\"Дата\"] == data_by_day.loc[i, \"Дата\"]) &\n (full_data[\"INN\"] == data_by_day.loc[i, \"INN\"]) &\n (full_data['Система'] == \"СБП\")]\n if len(find_operation) > 0:\n sum_by_day = 0\n for j in range(0, len(find_operation)):\n bank_operation.loc[bank_operation['НазначениеПлатежа'].str.contains(find_operation.iloc[j][\"Референс операции\"]), \"Найдено\"] = True\n sum_by_day += bank_operation.loc[bank_operation['НазначениеПлатежа'].str.contains(find_operation.iloc[j][\"Референс операции\"]), \"Сумма\"].sum()\n sum_by_day = round(sum_by_day, 2)\n if sum_by_day == round(data_by_day.loc[i, 'Сумма транзакции'] - data_by_day.loc[i, 'Комиссия банка'], 2):\n data_by_day.loc[i, 'Найдено'] = True\n\n # собираем комиссию для Филиппа\n print(\"Собираем комиссию Филиппу ********************************\")\n mag_komiss = pd.pivot_table(full_data, index=['Магазин'], columns=[\"Система\"], values=['Комиссия банка'],\n aggfunc=sum) # margins=True\n mag_komiss = mag_komiss * -1\n\n print(\"Собираем платежи по банкам ********************************\")\n banks_data = pd.pivot_table(full_data, index=['Наименование банка'], values=['Сумма транзакции'], aggfunc=[sum, len]) # margins=True\n banks_data = banks_data.sort_values(by=[('sum', 'Сумма транзакции')], ascending=False)\n banks_data[\"% по сумме\"] = round(banks_data[\"sum\"] / banks_data[\"sum\"].sum(), 4)\n banks_data[\"% по количеству\"] = round(banks_data[\"len\"] / 
banks_data[\"len\"].sum(), 4)\n banks_data.loc['Всего'] = banks_data.sum()\n banks_data.rename(columns={'sum': 'Сумма операций', 'len': 'Количество операций'}, inplace=True)\n cols = list(banks_data.columns.values)\n cols = [cols[0], cols[2], cols[1], cols[3]]\n banks_data = banks_data[cols]\n new_header = [el[0] for el in cols]\n\n\n # записываем результат в файл\n print(\"Запись результирующего файла ********************************\")\n # writer = pd.ExcelWriter(\"\\\\\".join(sbp.dirname.split(\"\\\\\")[:-1]) + chr(92) + \"full_data.xlsx\", engine='xlsxwriter')\n # full_data.to_excel(writer, sheet_name='Sheet0', index=False)\n # data_by_day.to_excel(writer, sheet_name='По дням', index=False)\n # mag_komiss.to_excel(writer, sheet_name='Для Филиппа')\n # bank_operation.to_excel(writer, sheet_name=\"Банк\", index=False)\n # # df3.to_excel(writer, sheet_name='Sheetc')\n # writer.save()\n\n writer = pd.ExcelWriter(\"\\\\\".join(sbp.dirname.split(\"\\\\\")[:-1]) + chr(92) + \"full_data.xlsx\", engine='xlsxwriter')\n with writer as file_name:\n full_data.to_excel(file_name, sheet_name=\"Sheet0\", index=False)\n # banks_data.to_excel(file_name, sheet_name=\"banks\")\n # Convert the dataframe to an XlsxWriter Excel object.\n data_by_day.to_excel(file_name, sheet_name='По дням проверка', index=False)\n mag_komiss.to_excel(file_name, sheet_name='Для Филиппа')\n bank_operation.to_excel(file_name, sheet_name=\"Банк проверка\", index=False)\n banks_data.to_excel(file_name, sheet_name=\"banks\", header=False)\n # Get the xlsxwriter objects from the dataframe writer object.\n workbook = writer.book\n worksheet = writer.sheets[\"banks\"]\n # Add some cell formats.\n format1 = workbook.add_format({'num_format': '#,##0'})\n format2 = workbook.add_format({'num_format': '0%'})\n header_format = workbook.add_format({\n 'bold': True,\n 'text_wrap': True,\n 'valign': 'top',\n 'fg_color': '#D7E4BC',\n 'border': 1})\n worksheet.set_row(0, 30, header_format)\n worksheet.write_row('B1', new_header)\n # Set the column width and format.\n worksheet.set_column('B:B', 15, format1)\n worksheet.set_column('C:C', 10, format2)\n worksheet.set_column('D:D', 15, format1)\n worksheet.set_column('E:E', 10, format2)\n\n\n\n\n db.close()\n\n # full_data.to_excel(\"\\\\\".join(sbp.dirname.split(\"\\\\\")[:-1]) + chr(92) + \"full_data.xlsx\", sheet_name=\"Sheet0\", index=False)\n\n\n","repo_name":"micrab001/firsttry","sub_path":"файлы в один.py","file_name":"файлы в один.py","file_ext":"py","file_size_in_byte":28786,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28720802389","text":"import os\nimport json\nfrom icgc import ICGC\n\nfrom flask import Flask, send_file, render_template, request, jsonify\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef main():\n return render_template('index.html', icgc_ssm_projects=ICGC.get_ssm_projects())\n\n@app.route(\"/dataset-select\", methods=['POST'])\ndef dataset_select():\n form_data = request.get_json(force=True)\n dataset_id = form_data['dataset_id']\n dataset_filename = ICGC.download_dataset(dataset_id)\n ICGC.deconstruct_sigs(dataset_filename)\n return jsonify({\"success\": True})\n\n# Everything not declared before (not a Flask route / API endpoint)...\n@app.route('/')\ndef route_frontend(path):\n # ...could be a static file needed by the front end that\n # doesn't use the `static` path (like in `