diff --git "a/1527.jsonl" "b/1527.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1527.jsonl"
@@ -0,0 +1,801 @@
+{"seq_id":"183297386","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport csv\nimport requests\n\nurl = \"https://exoplanets.nasa.gov/discovery/exoplanet-catalog/\"\n\nbrowser = webdriver.Chrome(\"chromedriver\")\n\nbrowser.get(url)\n\ntime.sleep(10)\n\n\ndef scrape_more_data(hyperlink):\n try:\n page = requests.get(hyperlink)\n soup = BeautifulSoup(page.content, 'html.parser')\n temp_list = []\n for tr_tag in soup.find_all(\"tr\", attrs={\"class\", \"fact_row\"}):\n td_tags = tr_tag.find_all(\"td\")\n for td_tag in td_tags:\n try:\n temp_list.append(td_tag.find_all(\n \"div\", attrs={\"class\", \"value\"})[0].contents[0])\n except:\n temp_list.append(\"\")\n except:\n time.sleep(1)\n scrape_more_data(hyperlink)\n new_planet_data.append(temp_list)\n\n\nheaders = [\"Name\", \"Light Years from Earth\", \"Planet Mass\", \"Stellar Magnitude\", \"Discovery Date\",\n \"Hyperlink\", \"Mass\", \"Planet Radius\", \"Orbital Radius\", \"Orbital Period\", \"Eccentricity\", \"Detection Method\"]\nplanet_data = []\nnew_planet_data = []\nfinal_planet_data = []\n\n\ndef scrape():\n for i in range(1, 437):\n while True:\n time.sleep(2)\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\n current_page_number = int(soup.find_all(\n \"input\", attrs={\"class\", \"page_num\"})[0].get(\"value\"))\n if current_page_number < i:\n browser.find_element_by_xpath(\n \"//*[@id=\\\"primary_column\\\"]/footer/div/div/div/nav/span[2]/a\").click()\n elif current_page_number > i:\n browser.find_element_by_xpath(\n \"//*[@id=\\\"primary_column\\\"]/footer/div/div/div/nav/span[1]/a\").click()\n else:\n break\n for ul_tag in soup.find_all(\"ul\", attrs={\"class\", \"exoplanet\"}):\n li_tags = ul_tag.find_all(\"li\")\n temp_list = []\n for index, li_tag in enumerate(li_tags):\n if index == 0:\n temp_list.append(li_tag.find_all(\"a\")[0].contents[0])\n else:\n try:\n temp_list.append(li_tag.contents[0])\n except:\n temp_list.append(\"\")\n hyperlink_tag = li_tags[0]\n temp_list.append(\"https://exoplanets.nasa.gov\" +\n hyperlink_tag.find_all('a', href=True)[0][\"href\"])\n planet_data.append(temp_list)\n browser.find_element_by_xpath(\n \"//*[@id=\\\"primary_column\\\"]/footer/div/div/div/nav/span[2]/a\").click()\n print(f'{i} pages done.')\n\n\nscrape()\nfor index, data in enumerate(planet_data):\n scrape_more_data(data[5])\n print(f\"{index+1} page done\")\n\nfor index, data in enumerate(planet_data):\n new_planet_data_elt = new_planet_data[index]\n new_planet_data_elt = [elem.replace(\"\\n\", \"\")\n for elem in new_planet_data_elt]\n new_planet_data_elt = new_planet_data_elt[:7]\n final_planet_data.append(data + new_planet_data_elt)\n\nwith open(\"nasa.csv\", \"w\") as f:\n csvWriter = csv.writer(f)\n csvWriter.writerow(headers)\n csvWriter.writerows(final_planet_data)\n","sub_path":"scraper-2.py","file_name":"scraper-2.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"181872327","text":"\"\"\"\n COURSE: COMP 4140\n INSTRUCTOR: Michael Zapp\n ASSIGNMENT: Assignment 3, AES-128 Implementation\n AUTHOR: Matt Deutscher\n STUDENT #: 7727849\n USERID: deutschm\n\n NOTE: The mix columns function (and galois field multiplication helper\n fuction) is based on the Rijndael Mix Columns page on Wikipedia at\n https://en.wikipedia.org/wiki/Rijndael_mix_columns\n I translated the c# version into python...\n\n USEAGE: aes.py [plaintext_file_name] [key_file_name]\n\"\"\"\nimport argparse\nfrom copy import copy\n\ns_box = [\n 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,\n 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,\n 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,\n 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,\n 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,\n 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,\n 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,\n 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,\n 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,\n 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,\n 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,\n 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,\n 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,\n 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,\n 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,\n 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,\n 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,\n 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,\n 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,\n 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,\n 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,\n 0xb0, 0x54, 0xbb, 0x16\n ]\n\ninv_s_box = [\n 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e,\n 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,\n 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32,\n 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,\n 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49,\n 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,\n 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50,\n 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,\n 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05,\n 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,\n 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,\n 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,\n 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8,\n 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,\n 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b,\n 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,\n 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59,\n 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,\n 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 
0x3b, 0x4d,\n 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,\n 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63,\n 0x55, 0x21, 0x0c, 0x7d\n ]\n\n# pre-calculated, only need the first 11 for a 128-bit key\nr_con = [0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36]\n\n\n# Transformation in the Cipher and Inverse Cipher in which a Round Key is added\n# to the State using an XOR operation. The length of a Round Key equals the size\n# of the State (ie, for Nb = 4, the Round Key length equals 128 bits/16 bytes).\ndef add_round_key(state_array, round_key):\n for index in range(len(state_array)):\n state_array[index] = (hex(int(state_array[index], 16) ^ int(\n round_key[index], 16)))[2:].zfill(2)\n\n\n# Transformation in the Cipher that takes all of the columns of the State and\n# mixes their data (independently of one another) to produce new columns.\ndef mix_columns(state_array):\n temp_array = copy(state_array)\n\n # each of these loops cover an index in their corresponding column\n for index in range(0, len(state_array), 4):\n state_array[index] = hex(galois_multi(int(temp_array[index], 16), 2) ^ \\\n galois_multi(int(temp_array[index + 1], 16), 3) ^ \\\n int(temp_array[index + 2], 16) ^ \\\n int(temp_array[index + 3], 16))[2:].zfill(2)\n\n for index in range(1, len(state_array), 4):\n state_array[index] = hex(int(temp_array[index - 1], 16) ^ \\\n galois_multi(int(temp_array[index], 16), 2) ^ \\\n galois_multi(int(temp_array[index + 1], 16), 3) ^ \\\n int(temp_array[index + 2], 16))[2:].zfill(2)\n\n for index in range(2, len(state_array), 4):\n state_array[index] = hex(int(temp_array[index - 2], 16) ^ \\\n int(temp_array[index - 1], 16) ^ \\\n galois_multi(int(temp_array[index], 16), 2) ^ \\\n galois_multi(int(temp_array[index + 1], 16), 3))[2:].zfill(2)\n\n for index in range(3, len(state_array), 4):\n state_array[index] = hex(galois_multi(int(temp_array[index - 3], 16), 3) ^ \\\n int(temp_array[index - 2], 16) ^ \\\n int(temp_array[index - 1], 16) ^ \\\n galois_multi(int(temp_array[index], 16), 2))[2:].zfill(2)\n\n\n# Transformation in the Inverse Cipher that is the inverse of mix_columns().\ndef inv_mix_columns(state_array):\n temp_array = copy(state_array)\n\n # each of these loops cover an index in their corresponding column\n for index in range(0, len(state_array), 4):\n state_array[index] = hex(galois_multi(int(temp_array[index], 16), 14) ^ \\\n galois_multi(int(temp_array[index + 1], 16), 11) ^ \\\n galois_multi(int(temp_array[index + 2], 16), 13) ^ \\\n galois_multi(int(temp_array[index + 3], 16), 9))[2:].zfill(2)\n\n for index in range(1, len(state_array), 4):\n state_array[index] = hex(galois_multi(int(temp_array[index - 1], 16), 9) ^ \\\n galois_multi(int(temp_array[index], 16), 14) ^ \\\n galois_multi(int(temp_array[index + 1], 16), 11) ^ \\\n galois_multi(int(temp_array[index + 2], 16), 13))[2:].zfill(2)\n\n for index in range(2, len(state_array), 4):\n state_array[index] = hex(galois_multi(int(temp_array[index - 2], 16), 13) ^ \\\n galois_multi(int(temp_array[index - 1], 16), 9) ^ \\\n galois_multi(int(temp_array[index], 16), 14) ^ \\\n galois_multi(int(temp_array[index + 1], 16), 11))[2:].zfill(2)\n\n for index in range(3, len(state_array), 4):\n state_array[index] = hex(galois_multi(int(temp_array[index - 3], 16), 11) ^ \\\n galois_multi(int(temp_array[index - 2], 16), 13) ^ \\\n galois_multi(int(temp_array[index - 1], 16), 9) ^ \\\n galois_multi(int(temp_array[index], 16), 14))[2:].zfill(2)\n\n\ndef 
galois_multi(a, b):\n p = 0\n hi_bit_set = 0\n\n for index in range(8):\n if b & 1 != 0:\n p ^= a\n\n hi_bit_set = a & 0x80\n a <<= 1\n\n if hi_bit_set != 0:\n a ^= 0x1b\n\n b >>= 1\n\n return p % 256\n\n\n# Transformation in the Cipher that processes the State by cyclically shifting\n# the last three rows of the State by different offsets.\ndef shift_rows(state_array):\n word = [0, 0, 0, 0]\n\n for row in range(1, 4):\n for index in range(4):\n word[index] = state_array[row + index * 4]\n\n word = rot_word(word, row)\n\n for index in range(4):\n state_array[row + index * 4] = word[index]\n\n\n# Transformation in the Inverse Cipher that is the inverse of shift_rows().\ndef inv_shift_rows(state_array):\n word = [0, 0, 0, 0]\n\n for row in range(1, 4):\n for index in range(4):\n word[index] = state_array[row + index * 4]\n\n word = rot_word(word, -row)\n\n for index in range(4):\n state_array[row + index * 4] = word[index]\n\n\n# Transformation in the Cipher that processes the State using a nonlinear byte\n# substitution table (S-box) that operates on each of the State bytes\n# independently.\ndef sub_bytes(state_array):\n for index in range(len(state_array)):\n state_array[index] = \\\n hex(s_box[int(state_array[index], 16)])[2:].zfill(2)\n\n\n# Transformation in the Inverse Cipher that is the inverse of sub_bytes().\ndef inv_sub_bytes(state_array):\n for index in range(len(state_array)):\n state_array[index] = \\\n hex(inv_s_box[int(state_array[index], 16)])[2:].zfill(2)\n\n\n# Function used in the Key Expansion routine that takes a four-byte word and\n# performs a cyclic permutation. Shifts the word n bytes to the right,\n# negative values shift n bytes to the left.\ndef rot_word(word, shift_amount):\n return word[shift_amount:] + word[0:shift_amount]\n\n\n# Function used in the Key Expansion routine that takes a four-byte input word\n# and applies an S-box to each of the four bytes to produce an output word.\ndef sub_word(word):\n new_word = []\n\n for byte in word:\n index = int(byte, 16)\n new_word.append(hex(s_box[index])[2:].zfill(2))\n\n return new_word\n\n\ndef cipher(state_array, expanded_key):\n number_of_rounds = 10\n aes_round = 0\n print_state_array(state_array, aes_round, 'Plaintext')\n\n # first round\n round_key = expanded_key[(aes_round * 16):(aes_round * 16) + 16]\n add_round_key(state_array, round_key)\n aes_round += 1\n print_state_array(state_array, 1)\n\n for a_round in range(1, number_of_rounds):\n sub_bytes(state_array)\n shift_rows(state_array)\n mix_columns(state_array)\n round_key = expanded_key[(aes_round * 16):(aes_round * 16) + 16]\n add_round_key(state_array, round_key)\n aes_round += 1\n print_state_array(state_array, aes_round)\n\n # last round\n sub_bytes(state_array)\n shift_rows(state_array)\n round_key = expanded_key[(aes_round * 16):(aes_round * 16) + 16]\n add_round_key(state_array, round_key)\n\n return state_array\n\n\ndef inv_cipher(state_array, expanded_key):\n number_of_rounds = 10\n aes_round = 10\n print_state_array(state_array, 0, 'Ciphertext')\n\n # first round (or last, depending on point of view)\n round_key = expanded_key[(aes_round * 16):(aes_round * 16) + 16]\n add_round_key(state_array, round_key)\n aes_round -= 1\n\n for a_round in range(1, number_of_rounds):\n inv_shift_rows(state_array)\n inv_sub_bytes(state_array)\n print_state_array(state_array, aes_round)\n round_key = expanded_key[(aes_round * 16):(aes_round * 16) + 16]\n add_round_key(state_array, round_key)\n inv_mix_columns(state_array)\n aes_round -= 1\n\n # last round (or 
first)\n inv_shift_rows(state_array)\n inv_sub_bytes(state_array)\n round_key = expanded_key[(aes_round * 16):(aes_round * 16) + 16]\n aes_round -= 1\n print_state_array(state_array, aes_round)\n add_round_key(state_array, round_key)\n\n return state_array\n\n\n# The Key Expansion routine takes the cipher key and performs the expansion\n# routine described in the pseudo code in figure 11 of teh text, to generate\n# the key schedule. It generates Nb(Nr+1) words, 44 for a 128-bit key.\ndef key_expansion(key):\n expanded_key = []\n byte_count = len(key)\n r_con_index = 1\n i = 0\n temp = [0, 0, 0, 0]\n w = [0, 0, 0, 0]\n\n # copy over first 16 bytes of key\n for index in range(len(key)):\n expanded_key.append(key[index])\n\n for index in range(4):\n w[index] = expanded_key[(byte_count - 4) + index]\n\n # 160 comes from Nb(Nr + 1) = 4(10 + 1) minus the original key\n while i < 160:\n for index in range(4):\n temp[index] = w[index]\n\n if i % 16 == 0:\n temp = rot_word(temp, 1)\n temp = sub_word(temp)\n temp[0] = int(temp[0], 16) ^ r_con[r_con_index]\n temp[0] = hex(temp[0])[2:].zfill(2)\n r_con_index += 1\n\n for index in range(4):\n w[index] = expanded_key[i + index]\n\n for index in range(4):\n w[index] = hex((int(w[index], 16) ^ int(temp[index], 16)))[2:].zfill(2)\n expanded_key.append(w[index])\n i += 1\n\n byte_count += 1\n\n return expanded_key\n\n\ndef aes_encrypt(plaintext, key):\n expanded_key = key_expansion(key)\n print_key_schedule(expanded_key)\n\n print(\"\\n\\nENCRYPTION PROCESS\\n------------------\")\n ciphertext = cipher(plaintext, expanded_key)\n\n print_state_array(ciphertext, 11, 'Ciphertext')\n\n return ciphertext\n\n\ndef aes_decrypt(ciphertext, key):\n expanded_key = key_expansion(key)\n\n print(\"\\n\\nDECRYPTION PROCESS\\n------------------\")\n plaintext = inv_cipher(ciphertext, expanded_key)\n\n print_state_array(plaintext, 11, 'Plaintext')\n\n return plaintext\n\n\ndef print_key_schedule(expanded_key):\n print('\\nKey Schedule:')\n print_key = ''.join(expanded_key)\n print_key = [print_key[ind:ind + 8] for ind in range(0, len(print_key), 8)]\n print_key = ','.join(print_key)\n print_key = ''.join(\"\\n\" if i % 36 == 0 else char for i, char in\n enumerate(print_key, 1))\n print(print_key)\n\n\ndef print_state_array(state_array, round_number, text=''):\n if round_number is 0:\n print(text + ':')\n elif round_number is 10:\n print('\\n' + 'Last Round\\n----------')\n elif round_number is 11:\n print('\\n' + text + ':')\n elif round_number is -1:\n print('\\nRound 0\\n---------')\n else:\n print('\\nRound ' + str(round_number) + '\\n---------')\n print_state = ''.join(state_array)\n print_state = [print_state[ind:ind + 2] for ind in\n range(0, len(print_state), 2)]\n print_state = ' '.join(print_state)\n\n print(print_state)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='an AES implementation')\n parser.add_argument('plaintext_file_name', help='the file containing the plaintext to be encrypted')\n parser.add_argument('key_file_name', help='the file containing the key to be used')\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parse_args()\n\n print('Plaintext Filename: ' + args.plaintext_file_name)\n print('Key Filename: ' + args.key_file_name)\n\n with open(args.plaintext_file_name) as input_file:\n plaintext = input_file.read().split()\n\n with open(args.key_file_name) as input_file:\n key = input_file.read().split()\n\n ciphertext = aes_encrypt(plaintext, key)\n\n aes_decrypt(ciphertext, key)\n\n print('\\nEnd of 
Processing...bye')\n\n# ===========================================================================run\nif __name__ == '__main__':\n main()\n","sub_path":"aes.py","file_name":"aes.py","file_ext":"py","file_size_in_byte":15659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"486296687","text":"import math\nimport random\nimport time\n\n#Particle helper class\nclass Particle:\n def __init__(self,x0):\n self.n = len(x0)\n \n self.pos = x0 #current position\n self.pos_best = None #best position\n \n self.vel = [random.uniform(-1,1) for i in range(self.n)] #current velocity\n \n self.fval = math.inf #current function value\n self.fval_best = math.inf #best function value of particle\n \n #self.iters = 0\n\n #update current function value\n def set_fval(self,J):\n self.fval = J(self.pos)\n\n if self.fval < self.fval_best: #see if we have found a new best and update in required\n self.pos_best = self.pos\n self.fval_best = self.fval\n\n #update current velocity\n def update_velocity(self, pos_best_g):\n w = 0.7 #inertia for current velocity\n c1 = 2 #1.2 - 0.2 * self.iters/maxiter #cognative constant - factor for personal decision\n c2 = 2 #1.8 + 0.2 * self.iters/maxiter #social constant - factor for herd decision\n\n for i in range(self.n):\n vel_cognitive = c1 * random.random() * (self.pos_best[i] - self.pos[i]) #calculate the cognitive velocity\n vel_social = c2 * random.random() * (pos_best_g[i] - self.pos[i]) #calculate the social velocity\n self.vel[i]= w * self.vel[i] + vel_cognitive + vel_social #finally update the particle velocity\n \n #self.iters += 1\n\n # update current particle position\n def update_position(self,bounds):\n for i in range(self.n):\n self.pos[i] = self.pos[i] + self.vel[i] #next position = current position + velocity * (time = 1)\n\n # adjusting for bounds\n if self.pos[i] < bounds[i][0]:\n self.pos[i] = bounds[i][0]\n if self.pos[i] > bounds[i][1]:\n self.pos[i] = bounds[i][1]\n\n\n#Optimiser\ndef Particle_swarm(J,numdesign,bounds,num_particles,maxiter):\n fval_best_g = math.inf #best fval for group\n pos_best_g = None #best position for group\n\n swarm=[] #initialize\n for i in range(num_particles):\n x = [(bounds[i][0] + (bounds[i][1] - bounds[i][0])*random.random()) for i in range(numdesign)]\n swarm.append(Particle(x))\n\n for i in range(maxiter): #iterate\n for j in range(num_particles):\n swarm[j].set_fval(J)\n \n if swarm[j].fval < fval_best_g: #check for best global value\n pos_best_g = list(swarm[j].pos)\n fval_best_g = float(swarm[j].fval)\n\n # cycle through swarm and update velocities and position\n for j in range(0,num_particles):\n swarm[j].update_velocity(pos_best_g)\n swarm[j].update_position(bounds)\n\n xopt = pos_best_g\n fopt = fval_best_g\n # print(\"optimal point:\", [round(i,3) for i in pos_best_g]) #print best position\n # print(\"optimal function value:\", fval_best_g) #print objective function value at best position\n \n return xopt, fopt\n","sub_path":"code/particle_swarm_optimization.py","file_name":"particle_swarm_optimization.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"387790526","text":"from flask import Flask, render_template, request, redirect, url_for, send_from_directory, json, redirect\nfrom board import Board\nfrom view_model import ViewModel\nfrom todo_item import Status\nfrom datetime import date, datetime\n\ndef create_app(todo_list_id=0, doing_list_id=0, done_list_id=0):\n app = Flask(__name__)\n\n my_board = Board(todo_list_id, doing_list_id, done_list_id)\n\n def task_sorting_key(task):\n if (task.status == Status.DONE):\n return 1\n else:\n return 0\n\n #pylint: disable=unused-variable\n\n @app.route('/')\n def index():\n item_view_model = ViewModel(sorted(my_board.get_items(), key=task_sorting_key)) \n item_view_model.show_all_done_items = request.cookies.get('showAllDoneItems') == 'True'\n return render_template('index.html', view_model = item_view_model, today = date.today())\n\n @app.route('/', methods=['POST'])\n def add_todo():\n if request.form.get('due'):\n due_obj = datetime.strptime(request.form.get('due'), '%d/%m/%Y')\n else:\n due_obj = None\n my_board.add_item(request.form.get('title'), request.form.get('description'), due_obj)\n return redirect('/', code=303)\n\n @app.route('/tasks/', methods=['PATCH'])\n def update_todo(id):\n my_board.move_item(id, request.form.get('targetList'))\n return json.dumps({'success':True}), 200, {'Content-Type':'application/json'} \n\n @app.route('/tasks/', methods=['DELETE'])\n def remove_todo(id):\n my_board.remove_item(id)\n return json.dumps({'success':True}), 200, {'Content-Type':'application/json'}\n\n @app.route('/js/')\n def send_js(path):\n return send_from_directory('js', path)\n\n #pylint: enable=unused-variable\n \n return app\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"347065009","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport DB\nimport numpy as np\nimport Functions as fu\nimport datetime as dt\nimport Equity as eq\n\ntbl_cols=['Date' ,'SecurityId','FundId','Symbol','ISIN' ,'Name','NAV','Dividends','CorpAdj','DividendAdj','lnDeltaNAV', \n 'lnDeltaOSEBX','lnDeltaOSEFX','lnDeltaOBX','NAVAdj','OSEBX','OSEFX','OBX']\n\ntbl_name='mutualfunds'\ndb='OSEData'\n\n\ndef CreateTables():\n\tconn,crsr=DB.Connect(db)\n\tif True:\n\t\tDB.DropTable(tbl_name,conn,crsr,db)\n\t\tDB.createTable(tbl_name,conn,crsr)\n\tsl=DB.GetFundList(conn,crsr)\n\ti=0\n\n\tfor sid,ISIN,fid,symbol,name in sl:\t\n\t\ti+=1\n\t\tif not DB.IsDone('SecurityId',sid,crsr,tbl_name):\n\t\t\ttry:\n\t\t\t\tprint ( 'Getting adjustment for %s (%s)' %(name,sid))\n\t\t\texcept:\n\t\t\t\tprint ( 'Getting adjustment for (%s)' %(sid,))\n\t\t\tp,Cadj,Dadj,d,r=GetAdjustments(sid,ISIN,conn,crsr)\n\t\t\tif not p is None:\n\t\t\t\ttbl=MakeDataSets(p,Cadj,Dadj,d,r,sid,fid,ISIN,symbol,name)\n\t\t\t\tprint ( 'Appending to tables')\n\t\t\t\tDB.InsertTableIntoDB(conn,crsr,tbl_name,tbl_cols,tbl,db)\n\t\telse:\n\t\t\tprint ( ISIN+ \" done\")\n\t\t#if i>10000:\n\t\t#\tbreak\n\t#adjust_erronous(conn,crsr)\n\tconn.close()\n\tprint ( 'Done ... ')\n\ndef excel_date(d):\n\td=dt.datetime.strptime(d,'%Y-%m-%d')\n\ttemp = dt.datetime(1899, 12, 30)\n\tdelta = d - temp\n\treturn float(delta.days) + (float(delta.seconds) / 86400)\n\t\ndef MakeDataSets(p,Cadj,Dadj,d,r,sid,fid,ISIN,symbol,name):\n\tn=len(r)\n\tif len(r)!=len(p):\n\t\traise RuntimeError('price and query lenght do not match')\n\ttbl=[]\n\t\n\tprices0=np.zeros(4)\n\tfor i in range(n):\n\t\t(Date,DateY,DateM,DateD,NAV,OSEBX,OSEFX,OBX)=r[i]\n\t\tNAVAdj=p[i][0]*Dadj[i][0]*Cadj[i][0]\n\n\t\tprices1=eq.checkprices([NAVAdj,OSEBX,OSEFX,OBX],prices0)\t\n\t\t\n\t\tnons=np.array([i is None for i in prices1])\n\t\tprices1[nons]=0\n\t\tprices1=np.array(prices1,dtype=float)\n\n\t\tdeltap=np.log(prices1+(prices1==0))-np.log(prices0+(prices0==0))\n\t\tdeltap=np.array(deltap*(prices1!=0)*(prices0!=0),dtype=object)\n\t\tdeltap[nons]=None\n\n\t\tvariables=[Date,sid,fid,symbol,ISIN,name,p[i][0],d[i][0],Cadj[i][0],Dadj[i][0]]\n\t\t\n\t\tvariables.extend(deltap)\n\t\tvariables.extend(prices1)\n\t\t\n\t\ttbl.append(tuple(variables))\n\t\tprices0=prices1\n\treturn tbl\n\t\n\t\n\ndef GetAdjustments(SecID,ISIN,conn,crsr):\n\n\tdtp,p,r=DB.GetFundPrices(conn,crsr,SecID)\n\tif len(dtp)==0:\n\t\treturn None,None,None,None,None\n\tCadj=GetCorpAdj(SecID,dtp,p,conn,crsr)\n\tDadj,d=eq.GetDivAdj(SecID,ISIN,dtp,p,conn,crsr,1)\n\treturn p,Cadj,Dadj,d,r\n\ndef GetCorpAdj(SecID,dtp,p,conn,crsr):\n\t\"\"\"Returns a corporate action adjustment factor with the same dimesion as the price vector\"\"\"\n\tdta,a=DB.GetFundAdjFacts(conn,crsr,SecID)\n\tdta,a=eq.removeOutsideDates(dta,a,dtp)\n\tif len(a)==0:\n\t\treturn np.ones((len(dtp),1))\n\tdta,a=eq.AddEndStart(dta,a,dtp)\n\tcuma=eq.CumSum(a)\n\tadj=eq.IdentifyAdjustment(dtp,dta,cuma)\n\t#fu.SaveVar(adj)\n\treturn adj\n\n\ndef adjust_erronous(conn,crsr):\n\tr=get_erronous_obs(crsr)\n\terrs=dict()\n\tfor d,i in r:\n\t\tif i in errs:\n\t\t\terrs[i].append(d)\n\t\telse:\n\t\t\terrs[i]=[d]\n\tDB.DropTable(tbl_name+'2',conn,crsr,db)\n\tDB.CopyTable(conn,crsr,tbl_name,tbl_name+'2',db)\n\tcols=str(tbl_cols).replace('[','').replace(']','').replace(\"',\",'],').replace(\"'\",'[')\n\tcols=cols[:len(cols)-1]+']'\t\n\tfor i in errs.keys():\n\t\tf=DB.Fetch(\"\"\"SELECT %s\n\t\t\t FROM [OSEData].[dbo].[mutualfunds]\n\t\t\t where 
[SecurityId]=%s\n\t\t\t order by [Date]\"\"\" %(cols,i),crsr)\t\t\n\t\tprint ('correcting error for ' + str(i))\n\t\th=np.array(DB.Fetch(\"\"\"SELECT [NAVAdj],[CorpAdj],[lnDeltaNAV],[NAV]\n\t FROM [OSEData].[dbo].[mutualfunds]\n\t where [SecurityId]=%s\n\t order by [Date]\"\"\" %(i,),crsr))\t\n\t\tDB.deleterows_byfieldval('SecurityId',i,tbl_name+'2',db,conn,crsr)\n\t\tfor j in range(1,len(f)):\n\t\t\tif f[j][0] in errs[i]:\n\t\t\t\ta=h[j,0]/h[j-1,0]\n\t\t\t\th[0:j,1]=h[0:j,1]*a#CorpAdj\n\t\t\t\th[0:j,0]=h[0:j,0]*a#NAVAdj\n\t\th=np.array(h,dtype=float)\n\t\tNAVAdj=h[:,0]\n\t\tNAVAdj_sh=fu.ShiftArray(h[:,0],-1)\n\t\tlnDeltaNAV=(np.log(NAVAdj+(NAVAdj==0))-np.log(NAVAdj_sh+(NAVAdj_sh==0)))*(NAVAdj_sh!=0)*(NAVAdj!=0)\n\t\ttbl=[]\n\t\tfor j in range(len(f)):\n\t\t\ttbl.append(tuple(list(f[j][0:8])+[NAVAdj[j],f[j][9],h[j,1],f[j][11],lnDeltaNAV[j]]))\n\t\tDB.InsertTableIntoDB(conn,crsr,tbl_name+'2',tbl_cols,tbl,db)\n\t\t\t\n\t\t\n\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\n\n\ndef get_erronous_obs(crsr):\n\treturn DB.Fetch(\"\"\"select [Date],[SecurityId] from\n\t(\n\t\tselect distinct [Date],[ISIN],[SecurityId],[Name],[lnDeltaNAV] from [OSE].[dbo].[mutualfund] as U1\n\t\twhere abs([lnDeltaNAV])>0.5\n\tunion\n\t\tselect distinct [Date],[ISIN],[SecurityId],[Name],[lnDeltaNAV] from [OSE].[dbo].[mutualfund] as U2\n\t\twhere [Date] in\n\t\t(select [Date] from\n\t\t(select count(*) as n,[Date] from\n\t\t(SELECT distinct\n\t\t\t\t[Date]\n\t\t\t ,[Name]\n\t\t\t ,[lnDeltaNAV]\n\t\t FROM [OSEData].[dbo].[%s]\n\t\t WHERE [lnDeltaNAV]<-0.05 and abs([lnDeltaNAV])<=0.5) as T0\n\t\t group by [Date]) as T1\n\t\t where n=1) and [lnDeltaNAV]<-0.20 and abs([lnDeltaNAV])<=0.5\n\tunion\n\t\tselect distinct [Date],[ISIN],[SecurityId],[Name],[lnDeltaNAV] from [OSE].[dbo].[mutualfund] as U3\n\t\twhere [Date] in\n\t\t(select [Date] from\n\t\t(select count(*) as n,[Date] from\n\t\t(SELECT distinct\n\t\t\t\t[Date]\n\t\t\t ,[Name]\n\t\t\t ,[lnDeltaNAV]\n\t\t FROM [OSEData].[dbo].[%s]\n\t\t WHERE [lnDeltaNAV]>0.05 and abs([lnDeltaNAV])<=0.5) as T0\n\t\t group by [Date]) as T1\n\t\t where n=1) and [lnDeltaNAV]>0.20 and abs([lnDeltaNAV])<=0.5\n\t) as T0\n\torder by [Date],[SecurityId]\"\"\" %(tbl_name,tbl_name),\n\tcrsr)\n\n\t\n\t","sub_path":"Funds.py","file_name":"Funds.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"495566234","text":"import numpy as np\nimport math\na = math.pi / 6\nprint(\"The value of sine of pi / 6 is : \", end=\"\")\nprint(math.sin(a))\n\ndef pivalue(x):\n return(x*math.pi/180)\n\nl5 = 9\nl3 = 9\nfrom xlwt import Workbook\nwb = Workbook()\nsheet1 = wb.add_sheet('Sheet 1')\nsheet1.write(0, 0, 'Sr.no')\nsheet1.write(0, 1, 'Servo 2 angle')\nsheet1.write(0, 2, 'Servo 3 angle')\nsheet1.write(0, 3, 'VertDist Point 2')\nsheet1.write(0, 4, 'HorizDist Point2')\nsheet1.write(0, 5, 'Sigma')\n\nc = 1\namax = 180\nfor servo2 in range(45,amax ,2):\n for servo3 in range(0, amax, 2):\n if servo3 < servo2:\n if servo3 + servo2 < 180:\n a = servo2\n b = servo3\n\n Sigma = (180- a - b)/2\n Sigma = pivalue(Sigma)\n Theta = (a - b)/2\n Theta = pivalue(Theta)\n\n cos1 = math.cos(2*Theta)\n sin1 = math.sin(2 * Theta)\n cos2 = math.cos(Theta)\n sin2 = math.sin(Theta)\n\n l8 = l3*(sin1)\n l9 = (l8)/cos2\n\n\n VertDistPoint2 = round( (math.sin(Sigma) ) * l9 , 2)\n HorizDistPoint2 = round((math.cos(Sigma) ) * l9 , 2)\n\n #print(a , b, Theta, sin2,l8, l9,Sigma, VertDistPoint2, HorizDistPoint2)\n print(a , b , l9 , VertDistPoint2, HorizDistPoint2)\n sheet1.write(c, 0, a)\n sheet1.write(c, 1, b)\n sheet1.write(c, 2, l9)\n sheet1.write(c, 3, VertDistPoint2)\n sheet1.write(c, 4, HorizDistPoint2)\n #sheet1.write(c, 5, Sigma)\n c += 1\n\nwb.save('Values.xls')","sub_path":"Day 28 - Robotic arm-3/0 - FinalCode.py","file_name":"0 - FinalCode.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"180506897","text":"#!/usr/bin/python3\n''' amenities file '''\nfrom api.v1.views import app_views\nfrom models.amenity import Amenity\nfrom models import storage\nfrom flask import jsonify, abort, request\n\n\n@app_views.route('/amenities', methods=['GET'], strict_slashes=False)\ndef get_amenity():\n '''return json of all amenity objects '''\n return jsonify([o.to_dict() for o in storage.all(\"Amenity\").values()])\n\n\n@app_views.route('/amenities/', methods=['GET'],\n strict_slashes=False)\ndef get_amenity_by_id(amenity_id):\n ''' return a amenity based on its id '''\n amenity = storage.get('Amenity', amenity_id)\n if amenity:\n return jsonify(amenity.to_dict())\n abort(404)\n\n\n@app_views.route('/amenities/', methods=['DELETE'],\n strict_slashes=False)\ndef delete_amenity(amenity_id):\n ''' delete a amenity based on its id '''\n amenity = storage.get('Amenity', amenity_id)\n if amenity is None:\n abort(404)\n\n amenity.delete()\n storage.save()\n\n return jsonify({}), 200\n\n\n@app_views.route('/amenities', methods=['POST'], strict_slashes=False)\ndef create_amenity():\n '''create a state'''\n if not request.get_json():\n return jsonify({'error': 'Not a JSON'}), 400\n\n if 'name' not in request.get_json():\n return jsonify({'error': 'Missing name'}), 400\n\n new_amenity = Amenity(**request.get_json())\n new_amenity.save()\n return jsonify(new_amenity.to_dict()), 201\n\n\n@app_views.route('amenities/', methods=['PUT'],\n strict_slashes=False)\ndef update_amenity(amenity_id):\n \"\"\"Updates amenity object\"\"\"\n stored_data = request.get_json()\n\n if not stored_data:\n return jsonify({'error': 'Not a JSON'}), 400\n\n retrieved_amenity = storage.get(\"Amenity\", amenity_id)\n if retrieved_amenity is None:\n abort(404)\n\n for k, v in stored_data.items():\n if k not in ['id', 'created_at', 'updated_at']:\n setattr(retrieved_amenity, k, v)\n storage.save()\n return retrieved_amenity.to_dict(), 200\n","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"466426403","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom .models import *\n\n\nclass MyUserAwardsInline(admin.TabularInline):\n model = Awards\n\n\nclass MyUserNeedInline(admin.TabularInline):\n model = Need\n\n\n@admin.register(MyUser)\nclass MyUserAdmin(UserAdmin):\n inlines = [MyUserAwardsInline, MyUserNeedInline]\n\n def get_fieldsets(self, request, obj=None):\n res = super().get_fieldsets(request, obj)\n return res + (\n ['Профиль', {\n 'fields': ['user_type', 'entity_type', 'birthday', 'social_state'],\n }],\n ['Адреса', {\n 'fields': ['address', 'location'],\n }],\n ['Соц сети', {\n 'fields': ['social_vk', 'social_ok', 'social_ig', 'social_fb']\n }],\n ['Баллы', {\n 'fields': ['balance', 'rating'],\n }],\n )\n\n\n@admin.register(AwardTypes, NeedType)\nclass CommonAdmin(admin.ModelAdmin):\n pass\n\n\nadmin.site.site_title = 'Здоровое утро'\nadmin.site.site_header = 'Здоровое утро'\nadmin.site.index_title = 'Здоровое утро'\nadmin.site.site_url = None\n","sub_path":"morning/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"505124656","text":"import re\n\nfrom flask import request\nfrom markdown.extensions import Extension\nfrom markdown.postprocessors import Postprocessor\n\n\ndef absolute_path_replacer(match):\n \"\"\"Correct the url in a regex match prepending the absolute path\"\"\"\n assert len(match.groups()) == 2\n\n prefix = request.script_root\n if prefix.endswith(\"/\"):\n prefix = prefix[:-1]\n\n return \"{key}=\\\"{path}\\\"\".format(\n key=match.group(1),\n path=prefix + match.group(2)\n )\n\n\nclass LinkPostprocessor(Postprocessor):\n @staticmethod\n def run(text):\n return re.sub(\n '(href|src)=\"(/[^\"]*)\"',\n absolute_path_replacer,\n text,\n flags=re.IGNORECASE,\n )\n\n\nclass AbsoluteLinkExtension(Extension):\n \"\"\" Add the absolute link patch to Markdown. \"\"\"\n\n @staticmethod\n def extendMarkdown(md, md_globals):\n \"\"\" Add an instance of TableProcessor to BlockParser. \"\"\"\n md.postprocessors['link_patch'] = LinkPostprocessor(md)\n\n\ndef makeExtension(*args, **kwargs):\n return AbsoluteLinkExtension(*args, **kwargs)\n","sub_path":"sipa/utils/link_patch.py","file_name":"link_patch.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"621160190","text":"# assembles a podcast feed (rss/xml) containing all planet money episodes\n# (their official feed only includes the most recent episodes)\n# by downloading the human-interfacing HTML (which does contain all episodes, surprisingly),\n# parsing it into python datatypes (PlanetMoneyHTMLParser), and emitting an xml rss feed\n\n# TODO: cache websites?\n\nfrom html.parser import HTMLParser\nfrom html.entities import name2codepoint\nfrom html import escape\n\nimport datetime\nimport urllib.request\n\n\nclass PlanetMoneyHTMLParser(HTMLParser):\n\n def __init__(self):\n self.prev = None\n self.next_attr = ''\n # stack tags (sneaking in before content) wa want to ignore in handle_data\n # eg data we want \n # so here we would ignore 'time'\n self.tag_stack = []\n\n self.feed_entry = {}\n self.feed_entries = []\n super().__init__()\n\n def handle_starttag(self, tag, attrs):\n\n if self.next_attr:\n self.tag_stack.append(tag)\n\n if tag == 'a' and self.prev[0] == 'h2' and ('class', 'title') in self.prev[1]:\n self.next_attr = 'title'\n\n if tag == 'a' and self.prev[0] == 'p' and ('class', 'teaser') in self.prev[1]:\n self.next_attr = 'description'\n\n if tag == 'a' and self.prev[0] == 'li' and ('class', 'audio-tool audio-tool-download') in self.prev[1]:\n self.feed_entry['link'] = attrs[0][1]\n self.feed_entry['guid'] = attrs[0][1]\n\n if tag == 'time':\n if ('class', 'audio-module-duration') in attrs:\n self.next_attr = 'itunes:duration'\n else:\n self.feed_entry['pubDate'] = attrs[0][1]\n\n self.prev = (tag, attrs)\n # XXX check tag,attrs instead of doing [0][1]\n\n def handle_endtag(self, tag):\n if self.tag_stack:\n self.tag_stack.pop()\n\n if tag == 'article' and self.feed_entry:\n if 'link' in self.feed_entry:\n self.feed_entries.append(self.feed_entry)\n self.feed_entry = {}\n\n def handle_data(self, data):\n if not self.next_attr:\n return\n\n if self.tag_stack:\n return\n\n self.feed_entry[self.next_attr] = data\n self.next_attr = ''\n\n\nURL_STEM = 'http://www.npr.org/sections/money/127413729/podcast/archive'\nUSER_AGENT = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\nHDR = {'User-Agent': USER_AGENT, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}\nPLANET_MONEY_EPOCH = 2008\n\nyr_now = datetime.datetime.now().year\nprint('making ' + str(12 * (yr_now - PLANET_MONEY_EPOCH-1)) + ' requests to gather urls, please be patient...')\nreq_nr = 0\nall_feed_entries = []\n\nfor year in range(yr_now, PLANET_MONEY_EPOCH-1, -1):\n for month in range(12, 0, -1):\n\n req_nr += 1\n print('Request number ' + str(req_nr), end='\\r')\n\n # every side goes about 2 months back, so we check every month\n full_url = URL_STEM + '?date=' + str(month) + '-31-' + str(year)\n req = urllib.request.Request(full_url, headers=HDR)\n\n with urllib.request.urlopen(req) as response:\n the_page = str(response.read(), 'utf-8')\n\n parser = PlanetMoneyHTMLParser()\n parser.feed(the_page)\n for e in parser.feed_entries:\n if all(f['link'] != e['link'] for f in all_feed_entries): # prevent dupes\n all_feed_entries.append(e)\n\nwith open('/home/jan/Dropbox/py/planetmoney-rss/npr_pm_test.xml', 'w') as f:\n f.write('''\n \n \n Planet Money but it's all episodes\n https://github.com/xjcl/planetmoney-rss/tree/gh-pages\n http://nationalpublicmedia.com/wp-content/uploads/2014/06/planetmoney.png\n pls don't sue\\n''')\n\n for e in all_feed_entries:\n f.write('- ')\n for k,v in e.items():\n f.write('<' + k + '>' + escape(v) + '' + k + 
'>')\n f.write('</item>\\n')\n\n f.write('</channel></rss>\\n')\n","sub_path":"npr_pm_rip2.py","file_name":"npr_pm_rip2.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"472189679","text":"#----------------description----------------# \n# Author : Lei yuan\n# E-mail : zhzhao18@fudan.edu.cn\n# Company : Fudan University\n# Date : 2020-10-10 17:40:40\n# LastEditors : Zihao Zhao\n# LastEditTime : 2020-10-20 17:18:18\n# FilePath : /speech-to-text-wavenet/torch_lyuan/train.py\n# Description : \n#-------------------------------------------# \n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport config_train as cfg\nfrom dataset import VCTK\nfrom wavenet import WaveNet\nfrom sparsity import *\nimport utils\nimport visualize as vis\n\nfrom ctcdecode import CTCBeamDecoder\n\nfrom tensorboardX import SummaryWriter\nimport os\nimport numpy as np\n\nimport argparse\n\n\ndef parse_args():\n '''\n Parse input arguments\n '''\n parser = argparse.ArgumentParser(description='WaveNet for speech recognition.')\n parser.add_argument('--resume', action='store_true', help='resume from exp_name/best.pth', default=False)\n parser.add_argument('--vis_mask', action='store_true', help='visualize and save masks', default=False)\n parser.add_argument('--vis_pattern', action='store_true', help='visualize and save patterns', default=False)\n parser.add_argument('--exp', type=str, help='exp dir', default=\"dense\")\n parser.add_argument('--sparse_mode', type=str, help='dense, sparse_pruning, thre_pruning, pattern_pruning', default=\"dense\")\n parser.add_argument('--sparsity', type=float, help='0.2, 0.4, 0.8', default=0.2)\n parser.add_argument('--pattern_para', type=str, help='[pt_num_pt_shape0_pt_shape1_nnz]', default='16_16_16_128')\n parser.add_argument('--coo_para', type=str, help='[pt_shape0, pt_shape1, nnz]', default='8_8_32')\n parser.add_argument('--ptcoo_para', type=str, help='[pt_num, pt_shape0, pt_shape1, pt_nnz, coo_nnz]', default='16_16_16_128_64')\n parser.add_argument('--batch_size', type=int, help='1, 16, 32', default=32)\n parser.add_argument('--lr', type=float, help='0.001 for tensorflow', default=0.001)\n parser.add_argument('--load_from', type=str, help='.pth', default=\"/z\")\n\n args = parser.parse_args()\n return args\n\ndef train(train_loader, scheduler, model, loss_fn, val_loader, writer=None):\n \n decoder_vocabulary = utils.Data.decoder_vocabulary\n vocabulary = utils.Data.vocabulary\n decoder = CTCBeamDecoder(\n decoder_vocabulary,\n #\"_abcdefghijklmopqrstuvwxyz_\",\n model_path=None,\n alpha=0,\n beta=0,\n cutoff_top_n=40,\n cutoff_prob=1.0,\n beam_width=100,\n num_processes=4,\n blank_id=0,\n log_probs_input=False\n )\n\n \n\n best_loss = float('inf')\n for epoch in range(cfg.epochs):\n print(f'Training epoch {epoch}')\n _loss = 0.0\n step_cnt = 0\n \n # sparsity = cal_sparsity(model)\n # print(\"sparsity:\", sparsity)\n for data in train_loader:\n wave = data['wave'].cuda() # [1, 128, 109]\n model = pruning(model, cfg.sparse_mode)\n\n if epoch == 0 and step_cnt == 0:\n loss_val = validate(val_loader, model, loss_fn)\n writer.add_scalar('val/loss', loss_val, epoch)\n \n logits = model(wave)\n logits = logits.permute(2, 0, 1)\n logits = F.log_softmax(logits, dim=2)\n # logits = F.softmax(logits, dim=2)\n text = data['text'].cuda()\n loss = loss_fn(logits, text, data['length_wave'], data['length_text'])\n scheduler.zero_grad()\n loss.backward()\n scheduler.step()\n _loss += loss.data \n\n if epoch == 0 and step_cnt == 10:\n writer.add_scalar('train/loss', _loss, epoch)\n\n if step_cnt % int(3200/cfg.batch_size) == 1:\n print(\"Epoch\", epoch,\n \", 
train step\", step_cnt, \"/\", len(train_loader),\n \", loss: \", round(float(_loss.data/step_cnt), 5))\n torch.save(model.state_dict(), cfg.workdir+'/weights/last.pth')\n\n\n # TODO get the correct evaluate results\n beam_results, beam_scores, timesteps, out_lens = decoder.decode(logits.permute(1, 0, 2))\n print(logits.size())\n # print(out_lens[0][0])\n print(beam_results[0][0][:out_lens[0][0]])\n for n in beam_results[0][0][:out_lens[0][0]]:\n print(vocabulary[n],end = '')\n\n print(\" \")\n for n in data['text'][0]:\n print(vocabulary[int(n)],end = '')\n print(\" \")\n \n # exit()\n # # beam_results, beam_scores, timesteps, out_lens = decoder.decode(logits)\n # zero = torch.zeros_like(beam_results)\n # beam_results = torch.where(beam_results > 27, zero, beam_results)\n # beam_results = torch.where(beam_results < 0, zero, beam_results)\n # voc = np.tile(vocabulary, (cfg.batch_size, 1))\n # pred = np.take(voc, beam_results[:, 0, :].data.numpy())\n # text_np = np.take(voc, text.data.cpu().numpy().astype(int))\n\n # # print('pred: ', pred.transpose(1, 0))\n # print('pred: ')\n # for i, w in enumerate(pred.transpose(1, 0)[0]):\n # if w != '':\n # print(w, end=\"\")\n # elif w == '':\n # break\n\n # print(\"\")\n # print(\"gt: \")\n # for i, w in enumerate(pred.transpose(1, 0)[0]):\n # if i < 256:\n # print(text_np[0][i], end=\"\")\n # tp, pred, pos = utils.evalutes(utils.cvt_np2string(pred), utils.cvt_np2string(text_np))\n # print('tp: ', tp, 'pred: ', pred, 'pos: ', pos)\n \n step_cnt += 1\n \n _loss /= len(train_loader)\n writer.add_scalar('train/loss', _loss, epoch)\n torch.cuda.empty_cache()\n\n model = pruning(model, cfg.sparse_mode)\n sparsity = cal_sparsity(model)\n print(sparsity)\n loss_val = validate(val_loader, model, loss_fn)\n writer.add_scalar('val/loss', loss_val, epoch)\n\n\n if loss_val < best_loss:\n not_better_cnt = 0\n torch.save(model.state_dict(), cfg.workdir+'/weights/best.pth')\n print(\"saved\", cfg.workdir+'/weights/best.pth', not_better_cnt)\n best_loss = loss_val\n else:\n not_better_cnt += 1\n\n if not_better_cnt > 5:\n exit()\n\ndef validate(val_loader, model, loss_fn):\n model.eval()\n _loss = 0.0\n step_cnt = 0\n for data in val_loader:\n wave = data['wave'].cuda() # [1, 128, 109]\n logits = model(wave)\n logits = logits.permute(2, 0, 1)\n logits = F.log_softmax(logits, dim=2)\n text = data['text'].cuda()\n loss = loss_fn(logits, text, data['length_wave'], data['length_text'])\n _loss += loss.data\n # print(loss)\n step_cnt += 1\n # if cnt % 10 == 0:\n print(\"Val step\", step_cnt, \"/\", len(val_loader),\n \", loss: \", round(float(_loss.data/step_cnt), 5))\n\n \n return _loss/len(val_loader)\n\n\ndef main():\n args = parse_args()\n cfg.resume = args.resume\n cfg.exp_name = args.exp\n cfg.workdir = '/zhzhao/code/wavenet_torch/torch_lyuan/exp_result/' + args.exp + '/debug'\n cfg.sparse_mode = args.sparse_mode\n cfg.batch_size = args.batch_size\n cfg.lr = args.lr\n cfg.load_from = args.load_from\n\n print('initial training...')\n print(f'work_dir:{cfg.workdir}, \\n\\\n pretrained: {cfg.load_from}, \\n\\\n batch_size: {cfg.batch_size}, \\n\\\n lr : {cfg.lr}, \\n\\\n epochs : {cfg.epochs}, \\n\\\n sparse : {cfg.sparse_mode}')\n writer = SummaryWriter(log_dir=cfg.workdir+'/runs')\n\n # build train data\n vctk_train = VCTK(cfg, 'train')\n train_loader = DataLoader(vctk_train,batch_size=cfg.batch_size, num_workers=8, shuffle=True, pin_memory=True)\n\n vctk_val = VCTK(cfg, 'val')\n val_loader = DataLoader(vctk_val, batch_size=cfg.batch_size, num_workers=8, 
shuffle=False, pin_memory=True)\n\n # build model\n model = WaveNet(num_classes=28, channels_in=20, dilations=[1,2,4,8,16])\n model = nn.DataParallel(model)\n model.cuda()\n\n weights_dir = os.path.join(cfg.workdir, 'weights')\n if not os.path.exists(weights_dir):\n os.mkdir(weights_dir)\n if not os.path.exists(cfg.vis_dir):\n os.mkdir(cfg.vis_dir)\n cfg.vis_dir = os.path.join(cfg.vis_dir, cfg.exp_name)\n if not os.path.exists(cfg.vis_dir):\n os.mkdir(cfg.vis_dir)\n model.train()\n\n if cfg.resume and os.path.exists(cfg.workdir + '/weights/best.pth'):\n model.load_state_dict(torch.load(cfg.workdir + '/weights/best.pth'))\n print(\"loading\", cfg.workdir + '/weights/best.pth')\n\n if os.path.exists(cfg.load_from):\n model.load_state_dict(torch.load(cfg.load_from))\n print(\"loading\", cfg.load_from)\n\n\n if cfg.sparse_mode == 'sparse_pruning':\n cfg.sparsity = args.sparsity\n print(f'sparse_pruning {cfg.sparsity}')\n elif cfg.sparse_mode == 'pattern_pruning':\n print(args.pattern_para)\n pattern_num = int(args.pattern_para.split('_')[0])\n pattern_shape = [int(args.pattern_para.split('_')[1]), int(args.pattern_para.split('_')[2])]\n pattern_nnz = int(args.pattern_para.split('_')[3])\n print(f'pattern_pruning {pattern_num} [{pattern_shape[0]}, {pattern_shape[1]}] {pattern_nnz}')\n cfg.patterns = generate_pattern(pattern_num, pattern_shape, pattern_nnz)\n cfg.pattern_mask = generate_pattern_mask(model, cfg.patterns)\n elif cfg.sparse_mode == 'coo_pruning':\n cfg.coo_shape = [int(args.coo_para.split('_')[0]), int(args.coo_para.split('_')[1])]\n cfg.coo_nnz = int(args.coo_para.split('_')[2])\n # cfg.patterns = generate_pattern(pattern_num, pattern_shape, pattern_nnz)\n print(f'coo_pruning [{cfg.coo_shape[0]}, {cfg.coo_shape[1]}] {cfg.coo_nnz}')\n elif cfg.sparse_mode == 'ptcoo_pruning':\n cfg.pattern_num = int(args.pattern_para.split('_')[0])\n cfg.pattern_shape = [int(args.ptcoo_para.split('_')[1]), int(args.ptcoo_para.split('_')[2])]\n cfg.pt_nnz = int(args.ptcoo_para.split('_')[3])\n cfg.coo_nnz = int(args.ptcoo_para.split('_')[4])\n cfg.patterns = generate_pattern(cfg.pattern_num, cfg.pattern_shape, cfg.pt_nnz)\n cfg.pattern_mask = generate_pattern_mask(model, cfg.patterns)\n print(f'ptcoo_pruning {cfg.pattern_num} [{cfg.pattern_shape[0]}, {cfg.pattern_shape[1]}] {cfg.pt_nnz} {cfg.coo_nnz}')\n\n\n if args.vis_mask == True:\n name_list = list()\n para_list = list()\n for name, para in model.named_parameters():\n name_list.append(name)\n para_list.append(para)\n\n for i, name in enumerate(name_list):\n if name.split(\".\")[-2] != \"bn\" and name.split(\".\")[-1] != \"bias\":\n raw_w = para_list[i]\n\n zero = torch.zeros_like(raw_w)\n one = torch.ones_like(raw_w)\n\n mask = torch.where(raw_w == 0, zero, one)\n vis.save_visualized_mask(mask, name)\n exit()\n\n if args.vis_pattern == True:\n pattern_count_dict = find_pattern_model(model, [16,16])\n patterns = list(pattern_count_dict.keys())\n vis.save_visualized_pattern(patterns)\n exit()\n # build loss\n loss_fn = nn.CTCLoss(blank=0, reduction='none')\n\n #\n scheduler = optim.Adam(model.parameters(), lr=cfg.lr, eps=1e-4)\n # scheduler = optim.lr_scheduler.MultiStepLR(train_step, milestones=[50, 150, 250], gamma=0.5)\n\n # train\n train(train_loader, scheduler, model, loss_fn, val_loader, writer)\n # val\n # loss = validate(val_loader, scheduler, model, loss_fn)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"torch_lyuan/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"326866955","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views import generic\n\nfrom .models import Post\nfrom .forms import CommentForm\n\nclass IndexView(generic.ListView):\n\tqueryset = Post.objects.filter(status=1).order_by('-created_on')\n\ttemplate_name = 'blog/index.html'\n\nclass DetailView(generic.DetailView):\n\tmodel = Post\n\ttemplate_name = 'blog/detail.html'\n\ndef detail(request, slug):\n\ttemplate_name = 'blog/detail.html'\n\tpost = get_object_or_404(Post, slug=slug)\n\tcomments = post.comment_set.filter(active=True)\n\tnew_comment = None\n\n\tif request.method == 'POST':\n\t\tcomment_form = CommentForm(data=request.POST)\n\t\tif( comment_form.is_valid()):\n\t\t\t# create comment object but don't save to database yet\n\t\t\tnew_comment = comment_form.save(commit = False)\n\t\t\tnew_comment.post = post\n\t\t\tnew_comment.save()\n\telse:\n\t\tcomment_form = CommentForm()\n\n\treturn render(request, template_name, \n\t\t{\n\t\t\t'post': post,\n\t\t\t'comments': comments,\n\t\t\t'new_comment': new_comment,\n\t\t\t'comment_form': comment_form\n\t\t})\n\n\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"595819041","text":"import urllib.request\n\npage = urllib.request.urlopen(\"http://beans.itcarlow.ie/prices.html\")\ntext = page.read().decode(\"utf8\")\n\nwhere= text.find(\">$\")\n\nstart_of_price = int(where) + 2\nend_of_price = int(start_of_price) + 4\n\nprice = float(text[start_of_price:end_of_price])\n\nprint(price)\n","sub_path":"starbuzz/loyaltycoffeeprice.py","file_name":"loyaltycoffeeprice.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"198899558","text":"import csv\nimport logging\nimport os\nimport re\nimport subprocess\n\nfrom django.conf import settings\n\n\nlogger = logging.getLogger(__name__)\n\nYEARS = range(2009, 2015)\n\nSPM_FIELDNAMES = [\n 'hseq',\n 'pppos',\n 'spmuid',\n 's_totval',\n 's_eitc',\n 's_actc',\n 's_snap',\n 's_wic',\n 's_schoollunch',\n 's_housing',\n 's_energy',\n 's_fedincometax',\n 's_statetax',\n 's_fedpayrolltax',\n 's_childandwork',\n 's_moop',\n 's_childsupport',\n 's_povline',\n]\n\n\ndef race_accessor(row, year):\n if year == 2009:\n PRDTRACE = int(row[23:25])\n PEHSPNON = int(row[26:27])\n else:\n PRDTRACE = int(row[26:28])\n PEHSPNON = int(row[30:31])\n\n if PRDTRACE == 1 and PEHSPNON == 2:\n return 'w' # White\n elif PRDTRACE == 2 and PEHSPNON == 2:\n return 'b' # Black\n elif PRDTRACE == 3 and PEHSPNON == 2:\n return 'n' # Native\n elif (PRDTRACE == 4 or PRDTRACE == 5) and PEHSPNON == 2:\n return 'a' # Asian\n elif PEHSPNON == 1:\n return 'h' # Hispanic\n else:\n return 'o' # Other\n\n\ndef gender_accessor(row, year):\n gender_digit = row[19:20] if year == 2009 else row[23:24]\n return 'm' if gender_digit == 1 else 'f'\n\n\ndef _int(s):\n return re.sub(r'[^\\d]', '', s)\n\n\nASEC_CACHED_FIELD_ACCESSORS = [\n ('o_totval', lambda row: int(row[204:212])),\n ('o_unemployment', lambda row: int(row[71:78])),\n ('o_workerscomp', lambda row: int(row[79:86])),\n ('o_socialsecurity', lambda row: int(row[87:94])),\n ('o_ssi', lambda row: int(row[95:101])),\n ('o_tanf', lambda row: int(row[102:108])),\n ('o_veterans', lambda row: int(row[109:116])),\n ('o_povline', lambda row: int(row[31:36])),\n]\n\nASEC_FIELD_ACCESSORS = [\n ('hseq', lambda row, year: int(row[1:6])),\n ('pppos', lambda row, year: int(row[6:8])),\n\n ('weight', lambda row, year:\n int(row[65:73] if year == 2009 else row[154:162])),\n ('gender', gender_accessor),\n ('age', lambda row, year:\n int(row[14:16] if year == 2009 else row[18:20])),\n ('race', race_accessor),\n]\n\nASEC_SUPPLEMENTARY_FIELD_ACCESSORS = [\n ('s_unemployment', lambda row, year:\n int(row[277:282] if year == 2009 else row[410:415])),\n ('s_workerscomp', lambda row, year:\n int(row[284:289] if year == 2009 else row[417:422])),\n ('s_socialsecurity', lambda row, year:\n int(row[290:295] if year == 2009 else row[423:428])),\n ('s_ssi', lambda row, year:\n int(row[818:823] if year == 2009 else row[432:437])),\n ('s_tanf', lambda row, year:\n int(row[304:309] if year == 2009 else row[444:449])),\n ('s_veterans', lambda row, year:\n int(row[316:321] if year == 2009 else row[456:461])),\n ('s_ctc', lambda row, year:\n int(row[659:663] if year == 2009 else row[725:730])),\n]\n\n\ndef handle_spm_row(row, year):\n \"\"\"\n Preprocess row of SPM CSV for importing.\n \"\"\"\n return {\n 'hseq': int(float(\n row['h_seq' if year >= 2013 else 'H_SEQ']\n )),\n 'pppos': int(float(row['pppos'])),\n 'spmuid': int(float(row['SPMu_ID'])),\n 's_totval': float(row['SPMu_totval']),\n 's_eitc': float(row['SPMu_EITC']),\n 's_actc': float(row['SPMu_ACTC']),\n 's_snap': float(row['SPMu_SNAPSub']),\n 's_wic': float(\n row['SPMu_WICval'if year >= 2012 else 'SPMu_WICVAL']\n ),\n 's_schoollunch': float(row['SPMu_SchLunch']),\n 's_housing': float(row['SPMu_CapHouseSub']),\n 's_energy': float(row['SPMu_EngVal']),\n 's_fedincometax': float(row['SPMu_FedTaxBC']),\n 's_statetax': float(row['SPMu_stTax']),\n 's_fedpayrolltax': float(row['SPMu_FICA']),\n 's_childandwork': float(row['SPMu_CapWknChCareXpns']),\n 's_moop': float(row['SPMu_MedOOPnMCareB']),\n 's_childsupport': 
float(row['SPMu_ChildSupPd']),\n 's_povline': float(row['SPMu_PovThreshold']),\n 'year': year,\n }\n\n\ndef handle_asec_row(in_row, out_row, year):\n \"\"\"\n Preprocess row of ASEC file for importing.\n This is an impure function that modifies a preexisting ``out_row``\n which should contain the cached values extracted from the preceding\n family record.\n \"\"\"\n for (key, accessor) in ASEC_FIELD_ACCESSORS:\n out_row[key] = accessor(in_row, year)\n return out_row\n\n\ndef process_spm(out_file='spm_out.csv', dest='data'):\n \"\"\"\n Incrementally read SPM input CSVs and output a single big CSV containing\n data with appropriate headers and types.\n \"\"\"\n with open(out_file, 'w') as spm_out:\n spmwriter = csv.DictWriter(\n spm_out,\n fieldnames=SPM_FIELDNAMES + ['year']\n )\n spmwriter.writeheader()\n\n for year in YEARS:\n with open(os.path.join(dest, '%sspm.csv') % year, 'r') as spm_in:\n spmreader = csv.DictReader(spm_in)\n for row in spmreader:\n spmwriter.writerow(handle_spm_row(row, year))\n\n\ndef process_asec(out_file='asec_out.csv', dest='data'):\n \"\"\"\n Incrementally read ASEC input DAT files and output a single big CSV\n containing denormalized data with appropriate headers and types.\n \"\"\"\n with open(out_file, 'w') as asec_out:\n asecwriter = csv.DictWriter(\n asec_out,\n fieldnames=[x[0] for x in ASEC_FIELD_ACCESSORS +\n ASEC_CACHED_FIELD_ACCESSORS +\n ASEC_SUPPLEMENTARY_FIELD_ACCESSORS] + ['year']\n )\n asecwriter.writeheader()\n\n for year in YEARS:\n with open(os.path.join(dest, '%sasec.dat') % year, 'r') as asec_in:\n out_row = {}\n values_cache = {}\n spm_cache = {}\n rows_chunk = []\n\n for in_row in asec_in:\n record = int(in_row[0:1])\n\n if record == 2:\n if int(in_row[34:35]) == 3:\n continue\n\n # If we've hit a family row and there are rows in the\n # row chunk store, then we should update the rows with\n # the final aggregated spm_cache values, output the\n # rows, and then flush the rows_chunk for the next\n # cycle through.\n if rows_chunk:\n for out_row in rows_chunk:\n out_row.update(spm_cache)\n asecwriter.writerow(out_row)\n rows_chunk = []\n spm_cache = {}\n\n # Whether or not the rows_chunk rows have been reset\n # and flushed, values_cache should be set to the new\n # values derived from the family row.\n for (key, accessor) in ASEC_CACHED_FIELD_ACCESSORS:\n values_cache[key] = accessor(in_row)\n\n if record == 3:\n # If we're in an individual row, then we want to create\n # the individual object and fill it in with those\n # those values that can be determined immediately.\n out_row = {'year': year}\n out_row = handle_asec_row(in_row, out_row, year)\n out_row.update(values_cache)\n\n # We also want to iterate over the fields in the\n # \"supplmentary\" field set and add their values to\n # the spm_cache object. 
These will be attached\n # to individual rows in the next family-row pass.\n for (key, accessor) in ASEC_SUPPLEMENTARY_FIELD_ACCESSORS:\n spm_cache[key] = spm_cache.get(key, 0) + accessor(in_row, year)\n rows_chunk.append(out_row)\n\n\ndef call(cmd, shell=False):\n \"\"\"Spawn a new process and capture its output\"\"\"\n logger.debug(' '.join(cmd))\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell\n )\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise IOError(stderr)\n if stderr:\n logger.error(stderr.decode('utf-8'))\n return stdout\n\n\ndef run(dest='data'):\n spm_out = os.path.join(dest, 'spm_out.csv')\n asec_out = os.path.join(dest, 'asec_out.csv')\n sql_file = os.path.join('data', 'spm_asec.sql')\n\n if not os.path.exists(spm_out):\n process_spm(spm_out, dest)\n else:\n logger.info('spm_out.csv exists, skipping generation')\n\n if not os.path.exists(asec_out):\n process_asec(asec_out, dest)\n else:\n logger.info('asec_out.csv exists, skipping generation')\n\n os.chmod(spm_out, 0o644)\n os.chmod(asec_out, 0o644)\n\n call(['cp', spm_out, '/tmp/'])\n call(['cp', asec_out, '/tmp/'])\n\n psql_cmd = [\n 'psql',\n '-v', 'spm_file=%s' % os.path.join('/tmp', 'spm_out.csv'),\n '-v', 'asec_file=%s' % os.path.join('/tmp', 'asec_out.csv'),\n '-f', sql_file,\n settings.DATABASES['default']['NAME'],\n ]\n\n call(psql_cmd)\n","sub_path":"data/importer.py","file_name":"importer.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"385235184","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/pierre/workspace/django-survey/survey/migrations/0011_auto_20200210_1928.py\n# Compiled at: 2020-02-25 03:28:34\n# Size of source mod 2**32: 1022 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('survey', '0010_survey_editable_answers')]\n operations = [\n migrations.AddField(model_name='survey',\n name='expire_date',\n field=models.DateField(blank=True, null=True, verbose_name='Validity')),\n migrations.AddField(model_name='survey', name='publish_date', field=models.DateField(auto_now=True)),\n migrations.AlterField(model_name='question',\n name='choices',\n field=models.TextField(blank=True,\n help_text=\"The choices field is only used if the question type\\nif the question type is 'radio', 'select', or\\n'select multiple' provide a comma-separated list of\\noptions for this question .\",\n null=True,\n verbose_name='Choices'))]","sub_path":"pycfiles/django_survey_and_report-1.3.21-py3-none-any/0011_auto_20200210_1928.cpython-37.py","file_name":"0011_auto_20200210_1928.cpython-37.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"23429179","text":"#!/usr/bin/env python\n\nimport hashlib\nimport re\nfrom copy import deepcopy\n\nfrom cssutils import parseStyle\nfrom cssutils.css import CSSStyleDeclaration\nfrom lxml import etree\nfrom lxml.builder import ElementMaker\nfrom tinycss2 import color3\n\nfrom mwlib.pdf import utils\nfrom mwlib.pdf.generators.cover import get_article_count\n\nE = ElementMaker()\n\n\ndef parse(html_data):\n return etree.HTML(html_data)\n\n\ndef add_article_title(article):\n for node in article.dom.xpath(\"//article\"):\n first_node = node.iterchildren().next()\n displaytitle = article.caption if hasattr(article, \"caption\") else article.title\n first_heading = E.h1(displaytitle)\n first_node.addprevious(first_heading)\n first_heading.append(hash_anchor(article.title))\n\n article_count = get_article_count(article.env.metabook.items)\n footer = E.div(\n {\"class\": \"footer\"},\n E.span(displaytitle, {\"class\": \"title\"}),\n E.span(\"Article {} of {}\".format(article.idx, article_count), {\"class\": \"counter\"},),\n )\n first_heading.addnext(footer)\n\n\ndef hash_anchor(title):\n anchor = E.A(\n {\n \"name\": \"article_{}\".format(hashlib.md5(title.encode(\"utf-8\")).hexdigest()),\n \"class\": \"toc_entry\",\n },\n \"\",\n )\n return anchor\n\n\ndef add_pagebreaks(root, article):\n if \"page-break-before\" in article:\n for xp in article[\"page-break-before\"]:\n nodelist = root.xpath(xp)\n for node in nodelist:\n utils.append_class(node, \"page-break-before\")\n\n if \"page-break-after\" in article:\n for xp in article[\"page-break-after\"]:\n nodelist = root.xpath(xp)\n for node in nodelist:\n utils.append_class(node, \"page-break-after\")\n\n return root\n\n\n# def filter_content(root, article_num=1, title=''):\ndef filter_content(article):\n content_filter = [\n '//div[@id=\"mw-content-text\"]',\n '//div[@id=\"bodyContent\"]',\n \"//body\",\n \"//\",\n ]\n for query in content_filter:\n content = article.dom.xpath(query)\n if len(content) == 1:\n break\n assert len(content) == 1\n footer_text = _(\"{} | Article {} of {}\").format(\n article.title, article.idx, len(article.env.metabook.items)\n )\n article_node = E.article(\n {\n \"data-pp-article-num\": str(article.idx),\n \"id\": \"article{}\".format(article.idx),\n \"data-pp-footer-text\": footer_text,\n \"class\": \"pp-chapter\",\n }\n )\n for node in content[0].getchildren():\n article_node.append(node)\n article.dom = E.html(E.head(E.meta({\"charset\": \"utf-8\"})), E.body(article_node))\n\n\ndef remove_nodes_and_content(root):\n query_shorthands = {\n \"div\": {\n \"@class\": [\n \"magnify\",\n \"rellink\",\n \"printfooter\",\n \"dablink\",\n \"collapsed\",\n \"NavFrame\",\n \"mediaContainer\",\n \"metadata\",\n \"homonymie\",\n \"loupe\",\n \"bandeau\",\n ],\n \"@id\": [\n \"siteSub\",\n \"jump-to-nav\",\n \"catlinks\",\n \"normdaten\",\n \"disambig\",\n \"spoiler\", # https://sr.wikipedia.org/wiki/CY-208243\n ],\n },\n \"table\": {\n \"@class\": [\"ambox\", \"metadata\", \"navbox\", \"navigatiesjabloon\"],\n \"@id\": [\"disambigbox\", \"commonscat\"],\n },\n \"span\": {\"@class\": [\"mw-editsection\"], \"@id\": [\"coordinates\"]},\n \"ul\": {\"@id\": [\"bandeau\"], \"@class\": [\"bandeau\"]},\n \"*\": {\n \"@class\": [\n \"noprint\",\n \"noexport\",\n \"hatnote navigation-not-searchable\",\n \"beginnetje\", # https://nl.wikipedia.org/wiki/Tafalisca_bogotensis\n \"UitklapFrame\", # https://nl.wikipedia.org/wiki/Stoodleigh\n \"navigation-only\", # https://fr.wikipedia.org/wiki/Villalval\n \"vedlikehold\", # 
https://no.wikipedia.org/wiki/Santa_Teresinha\n ],\n \"@id\": [\"tpl_Coordinaten\", \"toc\",], # https://nl.wikipedia.org/wiki/Aldeyjarfoss\n },\n }\n queries = []\n for node in query_shorthands:\n predicates = []\n for attr in query_shorthands[node]:\n for filter_attr_val in query_shorthands[node][attr]:\n predicates.append('contains({attr}, \"{filter_attr_val}\")'.format(**locals()))\n queries.append(\"//{node}[{pred}]\".format(node=node, pred=\" or \".join(predicates)))\n\n queries.extend(\n [\n '//table[.//tr[contains(@class, \"navbox-title\")]]',\n '//table[.//img[contains(@srcset, \"Disambig-dark.svg\")]]',\n '//table[.//img[contains(@srcset, \"Exquisite-kfind.png\")]]',\n '//tr[.//a[@title=\"Portaalicoon\"]]',\n \"//comment()\",\n '//p[./span[@class=\"geo microformat\"]][preceding-sibling::h1]',\n '//span[contains(@class, \"haudio\")]/parent::*',\n ]\n )\n for node in root.xpath(\"|\".join(queries)):\n utils.remove_node(node)\n\n\ndef strip_tags(root):\n \"\"\"strip tags but keep all content/text/tail\"\"\"\n tag_list = [\"a\"]\n etree.strip_tags(root, *tag_list)\n\n\ndef strip_attributes(root):\n attributes = [\n \"cellpadding\",\n \"cellspacing\",\n \"align\",\n \"size\",\n # https://uk.wikipedia.org/wiki/%D0%A1%D0%B0%D0%BD-%D0%91%D0%B0%D1%80%D1%82%D0%BE%D0%BB%D0%BE%D0%BC%D0%B5%D0%BE-%D0%92%D0%B0%D0%BB%D1%8C-%D0%9A%D0%B0%D0%B2%D0%B0%D1%80%D0%BD%D1%8C%D1%8F\n \"border\",\n \"bgcolor\",\n # https://en.wikipedia.org/wiki/Archery_at_the_1988_Summer_Olympics_%E2%80%93_Women%27s_individual\n ]\n xpath = \"//*[{}]\".format(\"|\".join([\"@\" + attr for attr in attributes]))\n for node in root.xpath(xpath):\n for attr in attributes:\n if attr in node.attrib:\n del node.attrib[attr]\n for node in root.xpath(\"//*[not(self::td or self::th)]/@style\"):\n if node.is_attribute:\n del node.getparent().attrib[\"style\"]\n\n\ndef transform_width_and_height_attributes_to_style(root):\n attributes = [\"width\", \"height\"]\n for node in root.xpath(\"//*[{}]\".format(\"|\".join(\"@\" + attr for attr in attributes))):\n for attr in attributes:\n if attr in node.attrib:\n value = node.get(attr)\n del node.attrib[attr]\n style = parseStyle(node.get(\"style\"))\n if attr not in style.keys():\n if value[-1:] == \"%\":\n style.setProperty(attr, value)\n node.attrib[\"style\"] = \";\".join([prop.cssText for prop in style])\n elif value != \"\":\n style.setProperty(attr, value + \"px\")\n node.attrib[\"style\"] = \";\".join([prop.cssText for prop in style])\n\n\ndef strip_style_properties_except_width_and_height(root):\n \"\"\"\n remove all style properties except for width and height\n scale px units to point units according to font relations:\n screen 12px --> print 8pt = 2/3\n :param root: dom tree\n :return:\n \"\"\"\n scale_factor = 2 / 3.0\n unit = \"pt\"\n for node in root.xpath(\"//*[@style]\"):\n old_style = parseStyle(node.get(\"style\"))\n new_style = CSSStyleDeclaration()\n for p in old_style.getProperties(\"width\", \"height\"):\n if p.value[-2:] == \"px\":\n value = str(scale_factor * float(re.sub(r\"[^0-9.]\", r\"\", p.value))) + unit\n new_style.setProperty(p.name, value)\n node.attrib[\"style\"] = \";\".join([prop.cssText for prop in new_style])\n\n\ndef grey_from_style_frag(frag):\n color = color3.parse_color(frag)\n if color is None:\n return frag\n else:\n grey = int(255 * color.red * 0.3 + 255 * color.green * 0.59 + 255 * color.blue * 0.11)\n return \"rgb({g}, {g}, {g})\".format(g=grey)\n\n\ndef convert_grayscale(root):\n for attr in [\"color\", \"bgcolor\"]:\n for node in 
root.xpath(\"//*[@{}]\".format(attr)):\n node.set(attr, grey_from_style_frag(node.get(attr)))\n\n for node in root.xpath(\"//*[@style]\"):\n new_style = []\n for style_frag in node.get(\"style\", \"\").split(\";\"):\n if not style_frag.strip():\n continue\n try:\n attr, val = (s.strip() for s in style_frag.split(\":\"))\n except ValueError:\n continue\n if val == \"transparent\":\n continue\n new_val = map(grey_from_style_frag, val.split(\" \"))\n new_style.append(u\"{attr}: {val}\".format(attr=attr, val=u\" \".join(new_val)))\n if new_style:\n node.set(\"style\", \"; \".join(new_style))\n\n\ndef add_soft_hyphens(root):\n # https://en.wikipedia.org/wiki/Arbutamine\n max_word_len = 50\n\n def handle_node_txt(node):\n for attr in [\"text\", \"tail\"]:\n txt = getattr(node, attr)\n if txt is None:\n continue\n words = txt.split(\" \")\n found_long = False\n if txt:\n for i, word in enumerate(words):\n word_len = len(word)\n if word_len > max_word_len:\n num_breaks = word_len / max_word_len\n len_frag = word_len / (num_breaks + 1)\n hyphenated = u\"\\u00ad\".join(\n [\n word[frag_idx * len_frag : (frag_idx + 1) * len_frag]\n for frag_idx in range(num_breaks + 1)\n ]\n )\n words[i] = hyphenated\n found_long = True\n if found_long:\n setattr(node, attr, \" \".join(words))\n\n map(handle_node_txt, root.iterdescendants())\n\n\ndef remove_styles(root):\n styles = [\n \"-moz-column-count\", # https://de.wikipedia.org/wiki/Decatur_County_%28Indiana%29\n \"column-count\", # https://de.wikipedia.org/wiki/Decatur_County_%28Indiana%29\n \"font\",\n \"font-size\",\n \"padding\", # https://en.wikipedia.org/wiki/A%26M_Records,_Inc._v._Napster,_Inc.\n ]\n\n _remove_styles = lambda node: utils.remove_node_styles(node, styles)\n\n predicate = \" or \".join(['contains(@style, \"{}\")'.format(style) for style in styles])\n map(_remove_styles, root.xpath(\"//*[{}]\".format(predicate)))\n\n\ndef clean(root):\n for node in root.xpath(\"//*[@_src]\"):\n del node.attrib[\"_src\"]\n\n\ndef remove_container(root):\n def has_siblings(node):\n return node.getnext() is not None or node.getprevious() is not None\n\n removable_container = [\"div\"]\n tags = [\n \"ul\",\n \"ol\", # https://en.wikipedia.org/wiki/A-List_%28Conservative%29\n \"table\", # https://en.wikipedia.org/wiki/Calosoma_striatius\n ]\n\n for node in root.xpath(\"|\".join(\"//{}\".format(tag) for tag in tags)):\n if has_siblings(node):\n continue\n check_node = node\n tails = []\n while not has_siblings(check_node) and (\n check_node.getparent().tag in removable_container or check_node == node\n ):\n tails.append(check_node.tail)\n check_node = check_node.getparent()\n if check_node != node:\n # FIXME move to domtools\n check_node.getparent().replace(check_node, node)\n node.tail = \"\".join(t for t in reversed(tails) if t)\n\n\ndef _combine_references(root):\n ref_nodes = root.xpath('//p[@class=\"pp_figure_ref\"]')\n groups = []\n group = []\n for node in ref_nodes:\n if group:\n between = group[-1].getnext()\n if (\n between is not None\n and between.getnext() == node\n and between.tag == \"div\"\n and \"pp_figure\" in between.get(\"class\")\n ):\n group.append(node)\n else:\n groups.append(group)\n group = [node]\n else:\n group.append(node)\n if group:\n groups.append(group)\n for group in groups:\n if len(group) == 1:\n continue\n txt = group[0].text + \" - \" + group[-1].text.strip().rsplit(\" \", 1)[1]\n group[0].text = txt\n for node in group[1:]:\n utils.remove_node(node)\n\n\ndef add_figure_numbers(root):\n classes = [\n \"pp_singlecol\",\n # 
'infobox', # infoboxes are not referenced despite floating\n \"pp_figure\",\n \"pp_twocol_span\",\n ]\n pred = \" or \".join('contains(@class, \"{}\")'.format(cls) for cls in classes)\n total_figures = 0\n for article in root.xpath(\"//article\"):\n figure_num = 0\n for node in article.xpath(\".//*[{}]\".format(pred)):\n utils.remove_class(node, \"infobox\")\n figure_num += 1\n total_figures += 1\n cls = [c for c in classes if c in node.get(\"class\")][0]\n nr = \".\".join([article.get(\"pp_article_num\"), str(figure_num)])\n caption_txt = \"Figure {nr} \".format(nr=nr)\n reference = E.p({\"class\": \"pp_figure_ref\"}, u\"\\u21AA \" + caption_txt)\n if cls == \"pp_figure\":\n caption = node.xpath('.//*[contains(@class, \"thumbcaption\")]')\n if caption:\n node.addnext(reference)\n caption = caption[0]\n prefix = E.b(caption_txt)\n caption.insert(0, prefix)\n prefix.tail = caption.text\n caption.text = None\n utils.append_class(caption, \"pp_figure_caption\")\n continue\n wrapper = utils.wrap_node(node, \"div\", {\"class\": cls})\n caption = E.div({\"class\": \"pp_figure_caption\"}, E.b(caption_txt))\n wrapper.append(caption)\n utils.remove_class(node, cls)\n wrapper.addnext(reference)\n _combine_references(root)\n\n\ndef move_caption(node):\n utils.append_class(node, \"pp-table-caption\")\n wrapper = E.div({\"class\": \"pp-table\"})\n try:\n node[0][0].text = node[0][0].text.replace(\":\", \"\")\n node[0].tail = \"\"\n except:\n print(\"Error at: \" + etree.tostring(node))\n node_pos = node.getparent().index(node)\n nodelist = node.getparent().getchildren()\n indexpos = node_pos - 1\n while nodelist[indexpos].tag in [\"p\", \"ul\"]:\n if nodelist[indexpos].get(\"class\") and \"gallery\" in nodelist[indexpos].get(\"class\"):\n break\n else:\n indexpos -= 1\n # indexpos is the beef\n\n wrapper.append(node)\n if indexpos < 1:\n indexpos = 1\n nodelist[indexpos - 1].addnext(wrapper)\n for i in range(indexpos, node_pos):\n wrapper.append(nodelist[i])\n\n # add second caption to tables\n if wrapper[1].tag == \"table\":\n node2 = deepcopy(node)\n node2.tag = \"caption\"\n utils.append_class(node2, \"following\")\n wrapper[1].append(node2)\n\n\ndef apply_article_options(root, options=\"\"):\n if \"notext\" in options:\n article = root.find(\".//article\")\n utils.append_class(article, \"nodisplay\")\n\n\ndef remove_figure_colon(root):\n for node in root.xpath(\n '//div[@class=\"thumbcaption\"]/i[position()=1 and following-sibling::text()[starts-with(self::text(), \":\") and position()=1]]'\n ):\n node.tail = \"\"\n\n\ndef rebuild_footnotes(root):\n for node in root.xpath('//sup[@class=\"reference\"]'):\n p = re.compile(r\"cite_ref-([A-Za-z0-9]+)_([0-9])-0\")\n ref_id = p.sub(r\"cite_note-\\1-\\2\", node.get(\"id\"))\n ref_nodes = root.xpath(\n '//ol[@class=\"references\"]/li[@id=\"{}\"]/span[@class=\"reference-text\"]'.format(ref_id)\n )\n if len(ref_nodes) == 0:\n continue\n footnote = ref_nodes[0]\n footnote.text = footnote.text.strip()\n footnote.tail = node.tail\n parent = node.getparent()\n parent.insert(parent.index(node) + 1, footnote)\n parent.remove(node)\n\n # remove whitespace between footnote and last character\n if parent.text:\n parent.text = parent.text.rstrip()\n\n for node in root.xpath('//ol[@class=\"references\"]'):\n utils.remove_node(node.getprevious())\n utils.remove_node(node)\n\n\ndef rewrite_links(root):\n for node in root.xpath(\"//a\"):\n if node.get(\"href\") and node.get(\"title\"):\n link = 
\"#article_{}\".format(hashlib.md5(node.get(\"title\").encode(\"utf-8\")).hexdigest())\n node.set(\"href\", link)\n\n\ndef markup_maps(root):\n target_node = \"//div[{}]\"\n conditions = [\n 'contains(@class, \"thumb\")',\n 'not(contains(@class, \"thumbinner\"))',\n 'not(contains(@class, \"thumbcaption\"))',\n 'not(contains(@class, \"thumbimage\"))',\n './/div[contains(@style, \"relative\") and .//div[contains(@style, \"absolute\")]]',\n ]\n '//div[@class=\"mw-parser-output\"]//div[contains(@style, \"relative\") and .//div[contains(@style, \"absolute\")]]'\n for node in root.xpath(target_node.format(\" and \".join(conditions))):\n utils.append_class(node, \"map\")\n","sub_path":"mwlib/pdf/htmlfilters/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":17210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"148268205","text":"import pandas as pd\nimport sys\n\ndata = pd.read_csv(sys.argv[1], encoding= 'unicode_escape')\ndata[\"StockCode\"] = data[\"StockCode\"].astype(str)\n\nnan_value = float(\"NaN\")\ndata.replace(\"\", nan_value, inplace=True)\ndata.dropna(inplace=True)\n\ngroups_inv = data.groupby(\"InvoiceNo\")[\"StockCode\"]\ninvoices = set(data[\"InvoiceNo\"])\nout_list = []\nfor i in invoices:\n out_list.append(list(groups_inv.get_group(i)))\nwith open(\"FP_Part-2_changed.csv\",\"w\") as f:\n for i in out_list:\n f.write(','.join(i)+\"\\n\")\n\n","sub_path":"Assignment 3/Results/Codes&Output/FP_2/fp_convert_data.py","file_name":"fp_convert_data.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"521202921","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os, sys\n\n#os.environ['CLASSPATH'] = \"/Users/mru/Documents/app/tika-app-1.9.jar\"\n\n\n\nfrom tika import parser\n\n\nfrom jnius import autoclass\n\nclass JniusExtractor:\n \n def __init__(self, cleaner=None):\n \n \n \n self.cleaner = cleaner\n \n Tika = autoclass('org.apache.tika.Tika')\n Metadata = autoclass('org.apache.tika.metadata.Metadata')\n \n self.tika = Tika()\n self.meta = Metadata()\n \n def parse(self, file):\n FileInputStream = autoclass('java.io.FileInputStream')\n \n encoding = sys.getfilesystemencoding()\n text = self.tika.parseToString(FileInputStream(file.decode(encoding)), self.meta)\n \n if (self.cleaner):\n content = self.cleaner.clean(text)\n \n return text.decode('utf8')\n\n\nclass RestExtractor:\n \n TIKA_SERVER = \"TIKA_SERVER\"\n \n def __init__(self, tika_server=None, cleaner=None):\n \n self.cleaner = cleaner\n \n if tika_server == None:\n if self.TIKA_SERVER in os.environ:\n self.tika_server = os.environ[self.TIKA_SERVER]\n else:\n self.tika_server = 'http://127.0.0.1:9998/tika'\n else:\n self.tika_server = tika_server\n \n \n def parse(self, file):\n \n parsed = parser.from_file(file, self.tika_server)\n \n content = parsed[\"content\"]\n \n if (self.cleaner):\n content = self.cleaner.clean(content)\n \n return content\n \n\nclass DefaultCleaner:\n def clean(self, text):\n return text.strip()\n ","sub_path":"batch/indexer/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"97993783","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMail worker`s thread. Collect mail, refresh folders, etc. All work with mail.\n\"\"\"\n\nimport time\nimport imaplib\nimport threading\n\nfrom classes.ImapTools import ImapTools\nfrom classes.FiltersChunk import FiltersChunk\nfrom classes.Logger import Logger\n\n\nclass MailWorkerThread(threading.Thread):\n \"\"\"\n Mail worker`s thread. Collect mail, refresh folders, etc. All work with mail.\n \"\"\"\n db = None\n account_data = None\n daemon = True\n done = False\n filters_chunk = None\n LETTER_GET_REPEATS_LIMIT = 5\n\n def __init__(self, db, account_data):\n \"\"\"\n :param db classes.Database:\n :param account_data nameddict:\n \"\"\"\n threading.Thread.__init__(self)\n\n self.db = db\n self.account_data = account_data\n self.filters_chunk = FiltersChunk(self.db)\n\n self.db.update(\"accounts\", {\"in_work\": 1}, \"id = {0}\".format(account_data['id']))\n\n def get_letter(self, imap, uid, folder, repeat_counter=0):\n \"\"\"\n Get concrete letter by UID and folder. Repeat some counts (self.LETTER_GET_REPEATS_LIMIT) if\n connection problems exists\n :param imap: imaplib.IMAP4\n :param uid: int\n :param folder: nameddict\n :param repeat_counter: int\n :return:\n \"\"\"\n if repeat_counter >= self.LETTER_GET_REPEATS_LIMIT:\n Logger.log_err(\"UID {0} from folder {1}/{2} can`t be fetched\".format(\n uid, folder['id'], folder['full_name']))\n return None\n\n try:\n Logger.log_info(\n \"Start fetch uid {0} from {1}/{2}/{3}/{4}\".format(uid, folder['id'], folder['full_name'],\n self.account_data['host'],\n self.account_data['login']))\n letter = ImapTools.fetch_mail_from_folder_by_uid(imap, int(uid), self.filters_chunk, folder)\n\n if letter is not None:\n letter.flush_to_db(self.db, folder['id'])\n Logger.log_info(\"Successfully fetched uid {0} from {1}/{2}/{3}/{4}\".format(\n uid, folder['id'], folder['full_name'], self.account_data['host'], self.account_data['login']))\n except imaplib.IMAP4.abort:\n time.sleep(3)\n return self.get_letter(imap, uid, folder, repeat_counter+1)\n\n def run(self):\n try:\n imap = imaplib.IMAP4_SSL(self.account_data['host']) if \\\n int(self.account_data['ssl']) else \\\n imaplib.IMAP4(self.account_data['host'])\n imap.login(self.account_data['login'], self.account_data['password'])\n except imaplib.IMAP4.error as ex:\n if str(ex).count('AUTHENTICATIONFAILED') or str(ex).count('Invalid login or password'):\n self.db.update(\"accounts\", {'active': 0}, \"id = {0}\".format(self.account_data['id']))\n Logger.log_err(\n \"Auth failed for {0}/{1} disable it\".format(\n self.account_data['host'], self.account_data['login']))\n else:\n Logger.log_ex(\n ex, \"Account {0}/{1}\".format(\n self.account_data['host'], self.account_data['login']))\n\n self.db.insert(\"accounts_errors\",\n {\"account_id\": self.account_data['id'], \"error\": str(ex), \"when_add\": int(time.time())})\n try:\n imap.close()\n except BaseException:\n pass\n\n return\n\n last_uid = 0\n try:\n ImapTools.refresh_account_folders_list(self.db, imap, self.account_data['id'])\n Logger.log_info(\"Folders list for {0}/{1} successfully refreshed\".format(\n self.account_data['host'], self.account_data['login']))\n\n common_count = 0\n folders = self.db.fetch_all(\"SELECT * FROM folders WHERE account_id = {0} AND removed = 0\".format(\n self.account_data['id']))\n for folder in folders:\n uids = ImapTools.get_all_letters_uids_from_folder(imap, folder)\n already_done_uids = ImapTools.get_already_done_uids_of_folder(self.db, folder['id'])\n\n for 
uid in uids:\n if int(uid) not in already_done_uids:\n last_uid = uid\n self.get_letter(imap, uid, folder)\n common_count += 1\n\n self.db.update(\"folders\", {'last_checked': int(time.time())}, \"id = {0}\".format(folder['id']))\n\n Logger.log_info(\"Mail refresh for {0}/{1} done, {2} letters loaded\".format(\n self.account_data['host'], self.account_data['login'], common_count))\n\n self.db.update(\n \"accounts\",\n {'last_checked': int(time.time()), 'in_work': '0'},\n \"id = {0}\".format(self.account_data['id'])\n )\n except BaseException as ex:\n Logger.log_ex(\n ex, \"Mail fetch process exception of {0}/{1}/{2}\".format(\n self.account_data['host'], self.account_data['login'], last_uid))\n\n Logger.log_info(\"Start update attachments types/exts\")\n self.update_attachments_txts()\n Logger.log_info(\"Done update attachments types/exts\")\n\n self.done = True\n\n self.db.close()\n\n def update_attachments_txts(self):\n \"\"\"\n Method update ext`s and unknown attachments by mime-type info\n :return:\n \"\"\"\n mime_types = self.db.fetch_pairs(\n \"SELECT DISTINCT mime_type, ext FROM `attachments` WHERE ext <> 'unknown'\") #type: dict\n unknown_attachments = self.db.fetch_all(\"SELECT * FROM `attachments` WHERE LOCATE('.', file_name) = 0\")\n\n for unknown_attachment in unknown_attachments:\n if unknown_attachment['mime_type'] in mime_types.keys():\n self.db.q(\n \"UPDATE attachments SET ext = {0}, file_name = CONCAT(file_name, '.', {0}) \"\n \"WHERE id = {1}\".format(\n self.db.quote(mime_types[unknown_attachment['mime_type']]),\n unknown_attachment['id']\n )\n )\n","sub_path":"classes/MailWorkerThread.py","file_name":"MailWorkerThread.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"75438102","text":"from random import seed\nfrom random import random\nimport numpy as np\nimport os\nimport math\nimport scipy.stats\nfrom numpy import mean, sqrt, square, arange\ndata = open(\"inputf.txt\", \"r\")\nm=np.genfromtxt(data,usecols=(0,1,2,3,4,5,6,7,8,9),delimiter=\" \")\nnumm=m.shape[0]/4\ndata = open(\"input_testf.txt\", \"r\")\nm1=np.genfromtxt(data,usecols=(0,1,2,3,4,5,6,7,8,9),delimiter=\" \")\nnumm1=m1.shape[0]/4\nfor x in range (numm*4):\n\tm[x][9]=int(m[x][9])\nfor x in range (numm1*4):\n\tm1[x][9]=int(m1[x][9])\ndef initialize_network(n_inputs, n_hidden, n_outputs):\n\tnetwork = list()\n\thidden_layer = [{'weights':[random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]\n\tnetwork.append(hidden_layer)\n\toutput_layer = [{'weights':[random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]\n\tnetwork.append(output_layer)\n\treturn network\n\n#seed(1)\n#network = initialize_network(9, 1, 3)\n#for layer in network:\n#\tprint(layer)\n# Calculate neuron activation for an input\ndef activate(weights, inputs):\n\tactivation = weights[-1]\n\t#print(\"weight \"),\n\t#print(weights[-1])\n\tfor i in range(len(weights)-1):\n\t\tactivation += weights[i] * inputs[i]\n\treturn activation\n# Transfer neuron activation\ndef transfer(activation):\n\treturn 1.0 / (1.0 + math.exp(-activation))\n# Forward propagate input to a network output\ndef forward_propagate(network, row):\n\tinputs = row\n\tfor layer in network:\n\t\tnew_inputs = []\n\t\tfor neuron in layer:\n\t\t\tactivation = activate(neuron['weights'], inputs)\n\t\t\t#print('output'),\n\t\t\t#print('weights')\n\t\t\tneuron['output'] = transfer(activation)\n\t\t\tnew_inputs.append(neuron['output'])\n\t\tinputs = new_inputs\n\treturn inputs\n# Calculate the derivative of an neuron output\ndef transfer_derivative(output):\n\treturn output * (1.0 - output)\n# Backpropagate error and store in neurons\ndef backward_propagate_error(network, expected):\n\tfor i in reversed(range(len(network))):\n\t\tlayer = network[i]\n\t\terrors = list()\n\t\tif i != len(network)-1:\n\t\t\tfor j in range(len(layer)):\n\t\t\t\terror = 0.0\n\t\t\t\tfor neuron in network[i + 1]:\n\t\t\t\t\terror += (neuron['weights'][j] * neuron['delta'])\n\t\t\t\terrors.append(error)\n\t\telse:\n\t\t\tfor j in range(len(layer)):\n\t\t\t\tneuron = layer[j]\n\t\t\t\terrors.append(expected[j] - neuron['output'])\n\t\tfor j in range(len(layer)):\n\t\t\tneuron = layer[j]\n\t\t\tneuron['delta'] = errors[j] * transfer_derivative(neuron['output'])\n# Update network weights with error\ndef update_weights(network, row, l_rate):\n\tfor i in range(len(network)):\n\t\tinputs = row[:9]\n\t\tif i != 0:\n\t\t\tinputs = [neuron['output'] for neuron in network[i - 1]]\n\t\tfor neuron in network[i]:\n\t\t\tfor j in range(len(inputs)):\n\t\t\t\tneuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]\n\t\t\tneuron['weights'][-1] += l_rate * neuron['delta']\n# Train a network for a fixed number of epochs\ndef train_network(network, train, l_rate, n_epoch, n_outputs):\n\tfor epoch in range(n_epoch):\n\t\tsum_error = 0\n\t\tfor row in train:\n\t\t\toutputs = forward_propagate(network, row)\n\t\t\texpected = [0 for i in range(n_outputs)]\n\t\t\texpected[int(row[9])] = 1\n\t\t\tsum_error += sum([(expected[i]-outputs[i])**2 for i in range(len(expected))])\n\t\t\tbackward_propagate_error(network, expected)\n\t\t\tupdate_weights(network, row, l_rate)\n\t\t#print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))\n# Make a prediction with a 
network\ndef predict(network, row):\n\toutputs = forward_propagate(network, row)\n\treturn outputs.index(max(outputs))\na=np.array(m)\nfor i in range(0,9):\n\tmaxi=a[:,i].max()\n\tmini=a[:,i].min()\n\tfor j in range(0,4*numm):\n\t\t#print(a[j][i]),\n\t\t#print(maxi),\n\t\t#print(mini)\n\t\tm[j][i]=(a[j][i]-mini)*1.0/(maxi-mini)\n#print(m)\na=np.array(m1)\nfor i in range(0,9):\n\tmaxi=a[:,i].max()\n\tmini=a[:,i].min()\n\tfor j in range(0,4*numm1):\n\t\t#print(a[j][i]),\n\t\t#print(maxi),\n\t\t#print(mini)\n\t\tm1[j][i]=(a[j][i]-mini)*1.0/(maxi-mini)\nn_inputs = len(m[0]) - 1\nn_outputs = len(set([row[9] for row in m]))\nnetwork = initialize_network(n_inputs, 25, n_outputs)\ntrain_network(network, m, 1 , 10, n_outputs)\n#for layer in network:\n#\tprint(layer)\nc=0\nfor row in m1:\n\tprediction = predict(network, row)\n\tprint('Expected=%d, Got=%d' % (row[9], prediction))\n\tif row[9]==prediction :\n\t\tc+=1\nprint(c)\nprint(\"Accuracy: \"),\nprint(c*100.0/(numm1*4.0))\n","sub_path":"ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"177504674","text":"nome = input('Qual o seu nome?')\nprint('Prazer em te conhecer {}!'.format(nome))\n\nn1 = int(input('Digite um valor:'))\nn2 = int(input('Outro valor:'))\nprint('A soma vale {}'.format(n1+n2))\n\n\nn1 = int(input('Digite um valor:'))\nn2 = int(input('Outro valor:'))\ns = n1+n2\nm = n1 * n2\nd= n1/n2\ndi = n1 //n2 #divisão inteira\ne = n1 ** n2 #expoente\nprint('a soma é {}, o produto é {} e a divisão é {:.3f}'.format(s,m,d), end='') #{:.3f} colocar tres casas decimais, end continua na mesma linha\nprint('A divisão inteira é {} e potencia {}'.format(di,e))\n\n","sub_path":"cursoemVideo/aula07.a.py","file_name":"aula07.a.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"637998676","text":"#Tupla contendo os meses\nmeses = ('janeiro','fevereiro','março','abril','maio','junho','julho','agosto','setembro','outubro','novembro','dezembro')\n\ndata_nasc = input('Entre com sua data de nascimento no formato DD-MM-AAAA: ')\n\nvarAux = int(data_nasc[3:5])-1\n\nmes = meses[varAux]\n\nprint('Voce nasceu no mes de',mes)\n","sub_path":"MesdeNascimento.py","file_name":"MesdeNascimento.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"521824557","text":"import kivy \nfrom kivy.app import App \nkivy.require('1.9.0')\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.config import Config\nConfig.set('graphics', 'resizable', 1) \n\n\n\n\nclass CalcGridLayout(GridLayout): \n\n\tdef calculate(self, calculation): \n\t\tif calculation: \n\t\t\ttry: \n\t\t\t\tself.d_equation.text = str(eval(calculation)) \n\t\t\texcept Exception: \n\t\t\t\tself.d_equation.text = \"Error\"\n\nclass CalculatorApp(App): \n\n\tdef build(self): \n\t\treturn CalcGridLayout() \n\nmyApp = CalculatorApp() \nmyApp.run() \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"200890273","text":"import pytest\nfrom pytest import fixture\nfrom utilities.constants.common import USERS\nfrom utilities.factories.users import UserFactory\nfrom utilities.models.data_models import User\n\n\n@pytest.mark.factory\n@pytest.mark.unit\nclass TestUserFactory:\n \"\"\"Battery of tests for UserFactory functionality.\"\"\"\n\n @pytest.mark.low\n def test_build__returns_type_user(self) -> None:\n \"\"\"Check that a User is created from the UserFactory.\"\"\"\n user: User = UserFactory.build()\n\n assert type(user) == User\n\n @pytest.mark.low\n def test_create__returns_type_user(self) -> None:\n \"\"\"Check that a User is created from the UserFactory.\"\"\"\n user: User = UserFactory.create()\n\n assert type(user) == User\n\n @pytest.mark.low\n def test_factory__subsequent_calls_return_new_user(self) -> None:\n \"\"\"Check that a new User is returned from the UserFactory.\"\"\"\n user_one: User = UserFactory.create()\n user_two: User = UserFactory.create()\n\n assert user_one != user_two\n\n @pytest.mark.low\n @pytest.mark.parametrize(\n 'username, first_name, last_name, email',\n [('testing123', 'unit', 'testing', 'testing@testing.com')],\n )\n def test_factory__override_values(\n self, username: fixture, first_name: fixture, last_name: fixture, email: fixture,\n ) -> None:\n \"\"\"Check that factory values may be overridden.\"\"\"\n user: User = UserFactory.create(\n username=username, first_name=first_name, last_name=last_name, email=email,\n )\n\n assert (\n user.first_name == first_name\n and user.username == username\n and user.email == email\n and user.last_name == last_name\n )\n\n @pytest.mark.low\n def test_factory__params__account_user(self) -> None:\n \"\"\"Check that a User may be manipulated using a factory trait.\"\"\"\n user: User = UserFactory.create(account_user=True)\n expected_username: str = USERS.USERNAME\n expected_email: str = USERS.EMAIL\n\n assert expected_email in user.email and expected_username in user.username\n","sub_path":"integration/factories/test_user_factory.py","file_name":"test_user_factory.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"216025532","text":"import re\r\n\r\nhand = open(r\"D:\\WORK\\UDINUS\\MOOC\\Coursera\\Learning Python\\re\\sec.txt\")\r\n\r\nlst = list()\r\nfor lines in hand:\r\n numlist = re.findall('[0-9]+',lines)\r\n if len(numlist) == 0: continue\r\n for i in range (0,len(numlist)):\r\n lst.append(int(numlist[i]))\r\n\r\nprint(sum(lst))\r\n\r\n# import re below is not my code\r\n\r\n# hand = open(\"regex_sum_24962.txt\")\r\n# x=list()\r\n# for line in hand:\r\n# y = re.findall('[0-9]+',line)\r\n# x = x+y\r\n\r\n# sum=0\r\n# for z in x:\r\n# sum = sum + int(z)\r\n\r\n# print(sum)\r\n\r\n# need to learn moRE !!","sub_path":"REGULAR EXP/reguexp.py","file_name":"reguexp.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"530951670","text":"# coding=utf-8\n# Copyright 2021 jemix.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python\n\"\"\" Task: Sentiment Analysis Dataset \"\"\"\n\nimport datasets\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n_CITATION = \"\"\"\\\n@inproceedings{id_citation,\n title = \"\",\n author = \"\",\n booktitle = \"\",\n year = \"\",\n url = \"\",\n pages = \"00--00\",\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\ndescription about data \n\nFor more details see https://github.com/jemiaymen/TC/sa/\n\"\"\"\n\n\n_URL = \"data/\"\n_TRAINING_FILE = \"train.txt\"\n_DEV_FILE = \"valid.txt\"\n\n\nclass SAConfig(datasets.BuilderConfig):\n \"\"\"BuilderConfig for Sentiment Analysis\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"BuilderConfig Sentiment Analysis\n\n Args:\n **kwargs: keyword arguments forwarded to super.\n \"\"\"\n super(SAConfig, self).__init__(**kwargs)\n\n\nclass SentimentAnalysis(datasets.GeneratorBasedBuilder):\n \"\"\"Sentiment Analysis dataset.\"\"\"\n\n BUILDER_CONFIGS = [\n SAConfig(name=\"sentiment_analysis\", version=datasets.Version(\n \"1.0.0\"), description=\"S.A dataset\"),\n ]\n\n def _info(self):\n return datasets.DatasetInfo(\n description=_DESCRIPTION,\n features=datasets.Features(\n {\n \"text\": datasets.Value(\"string\"),\n \"label\": datasets.features.ClassLabel(\n names=[\n 'NEG', # Negatif Sentiment\n 'NEU', # Neutre Sentiment\n 'POS', # Positif Sentiment\n ]\n ),\n }\n ),\n supervised_keys=None,\n homepage=\"https://github.com/jemiaymen/TC/sa/\",\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n \"\"\"Returns SplitGenerators.\"\"\"\n urls_to_download = {\n \"train\": f\"{_URL}{_TRAINING_FILE}\",\n \"dev\": f\"{_URL}{_DEV_FILE}\",\n }\n downloaded_files = dl_manager.download_and_extract(urls_to_download)\n\n return [\n datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={\n \"filepath\": downloaded_files[\"train\"]}),\n datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={\n \"filepath\": downloaded_files[\"dev\"]}),\n ]\n\n def _generate_examples(self, filepath):\n logger.info(\"⏳ Generating examples from = %s\", filepath)\n with open(filepath, encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n splits = line.split(\"\\t\")\n if len(splits) < 2:\n continue\n\n yield i, {\n \"text\": splits[0],\n \"label\": splits[1].rstrip(),\n }\n logger.info(\"Generating examples finish !\")\n","sub_path":"sa_generator.py","file_name":"sa_generator.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"498871407","text":"import numpy as np\nfrom chainer import cuda\n\nclass Data:\n def __init__(self, data, label):\n assert len(data) == len(label)\n self.data = data\n self.label = label\n self.index = np.arange(len(data))\n #print(cupy.get_array_module(self.label))\n #print(cupy.get_array_module(cuda.to_gpu(self.label)))\n\n def get_index_data(self, index_list):\n return cuda.to_gpu(self.data[index_list])\n\n def get(self, n, need_index = False):\n ind = np.random.permutation(self.data.shape[0])\n if need_index:\n return cuda.to_gpu(self.data[ind[:n],:].astype(np.float32)), \\\n cuda.to_gpu(self.label[ind[:n]].astype(np.int32)), \\\n self.index[ind[:n]].astype(np.int32)\n else:\n return cuda.to_gpu(self.data[ind[:n],:].astype(np.float32)), \\\n cuda.to_gpu(self.label[ind[:n]].astype(np.int32))\n\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"427871180","text":"class Solution: \n def lengthOfLongestSubstring(self, s: str) -> int:\n d = {};\n length = len(s);\n start = 0;\n ans = 0;\n for i,c in enumerate(s):\n if c in d:\n start = max(start, d[c]);\n d[c] = i+1;\n ans = max(ans, i - start + 1);\n return ans;","sub_path":"algorithms/Longest Substring Without Repeating Characters.py","file_name":"Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"637130199","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom sklearn.model_selection import cross_val_score\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\n#from sklearn.discriminant_analysis import LinearDiscriminantAnalysis \nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom sklearn.model_selection import cross_val_score\n#from sklearn.tree import DecisionTreeClassifier\n#from sklearn.svm import SVC\n#from sklearn.linear_model import LogisticRegression\n\n\n\ndataset = pd.read_csv(\"iris.csv\")\n\nall_columns = ['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']\nresultsArra = []\n\nneighborsNumber=4\ntest_sizeNumber=0.7\n\ncount=1\nfor a in all_columns:\n for b in all_columns:\n for c in all_columns: \n if a!=b and b!=c and a!=c :\n feature_columns = [a,b,c]\n print(\"Featured Columns : \",feature_columns)\n print(count)\n count=count+1\n X = dataset[feature_columns].values\n y = dataset['Species'].values\n # Apply numerical encoding to convert alphabetical names\n le = LabelEncoder()\n y = le.fit_transform(y)\n # Divide the dataset in testing and training vectors for cross-validation\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = test_sizeNumber, random_state = 0)\n # Instantiate learning model (k = 3)\n classifier = KNeighborsClassifier(n_neighbors=neighborsNumber)\n # Fitting the model\n classifier.fit(X_train, y_train)\n # Predicting the Test set results\n y_pred = classifier.predict(X_test)\n \n classifier = KNeighborsClassifier(n_neighbors=neighborsNumber)\n #classifier = LinearDiscriminantAnalysis()\n #classifier = DecisionTreeClassifier()\n #classifier = SVC(gamma = 'auto')\n #classifier = LogisticRegression()\n # Fitting the model\n classifier.fit(X_train, y_train)\n \n # Predicting the Test set results\n y_pred = classifier.predict(X_test)\n \n # View the accuracy of the model\n accuracy = accuracy_score(y_test, y_pred)*100\n print('Accuracy of our model is equal ' + str(round(accuracy, 2)) + ' %.')\n \n a_temp =\"\"\n b_temp =\"\"\n c_temp =\"\"\n if a== \"SepalLengthCm\":\n a_temp=\"SL\"\n if a== \"SepalWidthCm\":\n a_temp=\"SW\"\n if a== \"PetalLengthCm\":\n a_temp=\"PL\"\n if a== \"PetalWidthCm\":\n a_temp=\"PW\"\n \n if b== \"SepalLengthCm\":\n b_temp=\"SL\"\n if b== \"SepalWidthCm\":\n b_temp=\"SW\"\n if b== \"PetalLengthCm\":\n b_temp=\"PL\"\n if b== \"PetalWidthCm\":\n b_temp=\"PW\"\n \n if c== \"SepalLengthCm\":\n c_temp=\"SL\"\n if c== \"SepalWidthCm\":\n c_temp=\"SW\"\n if c== \"PetalLengthCm\":\n c_temp=\"PL\"\n if c== \"PetalWidthCm\":\n c_temp=\"PW\"\n \n \n tempArr = [a_temp,b_temp,c_temp,round(accuracy, 3)]\n \n resultsArra.append(tempArr)\n\n#NOW WRITING ON CSV FILE\n\nwith open('3FeatureResults/Resut_with_n_'+str(neighborsNumber)+'_testSize_'+str(test_sizeNumber)+'_3_features.csv', mode='w',newline='') as csv_file:\n fieldnames = ['Feature A', 'Feature B','Feature C' ,'Accuracy']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n \n writer.writeheader()\n for row in resultsArra:\n #writer.writerow({row[]})\n writer.writerow({'Feature A': row[0], 'Feature B': row[1],'Feature C': row[2], 'Accuracy': row[3]})\n #print(row[0])\n \n","sub_path":"KNN_3_Features Result Generator.py","file_name":"KNN_3_Features Result 
Generator.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"547630159","text":"import pytest\n\nfrom integration import runner, file, ssh\n\nhosts = [\"multiple-1\", \"multiple-2\"]\n\n@runner.cleanup_node(hosts, \"qemu:///system\")\ndef test_multi_nodes():\n \"\"\" \n run 3 instances at the same time. Make sure ssh works correctly\n and hostname is set to instance name\n \"\"\"\n\n env_vars = runner.load_variables_from_env()\n multiple = file.get_test_path(\"multiple\")\n \n runner.run_xii(deffile=multiple, variables=env_vars, cmd=\"start\")\n\n for host in hosts:\n con = ssh.connect_to(host, \"root\", \"linux\", multiple)\n assert(con.run(\"hostname\") == host)\n\n runner.run_xii(deffile=multiple, variables=env_vars, cmd=\"destroy\")\n","sub_path":"tests/integration/test_multiple.py","file_name":"test_multiple.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"508738230","text":"\n''' decorator : callable한 객체들을 직접 수정하지 않고 결과값을 수정할때 사용 '''\n\nimport functools\nimport time\n\n\ndef timer(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n print(f'{func.__name__} took {round((time.time() - start), 4)} seconds')\n return result\n\n return wrapper\n\n\n# timer 데코레이터 적용\n@timer\ndef huge_add(a, b):\n '''==add=='''\n result = a + b\n time.sleep(0.4)\n return result\n\n\n@timer\ndef huge_subtract(a, b):\n '''==subtract=='''\n result = a + b\n time.sleep(0.5)\n return result\n\n\n@timer\ndef huge_multiply(a, b):\n '''==multiple=='''\n result = a * b\n time.sleep(0.6)\n return result\n\n\nif __name__ == '__main__':\n huge_number = 10e8\n huge_add(huge_number, huge_number)\n huge_subtract(huge_number, huge_number)\n huge_multiply(huge_number, huge_number)\n\n ''' wraper 디버깅 or 문서화에 필요 '''\n print(huge_add.__doc__)\n print(huge_subtract.__doc__)\n print(huge_multiply.__doc__)\n\n\n# 출력, 이제는 간편하게 호출된 함수 이름도 확인할 수 있습니다\n# huge_add took 0.4009 seconds\n# huge_subtract took 0.5007 seconds\n# huge_multiply took 0.6004 seconds\n\n\n\n'''출처 : https://velog.io/@doondoony/Python-Decorator-101'''","sub_path":"nam/study/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"482305407","text":"from tkinter import *\nfrom tkinter import ttk\nfrom ttkthemes import themed_tk as tk\nfrom PIL import Image, ImageOps, ImageDraw,ImageTk\nfrom tkinter import messagebox\nfrom tkinter import filedialog\nimport datetime\nimport os\nimport pdf\nfrom database import Database as db\ndata = db(\"Data/database/database.db\")\n\ndt = datetime.datetime.now()\ncurrent_date = dt.date().strftime(\"%B %d, %Y\")\ncurrent_time = dt.time().strftime(\"%I:%M\")\n\n\n\n\nclass AdminLeftFrame:\n def __init__(self,frame):\n self.admin_left_frame = Frame(frame, width=180, bg = \"white\")\n self.admin_left_frame.pack(side=LEFT, fill=Y, padx = 10, pady = 10)\n self.admin_left_frame.pack_propagate(False)\n\n #buttons\n #admin imaga\n self.add_img = Image.open(\"Data/pics/admin/personal/current_image.png\")\n self.add_img.thumbnail((180, 180))\n self.new_entry_img = ImageTk.PhotoImage(self.add_img)\n self.image_canvas = Canvas(self.admin_left_frame, width = 170, height = 150, bg = 'white')\n self.prof_pic = self.image_canvas.create_image(90, 80, image=self.new_entry_img, anchor='center')\n\n\n\n self.image_canvas.bind(\"\", self.show_img_chage)\n self.image_canvas.bind(\"\", self.remove_img_chang)\n self.image_canvas.bind(\"\", self.change_admin_pic)\n\n\n self.dasboard_btn = Label(self.admin_left_frame, text = \"Dashboard\", fg = \"blue\", font = \"Arial 12\", bg = \"white\",)\n self.admin_left_itmes_btn = Label(self.admin_left_frame, text = \"Items\", fg = \"blue\", font = \"Arial 12\", bg = \"white\")\n self.partners_btn = Label(self.admin_left_frame, text = 'Partners', fg = 'blue', font = 'Arial 12', bg = 'white')\n self.settings_btn = Label(self.admin_left_frame, text = 'Settings', fg = 'blue', font = 'Arial 12', bg = 'white')\n self.orders_btn = Label(self.admin_left_frame, text = \"Orders\", fg = 'blue', font = \"arial 12\", bg = 'white')\n self.profit_btn = Label(self.admin_left_frame, text = \"Profit\", fg = 'blue', font = \"arial 12\", bg = 'white')\n\n def add(self):\n self.image_canvas.pack(side = 'top', fill = X)\n self.dasboard_btn.pack(side = TOP, fill = X)\n self.dasboard_btn.pack_configure(ipady = 7, pady = 3)\n self.partners_btn.pack(side = TOP, fill = X, ipady = 7)\n self.admin_left_itmes_btn.pack(side = TOP, fill = X, ipady = 7)\n self.orders_btn.pack(side = TOP, fill = X, ipady = 7)\n self.profit_btn.pack(side = TOP, fill = X, ipady = 7)\n self.settings_btn.pack(side = TOP, fill = X, ipady = 7)\n\n\n\n self.dasboard_btn.bind(\"\", lambda event: self.dasboard_btn.configure(bg = '#d6d9d9', cursor = \"hand2\", relief = 'raised',\n fg = 'black'))\n self.dasboard_btn.bind(\"\", lambda event: self.dasboard_btn.configure(bg = 'white', relief = 'flat',fg = 'blue',))\n self.admin_left_itmes_btn.bind(\"\", lambda event: self.admin_left_itmes_btn.configure(bg = '#d6d9d9', cursor = 'hand2', relief = 'raised',\n fg = 'black'))\n self.admin_left_itmes_btn.bind(\"\", lambda event: self.admin_left_itmes_btn.configure(bg = 'white', relief = 'flat',fg = 'blue'))\n\n self.partners_btn.bind(\"\",\n lambda event: self.partners_btn.configure(bg='#d6d9d9', cursor='hand2', relief = 'raised',\n fg = 'black'))\n self.partners_btn.bind(\"\", lambda event: self.partners_btn.configure(bg='white', relief = 'flat',fg = 'blue'))\n\n\n self.settings_btn.bind(\"\", lambda event: self.settings_btn.configure(bg='#d6d9d9', cursor=\"hand2\", relief = 'raised',\n fg = 'black'))\n self.settings_btn.bind(\"\", lambda event: self.settings_btn.configure(bg='white', relief = 'flat',fg = 'blue'))\n\n 
self.orders_btn.bind(\"\",\n lambda event: self.orders_btn.configure(bg='#d6d9d9', cursor=\"hand2\", relief='raised',\n fg='black'))\n self.orders_btn.bind(\"\",\n lambda event: self.orders_btn.configure(bg='white', relief='flat', fg='blue'))\n\n self.profit_btn.bind(\"\",\n lambda event: self.profit_btn.configure(bg='#d6d9d9', cursor=\"hand2\", relief='raised',\n fg='black'))\n self.profit_btn.bind(\"\",\n lambda event: self.profit_btn.configure(bg='white', relief='flat', fg='blue'))\n\n\n\n\n def show_img_chage(self, event):\n self.image_canvas.configure(cursor='hand2', bg='SystemButtonFace')\n self.rect = self.image_canvas.create_rectangle(10, 123, 170, 150, fill='#dfdfe1')\n self.text = self.image_canvas.create_text(87, 135, text='Change Picture', font='arial 13 underline')\n\n def remove_img_chang(self, event):\n self.image_canvas.delete(self.rect)\n self.image_canvas.delete(self.text)\n\n def change_admin_pic(self,event):\n image = filedialog.askopenfile(initialdir = './', title = 'Select Image', filetypes = ((\"jpeg file\",\"*.jpg\"),(\"png file\", \"*.png\")))\n image_path = image.name\n\n im = Image.open(image_path)\n bigsize = (im.size[0] * 3, im.size[1] * 3)\n mask = Image.new('L', bigsize, 0)\n draw = ImageDraw.Draw(mask)\n draw.ellipse((0, 0) + bigsize, fill=255)\n mask = mask.resize(im.size, Image.ANTIALIAS)\n im.putalpha(mask)\n\n output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))\n output.putalpha(mask)\n os.remove('./Data/pics/admin/personal/current_image.png')\n output.save('./Data/pics/admin/personal/current_image.png')\n\n self.image_canvas.delete(self.prof_pic)\n self.add_img = Image.open(\"Data/pics/admin/personal/current_image.png\")\n self.add_img.thumbnail((180, 180))\n self.img = ImageTk.PhotoImage(self.add_img)\n self.canv_image = self.image_canvas.create_image(90, 80, image=self.img, anchor='center')\n\n\n\n\n\n\nclass AdminTopFrame:\n def __init__(self,frame):\n self.admin_top_frame = Frame(frame, height=80, bg = '#6b238f')\n self.admin_top_frame.pack(side=TOP, fill=X)\n self.message_image = Label(self.admin_top_frame, bg = '#6b238f')\n self.message_label = Label(self.admin_top_frame, bg = '#6b238f')\n\n self.message_image.place(relx = 0.35, rely = 0.23)\n self.message_label.place(relx = 0.45, rely = 0.38)\n\n\n\n\n\n\n\nclass AdminMiddleFrame:\n def __init__(self,frame):\n self.admin_middle_frame = Frame(frame,)\n self.admin_middle_frame.pack(fill=BOTH, expand=True)\n\n def forget(self):\n its = self.admin_middle_frame.pack_slaves()\n for z in its:\n z.pack_forget()\n\n its = self.admin_middle_frame.grid_slaves()\n for z in its:\n z.grid_forget()\n\n its = self.admin_middle_frame.place_slaves()\n for z in its:\n z.place_forget()\n\n\n\n\n\n\n\n\n\n#todo ITEMS Button ------------------------------------------------------------------------------------------------------------------------------------------>\nclass AdminItems:\n def __init__(self, fr):\n\n #frames\n self.items_frame = Frame(fr, height=40, width=300, bg = \"white\")\n self.show_frame = Frame(fr, width = 700, height = 400, bg = '#ffffff')\n\n\n #buttons\n self.admin_view_items = Label(self.items_frame, text=\"View\", bg = \"white\", fg = \"blue\",)\n self.admin_update_items = Label(self.items_frame, text=\"Updates\", bg = \"white\", fg = \"blue\")\n self.admin_delete_items = Label(self.items_frame, text=\"Delete\", bg = \"white\", fg = \"blue\")\n self.admin_add_items = Label(self.items_frame, text=\"Add\", bg = \"white\", fg = \"blue\")\n self.admin_return_item = Label(self.items_frame, text 
= \"Return\", bg = 'white',fg = 'blue')\n self.item_message = Label(self.items_frame, image = '')\n\n\n\n #binding events to buttons---------------------->\n self.admin_view_items.bind(\"\", lambda event: self.admin_view_items.configure(fg = 'black', cursor = \"hand2\",font = \"arial 11 underline\"))\n self.admin_view_items.bind(\"\", lambda event: self.admin_view_items.configure(fg = 'blue', cursor = \"hand2\", font = \"arial 11\"))\n self.admin_view_items.bind(\"\", self.view_items)\n\n self.admin_update_items.bind(\"\", lambda event: self.admin_update_items.configure(fg='black', cursor=\"hand2\",font = \"arial 11 underline\"))\n self.admin_update_items.bind(\"\", lambda event: self.admin_update_items.configure(fg='blue', cursor=\"hand2\", font = \"arial 11\"))\n self.admin_update_items.bind(\"\", self.items_update)\n\n\n self.admin_delete_items.bind(\"\", lambda event: self.admin_delete_items.configure(fg='black', cursor=\"hand2\", font = \"arial 11 underline\"))\n self.admin_delete_items.bind(\"\", lambda event: self.admin_delete_items.configure(fg='blue', cursor=\"hand2\", font = \"arial 11\"))\n self.admin_delete_items.bind(\"\", self.item_delete)\n\n self.admin_add_items.bind(\"\", lambda event: self.admin_add_items.configure(fg='black', cursor=\"hand2\",font = \"arial 11 underline\"))\n self.admin_add_items.bind(\"\", lambda event: self.admin_add_items.configure(fg='blue', cursor=\"hand2\", font = \"arial 11\"))\n self.admin_add_items.bind(\"\", self.add_item)\n\n self.admin_return_item.bind(\"\", lambda event: self.admin_return_item.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.admin_return_item.bind(\"\", lambda event: self.admin_return_item.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.admin_return_item.bind(\"\", self.ReturnItem)\n\n\n\n\n\n #changing background color of buttons on click---------------------------------->\n self.bg_current_list = []\n self.bg_list = [self.admin_view_items,self.admin_add_items, self.admin_update_items,\n self.admin_delete_items,self.admin_return_item]\n for x in self.bg_list:\n x.bind(\"\", self.chang_bg)\n\n def chang_bg(self,event):\n if len(self.bg_current_list) >=2:\n for i in self.bg_current_list[0:-1]:\n i.configure(bg = 'white')\n self.bg_current_list.remove(i)\n\n\n\n\n\n\n\n\n #adding item frame and show frame in admin middle frame------------------------------>\n def add(self):\n self.items_frame.pack_propagate(False)\n self.items_frame.pack(side=TOP, pady=5,)\n self.show_frame.pack(side=TOP, padx=10, pady=5)\n self.admin_view_items.pack(side=LEFT, ipadx = 7, ipady = 6)\n self.admin_add_items.pack(side=LEFT, ipadx = 7, ipady = 6)\n self.admin_update_items.pack(side=LEFT, ipadx = 7, ipady = 6)\n self.admin_delete_items.pack(side=LEFT, ipadx = 7, ipady = 6)\n self.admin_return_item.pack(side = LEFT, ipadx = 7, ipady = 6)\n\n\n\n\n #forgeting widgets in show frames--------------------------->\n def forget(self):\n x = self.show_frame.place_slaves()\n for i in x:\n i.place_forget()\n\n y = self.show_frame.pack_slaves()\n for j in y:\n j.pack_forget()\n\n z = self.show_frame.grid_slaves()\n\n for m in z:\n m.grid_forget()\n\n\n\n\n #view items tab------------------------------>\n def view_items(self,event):\n self.admin_view_items.configure(bg = '#9fa59a')\n if self.admin_view_items not in self.bg_current_list:\n self.bg_current_list.append(self.admin_view_items)\n\n self.tree = ttk.Treeview(self.show_frame)\n self.tree['column'] = (\"one\", \"two\", \"three\")\n self.tree.column('#0', 
width=60, anchor = 'center')\n self.tree.column('one', width=100, anchor = 'center')\n self.tree.column('two', width=120, anchor = 'center')\n self.tree.column('three', width=180, anchor = 'center')\n\n self.tree.heading(\"#0\", text=\"No\", )\n self.tree.heading(\"one\", text=\"Code\")\n self.tree.heading(\"two\", text=\"Size\")\n self.tree.heading(\"three\", text=\"Company\")\n\n self.tree.pack_propagate(False)\n self.scr = ttk.Scrollbar(self.tree, orient='vertical', command=self.tree.yview)\n self.tree.configure(yscroll=self.scr.set)\n self.scr.pack(side=RIGHT, fill=Y)\n\n self.cost_label = Label(self.show_frame, text=\"Cost:\", bg=\"white\", font=\"Arial 12 bold\")\n self.cost_value = Label(self.show_frame, text=\"\", bg=\"white\", font=\"Arial 12 bold\")\n\n self.weight_label = Label(self.show_frame, text=\"Weight:\", bg=\"white\", font=\"Arial 12 bold\")\n self.weight_value = Label(self.show_frame, text=\"\", bg=\"white\", font=\"Arial 12 bold\")\n self.no_items_label = Label(self.show_frame, text=\"Total Items:\", bg=\"white\")\n self.no_items_values = Label(self.show_frame, text=\"\", bg=\"white\")\n\n\n self.current_tab = Label(self.show_frame, text=\"View Items\", bg='white', font='Weight 22 bold underline', fg='#f6546a')\n try:\n set_img = Image.open('Data/pics/admin/views.png')\n set_img.thumbnail((50, 40))\n img = ImageTk.PhotoImage(set_img)\n self.current_img = Label(self.show_frame, image=img, bg='white')\n self.current_img.image = img\n except FileNotFoundError:\n self.current_img = Label(self.show_frame, text='image not found', font='arial 8', bg='white')\n\n\n # adding items to show frame----------------------------->\n\n # self.list_box.pack(side = TOP, pady = 50)\n self.show_frame.pack_propagate(False)\n self.forget()\n self.tree.pack(side=TOP, pady=70)\n # self.cost_label.place(relx=0.3, rely=0.75)\n # self.cost_value.place(relx=0.5, rely=0.75)\n # self.weight_label.place(relx=0.3, rely=0.8)\n # self.weight_value.place(relx=0.5, rely=0.8)\n\n self.current_tab.place(relx=0.3, rely=0.01)\n self.current_img.place(relx = 0.56, rely = 0.03)\n\n data.admin_view_items(self.tree)\n\n\n\n\n\n\n #add item tab------------------------------>\n def add_item(self,event):\n self.admin_add_items.configure(bg = '#9fa59a')\n if self.admin_add_items not in self.bg_current_list:\n self.bg_current_list.append(self.admin_add_items)\n\n\n self.var_code = IntVar()\n self.var_size = StringVar()\n self.comp = StringVar()\n self.compvar = 0\n self.company_back = 0\n self.combo_var = StringVar()\n\n self.code_label = Label(self.show_frame, text=\"Code: \", bg=\"white\")\n self.code_entry = ttk.Entry(self.show_frame, width=15, textvariable=self.var_code, justify=CENTER)\n\n self.size_label = Label(self.show_frame, text=\"Size: \", bg=\"white\")\n self.size_entry = ttk.Entry(self.show_frame, width=15, textvariable=self.var_size, justify=CENTER)\n self.size_info = Label(self.show_frame, text=\"(In mm)\", bg=\"White\")\n\n self.company_list = []\n data.add_new_item_id(self.var_code, self.company_list)\n\n self.company_label = Label(self.show_frame, text=\"Company: \", bg=\"white\")\n self.company_combo = ttk.Combobox(self.show_frame, values=self.company_list, width=14, textvariable=self.combo_var)\n\n if self.company_list:\n self.company_combo.set(self.company_list[0])\n self.add_company_btn = Button(self.show_frame, image=\"\", text=\"add\", )\n\n self.err = Label(self.show_frame, text='', font='Arial 14 italic', width=50, bg='white')\n\n self.x = 0\n\n def submit(event):\n if self.compvar == 0:\n 
try:\n self.x = 1\n code = self.var_code.get()\n self.x = 2\n size = self.var_size.get()\n self.x = 3\n combo = self.combo_var.get()\n self.x = 4\n\n except TclError:\n if self.x == 1:\n self.err.configure(text=\"Failed: Only 4 Digit Code Is Accepted\", bg='red')\n pass\n\n else:\n data.add_new_item(self.var_code.get(), self.var_size.get(), self.combo_var.get().title(),\n self.err,current_date)\n data.add_new_item_id(self.var_code, self.company_list)\n self.var_size.set(\"\")\n self.combo_var.set(self.company_list[0])\n else:\n try:\n self.x = 1\n code = self.var_code.get()\n self.x = 2\n size = self.var_size.get()\n self.x = 3\n comp = self.comp.get()\n\n except TclError:\n if self.x == 1:\n self.err.configure(text=\"Failed: Only 4 Digit Code Is Accepted\", bg='red')\n pass\n\n else:\n data.add_new_item(self.var_code.get(), self.var_size.get(), self.comp.get().title(),\n self.err, current_date)\n data.add_new_item_id(self.var_code, self.company_list)\n self.var_size.set(\"\")\n self.combo_var.set(self.company_list[0])\n\n self.submit_btn = ttk.Button(self.show_frame, text='Submit', )\n self.submit_btn.bind(\"<Button-1>\", submit)\n\n self.add_company_entry = ttk.Entry(self.show_frame, width=15, textvariable=self.comp)\n\n self.current_tab = Label(self.show_frame, text=\"Add New Item\", bg='white', font='Weight 22 bold underline', fg='#f6546a')\n try:\n set_img = Image.open('Data/pics/admin/dbadd.png')\n set_img.thumbnail((50, 40))\n img = ImageTk.PhotoImage(set_img)\n self.current_img = Label(self.show_frame, image=img, bg='white')\n self.current_img.image = img\n except FileNotFoundError:\n self.current_img = Label(self.show_frame, text='image not found', font='arial 8', bg='white')\n\n\n\n #adding items to show frame--------------------------->\n self.forget()\n\n self.code_label.place(relx=0.15, rely=0.32, )\n self.code_entry.place(relx=0.27, rely=0.32, height=25)\n\n self.size_label.place(relx=0.15, rely=0.42)\n self.size_entry.place(relx=0.27, rely=0.42, height=25)\n self.size_info.place(relx=0.45, rely=0.42)\n\n self.company_label.place(relx=0.15, rely=0.52)\n self.company_combo.place(relx=0.27, rely=0.52, height=25)\n self.add_company_btn.place(relx=0.45, rely=0.51)\n\n self.current_tab.place(relx=0.3, rely=0.01)\n self.current_img.place(relx=0.62, rely=0.01)\n\n self.err.place(relx=0.05, rely=0.92)\n\n self.submit_btn.place(relx=0.4, rely=0.7)\n\n def company_func(event):\n if self.company_back == 0:\n self.compvar = 1\n self.company_combo.place_forget()\n self.add_company_entry.place(relx=0.27, rely=0.52, height=25)\n self.add_company_btn.configure(text='<<', fg='blue')\n self.company_back = 1\n else:\n self.compvar = 0\n self.add_company_entry.place_forget()\n self.add_company_btn.configure(text=\"Add\", fg='black')\n self.company_combo.place(relx=0.27, rely=0.52, height=25)\n self.company_back = 0\n\n self.add_company_btn.bind(\"<Button-1>\", company_func)\n\n\n\n\n\n\n\n\n #update items tab --------------------------------->\n def items_update(self, event):\n self.admin_update_items.configure(bg = '#9fa59a')\n if self.admin_update_items not in self.bg_current_list:\n self.bg_current_list.append(self.admin_update_items)\n\n\n\n self.var_id = StringVar()\n\n self.var_name = StringVar()\n self.var_old_cost = StringVar()\n self.var_old_weight = StringVar()\n\n self.var_add_cost = DoubleVar()\n self.var_add_weight = DoubleVar()\n\n self.var_add_cost.set(\"\")\n self.var_add_weight.set(\"\")\n\n self.code_label = Label(self.show_frame, text=\"Enter Code: \", bg=\"white\")\n self.code_value = 
ttk.Entry(self.show_frame, width=10, textvariable=self.var_id)\n\n\n self.add_cost_label = Label(self.show_frame, text=\"Add Cost: \", bg=\"white\")\n self.add_cost_value = ttk.Entry(self.show_frame, width=20, textvariable=self.var_add_cost,\n state = 'readonly')\n press_enter = Label(self.show_frame, text = '(Press Enter to continue)', font = 'helvetica 8 italic',bg = 'white')\n\n self.add_weight_label = Label(self.show_frame, text=\"Add Weight: \", bg=\"white\")\n self.add_weight_value = ttk.Entry(self.show_frame, width=20, textvariable=self.var_add_weight,\n state = 'readonly')\n\n self.name = Label(self.show_frame, text=\"\", bg='white')\n self.name_value = Label(self.show_frame, bg='white', textvariable=self.var_name)\n\n self.old_cost_label = Label(self.show_frame, text='', bg='white')\n self.old_cost_value = Label(self.show_frame, bg='white', textvariable=self.var_old_cost)\n\n self.old_weight_label = Label(self.show_frame, text='', bg='white')\n self.old_weight_value = Label(self.show_frame, bg='white', textvariable=self.var_old_weight)\n\n self.err = Label(self.show_frame, text='', font='Arial 14 italic', width=50, bg='white')\n\n self.update_btn = ttk.Button(self.show_frame, text=\"Update\")\n\n def upd_func(event):\n try:\n self.var_old_cost.set(\"\")\n self.var_old_weight.set(\"\")\n self.var_name.set(\"\")\n self.name.configure(text=\"\")\n self.add_weight_value.configure(state = 'readonly')\n self.add_cost_value.configure(state = 'readonly')\n self.old_weight_label.configure(text='')\n self.old_cost_label.configure(text=\"\")\n self.err.configure(text = '', bg = 'white')\n\n val = data.upd_func_values(self.var_id.get())\n if val == None:\n self.err.configure(text = 'Wrong item id...', bg = 'red')\n return False\n else:\n self.name.configure(text=\"Name: \")\n self.add_cost_value.configure(state = 'normal')\n self.add_weight_value.configure(state = 'normal')\n\n self.old_weight_label.configure(text = 'Current Weight: ')\n self.old_cost_label.configure(text = 'Average cost: ')\n self.var_name.set(val[2] + \" \" + val[1])\n self.var_old_cost.set(val[4])\n self.var_old_weight.set(val[3])\n except Exception as e:\n print('something went wrong...')\n\n\n\n self.code_value.bind(\"<Return>\", upd_func)\n\n def update(event):\n done_dict = {}\n if self.var_id.get() == '':\n return False\n else:\n try:\n cost = self.var_add_cost.get()\n done_dict['cost'] = cost\n except TclError:\n done_dict['cost'] = ''\n try:\n weight = self.var_add_weight.get()\n done_dict['weight'] = weight\n except TclError:\n done_dict['weight'] = ''\n\n if done_dict['cost'] == '' and done_dict['weight'] == '':\n return False\n else:\n data.admin_update(done_dict['cost'], done_dict['weight'], int(self.var_id.get()),current_date)\n self.var_add_weight.set(\"\")\n self.var_add_cost.set(\"\")\n self.err.configure(text = \"Successfully updated item...\", bg = 'green')\n\n self.update_btn.bind(\"<Button-1>\", update)\n\n self.current_tab = Label(self.show_frame, text=\"Update Item\", bg='white', font='Weight 22 bold underline', fg='#f6546a')\n\n try:\n set_img = Image.open('Data/pics/admin/validpng.png')\n set_img.thumbnail((50, 40))\n img = ImageTk.PhotoImage(set_img)\n self.current_img = Label(self.show_frame, image=img, bg='white')\n self.current_img.image = img\n except FileNotFoundError:\n self.current_img = Label(self.show_frame, text='image not found', font='arial 8', bg='white')\n\n\n\n #adding items to show frame ----------------------------->\n self.forget()\n self.code_label.place(relx=0.105, rely=0.15, )\n 
self.code_value.place(relx=0.25, rely=0.15, height=35)\n press_enter.place(relx = 0.37, rely = 0.17)\n\n self.add_cost_label.place(relx=0.1, rely=0.3)\n self.add_cost_value.place(relx=0.25, rely=0.3, height=25)\n\n self.add_weight_label.place(relx=0.1, rely=0.4)\n self.add_weight_value.place(relx=0.25, rely=0.4, height=25)\n\n self.name.place(relx=0.65, rely=0.2)\n self.name_value.place(relx=0.75, rely=0.2)\n\n self.old_cost_label.place(relx=0.65, rely=0.3)\n self.old_cost_value.place(relx=0.85, rely=0.3)\n\n self.old_weight_label.place(relx=0.65, rely=0.4)\n self.old_weight_value.place(relx=0.85, rely=0.4)\n\n self.update_btn.place(relx=0.23, rely=0.55)\n\n self.current_tab.place(relx=0.3, rely=0.01)\n self.current_img.place(relx=0.58, rely=0.01)\n\n self.err.pack(side = 'bottom', fill = X)\n\n\n\n\n\n\n #delete items tab ----------------------------------------------------->\n def item_delete(self,event):\n self.admin_delete_items.configure(bg = '#9fa59a')\n if self.admin_delete_items not in self.bg_current_list:\n self.bg_current_list.append(self.admin_delete_items)\n\n\n\n self.var_code = StringVar()\n self.var_item = StringVar()\n\n self.code_label = Label(self.show_frame, text=\"Enter your code: \", bg=\"white\")\n self.code_value = ttk.Entry(self.show_frame, width=10, textvariable=self.var_code)\n press_enter = Label(self.show_frame, text = '(Press Enter to continue)', font = 'helvetica 8 italic',bg = 'white')\n\n\n self.item = Label(self.show_frame, bg='white', textvariable=self.var_item)\n\n self.err = Label(self.show_frame, text='', font='Arial 14 italic', width=50, bg='white')\n\n self.delete_btn = ttk.Button(self.show_frame, text=\"Delete\")\n\n\n def del_show(event):\n try:\n id = int(self.var_code.get())\n except ValueError:\n pass\n else:\n data.delete_items_show(id, self.var_item)\n\n self.code_value.bind(\"<Return>\", del_show)\n\n def delete(event):\n try:\n id = int(self.var_code.get())\n except ValueError:\n messagebox.showerror(\"Code Error\", \"Wrong Code...\",parent = self.show_frame)\n pass\n else:\n data.delete(id, self.err)\n\n self.delete_btn.bind(\"<Button-1>\", delete)\n\n self.current_tab = Label(self.show_frame, text=\"Delete Item\", bg='white', font='Weight 22 bold underline', fg='#f6546a')\n try:\n set_img = Image.open('Data/pics/admin/del1.png')\n set_img.thumbnail((50, 40))\n img = ImageTk.PhotoImage(set_img)\n self.current_img = Label(self.show_frame, image=img, bg='white')\n self.current_img.image = img\n except FileNotFoundError:\n self.current_img = Label(self.show_frame, text='image not found', font='arial 8', justify=CENTER, bg='white')\n\n\n\n #adding widgets to show frame -------------------------------->\n self.forget()\n self.code_label.place(relx=0.37, rely=0.2)\n self.code_value.place(relx=0.4, rely=0.3, height=35)\n press_enter.place(relx = 0.54,rely = 0.32)\n\n self.item.place(relx=0.4, rely=0.43)\n self.delete_btn.place(relx=0.35, rely=0.55)\n\n self.current_tab.place(relx=0.33, rely=0.011)\n self.current_img.place(relx=0.58, rely=0.01)\n\n self.err.place(relx=0.05, rely=0.92)\n\n\n\n\n def ReturnItem(self,event):\n self.admin_return_item.configure(bg='#9fa59a')\n if self.admin_return_item not in self.bg_current_list:\n self.bg_current_list.append(self.admin_return_item)\n\n self.forget()\n var_bill = IntVar()\n var_item = IntVar()\n var_weight = DoubleVar()\n var_item.set(\"\")\n var_weight.set(\"\")\n err = Label(self.show_frame, bg = 'white')\n err.pack(side = 'bottom', fill = X)\n\n\n\n\n show_label_frame = LabelFrame(self.show_frame, width = 600, height 
= 280, text = 'Return Item')\n bill_label = Label(show_label_frame, text = 'Bill No: ', font = 'helvetica 12 italic')\n bill_value = ttk.Entry(show_label_frame, width = 10, textvariable = var_bill,justify = 'center')\n press_enter = Label(show_label_frame, text='(Press Enter to continue)', font='helvetica 8 italic',)\n\n item_label = Label(show_label_frame,text = 'Item No: ', font = 'helvetica 12 italic')\n item_value = ttk.Entry(show_label_frame, width = 10,state = 'readonly', textvariable = var_item,justify = 'center')\n\n weight_label = Label(show_label_frame, text = 'Weight: ', font = 'helvetica 12 italic')\n weight_value = ttk.Entry(show_label_frame, width = 10, textvariable = var_weight, justify = 'center', state = 'readonly')\n\n return_btn = ttk.Button(show_label_frame, text = 'Return')\n\n tot_items_frame = Frame(show_label_frame, width = 250, height = 300, bg = '#67d3ff',relief = 'raised', bd = 3)\n tot_items_label = Label(tot_items_frame,bg = '#67d3ff', font = 'arial 13 bold', text = 'Bill Items')\n tot_items_label.pack(side = 'top')\n tot_items_frame.pack_propagate(False)\n\n\n show_label_frame.pack(side = 'top', pady = 20)\n bill_label.place(relx = 0.1, rely = 0.1)\n bill_value.place(relx = 0.1, rely = 0.2,height = 35)\n press_enter.place(relx = 0.22, rely = 0.22)\n\n item_label.place(relx = 0.1, rely = 0.5)\n item_value.place(relx = 0.1, rely = 0.6, height = 35)\n\n weight_label.place(relx = 0.25, rely = 0.5)\n weight_value.place(relx = 0.25, rely = 0.6, height = 35)\n\n return_btn.place(relx = 0.1, rely = 0.8)\n tot_items_frame.place(relx=0.55, rely=0.05)\n\n\n def ReturnShow(event):\n try:\n tot_children = tot_items_frame.pack_slaves()\n for j in tot_children:\n j.pack_forget()\n\n item_value.configure(state = 'readonly')\n err.configure(text = '', bg = 'white')\n item_value.configure(state = 'readonly')\n weight_value.configure(state = 'readonly')\n val = data.admin_return_show(var_bill.get())\n if val == []:\n err.configure(text='Wrong bill id...', bg='red')\n return False\n else:\n for items_tup in val:\n name_ls = data.admin_return_item_name(items_tup[1])\n name = name_ls[0].title() + \" \" + name_ls[1]\n tot_items_label.pack(side = 'top')\n Label(tot_items_frame, text = f\"{items_tup[1]}\" + \" - \" + name,bg = '#67d3ff').pack()\n\n item_value.configure(state = 'normal')\n weight_value.configure(state = 'normal')\n return_btn.bind(\"<Button-1>\", ReturnItemFinish)\n\n\n except TclError:\n err.configure(text = 'enter bill id...', bg = 'red')\n return False\n except Exception as e:\n print(e)\n\n\n bill_value.bind(\"<Return>\", ReturnShow)\n\n\n def ReturnItemFinish(event):\n try:\n err.configure(text = '', bg = 'white')\n data.admin_return_finish(var_bill.get(),var_item.get(),var_weight.get(),err)\n var_item.set(\"\")\n var_weight.set(\"\")\n item_value.configure(state = 'readonly')\n weight_value.configure(state = 'readonly')\n val = data.retriev_bill(var_bill.get())\n pdf.Printer(val, current_time, False)\n except TclError:\n err.configure(text = 'enter item id...',bg = 'red')\n return False\n except Exception as e:\n return False\n\n\n\n\n\n\n#todo business Partner Button ------------------------------------------------------------------------------------------------------------------------>\nclass BusinessPartners:\n def __init__(self,fr):\n self.show_frame = Frame(fr, width=700, height=400, bg='#ffffff')\n self.bar_frame = Frame(fr, height=40, width= 281, bg = \"white\")\n\n #buttons\n self.bar_partners = Label(self.bar_frame, text = 'Partners', bg= 'white', fg = 'blue')\n 
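# these toolbar \"buttons\" are hyperlink-style Labels rather than real Buttons (presumably to keep the flat white bar look); hover/click behaviour is wired up with bind() below\n 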
self.bar_add = Label(self.bar_frame, text = 'Add', bg = 'white', fg = 'blue')\n self.bar_edit = Label(self.bar_frame, text = 'Edit', bg = 'white', fg = 'blue')\n self.bar_credit = Label(self.bar_frame, text = 'Credit', bg = 'white', fg = 'blue')\n self.bar_debit = Label(self.bar_frame, text = 'Debit', bg = 'white', fg = 'blue')\n\n\n\n\n #binding buttons with events--------------------------->\n self.bar_partners.bind(\"<Enter>\", lambda event: self.bar_partners.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.bar_partners.bind(\"<Leave>\", lambda event: self.bar_partners.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.bar_partners.bind(\"<Button-1>\", self.partners)\n\n\n self.bar_add.bind(\"<Enter>\", lambda event: self.bar_add.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.bar_add.bind(\"<Leave>\", lambda event: self.bar_add.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.bar_add.bind(\"<Button-1>\", self.add_partner)\n\n\n self.bar_edit.bind(\"<Enter>\", lambda event: self.bar_edit.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.bar_edit.bind(\"<Leave>\", lambda event: self.bar_edit.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.bar_edit.bind(\"<Button-1>\", self.partner_edit)\n\n\n\n self.bar_credit.bind(\"<Enter>\", lambda event: self.bar_credit.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.bar_credit.bind(\"<Leave>\", lambda event: self.bar_credit.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.bar_credit.bind(\"<Button-1>\", self.partner_credit)\n\n\n # self.bar_debit.bind(\"<Enter>\", lambda event: self.bar_debit.configure(fg='black', cursor=\"hand2\",\n # font=\"arial 11 underline\"))\n # self.bar_debit.bind(\"<Leave>\", lambda event: self.bar_debit.configure(fg='blue', cursor=\"hand2\",\n # font=\"arial 11\"))\n # self.bar_debit.bind(\"<Button-1>\", self.partner_debit)\n #<------------------------------------------------------------------------------------------------->\n\n\n\n #changing buttons backgrounds on click-------------------------->\n self.bg_current_list = []\n self.bg_list = [self.bar_partners, self.bar_add, self.bar_edit, self.bar_credit, self.bar_debit]\n for x in self.bg_list:\n x.bind(\"<Button-1>\", self.chang_bg)\n\n\n\n\n\n self.tab = Label(self.show_frame,font='Weight 22 bold underline', fg='#f6546a', bg='white')\n self.err_label = Label(self.show_frame,)\n self.show_frame.pack_propagate(False)\n\n\n\n\n #function for changing button background-------------------------------->\n def chang_bg(self,event):\n if len(self.bg_current_list) >= 2:\n for i in self.bg_current_list[0:-1]:\n i.configure(bg='white')\n self.bg_current_list.remove(i)\n\n\n\n\n\n def add(self):\n self.bar_frame.pack_propagate(False)\n self.show_frame.pack_propagate(False)\n self.bar_frame.pack(side = TOP, pady = 5)\n self.show_frame.pack(padx = 10, pady = 5)\n self.bar_partners.pack(side = 'left', ipadx = 7, ipady =6)\n self.bar_add.pack(side = 'left', ipadx = 7, ipady = 6)\n self.bar_edit.pack(side = 'left', ipadx = 7, ipady = 6)\n self.bar_credit.pack(side = 'left', ipadx = 7, ipady = 6)\n self.bar_debit.pack(side = 'left', ipadx = 7, ipady = 6)\n self.err_label.pack(side='bottom', fill='x')\n self.tab.pack(side = 'top')\n\n\n\n #forgetting all widgets in show frame-------------------------->\n def forget_childs(self):\n its = self.show_frame.pack_slaves()\n for x in its:\n x.pack_forget()\n\n its_grid = self.show_frame.grid_slaves()\n for j in its_grid:\n j.grid_forget()\n\n its_place = 
self.show_frame.place_slaves()\n\n for z in its_place:\n z.place_forget()\n\n\n\n\n\n def partners(self,event):\n self.err_label.configure(text = '', bg = 'SystemButtonFace')\n self.forget_childs()\n self.tab.configure(text = 'Partners')\n self.tab.pack(side = 'top')\n\n self.bar_partners.configure(bg='#9fa59a')\n if self.bar_partners not in self.bg_current_list:\n self.bg_current_list.append(self.bar_partners)\n\n\n #partner tree view------------------------------------->\n self.tree = ttk.Treeview(self.show_frame)\n self.tree['column'] = (\"one\", \"two\", \"three\",'four','five')\n self.tree.column('#0', width=30, anchor = 'center')\n self.tree.column('one', width=80, anchor = 'center')\n self.tree.column('two', width=140, anchor = 'center')\n self.tree.column('three', width= 140, anchor = 'center')\n self.tree.column('four', width=110, anchor = 'center')\n self.tree.column('five', width = 110, anchor = 'center')\n\n self.tree.heading(\"#0\", text=\"No\", )\n self.tree.heading(\"one\", text=\"ID\")\n self.tree.heading(\"two\", text=\"Name\")\n self.tree.heading('three', text = 'Phone')\n self.tree.heading(\"four\", text=\"Balance\")\n self.tree.heading('five', text = 'Due')\n\n self.tree.pack_propagate(False)\n self.scr = ttk.Scrollbar(self.tree, orient='vertical', command=self.tree.yview)\n self.tree.configure(yscroll=self.scr.set)\n self.scr.pack(side=RIGHT, fill=Y)\n #<--------------------------------------------------------------------------------->\n\n\n\n\n self.tree.pack(side = 'top', pady = 20, padx =5 )\n self.err_label.pack(side='bottom', fill='x')\n\n data.ShowPartners(self.tree)\n\n close_val = None\n vals = []\n def get_id(event):\n nonlocal vals, close_val\n curItem = self.tree.focus()\n item = self.tree.item(curItem)\n close_val = item['values'][-1]\n val = item['values'][0:3]\n print(item['values'])\n address = data.partner_balance(val[0])[1]\n val.append(address)\n vals = val\n print_ledger.configure(state = \"normal\")\n close_partner.configure(state = 'normal')\n \n def close(event):\n print(vals[-2])\n if float(close_val) > 500:\n return False\n else:\n x = data.closepartner(vals[0])\n if x != True:\n messagebox.showerror(\"failed\", \"something went wrong\")\n return False\n\n\n print_ledger = ttk.Button(self.show_frame, text = \"Print Ledger\", state = 'disabled', command=lambda: pdf.ledger(self.show_frame,vals,current_date))\n close_partner = ttk.Button(self.show_frame, text = \"Close Partner\", state = 'disabled',)\n \n close_partner.bind(\"<Button-1>\",close)\n\n\n print_ledger.place(relx = 0.78, rely = 0.85)\n close_partner.place(relx = 0.02, rely = 0.85)\n self.tree.bind(\"<<TreeviewSelect>>\", get_id)\n \n\n\n\n\n\n\n\n\n def add_partner(self,event):\n self.err_label.configure(text = '', bg = 'SystemButtonFace') \n self.forget_childs()\n self.err_label.configure(text='', bg='white')\n self.err_label.pack(side='bottom', fill=X)\n self.tab.pack(side = 'top')\n self.tab.configure(text = 'Add Partner')\n\n self.bar_add.configure(bg='#9fa59a')\n if self.bar_add not in self.bg_current_list:\n self.bg_current_list.append(self.bar_add)\n\n self.p_id_var = StringVar()\n self.p_name_var = StringVar()\n self.p_phone_var = StringVar()\n self.p_address_var = StringVar()\n self.p_credit_var = DoubleVar()\n self.p_ref_no_var = StringVar()\n\n\n\n # self.tab = Label(self.show_frame, text='Add Partner', font='Weight 22 bold underline', fg='#f6546a', bg='white')\n self.id = Label(self.show_frame, text = 'Enter Id: ', bg = 'white')\n self.id_value = ttk.Entry(self.show_frame, width = 10, textvariable 
= self.p_id_var)\n\n self.name = Label(self.show_frame, text = 'Name: ', bg = 'white')\n self.name_value = ttk.Entry(self.show_frame, width = 25, textvariable = self.p_name_var)\n\n self.phone = Label(self.show_frame, text = 'Phone: ', bg = 'white')\n self.phone_value = ttk.Entry(self.show_frame, width = 25, textvariable = self.p_phone_var)\n\n self.address = Label(self.show_frame, text = 'Address: ', bg = 'white')\n self.address_value = ttk.Entry(self.show_frame, width = 25, textvariable = self.p_address_var)\n\n self.credit_label = Label(self.show_frame, text = 'Credit: ', bg = 'white')\n self.credit_value = ttk.Entry(self.show_frame, width = 25, textvariable = self.p_credit_var, justify = 'center')\n\n self.add_ref_label = Label(self.show_frame, text = 'Ref No: ', bg = 'white')\n self.add_ref_value = ttk.Entry(self.show_frame, width = 25, textvariable = self.p_ref_no_var)\n\n\n self.submit_btn = ttk.Button(self.show_frame, text = 'Submit')\n\n self.tab.pack(side = 'top',)\n\n self.id.place(relx = 0.3, rely = 0.22)\n self.name.place(relx = 0.05, rely = 0.37)\n self.phone.place(relx = 0.45, rely = 0.37)\n self.address.place(relx = 0.05, rely =0.47)\n self.credit_label.place(relx = 0.45, rely = 0.47)\n self.add_ref_label.place(relx = 0.05, rely = 0.55)\n\n\n self.id_value.place(relx = 0.4, rely = 0.2, height = 35)\n self.name_value.place(relx = 0.15, rely = 0.37, height = 25)\n self.phone_value.place(relx = 0.55, rely = 0.37, height = 25)\n self.address_value.place(relx = 0.15, rely = 0.47, height = 25)\n self.credit_value.place(relx = 0.55, rely = 0.47, height = 25)\n self.add_ref_value.place(relx = 0.15, rely = 0.55, height = 25)\n\n self.submit_btn.place(relx = 0.3, rely = 0.7)\n\n\n def add_partner_funct(event):\n\n id = self.p_id_var.get()\n name = self.p_name_var.get()\n phone = self.p_phone_var.get()\n address = self.p_address_var.get()\n credit = self.p_credit_var.get()\n ref = self.p_ref_no_var.get()\n\n if id != '' and name != '':\n data.AddPartner(id, name, phone, address, current_date, credit, ref,self.err_label)\n\n\n self.submit_btn.bind(\"<Button-1>\",add_partner_funct)\n\n\n\n\n\n\n\n def partner_edit(self,event):\n self.forget_childs()\n self.err_label.configure(text='', bg='white')\n self.err_label.pack(side='bottom', fill=X)\n self.tab.configure(text = ' Edit Partner')\n\n self.bar_edit.configure(bg='#9fa59a')\n if self.bar_edit not in self.bg_current_list:\n self.bg_current_list.append(self.bar_edit)\n\n\n var_id = StringVar()\n var_name = StringVar()\n var_phone = StringVar()\n var_address = StringVar()\n\n # self.tab = Label(self.show_frame, text='Edit Partner', font='Weight 22 bold underline', fg='#f6546a', bg='white')\n self.id = Label(self.show_frame, text = 'Enter Id: ', bg = 'white')\n self.id_value = ttk.Entry(self.show_frame, width = 10, textvariable = var_id)\n press_enter = Label(self.show_frame, text = '(Press Enter to continue)', font = 'helvetica 8 italic',bg = 'white')\n\n\n self.name = Label(self.show_frame, text = 'Name: ', bg = 'white')\n self.name_value = ttk.Entry(self.show_frame, width = 20, state = 'readonly', textvariable = var_name)\n\n self.phone = Label(self.show_frame, text = 'Phone: ', bg = 'white')\n self.phone_value = ttk.Entry(self.show_frame, width = 20, state = 'readonly', textvariable = var_phone)\n\n self.address = Label(self.show_frame, text = 'Address: ', bg = 'white')\n self.address_value = ttk.Entry(self.show_frame, width = 20, state = 'readonly', textvariable = var_address)\n\n\n self.submit_btn = ttk.Button(self.show_frame, text = 
'Submit')\n\n self.tab.pack(side = 'top',)\n\n self.id.place(relx=0.3, rely=0.22)\n self.name.place(relx=0.3, rely=0.35)\n self.phone.place(relx=0.3, rely=0.43)\n self.address.place(relx=0.3, rely=0.53)\n\n self.id_value.place(relx=0.4, rely=0.2, height=35)\n press_enter.place(relx = 0.53, rely = 0.22)\n self.name_value.place(relx=0.4, rely=0.35, height=25)\n self.phone_value.place(relx=0.4, rely=0.43, height=25)\n self.address_value.place(relx=0.4, rely=0.53, height=25)\n self.submit_btn.place(relx=0.4, rely=0.7)\n\n self.err_label.pack(side='bottom', fill='x')\n\n def partner_edit_show(event):\n try:\n self.err_label.configure(text = '', bg = 'white')\n val = data.PartnerEditShow(var_id.get())\n if val == None:\n self.err_label.configure(text = \"Wrong partner Id...\", bg = 'red')\n return False\n else:\n var_name.set(val[0])\n var_phone.set(val[1])\n var_address.set(val[2])\n\n\n self.name_value.configure(state = 'normal')\n self.phone_value.configure(state = 'normal')\n self.address_value.configure(state = 'normal')\n except Exception:\n return False\n\n\n def edit_partner_funct(event):\n data.EditPartner(var_id.get(),var_name.get(),var_phone.get(),var_address.get(),self.err_label)\n\n\n self.id_value.bind(\"<Return>\", partner_edit_show)\n self.submit_btn.bind(\"<Button-1>\", edit_partner_funct)\n\n\n\n\n\n def partner_credit(self,event):\n self.forget_childs()\n self.err_label.configure(text = '', bg = 'white')\n self.err_label.pack(side = 'bottom', fill = X)\n self.tab.configure(text = 'Credits',)\n self.tab.pack(side = 'top')\n\n self.bar_credit.configure(bg='#9fa59a')\n if self.bar_credit not in self.bg_current_list:\n self.bg_current_list.append(self.bar_credit)\n\n var_id = StringVar()\n var_name = StringVar()\n var_credit = DoubleVar()\n var_ref = StringVar()\n var_date = StringVar(value= current_date)\n\n self.id_label = Label(self.show_frame, text = 'Enter Id: ', bg = 'white')\n self.id_value = ttk.Entry(self.show_frame, width=10, textvariable = var_id)\n press_enter = Label(self.show_frame, text = '(Press Enter to continue)', font = 'helvetica 8 italic',bg = 'white')\n\n\n self.credit_label = Label(self.show_frame, text = 'Add Credit: ', bg = 'white')\n self.credit_value = ttk.Entry(self.show_frame, width = 20, state = 'readonly', textvariable = var_credit)\n\n self.partner_name_label = Label(self.show_frame, bg = 'white')\n self.partner_name_value = Label(self.show_frame,bg= 'white', textvariable = var_name)\n\n self.ref_label = Label(self.show_frame, text = 'Ref No: ', bg = 'white')\n self.ref_value = ttk.Entry(self.show_frame, width = 20, state = 'readonly', textvariable = var_ref)\n\n self.date_label = Label(self.show_frame, text = 'Date: ', bg = 'white')\n self.date_value = ttk.Entry(self.show_frame, width = 20, state = 'readonly', textvariable = var_date)\n\n self.add_btn = ttk.Button(self.show_frame, text = 'Submit')\n\n\n\n\n #adding widgets to show frame -------------------------------->\n self.id_label.place(relx = 0.05, rely = 0.2)\n self.id_value.place(relx = 0.23, rely = 0.2, height = 35)\n press_enter.place(relx = 0.35, rely = 0.22)\n\n self.credit_label.place(relx = 0.05, rely = 0.3,)\n self.credit_value.place(relx = 0.23, rely = 0.3, height = 26)\n\n self.ref_label.place(relx = 0.05, rely = 0.4)\n self.ref_value.place(relx = 0.23, rely = 0.4, height = 26)\n\n self.partner_name_label.place(relx = 0.6, rely = 0.2)\n self.partner_name_value.place(relx = 0.7, rely = 0.2)\n\n self.date_label.place(relx = 0.05, rely = 0.5,)\n self.date_value.place(relx = 0.23, rely = 0.5)\n\n 
self.add_btn.place(relx = 0.23, rely = 0.6)\n\n self.err_label.pack(side='bottom', fill='x')\n\n def credit_show(event):\n try:\n self.partner_name_label.configure(text=\"\")\n self.credit_value.configure(state='readonly')\n self.ref_value.configure(state='readonly')\n self.date_value.configure(state = 'readonly')\n self.err_label.configure(text = \"\", bg = 'white')\n val = data.EditCreditShow(var_id.get(),)\n if val == None:\n self.err_label.configure(text = \"Wrong partner id...\", bg = 'red')\n return False\n else:\n var_name.set(val[0])\n self.partner_name_label.configure(text = \"Name: \")\n self.credit_value.configure(state = 'normal')\n self.ref_value.configure(state = 'normal')\n self.date_value.configure(state = 'normal')\n except Exception:\n pass\n\n\n def add_credit_funct(event):\n data.AddCredit(var_id.get(),var_credit.get(),var_ref.get(),var_date.get(), self.err_label)\n self.partner_name_label.configure(text=\"\")\n self.credit_value.configure(state='readonly')\n self.ref_value.configure(state='readonly')\n self.date_value.configure(state='readonly')\n var_name.set(\"\")\n var_ref.set(\"\")\n var_credit.set(\"\")\n var_id.set(\"\")\n var_date.set(current_date)\n\n\n self.id_value.bind(\"<Return>\", credit_show)\n self.add_btn.bind(\"<Button-1>\", add_credit_funct)\n\n\n\n # def partner_debit(self,event):\n # self.err_label.configure(text = '', bg = 'SystemButtonFace') \n\n # var_partner = StringVar()\n # var_bill = IntVar()\n # self.forget_childs()\n # self.err_label.configure(text = '', bg = 'white')\n # self.err_label.pack(side = 'bottom', fill = X)\n # self.tab.configure(text = 'debit',)\n # self.tab.pack(side = 'top')\n\n # self.bar_debit.configure(bg='#9fa59a')\n # if self.bar_debit not in self.bg_current_list:\n # self.bg_current_list.append(self.bar_debit)\n\n # def debit_procceed():\n # try:\n # self.err_label.configure(text = '', bg = 'SystemButtonFace')\n # p_id = var_partner.get()\n # b_id = var_bill.get()\n # msg = data.AddDebit(p_id,b_id)\n # if msg == True:\n # self.err_label.configure(text = 'Debit has been successfully added', bg = 'green')\n # var_partner.set(\"\")\n # var_bill.set(0)\n # else:\n # self.err_label.configure(text = 'something went wrong...', bg = 'red') \n # except TclError:\n # self.err_label.configure(text = 'Wrong Inputs...', bg = 'red') \n\n # show_frame = LabelFrame(self.show_frame, width = 450, height = 200, text = 'Debit')\n # show_frame.pack(side = 'top', pady = 10)\n # show_frame.pack_propagate(False)\n\n # id_label = Label(show_frame, text = \"Partner Id: \", )\n # id_entry = ttk.Entry(show_frame, width = 20, justify = 'center', textvariable = var_partner)\n\n # bill_label = Label(show_frame, text = \"Bill No\")\n # bill_entry = ttk.Entry(show_frame, width = 15, justify = 'center', textvariable = var_bill)\n\n # submit_btn = ttk.Button(show_frame, text = 'Submit', command = debit_procceed)\n\n # id_label.place(relx = 0.01, rely = 0.25)\n # id_entry.place(relx = 0.2, rely = 0.25, height = 26) \n\n # bill_label.place(relx = 0.01, rely = 0.4)\n # bill_entry.place(relx = 0.2, rely = 0.4, height = 26)\n\n # submit_btn.place(relx = 0.15, rely = 0.7)\n\n \n\n\n\n\n#todo Dashboard ------------------------------------------------------------------------------------------------------------------------------------------------->\n\nclass Dashboard:\n def __init__(self,fr):\n self.frame = fr\n self.bar = Frame(self.frame, height = 45, bg = 'red')\n self.show_frame = Frame(self.frame, bg = 'blue')\n self.graph_frame1 = Frame(self.show_frame, bg = 'gray')\n 
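# second chart area; add() packs graph_frame1/graph_frame2 side by side, and the names suggest they are placeholders for future graphs\n 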
self.graph_frame2 = Frame(self.show_frame, bg = 'gray')\n\n\n\n\n def add(self):\n\n self.forget_main()\n self.bar.pack(side = 'top', fill = X,)\n self.show_frame.pack(side = 'top', fill = 'both', expand = 1)\n self.graph_frame1.pack(side = 'left', fill = 'both', expand = True)\n self.graph_frame2.pack(side = 'left',fill = 'both', expand = True)\n \n\n def forget_main(self):\n vals = self.frame.pack_slaves()\n for x in vals:\n x.pack_forget()\n\n vals1 = self.frame.place_slaves()\n for x in vals1:\n x.place_forget()\n\n vals2 = self.frame.grid_slaves()\n for x in vals2:\n x.grid_forget()\n\n def forget(self):\n its = self.show_frame.pack_slaves()\n for x in its:\n x.pack_forget()\n\n its_grid = self.show_frame.grid_slaves()\n for j in its_grid:\n j.grid_forget()\n\n its_place = self.show_frame.place_slaves()\n\n for z in its_place:\n z.place_forget()\n\n\n\nclass Orders:\n def __init__(self, fr):\n self.show_frame = Frame(fr, width=700, height=400, bg='#ffffff')\n self.bar_frame = Frame(fr, height=40, width=280, bg=\"white\")\n\n\n\n\n # buttons\n self.log_orders_btn = Label(self.bar_frame, text='Orders Log', bg='white', fg='blue')\n self.add_order_btn = Label(self.bar_frame, text='Add Order', bg='white', fg='blue')\n self.cancel_order_btn = Label(self.bar_frame, text='Cancel Order', bg='white', fg='blue')\n\n\n # binding buttons with events--------------------------->\n self.log_orders_btn.bind(\"<Enter>\", lambda event: self.log_orders_btn.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.log_orders_btn.bind(\"<Leave>\", lambda event: self.log_orders_btn.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.log_orders_btn.bind(\"<Button-1>\", self.Order_Logs_Show)\n\n self.add_order_btn.bind(\"<Enter>\", lambda event: self.add_order_btn.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.add_order_btn.bind(\"<Leave>\", lambda event: self.add_order_btn.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.add_order_btn.bind(\"<Button-1>\", self.add_orders_show)\n\n self.cancel_order_btn.bind(\"<Enter>\", lambda event: self.cancel_order_btn.configure(fg='black', cursor=\"hand2\",\n font=\"arial 11 underline\"))\n self.cancel_order_btn.bind(\"<Leave>\", lambda event: self.cancel_order_btn.configure(fg='blue', cursor=\"hand2\",\n font=\"arial 11\"))\n self.cancel_order_btn.bind(\"<Button-1>\", self.order_cancel_show)\n\n\n self.data_list = []\n self.companies_list = []\n\n # <------------------------------------------------------------------------------------------------->\n\n # changing buttons backgrounds on click-------------------------->\n self.bg_current_list = []\n self.bg_list = [self.log_orders_btn, self.add_order_btn, self.cancel_order_btn,]\n\n for x in self.bg_list:\n x.bind(\"<Button-1>\", self.chang_bg)\n\n self.tab = Label(self.show_frame, font='Weight 22 bold underline', fg='#f6546a', bg='white')\n self.err_label = Label(self.show_frame, )\n self.show_frame.pack_propagate(False)\n\n # function for changing button background-------------------------------->\n\n\n def chang_bg(self, event):\n if len(self.bg_current_list) >= 2:\n for i in self.bg_current_list[0:-1]:\n i.configure(bg='white')\n self.bg_current_list.remove(i)\n\n\n def add(self):\n self.bar_frame.pack_propagate(False)\n self.show_frame.pack_propagate(False)\n self.bar_frame.pack(side=TOP, pady=5)\n self.show_frame.pack(padx=10, pady=5)\n self.log_orders_btn.pack(side='left', ipadx=7, ipady=6)\n self.add_order_btn.pack(side='left', ipadx=7, ipady=6)\n self.cancel_order_btn.pack(side='left', ipadx=7, ipady=6)\n 
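# err_label doubles as a status bar across the bottom of the tab; handlers below flip it red/green with messages\n 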
self.err_label.pack(side='bottom', fill='x')\n self.tab.pack(side='top')\n\n # forgetting all widgets in show frame-------------------------->\n\n\n def forget_childs(self):\n its = self.show_frame.pack_slaves()\n for x in its:\n x.pack_forget()\n\n its_grid = self.show_frame.grid_slaves()\n for j in its_grid:\n j.grid_forget()\n\n its_place = self.show_frame.place_slaves()\n\n for z in its_place:\n z.place_forget()\n\n\n\n\n def add_orders_show(self,event):\n self.add_order_btn.configure(bg='#9fa59a')\n if self.add_order_btn not in self.bg_current_list:\n self.bg_current_list.append(self.add_order_btn)\n\n if len(self.data_list) != 0:\n f = messagebox.askyesno(\"Cancel\", \"Are you sure you want to cancel...\",parent = self.show_frame)\n if f == False:\n return False\n\n #adding companies to list\n try:\n self.companies_list = []\n for c in data.companies_for_orders():\n self.companies_list.append(c[0])\n except Exception as e:\n pass\n\n self.forget_childs()\n self.data_list = []\n self.dict_count = 0\n self.var_order_no = IntVar(value = data.order_id())\n self.var_company = StringVar()\n self.var_date = StringVar()\n\n add_show_frame = LabelFrame(self.show_frame, width = 600, height = 250, text = 'Order Information')\n add_show_frame.pack(pady= 50)\n\n\n\n self.order_label = Label(add_show_frame, text = \"Order No:\", font = 'arial 13')\n self.order_no = ttk.Entry(add_show_frame, width = 10, state = 'readonly', textvariable = self.var_order_no, justify = 'center')\n\n self.company_label = Label(add_show_frame, text = \"Company:\", font = 'arial 13')\n self.company_combo = ttk.Combobox(add_show_frame, values=self.companies_list, width=15, textvariable = self.var_company)\n if self.companies_list:\n self.company_combo.set(self.companies_list[0])\n else:\n self.company_combo.set(\"No company\")\n\n self.date_label = Label(add_show_frame, text = \"Date:\", font = 'arial 13')\n self.date_value = ttk.Entry(add_show_frame, width = 18, textvariable = self.var_date)\n\n self.cancel_btn = ttk.Button(self.show_frame, text = \"Cancel\",)\n self.cancel_btn.bind(\"<Button-1>\", self.add_orders_show)\n self.continue_btn = ttk.Button(self.show_frame, text = 'Continue', command = self.add_orders_continue)\n\n\n self.order_label.place(relx = 0.2,rely = 0.2)\n self.order_no.place(relx = 0.35, rely = 0.2, height = 35)\n\n self.company_label.place(relx = 0.2, rely = 0.43)\n self.company_combo.place(relx = 0.35, rely = 0.43, height = 26)\n\n self.date_label.place(relx = 0.2, rely = 0.6)\n self.date_value.place(relx = 0.35, rely = 0.6, height = 26)\n\n self.cancel_btn.place(relx = 0.2, rely = 0.85)\n self.continue_btn.place(relx = 0.4, rely = 0.85)\n\n\n\n\n\n def add_orders_continue(self):\n self.var_code = IntVar(value = '')\n self.var_cost = DoubleVar(value = '')\n self.var_weight = DoubleVar(value = '')\n\n if self.var_company.get() == \"\" or self.var_date.get() == \"\":\n return False\n if self.dict_count == 0:\n self.data_list.append({'order_no': self.var_order_no.get(), 'company': self.var_company.get(),'date':self.var_date.get()})\n self.forget_childs()\n\n\n show_frame = LabelFrame(self.show_frame, width = 380, height = 200, text = \"Insert Items\")\n show_frame.place(relx = 0.01, rely = 0.2)\n self.ord_label = Label(self.show_frame, text = \"Order Id: \", bg = 'white')\n self.ord_id = ttk.Entry(self.show_frame, width = 8, state = 'readonly', textvariable = self.var_order_no,\n justify = 'center')\n\n\n self.comp_label = Label(self.show_frame, text = \"Company: \", bg = 'white')\n self.comp_value = 
Label(self.show_frame, bg = 'white', textvariable = self.var_company)\n\n self.item_label = Label(show_frame, text = \"Item Code: \", font = \"arial 13\")\n self.item_value = ttk.Entry(show_frame, width = 16, justify = 'center', textvariable = self.var_code)\n\n self.weight_label = Label(show_frame, text = 'Weight: ', font = \"arial 13\")\n self.weight_value = ttk.Entry(show_frame, width = 16, justify = 'center', textvariable = self.var_weight)\n\n self.cost_label = Label(show_frame, text=\"Cost: \", font=\"arial 13\")\n self.cost_value = ttk.Entry(show_frame, width=16, justify='center', textvariable=self.var_cost)\n\n\n self.next_item_btn = Button(show_frame, text = \"Next Item\", command = self.next_item_func)\n self.confirm_btn = ttk.Button(self.show_frame, text = \"Confirm Order\", command = self.confirm_order_funct)\n self.insert_btn = Button(show_frame, text = \"Insert item\", command = self.insert_item_func, cursor = 'hand2')\n\n\n\n self.no_frame = Frame(self.show_frame, width = 230, height = 280, bg = '#99d6db', relief = 'raised', bd = 3)\n self.no_frame.pack_propagate(False)\n self.no_items = Label(self.no_frame, text = \"Items added so far: \", bg = '#99d6db', font = 'Arial 15 italic underline')\n self.no_frame.place(relx = 0.63, rely = 0.28)\n self.no_items.pack()\n\n\n self.ord_label.place(relx = 0.7, rely = 0.02)\n self.ord_id.place(relx = 0.9, rely = 0.01, height = 30)\n\n self.comp_label.place(relx = 0.7, rely = 0.1)\n self.comp_value.place(relx = 0.9, rely = 0.1,)\n\n self.item_label.place(relx = 0, rely = 0.1)\n self.item_value.place(relx = 0.25, rely = 0.1,height = 26)\n\n self.weight_label.place(relx = 0, rely = 0.28)\n self.weight_value.place(relx = 0.25, rely = 0.28, height = 26)\n\n self.cost_label.place(relx = 0, rely = 0.46)\n self.cost_value.place(relx = 0.25, rely = 0.46, height = 26)\n self.insert_btn.place(relx = 0.25, rely = 0.75)\n\n\n self.cancel_btn.place(relx = 0.01, rely = 0.9)\n # self.next_item_btn.place(relx = 0.7, rely = 0.75)\n self.confirm_btn.place(relx = 0.41, rely = 0.9)\n\n codes_ls = data.items_codes_list()\n if len(self.data_list) > 1:\n print(\"here\")\n for dicts in self.data_list[1:]:\n j = codes_ls[dicts['code']]['company'] + \" Steels Grade-60 \" + codes_ls[dicts['code']]['name'].replace(\n \" \", \"\")\n Label(self.no_frame, text=j, bg='#99d6db', font='arial 10 bold').pack(side='top')\n\n def next_item_func(self):\n # self.add_orders_continue()\n self.var_cost.set(\"\")\n self.var_code.set(\"\")\n self.var_weight.set(\"\")\n\n\n def insert_item_func(self):\n try:\n codes_ls = data.items_codes_list()\n\n for v in self.data_list:\n if self.var_code.get() in v.values():\n return False\n elif codes_ls[self.var_code.get()]['company'] != self.var_company.get():\n return False\n\n j = codes_ls[self.var_code.get()]['company'] + \" Steels Grade-60 \" + codes_ls[self.var_code.get()]['name'].replace(\" \", \"\")\n Label(self.no_frame, text = j, bg = '#99d6db', font = 'arial 10 bold').pack(side = 'top')\n\n\n self.data_list.append({'code':self.var_code.get(),'cost':self.var_cost.get(),'weight':self.var_weight.get()})\n self.dict_count += 1\n self.var_cost.set(\"\")\n self.var_code.set(\"\")\n self.var_weight.set(\"\")\n self.ord_id.focus_set()\n\n\n except TclError:\n self.err_label.configure(text = \"Fill all entries correctly...\", bg = 'red')\n return False\n except KeyError:\n messagebox.showerror(\"Code\",\"Wrong item code...\",parent = self.show_frame)\n return False\n\n\n\n\n\n def confirm_order_funct(self):\n if len(self.data_list) <= 1:\n 
return False\n\n self.forget_childs()\n codes_ls = data.items_codes_list()\n\n self.var_tot_weight = DoubleVar()\n self.var_tot_amount = DoubleVar()\n self.var_extra = DoubleVar()\n self.var_total_amount = DoubleVar()\n self.var_extra_amount = DoubleVar()\n\n\n self.tree = ttk.Treeview(self.show_frame)\n self.tree['column'] = (\"one\", \"two\", \"three\",\"four\",\"five\")\n self.tree.column('#0', width=40, anchor='center')\n self.tree.column('one', width=70, anchor='center')\n self.tree.column('two', width=210, anchor='center')\n self.tree.column('three', width=100, anchor='center')\n self.tree.column('four', width=80, anchor='center')\n self.tree.column('five', width=110, anchor='center')\n\n self.tree.heading(\"#0\", text=\"No\", )\n self.tree.heading(\"one\", text=\"Code\")\n self.tree.heading(\"two\", text=\"Item\")\n self.tree.heading(\"three\", text=\"Weight\")\n self.tree.heading(\"four\", text=\"Cost\")\n self.tree.heading(\"five\", text=\"Amount\")\n\n self.tree.pack_propagate(False)\n self.scr = ttk.Scrollbar(self.tree, orient='vertical', command=self.tree.yview)\n self.tree.configure(yscroll=self.scr.set)\n self.scr.pack(side=RIGHT, fill=Y)\n\n self.tree.pack(side = 'top', pady = 30)\n\n self.extra_label = Label(self.show_frame, text = \"Extra Charges: \", bg = 'white', font = \"arial 13 bold\")\n self.extra_value = ttk.Entry(self.show_frame, width = 18, justify = 'center', textvariable = self.var_extra_amount)\n self.extra_info = Label(self.show_frame, text = \"(Press Enter to continue)\", bg = 'white', font = 'helvetica 8')\n\n self.order_weight_label = Label(self.show_frame, text = \"Order Weight: \", bg = 'white', font = \"helvetica 12 italic\")\n self.order_weight_value = Label(self.show_frame,textvariable = self.var_tot_weight, font = \"arial 13 bold\", bg = 'white')\n\n self.order_amount_label = Label(self.show_frame, text = \"Order Amount: \", bg = 'white', font = 'helvetica 12 italic')\n self.order_amount_value = Label(self.show_frame,textvariable = self.var_tot_amount, font = \"arial 13 bold\", bg = 'white')\n\n self.total_amount_label = Label(self.show_frame, text=\"Total Amount: \", font = \"helvetica 12 italic\", bg = 'white')\n self.total_amount_value = Label(self.show_frame, textvariable=self.var_total_amount, font = \"arial 13 bold\", bg = 'white')\n\n self.finish_btn = ttk.Button(self.show_frame, text = \"Finish!\", state = 'disabled',)\n self.back_btn = ttk.Button(self.show_frame, text = 'Back', command = self.add_orders_continue)\n\n self.order_weight_label.place(relx = 0.6, rely = 0.68)\n self.order_weight_value.place(relx = 0.8, rely = 0.68)\n\n self.order_amount_label.place(relx = 0.6, rely = 0.739)\n self.order_amount_value.place(relx = 0.8, rely = 0.739, height = 30)\n\n self.total_amount_label.place(relx = 0.6, rely = 0.8)\n self.total_amount_value.place(relx = 0.8, rely = 0.8)\n\n self.extra_label.place(relx = 0.01, rely = 0.68)\n self.extra_value.place(relx = 0.21, rely = 0.68, height = 27)\n self.extra_info.place(relx = 0.17, rely = 0.77)\n\n self.finish_btn.place(relx = 0.78, rely = 0.9)\n self.back_btn.place(relx = 0.01,rely = 0.9)\n\n\n\n self.finish_btn.bind(\"<Button-1>\", self.Put_Order)\n\n\n\n #values for tree and database---------------------------------->\n self.weight_total = 0\n self.amount_total = 0\n count = 1\n for n in self.data_list[1:]:\n code = n['code']\n item = codes_ls[n['code']]['company'] + \" Steels Grade-60 \" + codes_ls[n['code']]['name'].replace(\" \", \"\")\n weight = n['weight']\n cost = n['cost']\n self.weight_total = 
self.weight_total + weight\n self.amount_total = self.amount_total + (cost*weight)\n amt = float(\"%.2f\" % (cost*weight))\n val = (code,item,weight,cost,amt)\n\n self.tree.insert('', 'end', text=count, values=val)\n count +=1\n\n self.var_tot_weight.set(self.weight_total)\n self.var_tot_amount.set(self.amount_total)\n\n\n\n def extra_func(event):\n self.var_total_amount.set(self.var_tot_amount.get() + self.var_extra_amount.get())\n self.finish_btn.configure(state = 'normal')\n self.extra_value.bind(\"<Return>\", extra_func)\n\n\n\n def Put_Order(self, event):\n try:\n data.PutOrderDetail(self.data_list,self.weight_total,self.amount_total,self.var_extra_amount.get())\n m = messagebox.showinfo(\"Order Loaded\", \"Successfully loaded the order\", parent = self.show_frame)\n if m == 'ok':\n self.data_list = []\n self.add_orders_show(event)\n\n except Exception as e:\n print(e)\n\n\n\n\n def order_cancel_show(self,event):\n self.forget_childs()\n self.data_list = []\n\n var_id = IntVar()\n var_id.set(\"\")\n self.cancel_order_btn.configure(bg='#9fa59a')\n if self.cancel_order_btn not in self.bg_current_list:\n self.bg_current_list.append(self.cancel_order_btn)\n\n show_frame = LabelFrame(self.show_frame,width = 550, height = 200, text = 'Order Cancellations')\n show_frame.pack(side = 'top',pady = 10)\n order_id_label = Label(show_frame, text = \"Enter Order Id: \")\n press_enter_label = Label(show_frame, text = \"(Press Enter to continue)\", font = 'helvetica 8')\n order_id_value = ttk.Entry(show_frame, width = 10,justify = 'center', textvariable = var_id)\n\n\n var_com_name = StringVar()\n var_ord_weight = StringVar()\n var_ord_amount = StringVar()\n var_ord_date = StringVar()\n\n com_name = Label(self.show_frame, bg = 'white', textvariable = var_com_name, font = 'helvetica 15 bold')\n\n ord_weight_label = Label(self.show_frame, bg = 'white', text ='Order Weight: ', font = \"helvetica 12 italic\")\n ord_weight_value = Label(self.show_frame, bg = 'white', font = \"arial 14 bold\", textvariable = var_ord_weight)\n\n ord_amount_label = Label(self.show_frame, bg = 'white', text = 'Order amount: ', font = \"helvetica 12 italic\")\n ord_amount_value = Label(self.show_frame, bg = 'white', font = \"arial 14 bold\", textvariable = var_ord_amount)\n\n ord_date_label = Label(self.show_frame, bg = 'white', text = 'Order date: ', font = \"helvetica 12 italic\")\n ord_date_value = Label(self.show_frame, bg = 'white', font = \"arial 14 bold\", textvariable = var_ord_date)\n\n\n canc_btn = ttk.Button(self.show_frame, text = \"Cancel Order\", state = 'disabled',)\n\n\n order_id_label.place(relx = 0.1, rely = 0.1)\n order_id_value.place(relx = 0.126, rely = 0.28, height = 35)\n press_enter_label.place(relx = 0.25, rely = 0.288)\n\n canc_btn.place(relx = 0.15, rely = 0.35)\n\n\n def show_order_info(event):\n try:\n\n vals = data.order_info(var_id.get())\n if vals[-1] == 'Cancelled':\n messagebox.showerror(\"Cancelled\", \"This order has already been cancelled\", parent = self.show_frame)\n return False\n name = vals[1].title() + \"'s \" + \"Order\"\n weight = vals[2]\n amount = vals[3]\n date = vals[5]\n var_com_name.set(name)\n var_ord_weight.set(weight)\n var_ord_amount.set(amount)\n var_ord_date.set(date)\n except TypeError:\n messagebox.showerror(\"Code\", \"Wrong order id...\",parent = self.show_frame)\n return False\n except TclError:\n messagebox.showerror(\"Code\", \"Wrong order id...\",parent = self.show_frame)\n return False\n\n com_name.place(relx = 0.7, rely = 0.6)\n\n ord_weight_label.place(relx = 0.63, 
rely = 0.7)\n ord_weight_value.place(relx = 0.8, rely = 0.7)\n\n ord_amount_label.place(relx = 0.63, rely = 0.8)\n ord_amount_value.place(relx = 0.8, rely = 0.8)\n\n ord_date_label.place(relx = 0.63, rely = 0.9)\n ord_date_value.place(relx = 0.8, rely = 0.9)\n\n canc_btn.configure(state = 'normal')\n canc_btn.bind(\"<Button-1>\", Cancel_funct)\n\n order_id_value.bind(\"<Return>\",show_order_info)\n\n\n def Cancel_funct(event):\n ask = Toplevel(self.show_frame)\n main_window_width = ask.winfo_screenwidth() - 100\n main_window_height = ask.winfo_screenheight() - 100\n splash_width = main_window_width / 2 - 150\n splash_height = main_window_height / 2 - 70\n ask.geometry(\"310x140+%d+%d\"%(splash_width,splash_height))\n ask.title(\"Password\")\n ask.iconbitmap(\"Data/pics/login.ico\")\n ask.grab_set()\n var_password = StringVar()\n\n\n def dest():\n ask.destroy()\n ask.grab_release()\n return False\n\n def on_closing():\n ask.grab_release()\n ask.destroy()\n ask.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n\n\n\n\n err = Label(ask, font=\"arial 11 italic\", justify='center')\n pass_label = Label(ask, text=\"Password: \", )\n pass_value = ttk.Entry(ask, width=20, textvariable = var_password, show = '*')\n ask_cancel = Button(ask, text=\"Cancel Order\", width=10,)\n ask_quit = Button(ask, text=\"Quit\", width=10, command = dest)\n\n err.pack(side='top', fill='x')\n\n pass_label.place(relx=0.13, rely=0.3)\n pass_value.place(relx=0.37, rely=0.3, height=27)\n\n ask_cancel.place(relx=0.15, rely=0.6)\n ask_quit.place(relx=0.5, rely=0.6)\n\n def Cancel(event):\n if var_password.get() != data.admin_login()[1]:\n err.configure(text='Wrong Password...', bg='red')\n return False\n\n data.CancelOrder(var_id.get())\n messagebox.showinfo(\"Cancelled\", \"Successfully cancelled the order\",parent = self.show_frame)\n self.order_cancel_show(event)\n\n ask.destroy()\n ask.grab_release()\n return 'break'\n\n ask_cancel.bind(\"<Button-1>\", Cancel)\n\n\n\n\n\n\n def Order_Logs_Show(self,event):\n self.log_orders_btn.configure(bg='#9fa59a')\n if self.log_orders_btn not in self.bg_current_list:\n self.bg_current_list.append(self.log_orders_btn)\n\n self.forget_childs()\n self.data_list = []\n self.tab.configure(text = \"Print Orders Log\")\n\n #variables------------------->\n var_year = StringVar()\n var_month = StringVar()\n var_results = StringVar()\n var_id = IntVar()\n var_company = StringVar()\n var_results.set(f\"({data.OrderResults('Since Start','All','All')[0]})\")\n var_id.set(\"\")\n\n\n companies_list = [\"All\"]\n for c in data.companies_for_orders():\n companies_list.append(c[0])\n\n years_list = ['Since Start']\n for y in range(2019,2051):\n years_list.append(y)\n\n months_list = ['All','January','february','march','April','May','June','July','August','September',\n 'October','November','December']\n\n hist_frame = LabelFrame(self.show_frame, text = 'Order History', width = 600, height = 200)\n hist_frame.pack_propagate(False)\n detail_frame = LabelFrame(self.show_frame, text = \"Order Details\", width = 600, height = 200)\n detail_frame.pack_propagate(False)\n hist_frame.pack(side = 'top', pady = 10)\n detail_frame.pack(side = 'top', pady = 10)\n\n\n select_yr_label = Label(hist_frame, text = \"Select Year: \", )\n select_yr_combo = ttk.Combobox(hist_frame, width = 15, values = years_list, textvariable = var_year)\n var_year.set(years_list[0])\n\n select_mth_label = Label(hist_frame, text = \"Select Month:\")\n select_mth_combo = ttk.Combobox(hist_frame, width = 15, values = months_list , textvariable = var_month)\n 
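# default filter values: 'Since Start' / 'All' / 'All', matching the initial OrderResults query above\n 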
var_month.set(months_list[0])\n\n select_comp_label = Label(hist_frame, text=\"Select Company:\")\n select_comp_combo = ttk.Combobox(hist_frame, width=15, values=companies_list, textvariable=var_company)\n var_company.set(companies_list[0])\n\n prnt_btn = ttk.Button(hist_frame, text = \"Print Results\")\n\n\n select_yr_label.place(relx = 0.05, rely = 0.2)\n select_yr_combo.place(relx = 0.23, rely = 0.2)\n\n select_mth_label.place(relx = 0.05, rely = 0.4)\n select_mth_combo.place(relx = 0.23, rely = 0.4)\n\n select_comp_label.place(relx = 0.05, rely = 0.6)\n select_comp_combo.place(relx = 0.23, rely = 0.6)\n\n results_label = Label(hist_frame, text = 'Results: ')\n results_value = Label(hist_frame, textvariable = var_results)\n\n results_label.place(relx = 0.8, rely = 0.2)\n results_value.place(relx = 0.83, rely = 0.35)\n\n prnt_btn.place(relx = 0.77, rely = 0.78)\n\n\n\n #filtering order result set----------------->\n def SetResults(event):\n var_results.set(f\"({data.OrderResults(var_year.get(),var_month.get(),var_company.get())[0]})\")\n\n\n select_yr_combo.bind(\"<<ComboboxSelected>>\", SetResults)\n select_mth_combo.bind(\"<<ComboboxSelected>>\", SetResults)\n select_comp_combo.bind(\"<<ComboboxSelected>>\", SetResults)\n\n\n #printing the orders log------------------>\n def PrintOrdersLog(event):\n datas = data.OrderResults(var_year.get(),var_month.get(),var_company.get())[1]\n ask_msg = messagebox.askyesno(\"Print\", \"Are you sure you want to print the log\",parent = self.show_frame)\n if ask_msg == True:\n pdf.Orders(datas)\n else:\n return False\n prnt_btn.bind(\"<Button-1>\", PrintOrdersLog)\n\n\n\n\n\n def OrderIdEnter(event):\n chk_order = data.order_info(var_id.get())\n if chk_order == None:\n messagebox.showerror(\"Id\", \"The order does not exist\",parent = self.show_frame)\n return False\n else:\n prnt_detail_btn.configure(state = 'normal')\n prnt_detail_btn.bind(\"<Button-1>\", PrintOrderDetail)\n\n\n\n\n def PrintOrderDetail(event):\n detail_order = data.order_info(var_id.get())\n pdf.OrderDetail(detail_order, current_time)\n\n\n\n #order_detail--------------->\n order_no_label = Label(detail_frame, text = \"Order No: \")\n order_no_value = ttk.Entry(detail_frame, width = 10, textvariable = var_id, justify = 'center')\n prss_enter_info = Label(detail_frame, text = \"(Press enter to continue)\", font = 'helvetica 8')\n prnt_detail_btn = ttk.Button(detail_frame, text = \"Print Order\", state = 'disabled')\n\n order_no_label.place(relx = 0.35, rely = 0.1)\n order_no_value.place(relx = 0.35, rely = 0.3, height = 35)\n\n prss_enter_info.place(relx = 0.3, rely = 0.6)\n prnt_detail_btn.place(relx = 0.7, rely = 0.7)\n\n\n order_no_value.bind(\"<Return>\", OrderIdEnter)\n\n\n\n\n\n\n#todo admin settings -------------------------------------------------------------------------------------------------->\n\n\nclass Settings:\n def __init__(self,fr):\n self.var_username = StringVar()\n self.var_password = StringVar()\n\n\n self.show_frame = Frame(fr, width=700, height=400, bg='#ffffff')\n self.tab = Label(self.show_frame, font='Weight 22 bold underline', fg='#f6546a', bg='white')\n\n self.show_labelframe = LabelFrame(self.show_frame, width = 500, height = 270, text = 'Admin Info')\n\n self.username_label = Label(self.show_labelframe, text = 'UserName: ',font = \"helvetica 12 italic\",)\n self.username_value = ttk.Entry(self.show_labelframe, width = 20, justify = 'center',\n textvariable = self.var_username,state = 'readonly')\n\n self.password_label = Label(self.show_labelframe, text = 'Password: ',font = \"helvetica 12 italic\",)\n self.password_value = 
ttk.Entry(self.show_labelframe, width = 20, justify = 'center',\n                                        textvariable = self.var_password,state = 'readonly')\n\n\n        self.edit_btn = Button(self.show_labelframe, text = 'Edit',command = self.EditClick)\n        self.change_btn = Button(self.show_labelframe, text = 'Save Changes', command = self.SaveChanges)\n\n        self.err = Label(self.show_labelframe,)\n\n\n    def add(self):\n        try:\n            log_in = data.admin_login()\n            username = log_in[0]\n            password = log_in[1]\n            self.var_username.set(username)\n            self.var_password.set(password)\n        except TypeError:\n            pass\n        self.username_value.configure(state = 'readonly')\n        self.password_value.configure(state = 'readonly')\n        self.err.configure(text = \"\", bg = \"SystemButtonFace\")\n\n        self.show_frame.pack(side = 'top', pady = 30,padx = 20)\n        self.show_frame.pack_propagate(False)\n        self.tab.pack(side = 'top')\n        self.show_labelframe.pack(side = 'top', pady = 30)\n        self.show_labelframe.pack_propagate(False)\n        self.err.pack(side = 'bottom', fill = 'x')\n\n        self.username_label.place(relx = 0.2, rely = 0.2)\n        self.username_value.place(relx = 0.4, rely = 0.2,height = 28)\n\n        self.password_label.place(relx = 0.2, rely = 0.35)\n        self.password_value.place(relx = 0.4, rely = 0.35, height = 28)\n\n        self.edit_btn.place(relx = 0.35, rely = 0.6)\n        self.change_btn.place(relx = 0.45, rely = 0.6)\n\n\n    def EditClick(self):\n        self.username_value.configure(state = 'normal')\n        self.password_value.configure(state = 'normal')\n\n\n    def SaveChanges(self):\n        try:\n            data.UpdateAdminInfo(self.var_username.get().lower(),self.var_password.get())\n            self.err.configure(text = 'Saved Changes ', bg ='green')\n        except Exception:\n            self.err.configure(text = \"Something went wrong\", bg = 'red')\n\n\n\n\n\n#todo profits ----------------------------------------------------------------------------------------------------->\n\n\nclass Profits:\n    def __init__(self,fr):\n        self.var_year = StringVar()\n        self.var_month = StringVar()\n        self.var_profit = DoubleVar()\n        self.years_list = ['Since Start']\n        for y in range(2019, 2051):\n            self.years_list.append(y)\n\n        self.months_list = ['All', 'January', 'february', 'march', 'April', 'May', 'June', 'July', 'August', 'September',\n                            'October', 'November', 'December']\n\n\n        self.show_frame = Frame(fr, width=700, height=400, bg='#ffffff')\n        self.tab = Label(self.show_frame, font='Weight 22 bold underline', fg='#f6546a', bg='white')\n\n        self.show_labelframe = LabelFrame(self.show_frame, width = 550, height = 270, text = 'Admin Info')\n\n        self.year_label = Label(self.show_labelframe, text = \"Select Year: \",font = \"helvetica 12 italic\")\n        self.year_combo = ttk.Combobox(self.show_labelframe, values = self.years_list, width = 15, textvariable = self.var_year)\n        self.var_year.set(self.years_list[0])\n\n        self.month_label = Label(self.show_labelframe, text = \"Select Month: \",font = \"helvetica 12 italic\")\n        self.month_combo = ttk.Combobox(self.show_labelframe, values = self.months_list, width = 15, textvariable = self.var_month)\n        self.var_month.set(self.months_list[0])\n\n        self.profit_label = Label(self.show_labelframe, text = \"Profit: \", font = \"helvetica 12 italic\")\n        self.profit_value = Label(self.show_labelframe, font = 'arial 14 bold', textvariable = self.var_profit)\n        self.var_profit.set(\"(goes here)\")\n\n        self.err = Label(self.show_labelframe,)\n\n        self.year_combo.bind(\"<<ComboboxSelected>>\", self.FilterProfit)\n        self.month_combo.bind(\"<<ComboboxSelected>>\", self.FilterProfit)\n\n\n    def add(self):\n        profit = \"%.2f\" % data.GetProfit(self.var_year.get(),self.var_month.get())\n        
self.var_profit.set(f'{float(profit):,}')\n        self.show_frame.pack(side = 'top', pady = 30,padx = 20)\n        self.show_frame.pack_propagate(False)\n        self.tab.pack(side = 'top')\n        self.show_labelframe.pack(side = 'top', pady = 30)\n        self.show_labelframe.pack_propagate(False)\n        self.err.pack(side = 'bottom', fill = 'x')\n\n        self.year_label.place(relx = 0.05, rely = 0.2)\n        self.year_combo.place(relx = 0.27, rely = 0.2, height = 30)\n\n        self.month_label.place(relx = 0.05, rely = 0.35)\n        self.month_combo.place(relx = 0.27, rely = 0.35, height = 30)\n\n        self.profit_label.place(relx = 0.2, rely = 0.71)\n        self.profit_value.place(relx = 0.35, rely = 0.7)\n\n\n    def FilterProfit(self,event):\n        profit = \"%.2f\" % data.GetProfit(self.var_year.get(), self.var_month.get())\n        self.var_profit.set(f'{float(profit):,}')\n","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":88455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"567798188","text":"# -*- coding: utf-8 -*-\n\nimport struct  # imported explicitly; used by encode_hdr below\n\ntry:\n    from _json_keys import *\n    from _util import *\n    from dissector_ipv6 import hdr_map_ipv6\n    from dissector_ipv4 import hdr_map_ipv4\n    from dissector_icmpv6 import hdr_map_icmpv6\n    from dissector_udp import hdr_map_udp\n    from dissector_coap import hdr_map_coap\nexcept ImportError:\n    from ._json_keys import *\n    from ._util import *\n    from .dissector_ipv6 import hdr_map_ipv6\n    from .dissector_ipv4 import hdr_map_ipv4\n    from .dissector_icmpv6 import hdr_map_icmpv6\n    from .dissector_udp import hdr_map_udp\n    from .dissector_coap import hdr_map_coap\n\nencode_hdr_map = {\n    JK_IPV6: hdr_map_ipv6,\n    JK_IPV4: hdr_map_ipv4,\n    JK_ICMPV6: hdr_map_icmpv6,\n    JK_UDP: hdr_map_udp,\n    JK_COAP: hdr_map_coap,\n}\n\ndef encode_hdr(hdr_map, hdr_list):\n    '''\n    hdr_map is defined per protocol.\n    hdr_list must be OrderedDict.\n    '''\n    def get_elm(k0):\n        for k, v in hdr_list.items():\n            if k0 == k:\n                return v\n        return None\n    #\n    ba = bytearray(0)\n    offset = 0\n    fld_size_prev = 0\n    fld_fmt_prev = None\n    fld_v = None\n    for i in hdr_map:\n        fld_name = i[0]\n        if fld_name == JK_SW:\n            if hdr_list.get(i[1], None) != i[2]:\n                # just skip it\n                continue\n            # otherwise\n            _ba = encode_hdr(i[3], hdr_list)\n            ba += _ba\n            continue\n        #\n        fld_fmt = i[1]\n        fld_size = struct.calcsize(fld_fmt)\n        fld_bits = i[2]\n        fld_def = i[3]\n        if 8*fld_size < fld_bits:\n            raise ValueError(\"fld_bits is bigger than fld_size. {:d} > {:d}\".\n                             format(fld_bits, 8*fld_size))\n        if fld_bits == 0 and fld_size_prev != 0:\n            # flush the fld_v to the header.\n            # move offset and finish the operation for a field.\n            ba += struct.pack(fld_fmt_prev, fld_v)\n            fld_v = None\n            fld_fmt_prev = None\n            offset += fld_size_prev\n            fld_size_prev = 0\n        # get the value.\n        v = get_elm(fld_name)\n        if v is None:\n            raise ValueError(\"{:s} is not found in the map.\".format(fld_name))\n        # set the value to the bytearray.\n        if fld_bits:\n            if fld_v is None:\n                # initialize if None\n                fld_v = 0\n                fld_fmt_prev = fld_fmt\n                fld_size_prev = fld_size\n            # the value is in bits.\n            fld_v <<= fld_bits\n            fld_v |= v\n            continue\n        # otherwise\n        if isinstance(v, (IPAddr, MACAddr)):\n            v = v.decode()\n        ba += struct.pack(fld_fmt, v)\n    #\n    return ba\n\ndef encoder(jo, hm=encode_hdr_map):\n    if isinstance(jo, (bytes, bytearray)):\n        return jo\n    #\n    proto = jo.get(JK_PROTO)\n    header = jo.get(JK_HEADER)\n    payload = jo.get(JK_PAYLOAD)\n    if not proto:\n        raise ValueError(\"protocol is not defined.\")\n    if not header:\n        raise ValueError(\"header is not defined.\")\n    hdr_map = hm.get(proto)\n    if not hdr_map:\n        raise ValueError(\"unknown protocol {:s}\".format(proto))\n    ba = encode_hdr(hdr_map, header)\n    if ba is None:\n        raise ValueError(\"error in {:s}\".format(proto))\n    if payload:\n        ba += encoder(payload)\n    return ba\n\n","sub_path":"src/schctest/pypacket_dissector/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
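A minimal sketch of the bit-accumulation technique encode_hdr uses: sub-byte fields are shifted into an accumulator and flushed with struct.pack once a byte-aligned field arrives. The 4/2/2-bit layout and values below are illustrative only, not one of the dissector's real header maps.

import struct

def pack_bits(fields):
    # fields: (value, bit_width) pairs, most-significant field first
    acc = 0
    for value, width in fields:
        acc = (acc << width) | (value & ((1 << width) - 1))
    return struct.pack("B", acc)

# version=6, flag_a=1, flag_b=2 -> 0b0110_01_10 == 0x66
assert pack_bits([(6, 4), (1, 2), (2, 2)]) == b"\x66"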
+{"seq_id":"563928981","text":"#! /usr/bin/python\n\n# Run 2 turtles in terminal\n# rosrun turtlesim turtlesim_node\n# rosservice call /spawn \"{x: 3.0, y: 3.0, theta: 0.0, name: 'leo'}\"\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nimport time\nimport math\n\ndef move_forward(pub, msg):\n msg.linear.x = 4.0\n msg.angular.z = 0.0\n pub.publish(msg)\n\ndef move_rotate(pub, msg):\n msg.linear.x = 0.0\n msg.angular.z = math.pi / 2\n pub.publish(msg)\n\nrospy.init_node('name_by_default')\n\npub1 = rospy.Publisher('turtle1/cmd_vel', Twist, queue_size=1)\npub2 = rospy.Publisher('leo/cmd_vel', Twist, queue_size=1)\nmsg = Twist()\n\nr = rospy.Rate(0.5) #Hz\n# square inf\nwhile not rospy.is_shutdown():\n move_forward(pub1, msg)\n move_forward(pub2, msg)\n time.sleep(1)\n move_rotate(pub1, msg)\n move_rotate(pub2, msg)\n\n r.sleep() # control iteration cycle\n\n","sub_path":"turtles_run.py","file_name":"turtles_run.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
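A quick sanity check (plain Python, no ROS required) of why the Twist values above trace a square: an angular speed of pi/2 rad/s held for about one second sweeps a quarter turn. The one-second dwell is an assumption read off the time.sleep(1) between commands; the real sweep also depends on rospy.Rate timing.

import math

angular_speed = math.pi / 2   # rad/s, as set in move_rotate()
turn_time = 1.0               # s, assumed effective duration of the turn
print(math.degrees(angular_speed * turn_time))  # -> 90.0 degrees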
+{"seq_id":"551239257","text":"# Problem 158\n# Junlin Wang\n\n\ndef isOK(str_list):\n    #determine if a type fits the requirement\n    bigger = 0\n    for i in range(1, len(str_list)):\n        if (str_list[i] > str_list[i-1]):\n            bigger += 1\n    if (bigger == 1): return True\n    else: return False\n\ndef getLetters():\n    a = [i for i in range(97, 123)]\n    a = list(map(lambda x: chr(x), a))\n    return a\n\nletters = getLetters()\n\ndef calculate(num):\n    # a list of 26 letters\n    num_of_letters = num\n    # initial list\n    combo = [i for i in letters[0:num_of_letters]]\n\n    helper(combo, num_of_letters - 1)\n\n\ncalculate(3)\n\n\n\n\n\n\n\n\n\n\n\n#Equation when length is 3\n'''\ndef getone(num):\n    #num is a number between 0 and 25 inclusively\n    first_part = 24*num-(num*(num-1))/2.0\n    second_part = (num + 24) * (25-num)/2.0\n    result = first_part + second_part\n    return result\nnumber = 0\nresult = 0\nfor i in range(0, 26):\n    result += getone(i)\n    print(\"%d:%d\" % (i,getone(i)))\nprint(result)\n'''\n","sub_path":"#158.py","file_name":"#158.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
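For reference, the condition isOK encodes is "exactly one adjacent pair is strictly increasing"; a compact, self-contained restatement with worked cases (the sample strings are arbitrary):

def one_ascent(s):
    return sum(1 for i in range(1, len(s)) if s[i] > s[i - 1]) == 1

assert one_ascent("ab")        # one ascent: a < b
assert one_ascent("cab")       # one ascent: a < b
assert not one_ascent("abc")   # two ascents
assert not one_ascent("ba")    # no ascent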
+{"seq_id":"309777185","text":"import quandl, math\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom sklearn import preprocessing, svm, model_selection\nfrom sklearn.linear_model import LinearRegression\n# For plotting\nimport datetime\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\n\n\n\ndf = quandl.get(\"WIKI/GOOGL\")\n\ndf = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]\n\ndf['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100.0\n\ndf['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0\n\ndf = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]\n\nforecast_col = 'Adj. Close'\ndf.fillna(value=-99999, inplace=True)\nforecast_out = int(math.ceil(0.01 * len(df)))\n\ndf['label'] = df[forecast_col].shift(-forecast_out)\n\nX = np.array(df.drop(['label'], axis=1))\nX = preprocessing.scale(X)\n# X_lately = X[-forecast_out:]\nX = X[:-forecast_out]\n\ndf.dropna(inplace=True)\n\ny = np.array(df['label'])\n\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)\n\n# clf = svm.SVR(kernel=k)\nclf = LinearRegression(n_jobs=-1)\n\nclf.fit(X_train, y_train)\n\nconfidence = clf.score(X_test, y_test)\n\nwith open('linearregression.pickle', 'wb') as f:\n    pickle.dump(clf, f)\n    print(\"\\nModel has been trained. Please run regression_predict.py\")\n","sub_path":"quandl_google/regression_fit.py","file_name":"regression_fit.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
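A sketch of the companion prediction step the final print refers to (regression_predict.py); the pickle path matches the one written above, but the sample row is made up, and a real run would have to apply the same preprocessing.scale transform used at training time.

import pickle
import numpy as np

with open('linearregression.pickle', 'rb') as f:
    clf = pickle.load(f)

# columns: Adj. Close, HL_PCT, PCT_change, Adj. Volume (already scaled)
X_new = np.array([[0.5, -0.1, 0.2, 1.3]])
print(clf.predict(X_new))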
+{"seq_id":"21741292","text":"# utilities for taking a high-dimensional matrix and producing rgb color values from its rows. \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\ndef squeeze(v):\n # translate and scale a vector onto the interval [0,1]\n if min(v) < 0:\n v -= min(v)\n return v/np.linalg.norm(v)\n\n\ndef low_d(Z):\n # suppose Z is some high-dimensional matrix. \n # project it onto 3 dimensions.\n pca = PCA(n_components=3)\n return pca.fit_transform(Z)\n\n\ndef main(n_vectors=100, vector_dim=1000):\n Z = np.random.random(size=(n_vectors, vector_dim))\n xs,ys = np.random.random(size=(2,n_vectors))\n z = np.array(list(map(squeeze, low_d(Z)))) \n #squeeze each vector into the interval [0,1]. \n plt.scatter(xs,ys,facecolors=z)\n plt.show()\n\n \n","sub_path":"patvis/pca_to_rgb.py","file_name":"pca_to_rgb.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
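If the goal is for each color component to span the full [0, 1] interval, a min-max variant of squeeze() does that exactly; the norm-based version above stays within [0, 1] but rarely reaches 1. Offered as an alternative sketch, not a required change.

import numpy as np

def squeeze_minmax(v):
    # shift to zero, then divide by the peak so the max lands exactly at 1
    v = v - v.min()
    peak = v.max()
    return v / peak if peak > 0 else v

print(squeeze_minmax(np.array([-2.0, 0.0, 2.0])))  # [0.  0.5 1. ]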
+{"seq_id":"551026310","text":"__author__ = 'randy'\n\nfrom src.ADS_BDataFrame import *\nfrom src.ADS_BDataProcessor import *\nfrom src.ADS_BDriver import *\nfrom src.Action import *\nfrom src.Aircraft import *\nfrom src.Display import *\nfrom src.FrontPanelButtons import *\nfrom src.LoudSpeaker import *\nfrom queue import *\n\naircraftQueue = PriorityQueue()\n\n\nclass MainClass:\n    def __init__(self):\n        self.ownShip = Aircraft(0, 0, 0, 0, 0, 0, 0, 0)\n        self.aircraftQueue = PriorityQueue()\n        self.myAction = Action(0,0)\n        self.ads_bInt = ADS_BDriver()\n        self.display = Display()\n        self.loudSpeaker = LoudSpeaker()\n        self.panelBtns = FrontPanelButtons()\n\n    def refreshAircraftListFromADSB(self):\n        adsData = self.ads_bInt.refresh()\n        return ADS_BDataProcessor.getAircraftList(adsData)\n\n    def buildAircraftPriorityQueue(self, aircraftList):\n        self.aircraftQueue = PriorityQueue()\n        for a in aircraftList:\n            self.aircraftQueue.put(a)\n\n    def updateDisplay(self, aircraftList, alertLevel, action):\n        self.display.updateAircraftList(aircraftList)\n        self.display.updateCurAlertLevel(alertLevel)\n        self.display.updateCurAction(action)\n\n    def updateLoudSpeaker(self, action):\n        self.loudSpeaker.annunciate(action)\n\n    def loopIter(self):\n        aircraftList = self.refreshAircraftListFromADSB()\n\n        if len(aircraftList) > 0:\n            self.buildAircraftPriorityQueue(aircraftList)\n\n            curAlertLevel = self.aircraftQueue.queue[0].getAlertLevel()\n            curAction = self.determineAction\n\n            self.updateDisplay(aircraftList, curAlertLevel, curAction)\n            self.updateLoudSpeaker(curAction)\n\n    def eventLoop(self):\n        while True:\n            self.loopIter()\n\n    @property\n    def determineAction(self):\n        # NB: the '<' branches and the two higher alert-level names below were lost\n        # to markup stripping and are reconstructed on the symmetric pattern of the\n        # surviving '>' branches; treat them as assumptions.\n        myAircraft = self.aircraftQueue.get()\n        if(myAircraft.getAlertLevel() == AlertLevel.AL_PrevAdvisory):\n            return Action(0,1)\n        elif(myAircraft.getAlertLevel() == AlertLevel.AL_ResolAdvisoryLow):\n            if(myAircraft.getZInFeet()>self.ownShip.getZInFeet()):\n                return Action(-1,-1)\n            elif(myAircraft.getZInFeet()<self.ownShip.getZInFeet()):\n                return Action(1,-1)\n            elif(myAircraft.getYInFeet()>self.ownShip.getYInFeet()):\n                return Action(-1,-1)\n            elif(myAircraft.getYInFeet()<self.ownShip.getYInFeet()):\n                return Action(1,-1)\n            elif(myAircraft.getXInFeet()>self.ownShip.getXInFeet()):\n                return Action(-1,-1)\n            elif(myAircraft.getXInFeet()<self.ownShip.getXInFeet()):\n                return Action(1,-1)\n        elif(myAircraft.getAlertLevel() == AlertLevel.AL_ResolAdvisoryMid):\n            if(myAircraft.getZInFeet()>self.ownShip.getZInFeet()):\n                return Action(-1,0)\n            elif(myAircraft.getZInFeet()<self.ownShip.getZInFeet()):\n                return Action(1,0)\n            elif(myAircraft.getYInFeet()>self.ownShip.getYInFeet()):\n                return Action(-1,0)\n            elif(myAircraft.getYInFeet()<self.ownShip.getYInFeet()):\n                return Action(1,0)\n            elif(myAircraft.getXInFeet()>self.ownShip.getXInFeet()):\n                return Action(-1,0)\n            elif(myAircraft.getXInFeet()<self.ownShip.getXInFeet()):\n                return Action(1,0)\n        elif(myAircraft.getAlertLevel() == AlertLevel.AL_ResolAdvisoryHigh):\n            if(myAircraft.getZInFeet()>self.ownShip.getZInFeet()):\n                return Action(-1,1)\n            elif(myAircraft.getZInFeet()<self.ownShip.getZInFeet()):\n                return Action(1,1)\n            elif(myAircraft.getYInFeet()>self.ownShip.getYInFeet()):\n                return Action(-1,1)\n            elif(myAircraft.getYInFeet()<self.ownShip.getYInFeet()):\n                return Action(1,1)\n            elif(myAircraft.getXInFeet()>self.ownShip.getXInFeet()):\n                return Action(-1,1)\n            elif(myAircraft.getXInFeet() 0:\r\n\t\t\tto_pred_dict[key] = c\r\n\treturn to_pred_dict\r\n\r\ndef count_missing_per_sample(df):\r\n\tmissing = []\r\n\tfor _, row in df.iterrows():\r\n\t\tmissing.append(count_empty_cells(row))\r\n\treturn missing\r\n\r\ndef complete(dest, source):\r\n\tfor i in np.intersect1d(np.where(df[dest] == ''),np.where(df[source] != '')):\r\n\t\tprint(df[dest][i],df[source][i])\r\n\t\tdf[dest][i] = df[source][i]\r\n\tprint(\"LOL\")\r\n\r\n\r\ndef date2season(month):\r\n\tif month in ['1', '2', '12']:\r\n\t\treturn \"winter\"\r\n\tif month in ['3', '4', '5']:\r\n\t\treturn \"spring\"\r\n\tif month in ['6', '7', '8']:\r\n\t\treturn \"summer\"\r\n\treturn \"fall\"\r\n\r\ndef race_replace(race):\r\n\tif race == \"chinese\":\r\n\t\treturn str(2)\r\n\telif race == \"malay\":\r\n\t\treturn str(3)\r\n\telif race == \"brazilwhite\" or race == \"dutch\":\r\n\t\treturn str(6)\r\n\telif race == \"brazilblack\":\r\n\t\treturn str(5)\r\n\telif 
race == \"brazilbrown\":\r\n\t\treturn str(7)\r\n\treturn race\r\n\r\ndef art_preferance(x):\r\n\tif x == '':\r\n\t\treturn x\r\n\tx = float(x)\r\n\tif x > 0:\r\n\t\treturn '1'\r\n\treturn '0'\r\n\r\n\r\ndef create_dict(dfr):\r\n\t# create a dictionary to save for each inital feature, its corresponding indices after the one-hot encoding\r\n\tprev = '' \r\n\tfeat_dict = {}\r\n\tfor i, key in enumerate(dfr.keys()):\r\n\t\tcurr = key.split(\"__\")[0]\r\n\t\tif curr != prev:\r\n\t\t\tfeat_dict[curr] = [i]\r\n\t\telse:\r\n\t\t\tfeat_dict[curr].append(i)\r\n\t\tprev = curr\r\n\treturn feat_dict\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\r\n\tfilename = sys.argv[1]\r\n\t#filename = \"../ML1/Tab.delimited.Cleaned.dataset.WITH.variable.labels.csv\"\r\n\t\r\n\t# read csv file and drop garbage columns\r\n\tdf = pd.read_csv(filename, sep='\\t', encoding='latin-1', dtype=str)\r\n\tprint(\"Inital shape of dataframe: {}\".format(df.shape))\r\n\t\r\n\tdv_list = ['sunkDV', 'gainlossDV', 'anchoring1', 'anchoring2', 'anchoring3', 'anchoring4', 'Ranch1', 'Ranch2', 'Ranch3', 'Ranch4', 'scales',\\\r\n\t\t\t\t 'reciprocityother', 'reciprocityus', 'allowedforbidden', 'quote', 'flagdv', 'Sysjust', 'Imagineddv', 'IATexpart', 'IATexpmath', 'IATexp.overall']\r\n\r\n\tdrop_list = ['user_id', 'last_update_date', 'session_last_update_date', 'creation_date', 'session_creation_date',\\\r\n\t\t\t\t 'expcomments', 'numparticipants_actual', 'numparticipants', 'sample', 'beginlocaltime', 'text', 'session_status',\\\r\n\t\t\t\t 'previous_session_id', 'feedback', 'previous_session_schema', 'user_agent', 'task_status', 'task_sequence',\\\r\n\t\t\t\t 'session_created_by', 'study_url', 'sunkgroup', 'gainlossgroup', 'anch1group', 'anch2group', 'anch3group', \\\r\n\t\t\t\t 'anch4group', 'gambfalgroup', 'gambfalDV', 'gamblerfallacya_sd', 'gamblerfallacyb_sd', 'scalesgroup',\\\r\n\t\t\t\t 'reciprocitygroup', 'allowedforbiddenGroup', 'quoteGroup', 'flagGroup', 'MoneyGroup', 'ContactGroup', 'study_name',\\\r\n\t\t\t\t 'Ranchori', 'RAN001', 'RAN002', 'RAN003', 'd_donotuse', 'iatorder', 'exprunafter2', 'scalesreca', 'scalesrecb',\\\r\n\t\t\t\t 'quotearec', 'quotebrec', 'flagtimeestimate1', 'flagtimeestimate2', 'flagtimeestimate3', 'flagtimeestimate4',\\\r\n\t\t\t\t 'noflagtimeestimate1', 'noflagtimeestimate2', 'noflagtimeestimate3', 'noflagtimeestimate4', 'totalflagestimations',\\\r\n\t\t\t\t 'totalnoflagtimeestimations', 'moneyagea', 'moneyageb', 'moneyethnicitya', 'moneyethnicityb', 'moneygendera', 'moneygenderb',\\\r\n\t\t\t\t 'partgender', 'imagineddescribe', 'IATfilter', 'totexpmissed', 'IATEXPfilter', 'citizenship', 'imptaskto',\\\r\n\t\t\t\t 'nativelang', 'nativelang2', 'citizenship2', 'omdimc3rt', 'omdimc3trt', 'anchoring1akm', 'anchoring1bkm','iat_exclude',\\\r\n\t\t\t\t 'anchoring3ameter', 'anchoring3bmeter', 'religion', 'filter_$', 'race', 'mturk.non.US', 'mturk.Submitted.PaymentReq',\r\n\t\t\t\t 'mturk.total.mini.exps', 'mturk.duplicate', 'mturk.exclude.null', 'mturk.keep', 'mturk.exclude', 'meanlatency', 'meanerror', \\\r\n\t\t\t\t 'block2_meanerror', 'block3_meanerror', 'block5_meanerror', 'block6_meanerror', 'lat11', 'lat12', 'lat21', 'lat22', 'sd1', 'sd2', 'd_art1', 'd_art2'] \\\r\n\t\t\t\t + ['o'+str(i) for i in range(1,12)] + ['task_id.'+str(i) for i in range(46)] + ['task_url.'+str(i) for i in range(46)]\\\r\n\t\t\t\t + ['task_creation_date.'+str(i) for i in range(46)] + ['priorexposure'+str(i) for i in range(1,14)]\r\n\t\r\n\tdrop_list = drop_list + dv_list\r\n\tdf = df.drop(columns=drop_list)\r\n\tprint(\"Shape of 
dataframe after some features dropped: {}\".format(df.shape))\r\n\t\r\n\tmetadata_list = ['session_date', 'referrer', 'expgender', 'exprace', 'runalone', 'compensation', 'recruitment', 'separatedornot', 'age',\\\r\n\t\t\t\t\t 'flag-american', 'money-first', 'ethnicity', 'major', 'omdimc3-pass', 'politicalid', 'sex', 'scalesorder',\\\r\n\t\t\t\t\t 'reciprocorder', 'diseaseforder','quoteorder', 'flagprimorder', 'sunkcostorder', 'anchorinorder', 'allowedforder', 'gamblerforder',\\\r\n\t\t\t\t\t 'moneypriorder', 'imaginedorder']\r\n\r\n\r\n\t# replace '.' with whitespace and and remove all leading and trailing whitespaces form strings\r\n\tdf['sex'] \t= df['sex'].replace({'f':'female','m': 'male', '.': 'prefernot'})\r\n\tdf['expgender'] = df['expgender'].replace({'.': 'prefernot'})\r\n\tdf['ethnicity'] = df['ethnicity'].replace({'.': '3'})\r\n\tdf = df.apply(lambda x: x.replace('.',' ') if x.dtype == \"object\" else x)\r\n\tdf = df.apply(lambda x: x.str.strip() if x.dtype == \"object\" else x)\r\n\t\r\n\r\n\t## -----\r\n\t# preprocess features to use\r\n\t\r\n\t## change the values of some features to make them more useful\r\n\t#\r\n\r\n\t# metadata -- don't predict\r\n\tdf['exprace'] = df['exprace'].apply(lambda x: race_replace(x))\r\n\tdf['session_date'] = df['session_date'].apply(lambda x: date2season( x.split(\"/\")[0] ) )\r\n\t\r\n\tdf['session_date'] = df['session_date'].replace({'summer': '0', 'fall': '1'})\r\n\tdf['exprace'] = df['exprace'].replace({'1':'American-Indian/Alaska-Native','2':'East-Asian','3':'South-Asian',\\\r\n\t\t\t\t\t\t\t\t\t\t '4':'Native-Hawaiian/Pacific-Islander','5':'Black/African-American','6':'White',\\\r\n\t\t\t\t\t\t\t\t\t\t '7':'More-than-one-race-Black/White','8':'More-than-one-race-Other','9':'Other/Unknown',\\\r\n\t\t\t\t\t\t\t\t\t\t '10':'Hispano/Latino'})\r\n\tdf['exprunafter'] = df['exprunafter'].replace({'runafter': '0', 'runalone' : '1'})\r\n\t\r\n\t#age_classes = [12:18, 19:22, 23:29, 30:49, 50:90]\r\n\tage_buckets = [0, 11, 18, 22, 29, 49, 100]\r\n\tdf['age'] = df['age'].replace({'': '1'})\r\n\tdf['age'] = pd.to_numeric(df['age'], errors='ignore')\r\n\tdf['age'] = pd.cut(df['age'], age_buckets, labels=[\"\", \"12-18\", \"19-22\", \"23-29\", \"30-49\", \"50-100\"])\r\n\t\r\n\tdf['order'] = df['order'].replace({'1': '0', '2': '1'})\r\n\tdf['recruitment'] = df['recruitment'].replace({'othersubjpool': 'other', 'advertisements': 'other'})\r\n\tdf['lab_or_online'] = df['lab_or_online'].replace({'In-lab': '0', 'Online': '1'})\r\n\tdf['flagfilter'] = df['flagfilter'].replace({'exclude': '0', 'include': '1'})\r\n\tdf['omdimc3'] = df['omdimc3'].replace({'Fail': '0', 'Pass': '1'})\r\n\tdf['us_or_international'] = df['us_or_international'].replace({'US': '0', 'International' : '1'})\r\n\r\n\t# questions -- features to predict\r\n\tdf['allowedforbiddena'] = df['allowedforbiddena'].replace({'No': '0', 'Yes': '1'})\r\n\tdf['allowedforbiddenb'] = df['allowedforbiddenb'].replace({'No': '0', 'Yes': '1'})\r\n\tdf['diseaseframinga'] = df['diseaseframinga'].replace({'200 people will be saved': '0', '1/3 probability to save all, 2/3 nobody will be saved' : '1'})\r\n\tdf['diseaseframingb'] = df['diseaseframingb'].replace({'400 people will die': '0', '1/3 probability nobody will die, 2/3 that 600 will die' : '1'})\r\n\tdf['flagsupplement1'] = df['flagsupplement1'].replace({'Not at all': '1', 'Very much': '11'})\r\n\tdf['flagsupplement2'] = df['flagsupplement2'].replace({'Democrat': '1', 'Republican': '7'})\r\n\tdf['flagsupplement3'] = 
df['flagsupplement3'].replace({'Liberal': '1', 'Conservative': '7'})\r\n\t\r\n\tdf['iatexplicitart1'] = df['iatexplicitart1'].replace({'Moderately bad' : '6', 'Very bad': '7'})\r\n\tdf['iatexplicitart2'] = df['iatexplicitart2'].replace({'Moderately Sad' : '6', 'Very Sad': '7'})\r\n\tdf['iatexplicitart3'] = df['iatexplicitart3'].replace({'Moderately Ugly' : '6', 'Very Ugly': '7'})\r\n\tdf['iatexplicitart4'] = df['iatexplicitart4'].replace({'Moderately Disgusting' : '6', 'Very Disgusting': '7'})\r\n\tdf['iatexplicitart5'] = df['iatexplicitart5'].replace({'Moderately Avoid' : '6', 'Very Avoid': '7'})\r\n\tdf['iatexplicitart6'] = df['iatexplicitart6'].replace({'Moderately Afraid' : '6', 'Very Afraid': '7'})\r\n\t\r\n\tdf['iatexplicitmath1'] = df['iatexplicitmath1'].replace({'Slightly bad': '5', 'Moderately bad' : '6', 'Very bad': '7'})\r\n\tdf['iatexplicitmath2'] = df['iatexplicitmath2'].replace({'Slightly Sad': '5', 'Moderately Sad' : '6', 'Very Sad': '7'})\r\n\tdf['iatexplicitmath3'] = df['iatexplicitmath3'].replace({'Slightly Ugly': '5', 'Moderately Ugly' : '6', 'Very Ugly': '7'})\r\n\tdf['iatexplicitmath4'] = df['iatexplicitmath4'].replace({'Slightly Disgusting': '5', 'Moderately Disgusting' : '6', 'Very Disgusting': '7'})\r\n\tdf['iatexplicitmath5'] = df['iatexplicitmath5'].replace({'Slightly Avoid': '5', 'Moderately Avoid' : '6', 'Very Avoid': '7'})\r\n\tdf['iatexplicitmath6'] = df['iatexplicitmath6'].replace({'Slightly Afraid': '5', 'Moderately Afraid' : '6', 'Very Afraid': '7'})\r\n\r\n\tdf['reciprocityusa'] = df['reciprocityusa'].replace({'No': '0', 'Yes': '1'})\r\n\tdf['reciprocityusb'] = df['reciprocityusb'].replace({'No': '0', 'Yes': '1'})\r\n\tdf['reciprocityothera'] = df['reciprocityothera'].replace({'No': '0', 'Yes': '1'}) \r\n\tdf['reciprocityotherb'] = df['reciprocityotherb'].replace({'No': '0', 'Yes': '1'})\r\n\t\r\n\tfor i in range(1,9):\r\n\t\tdf['sysjust'+str(i)] = df['sysjust'+str(i)].replace({'Strongly disagree': '1', 'Strongly agree': '7'})\r\n\r\n\r\n\tdf['d_art'] = df['d_art'].apply(lambda x: art_preferance(x))\r\n\tdf['artwarm'] = df['artwarm'].replace({'': '-1'})\r\n\tdf['artwarm'] = pd.to_numeric(df['artwarm'], errors='ignore')\r\n\tdf['artwarm'] = pd.cut(df['artwarm'], [-2, -1, 25, 50, 75, 100], labels=['', '0-25', '26-50', '51-75', '76-100'])\r\n\tdf['mathwarm'] = df['mathwarm'].replace({'': '-1'})\r\n\tdf['mathwarm'] = pd.to_numeric(df['mathwarm'], errors='ignore')\r\n\tdf['mathwarm'] = pd.cut(df['mathwarm'], [-2, -1, 25, 50, 75, 100], labels=['', '0-25', '26-50', '51-75', '76-100'])\r\n\t\r\n\tdf['gamblerfallacya'] = df['gamblerfallacya'].replace({'': '-1'})\r\n\tdf['gamblerfallacya'] = pd.to_numeric(df['gamblerfallacya'], errors='ignore')\r\n\tdf['gamblerfallacya'] = pd.cut(df['gamblerfallacya'], [-2, -1, 0, 1, 2, 3, 4, 100], labels=['', '0', '1', '2', '3', '4', '5-and-more'])\r\n\tdf['gamblerfallacyb'] = df['gamblerfallacyb'].replace({'': '-1'})\r\n\tdf['gamblerfallacyb'] = pd.to_numeric(df['gamblerfallacyb'], errors='ignore')\r\n\tdf['gamblerfallacyb'] = pd.cut(df['gamblerfallacyb'], [-2, -1, 0, 1, 2, 3, 4, 100], labels=['', '0', '1', '2', '3', '4', '5-and-more'])\r\n\t\r\n\t# --- change variables from continuous to discrete ---\t\r\n\t# change anchoring1a variables from numerical to classes, with 500 per class. 
minimum is 1501, maximum is 5903.015\r\n\tanchoring1a_bins = range(1500, 6001, 500) # 9 bins\r\n\tdf['anchoring1a'] = pd.to_numeric(df['anchoring1a'], errors='ignore')\r\n\tdf['anchoring1a'] = pd.cut(df['anchoring1a'], anchoring1a_bins)\r\n\r\n\t# change anchoring1b variables from numerical to classes, with 500 per class. minimum is 1553, maximum is 5999\r\n\tanchoring1b_bins = range(1500, 6001, 500) # 9 bins\r\n\tdf['anchoring1b'] = pd.to_numeric(df['anchoring1b'], errors='ignore')\r\n\tdf['anchoring1b'] = pd.cut(df['anchoring1b'], anchoring1b_bins)\r\n\r\n\t# change anchoring2a variables from numerical to classes with 400000 in each bin min is 200001, max is 4521987\r\n\tanchoring2a_bins = range(200000, 4600001, 400000) # 12 bins\r\n\tdf['anchoring2a'] = pd.to_numeric(df['anchoring2a'], errors='ignore')\r\n\tdf['anchoring2a'] = pd.cut(df['anchoring2a'], anchoring2a_bins)\r\n\r\n\t# change anchoring2b variables from numerical to classes with 400000 in each bin min is 236785, max is 4999999\r\n\tanchoring2b_bins = range(200000, 5000001, 400000) # 13 bins\r\n\tdf['anchoring2b'] = pd.to_numeric(df['anchoring2b'], errors='ignore')\r\n\tdf['anchoring2b'] = pd.cut(df['anchoring2b'], anchoring2b_bins)\r\n\r\n\t# change anchoring3a variables from numerical to classes with 4300 in each bin min is 2001, max is 45000\r\n\tanchoring3a_bins = range(2000, 45001, 4300) # 13 bins\r\n\tdf['anchoring3a'] = pd.to_numeric(df['anchoring3a'], errors='ignore')\r\n\tdf['anchoring3a'] = pd.cut(df['anchoring3a'], anchoring3a_bins)\r\n\r\n\t# change anchoring3b variables from numerical to classes with 4360 in each bin min is 2432, max is 45499\r\n\tanchoring3b_bins = range(2400, 46001, 4360) # 11 bins\r\n\tdf['anchoring3b'] = pd.to_numeric(df['anchoring3b'], errors='ignore')\r\n\tdf['anchoring3b'] = pd.cut(df['anchoring3b'], anchoring3b_bins)\r\n\r\n\t# change anchoring4a variables from numerical to classes with 4790 in each bin min is 101, max is 48000\r\n\tanchoring4a_bins = range(100, 48001, 4790) # 11 bins\r\n\tdf['anchoring4a'] = pd.to_numeric(df['anchoring4a'], errors='ignore')\r\n\tdf['anchoring4a'] = pd.cut(df['anchoring4a'], anchoring4a_bins)\r\n\r\n\t# change anchoring4b variables from numerical to classes with 4988 in each bin min is 120, max is 49999\r\n\tanchoring4b_bins = range(120, 50000, 4988) # 11 bins\r\n\tdf['anchoring4b'] = pd.to_numeric(df['anchoring4b'], errors='ignore')\r\n\tdf['anchoring4b'] = pd.cut(df['anchoring4b'], anchoring4b_bins)\r\n\r\n\r\n\t# rename columns in the dataset\r\n\tdf = df.rename(index=str, \\\r\n\t\t\t\t columns={'exprunafter': 'runalone', 'lab_or_online': 'exp-online', 'us_or_international': 'subject-international',\\\r\n\t\t\t\t \t\t 'allowedforbiddena': 'forbidden', 'allowedforbiddenb': 'allowed', 'diseaseframinga':'disease-save-choseprob', \\\r\n\t\t\t\t \t\t 'diseaseframingb':'disease-kill-choseprob', 'flagfilter': 'flag-american', 'moneyfilter': 'money-first', \\\r\n\t\t\t\t \t\t 'flagsupplement1': 'flagsuppl-american', 'flagsupplement2': 'flagsuppl-republican', 'flagsupplement3': 'flagsuppl-conservative',\\\r\n\t\t\t\t \t\t 'iatexplicitart1': 'art-good2bad', 'iatexplicitart2': 'art-happy2sad', 'iatexplicitart3': 'art-beautiful2ugly', 'iatexplicitart4': 'art-delightful2disgusting',\\\r\n\t\t\t\t \t\t 'iatexplicitart5': 'art-approach2avoid', 'iatexplicitart6': 'art-unafraid2afraid', 'omdimc3': 'omdimc3-pass',\\\r\n\t\t\t\t \t\t 'iatexplicitmath1': 'math-good2bad', 'iatexplicitmath2': 'math-happy2sad', 'iatexplicitmath3': 'math-beautiful2ugly', 'iatexplicitmath4': 
'math-delightful2disgusting',\\\r\n\t\t\t\t \t\t 'iatexplicitmath5': 'math-approach2avoid', 'iatexplicitmath6': 'math-unafraid2afraid', 'quotea': 'quote-washington', 'quoteb': 'quote-binladen',\\\r\n\t\t\t\t \t\t 'd_art': 'prefer_art'})\r\n\t\r\n\r\n\t## -----\r\n\t\t\r\n\tfor key in df.keys():\r\n\t\tdf[key] = df[key].replace('', np.nan)\r\n\t\r\n\tmissing_list = count_missing_per_sample(df)\r\n\t# create a histogram with missing values\r\n\tfig, ax = plt.subplots()\r\n\tplt.hist(missing_list)\r\n\tplt.xlabel(\"number of missing values\")\r\n\tplt.savefig(\"missing_values.png\")\r\n\tdf = df.drop(df.index[np.where(np.array(missing_list)>30)[0]])\r\n\t\r\n\r\n\t# list with binary features\r\n\tbin_list = ['session_date', 'runalone', 'flag-american', 'money-first', 'forbidden', 'allowed', 'disease-save-choseprob',\r\n\t\t\t'disease-kill-choseprob', 'omdimc3-pass', 'reciprocityothera', 'reciprocityotherb', 'reciprocityusa', 'reciprocityusb',\r\n\t\t\t'subject-international', 'exp-online', 'order', 'prefer_art']\r\n\r\n\t# add one-hot encoded features\r\n\tdummies_list = [\"referrer\", \"expgender\", \"exprace\", \"compensation\", \"recruitment\", \"separatedornot\", \"age\", 'ethnicity', 'flagsuppl-american', 'flagsuppl-republican', \\\r\n\t\t\t 'flagsuppl-conservative', 'artwarm', 'art-good2bad', 'art-happy2sad', 'art-beautiful2ugly', 'art-delightful2disgusting', 'art-approach2avoid', \\\r\n\t\t\t 'art-unafraid2afraid', 'math-good2bad', 'mathwarm', 'math-happy2sad', 'math-beautiful2ugly', 'math-delightful2disgusting', 'math-approach2avoid',\\\r\n\t\t\t 'math-unafraid2afraid','imaginedexplicit1', 'imaginedexplicit2', 'imaginedexplicit3', 'imaginedexplicit4', 'major', 'politicalid', \\\r\n\t\t\t 'quote-washington', 'quote-binladen', 'gamblerfallacya', 'gamblerfallacyb', 'sunkcosta','sunkcostb', 'sex', 'scalesorder', 'reciprocorder', 'diseaseforder',\\\r\n\t\t\t 'quoteorder', 'flagprimorder', 'sunkcostorder', 'anchorinorder', 'allowedforder', 'gamblerforder', 'moneypriorder', 'imaginedorder',\\\r\n\t\t\t 'anchoring1a', 'anchoring1b','anchoring2a', 'anchoring2b', 'anchoring3a', 'anchoring3b', 'anchoring4a', 'anchoring4b',\r\n\t\t\t 'scalesa', 'scalesb']\\\r\n\t\t\t + ['flagdv'+str(i) for i in range(1,9)] + ['sysjust'+str(i) for i in range(1,9)]\r\n\r\n\tfor dum in dummies_list:\r\n\t\tdf = pd.concat([df, pd.get_dummies(df[dum], prefix=dum+\"_\")], axis=1)\r\n\t\t\r\n\tdf = df.drop(columns=dummies_list+['age__','artwarm__', 'mathwarm__', 'gamblerfallacya__', 'gamblerfallacyb__'])\r\n\t\r\n\t# substitute the all-zero slices that may result from applying get_dummies() with NaN\r\n\t# this step might take some time to compute\r\n\tfeat_dict = create_dict(df)\r\n\tfor feat in feat_dict:\r\n\t\tif len(feat_dict[feat]) > 1:\r\n\t\t\tfor i in range(len(df)):\r\n\t\t\t\tif not any(df.iloc[i, feat_dict[feat]]):\r\n\t\t\t\t\tdf.iloc[i, feat_dict[feat]] = [np.nan]*len(feat_dict[feat])\r\n\t\r\n\r\n\t# create train/val/test split (80%/10%/10%)\r\n\trandom_indexes = np.random.permutation(df.shape[0])\r\n\ttrain_indices = random_indexes[:4437]\r\n\tval_indices = random_indexes[4437:4437+951]\r\n\ttest_indices = random_indexes[4437+951:]\r\n\r\n\tdf_train = df.iloc[train_indices]\r\n\tdf_val = df.iloc[val_indices]\r\n\tdf_test = df.iloc[test_indices]\r\n\r\n\tprint(\"Final shape for Train/Val/Test datasets: {} -- {} -- {}\".format(df_train.shape, df_val.shape, df_test.shape))\r\n\t\r\n\twith open(\"train_set.csv\", 'w') as ftrain, open(\"val_set.csv\", 'w') as fval, open(\"test_set.csv\", 'w') as 
ftest:\r\n\t\tftrain.write(df_train.to_csv(index=False))\r\n\t\tfval.write(df_val.to_csv(index=False))\r\n\t\tftest.write(df_test.to_csv(index=False))\r\n\t\r\n","sub_path":"preprocesing.py","file_name":"preprocesing.py","file_ext":"py","file_size_in_byte":17852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
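Downstream code can recover the one-hot groups from the written splits using the same "feature__value" naming that create_dict relies on (get_dummies is called with prefix=dum+"_" plus the default "_" separator, giving a double underscore). A small read-back sketch; no column names are hard-coded:

import pandas as pd

df_train = pd.read_csv("train_set.csv")
groups = {}
for col in df_train.columns:
    # group one-hot columns under their original feature name
    groups.setdefault(col.split("__")[0], []).append(col)

for feat, cols in list(groups.items())[:5]:
    print(feat, "->", len(cols), "column(s)")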
+{"seq_id":"10910380","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport graphene\nfrom graphql import GraphQLError\nfrom odoo import _\nfrom odoo.osv import expression\n\nfrom odoo.addons.graphql_vuestorefront.schemas.objects import PaymentAcquirer, Order\n\n\nclass PaymentQuery(graphene.ObjectType):\n payment_acquirers = graphene.List(\n graphene.NonNull(PaymentAcquirer),\n required=True,\n order_id=graphene.Int(),\n )\n\n def resolve_payment_acquirers(self, info, order_id):\n env = info.context[\"env\"]\n order = env['sale.order'].search([('id', '=', order_id)], limit=1)\n if not order:\n raise GraphQLError(_(\"Sale Order does not exist.\"))\n website = env['website'].get_current_website()\n domain = expression.AND([\n ['&', ('state', 'in', ['enabled', 'test']), ('company_id', '=', order.company_id.id)],\n ['|', ('website_id', '=', False), ('website_id', '=', website.id)],\n ['|', ('country_ids', '=', False), ('country_ids', 'in', [order.partner_id.country_id.id])]\n ])\n return env['payment.acquirer'].search(domain)\n\n\ndef validate_expiry(expiry_month, expiry_year):\n # Validate expiry month and year\n if expiry_month > 12 or expiry_month < 1:\n raise GraphQLError(_('Invalid Month'))\n\n cc_expiry = '%s / %s' % (\"{:02d}\".format(expiry_month), expiry_year)\n\n expiry_date = datetime.strptime(cc_expiry, '%m / %Y').strftime('%Y%m')\n\n if datetime.now().strftime('%Y%m') > expiry_date:\n raise GraphQLError(_('Invalid Month / Year'))\n return cc_expiry\n\n\ndef prepare_payment_transaction(env, data, payment_acquire, order):\n payment_token = payment_acquire.ogone_s2s_form_process(data)\n\n # create normal s2s transaction\n transaction = env['payment.transaction'].sudo().create({\n 'amount': order.amount_total,\n 'acquirer_id': payment_acquire.id,\n 'type': 'server2server',\n 'currency_id': order.currency_id.id,\n 'reference': order.name,\n 'payment_token_id': payment_token.id,\n 'partner_id': order.partner_id.id,\n 'sale_order_ids': [(6, 0, order.ids)]\n\n })\n return transaction\n\n\nclass MakePayment(graphene.Mutation):\n class Arguments:\n payment_acquire_id = graphene.Int(required=True)\n order_id = graphene.Int(required=True)\n expiry_month = graphene.Int(required=True)\n expiry_year = graphene.String(required=True)\n holder_name = graphene.String(required=True)\n card_number = graphene.String(required=True)\n cvc = graphene.String(required=True)\n brand = graphene.String(required=True)\n\n Output = Order\n\n @staticmethod\n def mutate(self, info, payment_acquire_id, order_id, expiry_month, expiry_year,\n holder_name, card_number, cvc, brand):\n env = info.context['env']\n cc_expiry = validate_expiry(expiry_month, expiry_year)\n\n order = env['sale.order'].sudo().search([('id', '=', order_id)], limit=1)\n if not order:\n raise GraphQLError(_('Sale Order does not exist.'))\n\n payment_acquire = env['payment.acquirer'].sudo().search([('id', '=', payment_acquire_id)], limit=1)\n if not payment_acquire:\n raise GraphQLError(_('Payment Acquire does not exist.'))\n\n data = {\n 'cc_number': card_number,\n 'cc_cvc': cvc,\n 'cc_holder_name': holder_name,\n 'cc_expiry': cc_expiry,\n 'cc_brand': brand,\n 'acquirer_id': payment_acquire_id,\n 'partner_id': order.partner_id.id\n }\n transaction = prepare_payment_transaction(env, data, payment_acquire, order)\n\n params = {'CVC': cvc, '3d_secure': True}\n transaction.ogone_s2s_do_transaction(**params)\n\n # check if transaction is done confirm sale order and create invoice\n if transaction.state == 'done':\n 
transaction._post_process_after_done()\n else:\n raise GraphQLError(_(transaction.state_message))\n return order\n\n\nclass PaymentMutation(graphene.ObjectType):\n make_payment = MakePayment.Field(description='Creates a new payment request.')\n","sub_path":"odoo-addons/14.0/graphql_vuestorefront/schemas/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
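For orientation, a client-side call to the mutation above might look like the following sketch. Graphene camel-cases argument names by default, which is why the fields below are camelCase; the endpoint URL, card data, and the selected return field are placeholders to be checked against the actual deployment's schema.

import requests

query = """
mutation {
  makePayment(paymentAcquireId: 1, orderId: 42, expiryMonth: 12,
              expiryYear: "2030", holderName: "Jane Doe",
              cardNumber: "4111111111111111", cvc: "123", brand: "VISA") {
    id
  }
}
"""
# hypothetical endpoint; substitute the real Odoo GraphQL route
resp = requests.post("https://example.com/graphql", json={"query": query})
print(resp.json())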
+{"seq_id":"320534511","text":"# Models Used for the Telco Project\n\n#---------------Imports---------------------------------\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import export_graphviz\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n#---------------Functions-------------------------------\n# Decision Tree:\ndef decision_tree(X_train, y_train, X_validate, y_validate, threshold=0.05, max_dep=25):\n    '''\n    This function uses the sklearn DecisionTreeClassifier to create a Decision Tree\n    '''\n    threshold = threshold # Set our threshold for how overfit we'll tolerate\n\n    models = [] # Initiate models list for outputs\n    metrics = [] # Initiate metrics list for outputs\n\n    for i in range(2, max_dep):\n        # Make the model\n        tree = DecisionTreeClassifier(max_depth=i, random_state=123)\n\n        # Fit the model (on train and only train)\n        tree = tree.fit(X_train, y_train)\n\n        # Use the model\n        # We'll evaluate the model's performance on train, first\n        in_sample_accuracy = tree.score(X_train, y_train)\n        out_of_sample_accuracy = tree.score(X_validate, y_validate)\n\n        # Calculate the difference\n        difference = in_sample_accuracy - out_of_sample_accuracy\n\n        # Add a conditional to check vs. the threshold\n        if difference > threshold:\n            break\n\n        # Formulate the output for each model's performance on train and validate\n        output = {\n            \"max_depth\": i,\n            \"train_accuracy\": in_sample_accuracy,\n            \"validate_accuracy\": out_of_sample_accuracy,\n            \"difference\": difference\n        }\n\n        # Add the metrics dictionary to the list, so we can make a dataframe\n        metrics.append(output)\n\n        # Add the specific tree to a list of trained models\n        models.append(tree)\n\n    # make a dataframe\n    results = pd.DataFrame(metrics)\n    # print(results)\n\n    # plot the data\n    results[['max_depth', 'train_accuracy', 'validate_accuracy']].set_index('max_depth').plot(figsize = (16,9), linewidth=2)\n    plt.ylim(0.50, 1)\n    plt.title('Decision Tree', fontsize = 20)\n    plt.xlabel(\"Max Depth\", fontsize = 16)\n    plt.ylabel('Accuracy', fontsize = 18)\n    plt.xticks(np.arange(1, i+1, 1))\n    plt.grid(b=True)\n\n    return results\n\n#-------------------------------------------------------\n# Random Forest\ndef rand_forest(X_train, y_train, X_validate, y_validate, threshold=0.05, max_dep=7):\n    '''\n    This function uses the sklearn RandomForestClassifier\n    to create a random forest model\n    '''\n    models = [] # For output\n    metrics = [] # For output\n    for i in range(2, max_dep): # Max Depth\n        for n in range(2, max_dep): # Min sample leaf\n            # Make the model\n            rf = RandomForestClassifier(bootstrap=True,\n                                        class_weight=None,\n                                        criterion='gini',\n                                        min_samples_leaf=n,\n                                        n_estimators=100,\n                                        max_depth=i,\n                                        random_state=123)\n\n            # Fit the model (on train and only train)\n            rf = rf.fit(X_train, y_train)\n\n            # We'll evaluate the model's performance on train and validate\n            in_sample_accuracy = rf.score(X_train, y_train)\n            out_of_sample_accuracy = rf.score(X_validate, y_validate)\n\n            # Calculate the difference\n            difference = in_sample_accuracy - out_of_sample_accuracy\n\n            # Add a conditional to check vs. 
the threshold\n            if difference > threshold:\n                break\n\n            # Formulate the output for each model's performance on train and validate\n            output = {\n                \"max_depth\": i,\n                \"min_samples_leaf\": n,\n                \"train_accuracy\": in_sample_accuracy,\n                \"validate_accuracy\": out_of_sample_accuracy,\n                \"difference\": difference\n            }\n\n            # Add the metrics dictionary to the list, so we can make a dataframe\n            metrics.append(output)\n\n            # Add the specific tree to a list of trained models\n            models.append(rf)\n\n    # make a dataframe\n    results = pd.DataFrame(metrics)\n    # print(results)\n\n    results[['max_depth', 'train_accuracy', 'validate_accuracy']].set_index('max_depth').plot(figsize = (16,9), linewidth=2)\n    plt.ylim(0.50, 1)\n    plt.title('Random Forest', fontsize = 20)\n    plt.xlabel(\"Max Depth\", fontsize = 16)\n    plt.ylabel('Accuracy', fontsize = 18)\n    plt.xticks(np.arange(1, i+1, 1))\n    plt.grid(b=True)\n\n    return results\n\n#-------------------------------------------------------\n# KNN\ndef knn(X_train, y_train, X_validate, y_validate, max_k = 26):\n    '''\n    This function uses the sklearn KNeighborsClassifier\n    to create a k nearest neighbors model\n    '''\n    metrics = [] # For output\n\n    # loop through different values of k\n    for k in range(1, max_k):\n\n        # define the thing\n        knn = KNeighborsClassifier(n_neighbors=k, weights='uniform')\n\n        # fit the thing (remember only fit on training data)\n        knn.fit(X_train, y_train)\n\n        # use the thing (calculate accuracy)\n        train_accuracy = knn.score(X_train, y_train)\n        validate_accuracy = knn.score(X_validate, y_validate)\n        difference = train_accuracy - validate_accuracy\n\n        output = {\n            \"k\": k,\n            \"train_accuracy\": train_accuracy,\n            \"validate_accuracy\": validate_accuracy,\n            \"difference\": difference\n        }\n\n        metrics.append(output)\n\n    # make a dataframe\n    results = pd.DataFrame(metrics)\n    # print(results)\n\n    # plot the data\n    results[['k', 'train_accuracy', 'validate_accuracy']].set_index('k').plot(figsize = (16,9), linewidth=2)\n    plt.ylim(0.50, 1)\n    plt.title('KNN', fontsize = 20)\n    plt.xlabel(\"k\", fontsize = 16)\n    plt.ylabel('Accuracy', fontsize = 18)\n    plt.xticks(np.arange(1, k+1, 1))\n    plt.grid(b=True)\n\n    return results\n\n#-------------------------------------------------------\n# Logistic Regression\ndef log_regression(X_train, y_train):\n    '''\n    This function uses the sklearn LogisticRegression\n    to create a logistic regression model for the train data\n    '''\n    # Train Data\n    logit = LogisticRegression(C=1, random_state=123) # Create the model\n    logit.fit(X_train, y_train) # Fit the model with Train Data\n    print('Coefficient: \\n', logit.coef_) # Print coefficients\n    print('Intercept: \\n', logit.intercept_) # Print the intercept\n\n    y_pred = logit.predict(X_train) # y prediction\n    y_pred_proba = logit.predict_proba(X_train) # y prob\n    print(\"Train Confusion Matrix:\")\n    print(confusion_matrix(y_train, y_pred)) # Confusion Matrix\n    print(\"\")\n    print(\"Train Data:\")\n    train_class_report = pd.DataFrame(classification_report(y_train, y_pred, output_dict=True))\n    #print(train_class_report) # Print accuracy report on Train Data\n\n    return train_class_report\n\ndef log_regression_val(X_train, y_train, X_validate, y_validate):\n    '''\n    This function uses the sklearn LogisticRegression\n    to create a logistic regression model, evaluated on the validate data\n    '''\n    # Validate Data\n    logit = LogisticRegression(C=1, random_state=123) # Create the model\n    logit.fit(X_train, y_train) # Fit the model with Train Data\n    print('Coefficient: \\n', 
logit.coef_) # Print coefficients\n    print('Intercept: \\n', logit.intercept_) # Print the intercept\n\n    y_pred = logit.predict(X_validate) # y prediction\n    y_pred_proba = logit.predict_proba(X_validate) # y prob\n    print(\"Validate Confusion Matrix:\")\n    print(confusion_matrix(y_validate, y_pred)) # Confusion Matrix\n    print(\"\")\n    print(\"Validate Data:\")\n    val_class_report = pd.DataFrame(classification_report(y_validate, y_pred, output_dict=True))\n    #print(val_class_report) # Print accuracy report on Validate Data\n\n    return val_class_report\n\n\n# Model Comparison Report\n\ndef model_report_all_data():\n    '''\n    This is a function to output the best models based on train accuracy and minimal overfitting\n    Utilizing All Data features from Telco\n    '''\n    report1 = {\n        'Model': ['DT', 'RF', 'KNN', 'LR'],\n        'Parameters' : ['Max Depth = 3', 'Max Depth = 6 & Min Sample Leaf = 2', 'KNN = 19', 'Default'],\n        'Train' : [0.792, 0.818, 0.796, 0.805],\n        'Validate' : [0.794, 0.808, 0.789, 0.792]\n    }\n    report = pd.DataFrame(report1)\n    report['Difference'] = report.Train - report.Validate\n    return report\n\ndef model_report_select_data():\n    '''\n    This is a function to output the best models based on train accuracy and minimal overfitting\n    Utilizing select features from Telco\n    '''\n    report1 = {\n        'Model': ['DT', 'RF', 'KNN', 'LR'],\n        'Parameters' : ['Max Depth = 5', 'Max Depth = 6 & Min Sample Leaf = 4', 'KNN = 14', 'Default'],\n        'Train' : [0.798, 0.812, 0.813, 0.807],\n        'Validate' : [0.794, 0.802, 0.800, 0.801]\n    }\n    report = pd.DataFrame(report1)\n    report['Difference'] = report.Train - report.Validate\n    return report\n\n\n# Best Model to run on test data\n\ndef best_rf(X_train, y_train, y, X):\n    '''This function outputs a classification report for the best TELCO model'''\n    # Create the model\n    rf = RandomForestClassifier(bootstrap=True,\n                                class_weight=None,\n                                criterion='gini',\n                                min_samples_leaf=2,\n                                n_estimators=100,\n                                max_depth=6,\n                                random_state=123)\n\n    # Fit the model (on train and only train)\n    rf = rf.fit(X_train, y_train)\n    y_pred = rf.predict(X)\n\n    # Create the report\n    report = pd.DataFrame(classification_report(y, y_pred, output_dict=True))\n    return report\n\n\n# Prediction on Test\ndef best_model_churn_prediction(X_train, y_train, y, X):\n    '''a CSV file with customer_id, probability of churn, and prediction of churn.\n    (1=churn, 0=not_churn). These predictions should be from your best performing\n    model run on X_test. Note that the order of the y_pred and y_proba are numpy\n    arrays coming from running the model on X_test. The order of those values will\n    match the order of the rows in X_test, so you can obtain the customer_id from\n    X_test and concatenate these values together into a dataframe to write to CSV.'''\n    rf = RandomForestClassifier(bootstrap=True,\n                                class_weight=None,\n                                criterion='gini',\n                                min_samples_leaf=2,\n                                n_estimators=100,\n                                max_depth=6,\n                                random_state=123)\n    # Fit the model (on train and only train)\n    rf = rf.fit(X_train, y_train)\n    y_pred = rf.predict(X)\n    return y_pred\n","sub_path":"model_functions.py","file_name":"model_functions.py","file_ext":"py","file_size_in_byte":11723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
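Minimal usage sketch for the model helpers above on synthetic data (standing in for the Telco frames the project actually loads); assumes the file is importable as model_functions, per its sub_path:

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from model_functions import decision_tree

rng = np.random.default_rng(123)
X = pd.DataFrame(rng.random((500, 4)), columns=list("abcd"))
y = pd.Series((X["a"] + X["b"] > 1).astype(int))

X_train, X_validate, y_train, y_validate = train_test_split(
    X, y, test_size=0.3, random_state=123)

results = decision_tree(X_train, y_train, X_validate, y_validate, max_dep=10)
print(results.head())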
+{"seq_id":"442686567","text":"#!/usr/bin/env python\nimport json, pynotify, time, urllib\n\ndef getip():\n return json.load(urllib.urlopen(\"http://ip-api.com/json\"))['query']\n\npynotify.init(\"IP\")\ncurrentip=getip()\nwhile 1:\n new=getip()\n if new!=currentip:\n n=pynotify.Notification(\"IP CHANGE\", \"Your new IP is %s\"%new)\n currentip=new\n n.show()\n time.sleep(30)\n","sub_path":"ipchange.py","file_name":"ipchange.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
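The script above is Python-2/pynotify era; the same change-detection loop with only requests and the standard library looks like this sketch (the ip-api.com response's "query" field is the caller's IP, as used above):

import time
import requests

def get_ip():
    return requests.get("http://ip-api.com/json", timeout=10).json()["query"]

current = get_ip()
while True:
    new = get_ip()
    if new != current:
        print(f"IP change: {current} -> {new}")
        current = new
    time.sleep(30)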
+{"seq_id":"572828333","text":"def isPrime(A):\n if A == 1:\n return 0\n ceil = int(A**0.5)\n for i in range(2, ceil + 1):\n if (A % i == 0):\n return 0\n return 1\n\n\nr = isPrime(21)\nprint(r)\n","sub_path":"Math/verify-prime.py","file_name":"verify-prime.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
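The sqrt bound works because any composite A = p * q with p <= q satisfies p * p <= A, so a composite always has a divisor no larger than sqrt(A). A quick check, with the isPrime above in scope:

print([n for n in range(2, 30) if isPrime(n)])
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]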
+{"seq_id":"346594241","text":"# coding: utf-8\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\nprint(\"hello world\")\r\noptions = webdriver.ChromeOptions()\r\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\r\noptions.add_argument(\"--disable-notifications\")\r\n\r\ndriver = webdriver.Chrome(options=options)\r\ndriver.get('https://www.99acres.com/')\r\n\r\ndriver.find_element_by_xpath('//*[@id=\"keyword\"]').send_keys('delhi')\r\ndriver.implicitly_wait(10)\r\n\r\ndriver.find_element_by_xpath('//*[@id=\"keyword\"]').send_keys(Keys.ENTER)\r\n\r\n\r\n#//*[@id=\"srp_tuple_price\"]\r\ndriver.maximize_window()\r\n#element = driver.find_element_by_xpath(\"class_name\").text\r\ndriver.implicitly_wait(10)\r\nlist1=[]\r\nj=2\r\nwhile True:\r\n\r\n    for i in range(1,30):\r\n        try:\r\n            element1 = driver.find_element_by_xpath('(//*[@id=\"srp_tuple_price\"])'+'['+str(i)+']').text\r\n            element1=element1.splitlines()[0]\r\n            if element1.find('Cr') != -1:\r\n                price=float(element1.split(' ')[1])*100\r\n            else:\r\n                price=float(element1.split(' ')[1])\r\n        except:\r\n            price=-1\r\n        #print(price)\r\n        try:\r\n            element2 = driver.find_element_by_xpath('(//*[@id=\"srp_tuple_primary_area\"])'+'['+str(i)+']').text\r\n            area=float(element2.split(' ')[0].replace(',','').replace('-',''))\r\n        except:\r\n            area=-1\r\n        try:\r\n            element3 = driver.find_element_by_xpath('(//*[@id=\"srp_tuple_bedroom\"])'+'['+str(i)+']').text\r\n            bhk=float(element3.split(' ')[0])\r\n        except:\r\n            bhk=-1\r\n        # append one flat row per listing so csv.writerows emits three columns\r\n        list1.append([price, area, bhk])\r\n    #print(list1)\r\n    j=j+1\r\n    print(j)\r\n    driver.implicitly_wait(5)\r\n    if (j<=3):\r\n        driver.find_element_by_xpath('//*[@id=\"app\"]/div/div/div[2]/div[2]/div[4]/div[2]/a['+ str(j)+ ']').click()\r\n    else:\r\n        driver.find_element_by_xpath('//*[@id=\"app\"]/div/div/div[2]/div[2]/div[3]/div[2]/a['+ str(j)+ ']').click()\r\n\r\n    if (j>=10):\r\n        break\r\n\r\n\r\nimport csv\r\nwith open('newdelhi1.csv', 'w', newline='') as f:\r\n    writer = csv.writer(f)\r\n    writer.writerows(list1)","sub_path":"99acres.py","file_name":"99acres.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
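The price branch above normalises everything to lakh (1 crore = 100 lakh). A self-contained restatement of that parse, with made-up listing strings in the shape the scraper expects:

def parse_price_lakh(text):
    first = text.splitlines()[0]            # e.g. "₹ 1.2 Cr" or "₹ 85 Lac"
    value = float(first.split(' ')[1])
    return value * 100 if 'Cr' in first else value

print(parse_price_lakh("₹ 1.2 Cr"))  # 120.0
print(parse_price_lakh("₹ 85 Lac"))  # 85.0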
+{"seq_id":"49916831","text":"from tkinter import *\nfrom tkinter.filedialog import asksaveasfile, askopenfilename\nfrom .ftp_client_service import AngryFtpClientService\n\n\nclass AngryFtpClientApplication:\n def __init__(self, master):\n\n self.master = master\n self.master.title(\"Angry FTP Client\")\n\n self.main_frame = Frame(self.master).pack(padx=10)\n\n self.address = {\n \"ftp_ip\": StringVar(value=\"192.168.0.183\"),\n \"ftp_port\": StringVar(value=2121)\n }\n self.username = StringVar(value=\"anonymous\")\n self.password = StringVar(value=\"blue banana\")\n # Init in login_ui\n self.connection_state_label = None\n self.auth_button = None\n\n self.status = StringVar(value=\"Welcome to AngryFtpClient\")\n # Init in explorer ui\n self.current_directory = \"/\"\n self.current_directory_label = StringVar(value=\"/\")\n self.file_explorer_listbox = None\n self.rename_to = StringVar()\n self.upload_file_path = StringVar()\n self.new_folder_name = StringVar()\n self.data_connection_mode = StringVar(value=\"PASV\")\n\n self.ftp = AngryFtpClientService(self.status)\n self.master.protocol(\"WM_DELETE_WINDOW\", self.quit)\n self.ui()\n\n def ui(self):\n self.login_ui()\n self.file_explorer_ui()\n self.status_and_download_ui()\n self.upload_ui()\n self.folder_ui()\n self.rename_ui()\n self.others_ui()\n self.connection_mode_ui()\n\n def quit(self):\n try:\n if self.connection_state_label.cget(\"text\") == \"Connected\":\n self.ftp.disconnect()\n finally:\n self.master.destroy()\n\n def login_ui(self):\n # This will create a LabelFrame\n login_frame = LabelFrame(self.main_frame, text='Login', padx=5, pady=5)\n # this wil create a label widget\n ip_label = Label(login_frame, text=\"IP:\", anchor=W, width=8)\n ip_input = Entry(login_frame, textvariable=self.address[\"ftp_ip\"])\n port_label = Label(login_frame, text=\"Port:\", anchor=W, width=8)\n port_input = Entry(login_frame, textvariable=self.address[\"ftp_port\"])\n username_label = Label(login_frame, text=\"Username:\")\n username_input = Entry(login_frame, textvariable=self.username)\n password_label = Label(login_frame, text=\"Password:\")\n password_input = Entry(login_frame, textvariable=self.password, show=\"*\")\n\n self.auth_button = Button(self.main_frame, text=\"Connect\", font='Helvetica 9 bold',\n width=20, fg=\"white\", bg=\"green\",\n command=self.auth)\n\n self.connection_state_label = \\\n Label(self.main_frame, text=\"Disconnected\", bg=\"red\", fg=\"white\",\n font='Helvetica 11 bold')\n\n self.connection_state_label.pack(side=TOP, fill=BOTH)\n login_frame.pack(side=TOP, padx=5, pady=2, expand=1, fill=X)\n\n ip_label.grid(row=0, column=0, pady=2)\n ip_input.grid(row=0, column=1, padx=4)\n port_label.grid(row=0, column=2)\n port_input.grid(row=0, column=3)\n\n username_label.grid(row=1, column=0)\n username_input.grid(row=1, column=1)\n password_label.grid(row=1, column=2)\n password_input.grid(row=1, column=3)\n\n self.auth_button.pack(side=TOP, pady=2)\n\n def auth(self):\n # Login\n if self.connection_state_label.cget(\"text\") == \"Disconnected\":\n return_val = self.ftp.connect(\n self.address[\"ftp_ip\"].get(), int(self.address[\"ftp_port\"].get()),\n self.username.get(), self.password.get()\n )\n if return_val == 0:\n self.connection_state_label.config(text=\"Connected\", bg=\"green\")\n self.auth_button.config(text=\"Disconnect\", bg=\"red\")\n self.update_list()\n # Logout\n else:\n self.ftp.disconnect()\n self.file_explorer_listbox.delete(0, END)\n self.connection_state_label.config(text=\"Disconnected\", 
bg=\"red\")\n            self.auth_button.config(text=\"Connect\", bg=\"green\")\n\n    def file_explorer_ui(self):\n        file_explorer_control_frame = Frame(self.main_frame, padx=10)\n        file_explorer_frame = Frame(self.main_frame, padx=10)\n\n        file_explorer_label = Label(file_explorer_control_frame, text=\"Directory Path: \")\n        file_explorer_path = Label(file_explorer_control_frame,\n                                   textvariable=self.current_directory_label, width=32, anchor=W)\n        go_to_parent_button = Button(file_explorer_control_frame, text=\"Go back\",\n                                     command=self.go_to_parent_dir)\n\n        self.file_explorer_listbox = Listbox(file_explorer_frame, height=10, width=62, activestyle=\"none\")\n        # NB: bind sequence reconstructed; double-click to open an entry is assumed here\n        self.file_explorer_listbox.bind(\"<Double-Button-1>\", self.change_directory)\n\n        scrollbar = Scrollbar(file_explorer_frame)\n\n        self.file_explorer_listbox.config(yscrollcommand=scrollbar.set)\n        scrollbar.config(command=self.file_explorer_listbox.yview)\n\n        file_explorer_control_frame.pack(side=TOP)\n        file_explorer_label.pack(side=LEFT)\n        file_explorer_path.pack(side=LEFT)\n        go_to_parent_button.pack(side=LEFT, padx=5)\n\n        file_explorer_frame.pack(side=TOP, pady=3)\n        self.file_explorer_listbox.pack(side=LEFT, fill=BOTH, pady=(0, 10))\n        scrollbar.pack(side=RIGHT, fill=BOTH)\n\n    def update_list(self):\n        self.file_explorer_listbox.delete(0, END)\n        self.update_directory_label()\n        self.ftp.update_list(self.file_explorer_listbox)\n\n    def update_directory_label(self):\n        # remove code and \"\"\n        directory = self.ftp.print_current_directory()\n        self.current_directory_label.set(directory)\n\n    def go_to_parent_dir(self):\n        last_index = self.current_directory.rfind(\"/\")\n        new_dir = self.current_directory[:last_index]\n        self.change_directory(new_dir=new_dir)\n\n    def change_directory(self, event=None, new_dir=None):\n        if new_dir is None:\n            # 01234\n            # _>_dir\n            selected_dir = self.get_selected_listbox_item()\n            if selected_dir == -1:\n                return -1\n            # If it is a file, return -1\n            if selected_dir[1] == '-':\n                return -1\n            selected_dir_path = selected_dir[3:]\n            self.current_directory = self.current_directory + '/' + selected_dir_path\n        else:\n            self.current_directory = new_dir\n\n        self.ftp.change_current_directory(self.current_directory)\n        self.update_list()\n\n    def status_and_download_ui(self):\n        status_download_frame = Frame(self.main_frame, padx=5)\n\n        status_frame = LabelFrame(status_download_frame, text=\"Status\")\n        status_label = \\\n            Label(status_frame, textvariable=self.status, anchor=W, width=45)\n\n        # Save as file\n        download_button = Button(status_download_frame, text=\"Download\", command=self.download)\n\n        status_download_frame.pack(side=TOP, pady=(0, 5), padx=0, expand=1, fill=X)\n        status_frame.pack(side=LEFT)\n        status_label.pack()\n        download_button.pack(side=RIGHT, pady=(8, 0), padx=5)\n\n    def get_selected_listbox_item(self):\n        selected_dir = (self.file_explorer_listbox.curselection())\n        if len(selected_dir) < 1:\n            return -1\n        selected_dir = self.file_explorer_listbox.get(selected_dir[0])\n        return selected_dir\n\n    def download(self):\n\n        selected_dir = self.get_selected_listbox_item()\n        if selected_dir == -1:\n            return -1\n        # If it is a folder, return -1\n        if selected_dir[1] == '>':\n            return -1\n\n        download_file_name = selected_dir[3:]\n        downloaded_data = self.ftp.download_file(download_file_name)\n        # Download failed\n        if downloaded_data == -1:\n            return -1\n        downloaded_file = asksaveasfile(title=\"Save file as...\", mode=\"wb\",\n                                        initialfile=download_file_name, filetypes=[('All Files', '*.*')])\n        # asksaveasfile returns None if the user cancels the dialog\n        if downloaded_file:\n            downloaded_file.write(downloaded_data)\n            downloaded_file.close()\n\n    def rename_ui(self):\n        rename_frame = LabelFrame(self.main_frame, text=\"Rename to\", padx=5, pady=2)\n        rename_input = Entry(rename_frame, textvariable=self.rename_to, width=50)\n        rename_button = Button(rename_frame, text=\"Confirm\", command=self.rename)\n\n        rename_frame.pack(side=TOP, padx=5, pady=2, expand=1, fill=X)\n        rename_input.pack(side=LEFT, padx=5)\n        rename_button.pack(side=RIGHT)\n\n    def rename(self):\n        selected_dir = self.get_selected_listbox_item()\n        if selected_dir == -1 or len(self.rename_to.get()) == 0:\n            return -1\n        # If it is a folder, return -1\n        if selected_dir[1] == '>':\n            return -1\n        old_file_name = selected_dir[3:]\n        new_file_name = self.rename_to.get()\n\n        self.ftp.rename_file(old_file_name, new_file_name)\n        self.update_list()\n\n    def upload_ui(self):\n        upload_frame = LabelFrame(self.main_frame, text=\"Upload\", padx=5, pady=2)\n        upload_label = Label(upload_frame, text=\"File:\")\n        upload_input = Entry(upload_frame, width=40, textvariable=self.upload_file_path)\n        upload_browse_button = Button(upload_frame, text=\"Browse\", command=self.browse_upload_file)\n        upload_button = Button(upload_frame, text=\"Upload\", command=self.upload)\n\n        upload_frame.pack(side=TOP, padx=5, pady=2, expand=1, fill=X)\n        upload_label.pack(side=LEFT)\n        upload_input.pack(side=LEFT, padx=5)\n        upload_browse_button.pack(side=LEFT, padx=5)\n        upload_button.pack(side=LEFT)\n\n    def browse_upload_file(self):\n        file_path = askopenfilename()\n        if len(file_path) > 0:\n            self.upload_file_path.set(file_path)\n\n    def upload(self):\n        path = self.upload_file_path.get()\n        if len(path) <= 0:\n            return -1\n        if self.ftp.upload_file(path) == -1:\n            return -1\n        self.update_list()\n\n    def folder_ui(self):\n        folder_frame = LabelFrame(self.main_frame, text=\"Folder\", padx=5, pady=2)\n        folder_label = Label(folder_frame, text=\"Name:\")\n        folder_input = Entry(folder_frame, width=40, textvariable=self.new_folder_name)\n        create_folder_button = Button(folder_frame, text=\"Make\", command=self.create_folder)\n        delete_folder_button = Button(folder_frame, text=\"Delete\", command=self.delete_folder)\n\n        folder_frame.pack(side=TOP, padx=5, pady=2, expand=1, fill=X)\n        folder_label.pack(side=LEFT)\n        folder_input.pack(side=LEFT, padx=5)\n        create_folder_button.pack(side=LEFT, padx=5)\n        delete_folder_button.pack(side=LEFT)\n\n    def create_folder(self):\n        folder_name = self.new_folder_name.get()\n        if len(folder_name) == 0:\n            return -1\n        self.ftp.create_folder(folder_name)\n        self.update_list()\n\n    def delete_folder(self):\n        selected_dir = self.get_selected_listbox_item()\n        if selected_dir == -1:\n            return -1\n        # If it is a file, return -1\n        if selected_dir[1] == '-':\n            return -1\n        folder_name = selected_dir[3:]\n\n        self.ftp.delete_folder(folder_name)\n        self.update_list()\n\n    def others_ui(self):\n        others_frame = LabelFrame(self.main_frame, text=\"Others\", padx=5)\n        delete_button = Button(others_frame, text=\"Delete File\", width=15, command=self.delete_file)\n\n        others_frame.pack(side=TOP, padx=5, pady=2, expand=1, fill=X)\n        delete_button.pack(side=TOP, pady=5)\n\n    def delete_file(self):\n        selected_dir = self.get_selected_listbox_item()\n        if selected_dir == -1:\n            return -1\n        # If it is a folder, return -1\n        if selected_dir[1] == '>':\n            return -1\n        file_name = selected_dir[3:]\n\n        self.ftp.delete_file(file_name)\n        self.update_list()\n\n    def connection_mode_ui(self):\n        mode_frame = Frame(self.main_frame, pady=5)\n        mode_label = Label(mode_frame, text=\"Connection Mode\")\n        port_button = \\\n            
Radiobutton(mode_frame, text=\"PORT\", value=\"PORT\",\n variable=self.data_connection_mode, indicator=0, command=self.update_connection_mode)\n pasv_button = \\\n Radiobutton(mode_frame, text=\"PASV\", value=\"PASV\",\n variable=self.data_connection_mode, indicator=0, command=self.update_connection_mode)\n\n mode_frame.pack(side=TOP, padx=5, pady=2, expand=1, fill=X)\n mode_label.pack(side=TOP)\n port_button.pack(side=LEFT, expand=1, fill=X)\n pasv_button.pack(side=LEFT, expand=1, fill=X)\n\n def update_connection_mode(self):\n if self.data_connection_mode.get() == \"PASV\":\n self.ftp.set_pasv(True)\n print(\"Connection Mode: PASV\")\n else:\n self.ftp.set_pasv(False)\n print(\"Connection Mode: PORT\")\n","sub_path":"angryftp/ftp_client_application.py","file_name":"ftp_client_application.py","file_ext":"py","file_size_in_byte":13058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"168887252","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nimport models\n\nclass SubsribeAdmin(admin.ModelAdmin):\n list_display = ('email', 'from_page', 'time')\n search_fields = ('email',)\n ordering = ('time', )\n\nadmin.site.register(models.Subscribe, SubsribeAdmin)","sub_path":"subscribe/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"354162368","text":"from django.conf.urls import include, url\nfrom rest_framework import routers\n\nfrom apps.views import AppViewSet, PlanViewSet, SubscriptionViewSet\n\nrouter = routers.DefaultRouter()\nrouter.register('app', AppViewSet, 'apps')\nrouter.register('plan', PlanViewSet, 'plans')\nrouter.register('subscription', SubscriptionViewSet, 'subscriptions')\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n]","sub_path":"apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"141327314","text":"'''\nCreated on Mar 29, 2016\n\n@author: karthik\n\nThis GUI is for setting up the spiral coordinates to the quadcopter.\nThe script allows the user to enter angle and bearing of start position of spiral, radius of spiral,\nnumber of rotations and whether the user wants it in GUIDED or AUTO mode\n\nhave the units in editboxes\nminimum height of spiral\nrtl height\n\nbonecam,fishcam\n\ndescribe spiral, film everything from fixed distance\nextend follow me on qgroundstation with mavlink id\nconnect qgcs 2 drones\nsoftware stack working\nimage stuff\ncam get low latency encode to h264 \nget onvif wrapper\nsend via 4g\ntry vlc gstreamer ffmpg\nred balloon \n\n'''\n\nimport sys\nfrom PyQt4 import QtGui, QtCore\n\nclass Example(QtGui.QWidget):\n \n def error(self,errortype):\n msg = QtGui.QMessageBox()\n msg.setIcon(QtGui.QMessageBox.Information)\n msg.setText(\"Check the %s. See the tooltip for exact dimensions\"%errortype)\n msg.exec_()\n \n def __init__(self):\n super(Example, self).__init__()\n self.initUI()\n \n def test(self):\n self.usrStartHeight=0.0\n self.usrRadius=0.0\n self.usrRotations=0\n self.usrDistance=0.0\n self.usrBearing=0\n self.usrFlightMode=\"AUTO\"\n check=True\n \n if float(self.startHeightEdit.text()) >=5.0 and float(self.startHeightEdit.text()) <=30.0:\n self.usrStartHeight=float(self.startHeightEdit.text())\n else:\n check=False\n self.error(\"height\")\n \n \n if float(self.radiusSpiralEdit.text()) >= 3.0 and float(self.radiusSpiralEdit.text()) <=50.0:\n self.usrRadius=float(self.radiusSpiralEdit.text())\n else:\n check=False\n self.error(\"radius\")\n \n \n if int(self.numRotationsEdit.text()) >=1 and int(self.numRotationsEdit.text()) <=10:\n self.usrRotations=int(self.numRotationsEdit.text())\n else:\n check=False\n self.error(\"rotation\")\n \n \n if float(self.startDistanceEdit.text()) >=0.0 and float(self.startDistanceEdit.text()) <=50.0:\n self.usrDistance=float(self.startDistanceEdit.text())\n else:\n check=False\n self.error(\"distance\")\n \n \n if int(self.startBearingEdit.text()) >= 0 and int(self.startBearingEdit.text()) <=360:\n self.usrBearing=int(self.startBearingEdit.text())\n else:\n check=False\n self.error(\"bearing\")\n \n \n if check == True:\n msg = QtGui.QMessageBox()\n msg.setIcon(QtGui.QMessageBox.Information)\n msg.setText(\"Values are going to be sent to the Quadcopter. Please connect to a GCS\")\n msg.exec_()\n \n \n \n def initUI(self):\n \n vbox = QtGui.QVBoxLayout()\n QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))\n \n self.startHeight=QtGui.QLabel('Minimum Height to start')\n self.radiusSpiral = QtGui.QLabel('Radius of Spiral')\n self.numRotations = QtGui.QLabel('Number of Rotations')\n self.startCoords = QtGui.QLabel('Distance and Bearing (North=0 degrees)\\n to start of spiral')\n self.startDistance=QtGui.QLabel('Distance')\n self.startBearing=QtGui.QLabel('Bearing')\n self.modeQuad =QtGui.QLabel('Flight Mode')\n \n self.startHeight.setToolTip('This is the height the quadcopter first \\ntravels and then proceeds to head to the starting\\n point of spiral. 
After doing the said spiral, the copter\\n returns to this height and lands at home.\\n Minimum 5 meters, Maximum 30 meters')\n self.radiusSpiral.setToolTip('The radius must be minimum of 3 meters and maximum of 50 meters')\n self.numRotations.setToolTip('The number of rotations must be more than 1 and less than 10')\n self.startCoords.setToolTip('The bearing assumes north is 0 degree.\\n The quadcopter flies along this bearing\\n for the given distance and \\nstarts to make a spiral path.\\n Distance =0 to 50 Bearing = 0 to 360')\n self.modeQuad.setToolTip('The mode describes how the coordinates are sent to the Quadcopter.\\n Check the ardupilot website for more details')\n \n self.startHeightEdit = QtGui.QLineEdit()\n self.radiusSpiralEdit = QtGui.QLineEdit()\n self.numRotationsEdit = QtGui.QLineEdit()\n self.startDistanceEdit = QtGui.QLineEdit()\n self.startBearingEdit=QtGui.QLineEdit()\n \n \n hboxstartHeight = QtGui.QHBoxLayout()\n hboxstartHeight.addWidget(self.startHeight)\n hboxstartHeight.addStretch(1)\n self.startHeightEdit.setPlaceholderText(\"meters\")\n hboxstartHeight.addWidget(self.startHeightEdit)\n \n \n hboxradiusSpiral=QtGui.QHBoxLayout()\n hboxradiusSpiral.addWidget(self.radiusSpiral)\n hboxradiusSpiral.addStretch(1)\n self.radiusSpiralEdit.setPlaceholderText(\"meters\")\n hboxradiusSpiral.addWidget(self.radiusSpiralEdit)\n \n \n hboxnumRotations=QtGui.QHBoxLayout()\n hboxnumRotations.addWidget(self.numRotations)\n hboxnumRotations.addStretch(1)\n hboxnumRotations.addWidget(self.numRotationsEdit)\n \n \n hboxstartCoords=QtGui.QHBoxLayout()\n hboxstartCoords.addWidget(self.startCoords)\n hboxstartCoords.addStretch(1)\n self.startDistanceEdit.setFixedWidth(100)\n self.startBearingEdit.setFixedWidth(100)\n hboxstartCoords.addWidget(self.startDistance)\n self.startDistanceEdit.setPlaceholderText(\"meters\")\n hboxstartCoords.addWidget(self.startDistanceEdit)\n hboxstartCoords.addWidget(self.startBearing)\n hboxstartCoords.addWidget(self.startBearingEdit)\n self.startBearingEdit.setPlaceholderText(\"degrees\")\n \n \n hboxModeQuad=QtGui.QHBoxLayout()\n hboxModeQuad.addWidget(self.modeQuad)\n option=QtGui.QComboBox(self)\n option.addItem(\"AUTO\")\n option.addItem(\"GUIDED\")\n hboxModeQuad.addStretch(1)\n hboxModeQuad.addWidget(option)\n \n hboxAccept=QtGui.QHBoxLayout()\n okButton = QtGui.QPushButton(\"OK\")\n cancelButton = QtGui.QPushButton(\"Cancel\")\n hboxAccept.addStretch(1)\n hboxAccept.addWidget(okButton)\n hboxAccept.addWidget(cancelButton)\n \n \n vbox.addLayout(hboxstartHeight)\n vbox.addLayout(hboxradiusSpiral)\n vbox.addLayout(hboxnumRotations)\n vbox.addLayout(hboxstartCoords)\n vbox.addLayout(hboxModeQuad)\n vbox.addStretch(1)\n vbox.addLayout(hboxAccept)\n \n self.setLayout(vbox)\n okButton.clicked.connect(self.test)\n \n self.setGeometry(300, 300,700,0)\n self.setWindowTitle('Spiral Test for x8+') \n self.show()\n \ndef main():\n \n app = QtGui.QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main() \n\n","sub_path":"pythonGUI/src/spiral_GUI.py","file_name":"spiral_GUI.py","file_ext":"py","file_size_in_byte":7036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"490888324","text":"# credit to: http://bookshadow.com/weblog/2017/05/21/leetcode-design-in-memory-file-system/\n\nclass FileSystem(object):\n\n def __init__(self):\n self.root = {'dir' : {}, 'file': {}}\n \n def getNode(self, path):\n node = self.root\n for dir in filter(len, path.split('/')):\n if dir in node['dir']:\n node = node['dir'][dir]\n else:\n return node, 'file'\n return node, 'dir'\n\n def ls(self, path):\n \"\"\"\n :type path: str\n :rtype: List[str]\n \"\"\"\n #print(self.root)\n node, type = self.getNode(path)\n if type == 'dir':\n return sorted(node['dir'].keys() + node['file'].keys())\n return [path.split('/')[-1]] \n\n def mkdir(self, path):\n \"\"\"\n :type path: str\n :rtype: None\n \"\"\"\n node = self.root\n for dir in filter(len, path.split('/')):\n\n if dir not in node['dir']:\n node['dir'][dir] = {'dir': {}, 'file': {}}\n \n node = node['dir'][dir]\n \n def addContentToFile(self, filePath, content):\n \"\"\"\n :type filePath: str\n :type content: str\n :rtype: None\n \"\"\"\n dir = filePath.split('/')\n path, file = '/'.join(dir[:-1]), dir[-1]\n self.mkdir(path)\n node, type = self.getNode(path)\n if file not in node['file']:\n node['file'][file] = ''\n\n node['file'][file] += content\n\n def readContentFromFile(self, filePath):\n \"\"\"\n :type filePath: str\n :rtype: str\n \"\"\"\n dir = filePath.split('/')\n path, file = '/'.join(dir[:-1]), dir[-1] \n node, type = self.getNode(path)\n return node['file'][file]\n \n \n\n\n# Your FileSystem object will be instantiated and called as such:\n# obj = FileSystem()\n# param_1 = obj.ls(path)\n# obj.mkdir(path)\n# obj.addContentToFile(filePath,content)\n# param_4 = obj.readContentFromFile(filePath)\n","sub_path":"Amazon/588 Design In-Memory File System.py","file_name":"588 Design In-Memory File System.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"387886650","text":"import json\nimport eero\nimport six\n\nclass CookieStore(eero.SessionStorage):\n def __init__(self, cookie_file):\n from os import path\n self.cookie_file = path.abspath(cookie_file)\n\n try:\n with open(self.cookie_file, 'r') as f:\n self.__cookie = f.read()\n except IOError:\n self.__cookie = None\n @property\n def cookie(self):\n return self.__cookie\n @cookie.setter\n def cookie(self, cookie):\n self.__cookie = cookie\n with open(self.cookie_file, 'w+') as f:\n f.write(self.__cookie)\n\nsession = CookieStore('session.cookie')\neero = eero.Eero(session)\n\ndef print_connected_devices(data):\n device = \"\"\n count = 3\n flag = False\n for item in data.split(\"\\n\"):\n if count > 0:\n count = count - 1\n if \"phone\" in item or \"Phone\" in item:\n flag = True\n device = device + \"\\n\" + item.strip()\n continue\n if \"connected\" in item:\n count = 2\n device = device + \"\\n\" + item.strip()\n if \"true\" in item:\n if flag:\n print(device)\n flag = False\n\n device = \"\"\n\n\ndef parse_json(data):\n data_string = json.dumps(data, indent=4)\n count = 0\n parsed_string = \"\"\n for item in data_string.split(\"\\n\"):\n if count > 0:\n count = count - 1\n parsed_string = parsed_string + \"\\n\" + item.strip()\n continue\n if \"nickname\" in item:\n count = 2\n parsed_string = parsed_string + \"\\n\" + item.strip()\n\n print_connected_devices(parsed_string)\n\ndef print_json(data):\n print(json.dumps(data, indent=4))\n\nif __name__ == '__main__':\n while eero.needs_login():\n phone_number = six.moves.input('your eero login (email address or phone number): ')\n user_token = eero.login(phone_number)\n verification_code = six.moves.input('verification key from email or SMS: ')\n eero.login_verify(verification_code, user_token)\n print('Login successful')\n\n #this is where i mess with things for GUI\n account = eero.account()\n\n print('Command options: info, details, devices, eeros, reboot')\n command = six.moves.input('enter a command: ')\n\n for network in account['networks']['data']:\n if command == 'info': #just gives network name\n print_json(network)\n if command == 'details': #gives details on network\n network_details = eero.networks(network['url'])\n print_json(network_details)\n if command == 'devices': #gives devices and details on devices\n devices = eero.devices(network['url'])\n parse_json(devices)\n if command == 'eeros': #gives details on gateway, routers, and boosters\n eeros = eero.eeros(network['url'])\n print_json(eeros)\n if command == 'reboot': #reboots an eero device\n print('Eero options are: office , upstairs (gateway), family room, hallway')\n name = six.moves.input('Name of Eero to be rebooted: ')\n\n if name == 'office':\n reboot = eero.reboot()\n print_json(reboot)\n elif name == 'upstairs':\n reboot = eero.reboot()\n print_json(reboot)\n elif name == 'family room':\n reboot = eero.reboot()\n print_json(reboot)\n elif name == 'hallway':\n reboot = eero.reboot()\n print_json(reboot)\n else:\n print('Please put in a valid name')\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"280967572","text":"from flask import Flask, url_for;\nfrom app import app;\n\n#Server/\n@app.route('/')\ndef hello():\n \"\"\"Renders a sample page.\"\"\"\n createlink = \"Create Page\" #'create' refers to the function name not the route\n return \"\"\"\n \n \n Hello World!\n \n \n Hello, friend. Hello, friend? That's lame. Maybe I should give you a name :P \n \"\"\" + createlink + \"\"\"\n \n \"\"\";\n\n#Server/create\n@app.route('/create')\ndef create():\n if request.method == 'GET':\n #send the form to the user\n return render_template('CreateQuestion');\n elif request.method == 'POST':\n #read data from the form and save it to database\n title = request.form['title'];\n question = request.form['question'];\n answer = request.form['answer'];\n #store the above data in a database\n return render_template('CreatedQuestion.html', question=question);\n else:\n return \"Invalid request
\";\n\n#Server/question\n@app.route('/question/')\ndef question(title):\n return \"Hey \" + title + \"
\"","sub_path":"Trivia App/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"115562362","text":"from django.urls import path\nfrom . import views\nurlpatterns = [\n path('login/', views.user_login, name='login'),\n path('register/', views.user_register, name='register'),\n path('logout/', views.user_logout, name='logout'),\n path('user_info/', views.user_info, name='user_info'),\n path('change_nickname/', views.change_nickname, name='change_nickname'),\n path('bind_email/', views.bind_email, name='bind_email'),\n path('send_verification_code/', views.send_verification_code, name='send_verification_code'),\n]","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"179993053","text":"#!/usr/bin/env python\n\n# Copyright (c) 2015, Riverbank Computing Limited\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# \n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\n\"\"\"This script extracts various items of meta-data from a Mercurial repository\nor a Mercurial archive. It is not part of a packaged release.\n\"\"\"\n\n\nimport os\nimport sys\nimport time\n\n\n# The root directory, i.e. the one containing this script.\n_RootDir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef _release_tag(ctx):\n \"\"\" Get the release tag (i.e. 
a tag of the form x.y[.z]) converted to a\n 3-tuple of integers if there is one.\n\n :param ctx:\n The Mercurial change context containing the tags.\n :return:\n The 3-tuple of integers or ``None`` if there was no release tag.\n \"\"\"\n\n for tag in ctx.tags():\n if tag != 'tip':\n parts = tag.split('.')\n\n if len(parts) == 2:\n parts.append('0')\n\n if len(parts) == 3:\n major, minor, micro = parts\n\n try:\n return (int(major), int(minor), int(micro))\n except ValueError:\n pass\n\n return None\n\n\ndef _format_changelog(ctx):\n \"\"\" Format the log message for a changeset.\n\n :param ctx:\n The Mercurial change context containing the tags.\n :return:\n The formatted change log.\n \"\"\"\n\n from mercurial.util import datestr\n\n log = \"changeset: %s\\ndate: %s\\n%s\" % (str(ctx), datestr(ctx.date()), ctx.description())\n\n return log\n\n\ndef _get_release():\n \"\"\" Get the release of the package.\n\n :return:\n A tuple of the full release name, the version number, the hexadecimal\n version number and a list of changelog entries (all as strings).\n \"\"\"\n\n # The root directory should contain dot files that tell us what sort of\n # package we are.\n\n release_suffix = ''\n\n if os.path.exists(os.path.join(_RootDir, '.hg')):\n # Handle a Mercurial repository.\n\n from mercurial import hg, ui\n\n # Get the repository.\n repo = hg.repository(ui.ui(), _RootDir)\n\n # The last changeset is the \"parent\" of the working directory.\n ctx = repo[None].parents()[0]\n\n # If the one before the last changeset has a release tag then the last\n # changeset refers to the tagging and not a genuine change.\n before = ctx.parents()[0]\n\n version = _release_tag(before)\n\n if version is not None:\n ctx = before\n else:\n release_suffix = time.strftime('.dev%y%m%d%H%M',\n time.localtime(ctx.date()[0]))\n\n changelog = [_format_changelog(ctx)]\n\n # Go back through the line of the first parent to find the last\n # release.\n parent_version = None\n\n parents = ctx.parents()\n while len(parents) != 0:\n parent_ctx = parents[0]\n if parent_ctx.rev() < 0:\n break\n\n changelog.append(_format_changelog(parent_ctx))\n\n parent_version = _release_tag(parent_ctx)\n if parent_version is not None:\n break\n\n parents = parent_ctx.parents()\n\n if version is None and parent_version is not None:\n # This is a development release so work out what the next version\n # will be based on the previous version.\n major, minor, micro = parent_version\n\n if ctx.branch() == 'default':\n minor += 1\n\n # This should be 0 anyway.\n micro = 0\n else:\n micro += 1\n\n version = (major, minor, micro)\n else:\n # Handle a Mercurial archive.\n\n changelog = None\n name = os.path.basename(_RootDir)\n\n release_suffix = \"-unknown\"\n version = None\n\n parts = name.split('-')\n if len(parts) > 1:\n name = parts[-1]\n\n if len(name) == 12:\n # This is the best we can do without access to the repository.\n release_suffix = '-' + name\n\n # Format the results.\n if version is None:\n version = (0, 1, 0)\n\n major, minor, micro = version\n\n if micro == 0:\n version = '%d.%d' % (major, minor)\n else:\n version = '%d.%d.%d' % (major, minor, micro)\n\n if 'dev' in release_suffix:\n level = 0x0\n elif 'alpha' in release_suffix:\n level = 0xa\n elif 'beta' in release_suffix:\n level = 0xb\n elif 'rc' in release_suffix:\n level = 0xc\n else:\n level = 0xf\n\n release = '%s%s' % (version, release_suffix)\n hex_version = '%02x%02x%02x%01x0' % (major, minor, micro, level)\n\n return release, version, hex_version, changelog\n\n\ndef 
changelog(output_dir):\n \"\"\" The description of each change set going back to the last release are\n written to a file object.\n\n :param output_dir:\n The name of the directory that the log is created in.\n :return:\n ``True`` if the log was written or ``False`` if the information wasn't\n available (because this is a Mercurial archive).\n \"\"\"\n\n release, _, _, changelog = _get_release()\n\n if changelog is None:\n return False\n\n changelog_name = 'ChangeLog'\n if 'dev' in release:\n changelog_name += '-' + release\n\n out_file = open(os.path.join(output_dir, changelog_name), 'w')\n out_file.write(\"\\n\\n\".join(changelog) + \"\\n\")\n out_file.close()\n\n return True\n\n\ndef pyversion(py_file):\n \"\"\" Write the version of the package as a string and a hexversion to a\n file. If it is a release then it will be of the form x.y[.z]. If it is a\n development release then it will be of the form x.y[.z].dev{timestamp}\n where x.y[.z] is the version number of the next release (not the previous\n one). If this is a Mercurial archive (rather than a repository) then it\n does the best it can (based on the name of the directory) with the limited\n information available.\n\n :param py_file:\n The file that the Python code is written to.\n \"\"\"\n\n release, _, hexversion, _ = _get_release()\n\n py_file.write('PYQTDEPLOY_RELEASE = \\'%s\\'\\n' % release)\n py_file.write('PYQTDEPLOY_HEXVERSION = 0x%s\\n' % hexversion)\n\n\nif __name__ == '__main__':\n\n def _changelog(options):\n \"\"\"get the changelog entries since the last release\"\"\"\n\n output_dir = options.output\n if output_dir is None:\n output_dir = '.'\n\n if not changelog(output_dir):\n sys.stderr.write(\"Unable to produce a changelog without a repository\\n\")\n sys.exit(2)\n\n\n def _pyversion(options):\n \"\"\"create Python code implementing the version of the package\"\"\"\n\n if options.output is not None:\n out_file = open(options.output, 'w')\n else:\n out_file = sys.stdout\n\n pyversion(out_file)\n\n if options.output is not None:\n out_file.close()\n\n\n actions = (_changelog, _pyversion)\n\n import optparse\n\n class MyParser(optparse.OptionParser):\n\n def get_usage(self):\n \"\"\" Reimplemented to add the description of the actions. We don't\n use the description because the default formatter strips newlines.\n \"\"\"\n\n usage = optparse.OptionParser.get_usage(self)\n\n usage += \"\\n\" + __doc__ + \"\\nActions:\\n\"\n\n for action in actions:\n usage += \" %-9s %s\\n\" % (action.__name__[1:], action.func_doc)\n\n return usage\n\n\n action_names = [action.__name__[1:] for action in actions]\n\n rel, _, _, _ = _get_release()\n\n parser = MyParser(\n usage=\"%%prog [options] %s\" % '|'.join(action_names), version=rel)\n\n parser.add_option(\"-o\", \"--output\", metavar=\"FILE or DIR\", dest='output',\n help=\"write output to FILE or DIR\")\n\n options, args = parser.parse_args()\n\n if len(args) != 1:\n parser.print_help()\n sys.exit(1)\n\n for action in actions:\n if action.__name__[1:] == args[0]:\n action(options)\n break\n else:\n parser.print_help()\n sys.exit(1)\n\n sys.exit()\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":9357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"240828319","text":"from django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm\nfrom evap.fsr.models import EmailTemplate\n\n\ndef index(request):\n new_key_form = NewKeyForm(request.POST or None)\n \n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n try:\n user = User.objects.get(email__iexact=new_key_form.cleaned_data['email'])\n profile = user.get_profile()\n profile.generate_logon_key()\n profile.save()\n \n EmailTemplate.get_logon_key_template().send_user(user)\n \n except User.DoesNotExist:\n messages.warning(request, _(u\"No user with this e-mail address was found.\"))\n \n if not request.user.is_active:\n return render_to_response(\n \"index.html\",\n dict(\n new_key_form=new_key_form\n ),\n context_instance=RequestContext(request))\n else:\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n elif request.user.get_profile().is_lecturer:\n return redirect('evap.lecturer.views.index')\n else:\n return redirect('evap.student.views.index')\n \ndef faq(request):\n return render_to_response(\"faq.html\", dict(), context_instance=RequestContext(request))\n","sub_path":"evap/evaluation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"525113791","text":"# coding=utf-8\nfrom __future__ import print_function\nimport re\nimport time\nimport argparse\nfrom genpass.lib.person import Person\n\n\ndef email(string):\n if not re.match(r'^[\\w\\d.-_]+@[\\w\\d.-]+\\.[\\w]{2,8}$', string):\n raise ValueError(string)\n return string\n\n\ndef date(date_string):\n if not date_string:\n return None\n return time.strptime(date_string, '%Y-%m-%d')\n\n\ndef cmd_parser():\n parser = argparse.ArgumentParser(description='User information')\n\n parser.add_argument('-n', '--name', dest='name', action='store',\n help='real name of target', nargs='*', default=[])\n parser.add_argument('-u', '--username', dest='username', action='store',\n help='usernames of target, English only', nargs='*', default=[])\n parser.add_argument('-q', '--qq', dest='qq', action='store',\n help='QQ numbers of target', nargs='*', type=int, default=[])\n parser.add_argument('-e', '--email', dest='email', action='store',\n help='email addresses of target', nargs='*', type=email, default=[])\n parser.add_argument('-m', '--mobile', dest='mobile', action='store',\n help='mobile phone/phone numbers of target', nargs='*', type=int, default=[])\n parser.add_argument('-b', '--birthday', dest='birthday', action='store',\n help='birthday of target, format: %%Y-%%m-%%d', type=date, default=None)\n parser.add_argument('-c', '--company', dest='company', nargs='*', action='store',\n help='company / website domain of target', type=str, default=[])\n parser.add_argument('--with-dict', dest='with_dict', action='store_true',\n help='generate passwords with weak password dictionary')\n parser.add_argument('-o', '--output', dest='output_file', action='store',\n help='output result to a json file', type=argparse.FileType('w'))\n\n args = parser.parse_args()\n if not any(args.__dict__.values()):\n parser.print_help()\n raise SystemExit\n\n person_list = []\n person_list.append(Person(information=args.__dict__))\n\n return (args, person_list)\n","sub_path":"genpass/lib/cmdline.py","file_name":"cmdline.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"125408036","text":"from webob import Request\n\nfrom kotti.testing import TestingRootFactory\nfrom kotti.testing import UnitTestBase\n\nclass TestApp(UnitTestBase):\n def required_settings(self):\n return {'sqlalchemy.url': 'sqlite://',\n 'kotti.secret': 'dude'}\n\n def test_override_settings(self):\n from kotti import main\n from kotti import get_settings\n \n class MyType(object):\n pass\n\n def my_configurator(conf):\n conf['kotti.base_includes'] = ''\n conf['kotti.available_types'] = [MyType]\n \n settings = self.required_settings()\n settings['kotti.configurators'] = [my_configurator]\n main({}, **settings)\n\n self.assertEqual(get_settings()['kotti.base_includes'], [])\n self.assertEqual(get_settings()['kotti.available_types'], [MyType])\n\n def test_auth_policies_no_override(self):\n from pyramid.interfaces import IAuthenticationPolicy\n from pyramid.interfaces import IAuthorizationPolicy\n from pyramid.threadlocal import get_current_registry\n from kotti import main\n\n settings = self.required_settings()\n main({}, **settings)\n\n registry = get_current_registry()\n assert registry.queryUtility(IAuthenticationPolicy) != None\n assert registry.queryUtility(IAuthorizationPolicy) != None\n\n def test_auth_policies_override(self):\n from pyramid.interfaces import IAuthenticationPolicy\n from pyramid.interfaces import IAuthorizationPolicy\n from pyramid.threadlocal import get_current_registry\n from kotti import main\n\n settings = self.required_settings()\n settings['kotti.authn_policy_factory'] = 'kotti.none_factory'\n settings['kotti.authz_policy_factory'] = 'kotti.none_factory'\n main({}, **settings)\n\n registry = get_current_registry()\n assert registry.queryUtility(IAuthenticationPolicy) == None\n assert registry.queryUtility(IAuthorizationPolicy) == None\n\n def test_persistent_settings(self):\n from kotti import get_settings\n from kotti import get_version\n from kotti import DBSession\n from kotti.resources import Settings\n\n session = DBSession()\n [settings] = session.query(Settings).all()\n self.assertEqual(settings.data, {'kotti.db_version': get_version()})\n self.assertEqual(get_settings()['kotti.db_version'], get_version())\n settings.data['foo.bar'] = u'baz'\n self.assertEqual(get_settings()['foo.bar'], u'baz')\n\n def test_persistent_settings_add_new(self):\n from kotti import DBSession\n from kotti import get_settings\n from kotti.resources import Settings\n\n [settings] = DBSession.query(Settings).all()\n data = {'foo.bar': u'spam', 'kotti.db_version': u'next'}\n new_settings = settings.copy(data)\n DBSession.add(new_settings)\n self.assertEqual(get_settings()['foo.bar'], u'spam')\n self.assertEqual(get_settings()['kotti.db_version'], u'next')\n\n def test_asset_overrides(self):\n from kotti import main\n \n settings = self.required_settings()\n settings['kotti.asset_overrides'] = 'pyramid:scaffold/ pyramid.fixers'\n main({}, **settings)\n\n @staticmethod\n def _includeme_login(config):\n from kotti.resources import Node\n from kotti.views.login import login\n\n config.add_view(\n login,\n name='login',\n context=Node,\n renderer='kotti:templates/login.pt',\n )\n\n def test_includes_overrides(self):\n from kotti import main\n\n settings = self.required_settings()\n settings['kotti.includes'] = (self._includeme_login,)\n main({}, **settings)\n\n def test_use_tables(self):\n from kotti import main\n\n settings = self.required_settings()\n settings['kotti.populators'] = ''\n settings['kotti.use_tables'] = 'principals'\n main({}, **settings)\n\n def 
test_root_factory(self):\n from kotti import main\n from kotti.resources import get_root\n\n settings = self.required_settings()\n settings['kotti.root_factory'] = (TestingRootFactory,)\n app = main({}, **settings)\n assert isinstance(get_root(), TestingRootFactory)\n assert isinstance(app.root_factory(), TestingRootFactory)\n\n def test_render_master_edit_template_with_minimal_root(self, settings=None):\n from kotti import main\n\n settings = settings or self.required_settings()\n settings['kotti.root_factory'] = (TestingRootFactory,)\n settings['kotti.site_title'] = 'My Site'\n app = main({}, **settings)\n \n request = Request.blank('/@@login')\n (status, headers, response) = request.call_application(app)\n assert status == '200 OK'\n\n @staticmethod\n def _includeme_layout(config):\n # override edit master layout with view master layout\n config.override_asset(\n to_override='kotti:templates/edit/master.pt',\n override_with='kotti:templates/view/master.pt',\n ) \n\n def test_render_master_view_template_with_minimal_root(self):\n settings = self.required_settings()\n settings['kotti.includes'] = (self._includeme_layout,)\n return self.test_render_master_edit_template_with_minimal_root(settings)\n\n def test_setting_values_as_unicode(self):\n from kotti import get_settings\n from kotti import main\n\n settings = self.required_settings()\n settings['kotti.site_title'] = 'K\\xc3\\xb6tti' # Kötti\n\n main({}, **settings)\n self.assertEqual(get_settings()['kotti.site_title'], u'K\\xf6tti')\n","sub_path":"kotti/tests/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"342860160","text":"from turtle import Turtle,Screen\nimport random\n\nscreen=Screen()\nscreen.colormode(255)\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape(\"circle\")\n self.penup()\n self.shapesize(stretch_len=0.5, stretch_wid=0.5)\n self.speed(\"fastest\")\n self.refresh()\n\n def refresh(self):\n \n # Generating random food colurs\n r=random.randint(0,255)\n g=random.randint(0,255)\n b=random.randint(0,255)\n self.color(r,g,b)\n\n # Generating food at random locations\n random_x = random.randint(-280, 280)\n random_y = random.randint(-280, 280)\n self.goto(random_x, random_y)\n\n","sub_path":"food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"307552000","text":"import pandas as pd\nimport datetime\nimport yfinance as yf\nimport requests\nimport io\n\nsymbol_url = \"https://pkgstore.datahub.io/core/nasdaq-listings/nasdaq-listed_csv/data/7665719fb51081ba0bd834fde71ce822/nasdaq-listed_csv.csv\"\ns = requests.get(symbol_url).content\nsymbols = pd.read_csv(io.StringIO(s.decode('utf-8')))['Symbol'].tolist()\nprint(\"Stock Symbols\", symbols)\n\n# Comment this to get the all-symbols\nsymbols = [\"AAL\", \"AAPL\", \"DAL\", \"FB\", \"AMZN\", \"TSLA\", \"MSFT\", \"CRM\"]\nstock_data = pd.DataFrame()\nstart = datetime.datetime(2020, 1, 1)\nend = datetime.datetime(2020, 11, 28)\n\n\ndef sma(data, n):\n sma_values = pd.Series(data['Close'].rolling(n).mean(), name='Sma')\n data = data.join(sma_values)\n return data\n\n\ndef ewma(data, n):\n ema = pd.Series(data['Close'].ewm(span=n, min_periods=n - 1).mean(),\n name='Ewma_' + str(n))\n data = data.join(ema)\n return data\n\n\ndef cci(data, n):\n TP = (data['High'] + data['Low'] + data['Close']) / 3\n cci_values = pd.Series((TP - TP.rolling(n).mean()) / (0.015 * TP.rolling(n).std()),\n name='Cci')\n data = data.join(cci_values)\n return data\n\n\nfor symbol in symbols:\n try:\n s = []\n n = 15\n s = yf.download(symbol, start=start, end=end)\n if s is not None and len(s) > 0:\n s['Name'] = symbol\n\n # Getting simple moving average\n s = sma(s, n)\n s = s.dropna()\n\n # Exponentially weighted moving average\n s = ewma(s, n)\n s = s.dropna()\n\n # Commodity Channel Index\n s = cci(s, n)\n s = s.dropna()\n\n stock_data = stock_data.append(s, sort=False)\n except Exception:\n None\n\nprint(stock_data)\n","sub_path":"final_project/scripts/data_agg_prep.py","file_name":"data_agg_prep.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"47094360","text":"\"\"\" Evolving Stochastic Cellular automata 1D - Self-organized criticality\"\"\"\n\nimport evodynamic.experiment as experiment\nimport evodynamic.connection.random_boolean_net as rbn\nimport evodynamic.cells.activation as act\nimport evodynamic.connection as connection\nfrom evodynamic.evolution import ga\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport time\nimport powerlaw\nimport csv\nimport os\nimport sys\n\nwidth = 1000\ntimesteps = 1000\n\ndef KSdist(theoretical_pdf, empirical_pdf):\n return np.max(np.abs(np.cumsum(theoretical_pdf) - np.cumsum(empirical_pdf)))\n\ndef getdict_cluster_size(arr1d):\n cluster_dict = {}\n current_number = None\n for a in arr1d:\n if current_number == a:\n cluster_dict[a][-1] = cluster_dict[a][-1]+1\n else:\n current_number = a\n if a in cluster_dict:\n cluster_dict[a].append(1)\n else:\n cluster_dict[a] = [1]\n return cluster_dict\n\ndef getarray_avalanche_size(x, value):\n list_avalance_size = []\n if value in x:\n x0size, x1size = x.shape\n for i in range(x0size):\n if value in x[i,:]:\n list_avalance_size.extend(getdict_cluster_size(x[i,:])[value])\n return np.array(list_avalance_size)\n\ndef getarray_avalanche_duration(x, value):\n list_avalance_duration = []\n if value in x:\n x0size, x1size = x.shape\n for i in range(x1size):\n if value in x[:,i]:\n list_avalance_duration.extend(getdict_cluster_size(x[:,i])[value])\n return np.array(list_avalance_duration)\n\ndef norm_coef(coef):\n return -np.mean(coef)\n\ndef norm_linscore(linscore):\n return np.mean(linscore)#5*np.max(linscore)+5*np.mean(linscore)\n\n# Normalize values from 0 to inf to be from 10 to 0\ndef norm_ksdist(ksdist, smooth=1):\n return np.exp(-smooth * (0.9*np.min(ksdist)+0.1*np.mean(ksdist)))\n\n# Normalize values from -inf to inf to be from 0 to 1\ndef norm_R(R, smooth=0.01):\n return 1. / (1.+np.exp(-smooth * (0.9*np.max(R)+0.1*np.mean(R))))\n\ndef normalize_avalanche_pdf_size(mask_avalanche_s_0_bc, mask_avalanche_d_0_bc,\\\n mask_avalanche_s_1_bc, mask_avalanche_d_1_bc):\n norm_avalanche_pdf_size_s_0 = sum(mask_avalanche_s_0_bc)/width\n norm_avalanche_pdf_size_d_0 = sum(mask_avalanche_d_0_bc)/timesteps\n norm_avalanche_pdf_size_s_1 = sum(mask_avalanche_s_1_bc)/width\n norm_avalanche_pdf_size_d_1 = sum(mask_avalanche_d_1_bc)/timesteps\n\n mean_avalanche_pdf_size = np.mean([norm_avalanche_pdf_size_s_0,\\\n norm_avalanche_pdf_size_d_0,\\\n norm_avalanche_pdf_size_s_1,\\\n norm_avalanche_pdf_size_d_1])\n max_avalanche_pdf_size = np.max([norm_avalanche_pdf_size_s_0,\\\n norm_avalanche_pdf_size_d_0,\\\n norm_avalanche_pdf_size_s_1,\\\n norm_avalanche_pdf_size_d_1])\n\n return np.tanh(5*(0.9*max_avalanche_pdf_size+0.1*mean_avalanche_pdf_size))\n\ndef sigmoid(x, smooth=0.01):\n return 1. / (1. 
+ np.exp(-x*smooth))\n\ndef norm_comparison_ratio(R_list):\n return sigmoid(0.9*np.max(R_list) + 0.1*np.mean(R_list))\n\ndef calculate_comparison_ratio(data):\n fit = powerlaw.Fit(data, xmin =1, discrete= True)\n R_exp, p_exp = fit.distribution_compare('power_law', 'exponential', normalized_ratio=True)\n R = R_exp if p_exp < 0.1 else 0\n\n return R\n\n\ndef evaluate_result(ca_result, filename=None):\n avalanche_s_0 = getarray_avalanche_size(ca_result, 0)\n avalanche_d_0 = getarray_avalanche_duration(ca_result, 0)\n avalanche_s_0_bc = np.bincount(avalanche_s_0)[1:] if len(avalanche_s_0) > 5 else []\n avalanche_d_0_bc = np.bincount(avalanche_d_0)[1:] if len(avalanche_d_0) > 5 else []\n\n avalanche_s_1 = getarray_avalanche_size(ca_result, 1)\n avalanche_d_1 = getarray_avalanche_duration(ca_result, 1)\n avalanche_s_1_bc = np.bincount(avalanche_s_1)[1:] if len(avalanche_s_1) > 5 else []\n avalanche_d_1_bc = np.bincount(avalanche_d_1)[1:] if len(avalanche_d_1) > 5 else []\n\n avalanche_s_0_bc = avalanche_s_0_bc/sum(avalanche_s_0_bc)\n avalanche_d_0_bc = avalanche_d_0_bc/sum(avalanche_d_0_bc)\n avalanche_s_1_bc = avalanche_s_1_bc/sum(avalanche_s_1_bc)\n avalanche_d_1_bc = avalanche_d_1_bc/sum(avalanche_d_1_bc)\n\n mask_avalanche_s_0_bc = avalanche_s_0_bc > 0\n mask_avalanche_d_0_bc = avalanche_d_0_bc > 0\n mask_avalanche_s_1_bc = avalanche_s_1_bc > 0\n mask_avalanche_d_1_bc = avalanche_d_1_bc > 0\n\n log_avalanche_s_0_bc = np.log10(avalanche_s_0_bc)\n log_avalanche_d_0_bc = np.log10(avalanche_d_0_bc)\n log_avalanche_s_1_bc = np.log10(avalanche_s_1_bc)\n log_avalanche_d_1_bc = np.log10(avalanche_d_1_bc)\n\n log_avalanche_s_0_bc = np.where(mask_avalanche_s_0_bc, log_avalanche_s_0_bc, 0)\n log_avalanche_d_0_bc = np.where(mask_avalanche_d_0_bc, log_avalanche_d_0_bc, 0)\n log_avalanche_s_1_bc = np.where(mask_avalanche_s_1_bc, log_avalanche_s_1_bc, 0)\n log_avalanche_d_1_bc = np.where(mask_avalanche_d_1_bc, log_avalanche_d_1_bc, 0)\n\n fitness = 0\n norm_avalanche_pdf_size = 0\n norm_linscore_res = 0\n norm_ksdist_res = 0\n norm_coef_res = 0\n norm_unique_states = 0\n norm_R_res = 0\n\n if sum(mask_avalanche_s_0_bc[:10]) > 5 and sum(mask_avalanche_d_0_bc[:10]) > 5 and\\\n sum(mask_avalanche_s_1_bc[:10]) > 5 and sum(mask_avalanche_d_1_bc[:10]) > 5:\n\n # Fit PDF using least square error\n fit_avalanche_s_0_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_s_0_bc)+1)[mask_avalanche_s_0_bc]).reshape(-1,1), log_avalanche_s_0_bc[mask_avalanche_s_0_bc])\n fit_avalanche_d_0_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_d_0_bc)+1)[mask_avalanche_d_0_bc]).reshape(-1,1), log_avalanche_d_0_bc[mask_avalanche_d_0_bc])\n fit_avalanche_s_1_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_s_1_bc)+1)[mask_avalanche_s_1_bc]).reshape(-1,1), log_avalanche_s_1_bc[mask_avalanche_s_1_bc])\n fit_avalanche_d_1_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_d_1_bc)+1)[mask_avalanche_d_1_bc]).reshape(-1,1), log_avalanche_d_1_bc[mask_avalanche_d_1_bc])\n\n linscore_list = []\n linscore_list.append(fit_avalanche_s_0_bc.score(np.log10(np.arange(1,len(avalanche_s_0_bc)+1)[mask_avalanche_s_0_bc]).reshape(-1,1), log_avalanche_s_0_bc[mask_avalanche_s_0_bc]))\n linscore_list.append(fit_avalanche_d_0_bc.score(np.log10(np.arange(1,len(avalanche_d_0_bc)+1)[mask_avalanche_d_0_bc]).reshape(-1,1), log_avalanche_d_0_bc[mask_avalanche_d_0_bc]))\n linscore_list.append(fit_avalanche_s_1_bc.score(np.log10(np.arange(1,len(avalanche_s_1_bc)+1)[mask_avalanche_s_1_bc]).reshape(-1,1), 
log_avalanche_s_1_bc[mask_avalanche_s_1_bc]))\n linscore_list.append(fit_avalanche_d_1_bc.score(np.log10(np.arange(1,len(avalanche_d_1_bc)+1)[mask_avalanche_d_1_bc]).reshape(-1,1), log_avalanche_d_1_bc[mask_avalanche_d_1_bc]))\n\n # Fit PDF using least square error\n fit_avalanche_s_0_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_s_0_bc)+1)[mask_avalanche_s_0_bc]).reshape(-1,1), log_avalanche_s_0_bc[mask_avalanche_s_0_bc], sample_weight=[1 if idx < 10 else 0 for idx in np.arange(len(avalanche_s_0_bc))[mask_avalanche_s_0_bc]])\n fit_avalanche_d_0_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_d_0_bc)+1)[mask_avalanche_d_0_bc]).reshape(-1,1), log_avalanche_d_0_bc[mask_avalanche_d_0_bc], sample_weight=[1 if idx < 10 else 0 for idx in np.arange(len(avalanche_d_0_bc))[mask_avalanche_d_0_bc]])\n fit_avalanche_s_1_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_s_1_bc)+1)[mask_avalanche_s_1_bc]).reshape(-1,1), log_avalanche_s_1_bc[mask_avalanche_s_1_bc], sample_weight=[1 if idx < 10 else 0 for idx in np.arange(len(avalanche_s_1_bc))[mask_avalanche_s_1_bc]])\n fit_avalanche_d_1_bc = LinearRegression().fit(np.log10(np.arange(1,len(avalanche_d_1_bc)+1)[mask_avalanche_d_1_bc]).reshape(-1,1), log_avalanche_d_1_bc[mask_avalanche_d_1_bc], sample_weight=[1 if idx < 10 else 0 for idx in np.arange(len(avalanche_d_1_bc))[mask_avalanche_d_1_bc]])\n\n theor_avalanche_s_0_bc = np.power(10,fit_avalanche_s_0_bc.predict(np.log10(np.arange(1,len(avalanche_s_0_bc)+1).reshape(-1,1))))\n theor_avalanche_d_0_bc = np.power(10,fit_avalanche_d_0_bc.predict(np.log10(np.arange(1,len(avalanche_d_0_bc)+1).reshape(-1,1))))\n theor_avalanche_s_1_bc = np.power(10,fit_avalanche_s_1_bc.predict(np.log10(np.arange(1,len(avalanche_s_1_bc)+1).reshape(-1,1))))\n theor_avalanche_d_1_bc = np.power(10,fit_avalanche_d_1_bc.predict(np.log10(np.arange(1,len(avalanche_d_1_bc)+1).reshape(-1,1))))\n\n ksdist_list = []\n ksdist_list.append(KSdist(theor_avalanche_s_0_bc, avalanche_s_0_bc))\n ksdist_list.append(KSdist(theor_avalanche_d_0_bc, avalanche_d_0_bc))\n ksdist_list.append(KSdist(theor_avalanche_s_1_bc, avalanche_s_1_bc))\n ksdist_list.append(KSdist(theor_avalanche_d_1_bc, avalanche_d_1_bc))\n\n coef_list = []\n coef_list.append(fit_avalanche_s_0_bc.coef_[0])\n coef_list.append(fit_avalanche_d_0_bc.coef_[0])\n coef_list.append(fit_avalanche_s_1_bc.coef_[0])\n coef_list.append(fit_avalanche_d_1_bc.coef_[0])\n #print(coef)\n\n norm_avalanche_pdf_size = normalize_avalanche_pdf_size(mask_avalanche_s_0_bc,\\\n mask_avalanche_d_0_bc,\\\n mask_avalanche_s_1_bc,\\\n mask_avalanche_d_1_bc)\n\n print(\"linscore_list\", linscore_list)\n print(\"coef_list\", coef_list)\n print(\"ksdist_list\", ksdist_list)\n\n norm_linscore_res = norm_linscore(linscore_list)\n norm_ksdist_res = norm_ksdist(ksdist_list)\n norm_coef_res = norm_coef(coef_list)\n norm_unique_states = ((np.unique(ca_result, axis=0).shape[0]) / ca_result.shape[1])\n\n print(\"norm_avalanche_pdf_size\", norm_avalanche_pdf_size)\n print(\"norm_linscore_res\", norm_linscore_res)\n print(\"norm_ksdist_res\", norm_ksdist_res)\n print(\"norm_coef_res\", norm_coef_res)\n print(\"norm_unique_states\", norm_unique_states)\n\n fitness = norm_ksdist_res**2 + norm_unique_states + norm_avalanche_pdf_size + norm_linscore_res**2\n\n if fitness > 3.5:\n R_list = []\n R_list.append(calculate_comparison_ratio(avalanche_s_0))\n R_list.append(calculate_comparison_ratio(avalanche_d_0))\n R_list.append(calculate_comparison_ratio(avalanche_s_1))\n 
R_list.append(calculate_comparison_ratio(avalanche_d_1))\n print(\"R_list\", R_list)\n norm_R_res = norm_comparison_ratio(R_list)\n print(\"norm_R_res\", norm_R_res)\n fitness = fitness + norm_R_res\n\n val_dict = {}\n val_dict[\"norm_ksdist_res\"] = norm_ksdist_res\n val_dict[\"norm_coef_res\"] = norm_coef_res\n val_dict[\"norm_unique_states\"] = norm_unique_states\n val_dict[\"norm_avalanche_pdf_size\"] = norm_avalanche_pdf_size\n val_dict[\"norm_linscore_res\"] = norm_linscore_res\n val_dict[\"norm_R_res\"] = norm_R_res\n val_dict[\"fitness\"] = fitness\n\n print(\"Fitness\", fitness)\n return fitness, val_dict\n\n# genome is a list of float numbers between 0 and 1\ndef evaluate_genome(genome=8*[0.5], filename=None):\n print(genome)\n gen_rule = [(genome,)]\n\n exp = experiment.Experiment()\n g_ca = exp.add_group_cells(name=\"g_ca\", amount=width)\n g_ca_bin = g_ca.add_binary_state(state_name='g_ca_bin')\n g_ca_bin_conn = rbn.create_conn_matrix('g_ca_bin_conn', width)\n\n exp.add_connection(\"g_ca_conn\",\n connection.WeightedConnection(g_ca_bin,g_ca_bin,\n act.rule_binary_sca_1d_width3_func,\n g_ca_bin_conn, fargs_list=gen_rule))\n\n exp.add_monitor(\"g_ca\", \"g_ca_bin\", timesteps)\n\n exp.initialize_cells()\n\n start = time.time()\n\n exp.run(timesteps=timesteps)\n #ca_result .append()\n\n print(\"Execution time:\", time.time()-start)\n\n exp.close()\n\n fitness, val_dict = evaluate_result(exp.get_monitor(\"g_ca\", \"g_ca_bin\")[:,:,0])\n\n if isinstance(filename, str):\n if \".csv\" in filename:\n with open(filename, \"a+\", newline=\"\") as f:\n wr = csv.writer(f, delimiter=\";\")\n if os.stat(filename).st_size == 0:\n wr.writerow([\"genome\", \"fitness\", \"norm_ksdist_res\", \"norm_coef_res\", \"norm_unique_states\",\\\n \"norm_avalanche_pdf_size\", \"norm_linscore_res\", \"norm_R_res\"])\n\n wr.writerow([str(list(genome)), val_dict[\"fitness\"], val_dict[\"norm_ksdist_res\"],\\\n val_dict[\"norm_coef_res\"], val_dict[\"norm_unique_states\"],\\\n val_dict[\"norm_avalanche_pdf_size\"],val_dict[\"norm_linscore_res\"],\\\n val_dict[\"norm_R_res\"]])\n\n return fitness, val_dict\n\nstart_total = time.time()\n\nbest_genome = ga.evolve_probability(evaluate_genome, pop_size=40, generation=10)\n\nprint(\"TOTAL Execution time:\", time.time()-start_total)\n\nprint(best_genome)\n\nprint(\"Final fitness\", evaluate_genome(best_genome, sys.argv[1] if (len(sys.argv) > 1) else \"out.csv\"))\n","sub_path":"examples/evolve_criticality/evolve_stochastic_rbn_criticality.py","file_name":"evolve_stochastic_rbn_criticality.py","file_ext":"py","file_size_in_byte":12896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"442251196","text":"\"\"\"This module provides functions for performing linear algebra operations.\n\"\"\"\n\nimport numpy\nimport cupy\nimport cupy.prof\nimport cupyx\n\n\n@cupy.prof.TimeRangeDecorator(\"_batch_posv\")\ndef _batch_posv(a, b):\n \"\"\"Solve the linear equations A x = b via Cholesky factorization of A, where A\n is a real symmetric or complex Hermitian positive-definite matrix.\n\n If matrix ``a[i]`` is not positive definite, Cholesky factorization fails and\n it raises an error.\n\n Args:\n a (cupy.ndarray): Array of real symmetric or complex hermitian\n matrices with dimension (..., N, N).\n b (cupy.ndarray): right-hand side (..., N).\n Returns:\n x (cupy.ndarray): Array of solutions (..., N).\n \"\"\"\n if not cupy.cusolver.check_availability('potrsBatched'):\n raise RuntimeError('potrsBatched is not available')\n\n dtype = numpy.promote_types(a.dtype, b.dtype)\n dtype = numpy.promote_types(dtype, 'f')\n\n if dtype == 'f':\n potrfBatched = cupy.cuda.cusolver.spotrfBatched\n potrsBatched = cupy.cuda.cusolver.spotrsBatched\n elif dtype == 'd':\n potrfBatched = cupy.cuda.cusolver.dpotrfBatched\n potrsBatched = cupy.cuda.cusolver.dpotrsBatched\n elif dtype == 'F':\n potrfBatched = cupy.cuda.cusolver.cpotrfBatched\n potrsBatched = cupy.cuda.cusolver.cpotrsBatched\n elif dtype == 'D':\n potrfBatched = cupy.cuda.cusolver.zpotrfBatched\n potrsBatched = cupy.cuda.cusolver.zpotrsBatched\n else:\n msg = ('dtype must be float32, float64, complex64 or complex128'\n ' (actual: {})'.format(a.dtype))\n raise ValueError(msg)\n\n # Cholesky factorization\n a = a.astype(dtype, order='C', copy=True)\n ap = cupy.core._mat_ptrs(a)\n lda, n = a.shape[-2:]\n batch_size = int(numpy.prod(a.shape[:-2]))\n\n handle = cupy.cuda.device.get_cusolver_handle()\n uplo = cupy.cuda.cublas.CUBLAS_FILL_MODE_LOWER\n dev_info = cupy.empty(batch_size, dtype=numpy.int32)\n\n potrfBatched(handle, uplo, n, ap.data.ptr, lda, dev_info.data.ptr,\n batch_size)\n cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(\n potrfBatched, dev_info)\n\n # Cholesky solve\n b_shape = b.shape\n b = b.conj().reshape(batch_size, n, -1).astype(dtype, order='C', copy=True)\n bp = cupy.core._mat_ptrs(b)\n ldb, nrhs = b.shape[-2:]\n dev_info = cupy.empty(1, dtype=numpy.int32)\n\n potrsBatched(handle, uplo, n, nrhs, ap.data.ptr, lda, bp.data.ptr, ldb,\n dev_info.data.ptr, batch_size)\n cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(\n potrsBatched, dev_info)\n\n return b.conj().reshape(b_shape)\n\n@cupy.prof.TimeRangeDecorator(\"_posv\")\ndef _posv(a, b):\n \"\"\"Solve the linear equations A x = b via Cholesky factorization of A,\n where A is a real symmetric or complex Hermitian positive-definite matrix.\n\n If matrix ``A`` is not positive definite, Cholesky factorization fails\n and it raises an error.\n\n Note: For batch input, NRHS > 1 is not currently supported.\n\n Args:\n a (cupy.ndarray): Array of real symmetric or complex hermitian\n matrices with dimension (..., N, N).\n b (cupy.ndarray): right-hand side (..., N) or (..., N, NRHS).\n Returns:\n x (cupy.ndarray): The solution (shape matches b).\n \"\"\"\n\n cupy.linalg._util._assert_cupy_array(a, b)\n cupy.linalg._util._assert_nd_squareness(a)\n\n if a.ndim > 2:\n return _batch_posv(a, b)\n\n dtype = numpy.promote_types(a.dtype, b.dtype)\n dtype = numpy.promote_types(dtype, 'f')\n\n if dtype == 'f':\n potrf = cupy.cuda.cusolver.spotrf\n potrf_bufferSize = cupy.cuda.cusolver.spotrf_bufferSize\n potrs = 
cupy.cuda.cusolver.spotrs\n elif dtype == 'd':\n potrf = cupy.cuda.cusolver.dpotrf\n potrf_bufferSize = cupy.cuda.cusolver.dpotrf_bufferSize\n potrs = cupy.cuda.cusolver.dpotrs\n elif dtype == 'F':\n potrf = cupy.cuda.cusolver.cpotrf\n potrf_bufferSize = cupy.cuda.cusolver.cpotrf_bufferSize\n potrs = cupy.cuda.cusolver.cpotrs\n elif dtype == 'D':\n potrf = cupy.cuda.cusolver.zpotrf\n potrf_bufferSize = cupy.cuda.cusolver.zpotrf_bufferSize\n potrs = cupy.cuda.cusolver.zpotrs\n else:\n msg = ('dtype must be float32, float64, complex64 or complex128'\n ' (actual: {})'.format(a.dtype))\n raise ValueError(msg)\n\n a = a.astype(dtype, order='F', copy=True)\n lda, n = a.shape\n\n handle = cupy.cuda.device.get_cusolver_handle()\n uplo = cupy.cuda.cublas.CUBLAS_FILL_MODE_LOWER\n dev_info = cupy.empty(1, dtype=numpy.int32)\n\n worksize = potrf_bufferSize(handle, uplo, n, a.data.ptr, lda)\n workspace = cupy.empty(worksize, dtype=dtype)\n\n # Cholesky factorization\n potrf(handle, uplo, n, a.data.ptr, lda, workspace.data.ptr,\n worksize, dev_info.data.ptr)\n cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(\n potrf, dev_info)\n\n b_shape = b.shape\n b = b.reshape(n, -1).astype(dtype, order='F', copy=True)\n ldb, nrhs = b.shape\n\n # Solve: A * X = B\n potrs(handle, uplo, n, nrhs, a.data.ptr, lda, b.data.ptr, ldb,\n dev_info.data.ptr)\n cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(\n potrs, dev_info)\n\n return cupy.ascontiguousarray(b.reshape(b_shape))\n\n@cupy.prof.TimeRangeDecorator(\"cholesky_solve\")\ndef cholesky_solve(a, b):\n return _posv(a, b)\n\n@cupy.prof.TimeRangeDecorator(\"clipped_eigh\")\ndef clipped_eigh(a, clip_scale=1e-14):\n assert clip_scale >= 0\n w, v = cupy.linalg.eigh(a)\n #- clip eigenvalues relative to maximum eigenvalue\n #- TODO: assuming w is sorted, can skip cupy.max and use the appropriate index\n w = cupy.clip(w, a_min=clip_scale*cupy.max(w))\n return w, v\n\n@cupy.prof.TimeRangeDecorator(\"compose_eigh\")\ndef compose_eigh(w, v):\n return cupy.einsum('...ik,...k,...jk->...ij', v, w, v)\n\n@cupy.prof.TimeRangeDecorator(\"matrix_sqrt\")\ndef matrix_sqrt(a):\n #- eigen decomposition\n w, v = clipped_eigh(a)\n #- compose sqrt from eigen decomposition\n q = compose_eigh(cupy.sqrt(w), v)\n return q\n\n@cupy.prof.TimeRangeDecorator(\"diag_block_matrix_sqrt\")\ndef diag_block_matrix_sqrt(a, block_size):\n a_shape = a.shape\n n, m = a_shape[-2:]\n batch_size = numpy.prod(a_shape[:-2], dtype=int)\n nblocks, remainder = divmod(n, block_size)\n assert n == m\n assert remainder == 0\n #- flatten batch dimensions\n a = a.reshape(batch_size, n, m)\n #- eigen decomposition\n w, v = clipped_eigh(a)\n #- compose inverse from eigen decomposition\n ainv = compose_eigh(1.0/w, v)\n #- extract diagonal blocks\n #- TODO: use a view of diagonal blocks instead of copy?\n ainv_diag_blocks = cupy.empty(\n (batch_size * nblocks, block_size, block_size),\n dtype=a.dtype\n )\n for i in range(batch_size):\n for j, s in enumerate(range(0, n, block_size)):\n bs = slice(s, s + block_size)\n ainv_diag_blocks[i*nblocks + j] = ainv[i, bs, bs]\n #- eigen decomposition\n w, v = clipped_eigh(ainv_diag_blocks)\n #- compose inverse sqrt from eigen decomposition\n q_diag_blocks = compose_eigh(cupyx.rsqrt(w), v)\n #- insert block sqrts into result\n #- TODO: is there a way to avoid this new alloc/copy?\n q = cupy.zeros_like(a)\n for i in range(batch_size):\n for j, s in enumerate(range(0, n, block_size)):\n bs = slice(s, s + block_size)\n q[i, bs, bs] = 
q_diag_blocks[i*nblocks + j]\n return q.reshape(a_shape)\n\n","sub_path":"py/gpu_specter/linalg.py","file_name":"linalg.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
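The record above builds matrix functions (square root, inverse square root) by eigendecomposition: clipped_eigh clips tiny eigenvalues relative to the largest one, and compose_eigh recomposes V diag(w) V^T. A minimal NumPy sketch of that same pattern — these are re-implementations for illustration, not the cupy functions themselves — verifying that recomposing with the square root of the eigenvalues yields a true matrix square root:

    import numpy as np

    def clipped_eigh_np(a, clip_scale=1e-14):
        # eigendecomposition of a symmetric matrix, clipping tiny/negative
        # eigenvalues relative to the largest one (as in the module)
        w, v = np.linalg.eigh(a)
        return np.clip(w, clip_scale * w.max(), None), v

    def compose_eigh_np(w, v):
        # rebuild V diag(w) V^T, batched via einsum as in the module
        return np.einsum('...ik,...k,...jk->...ij', v, w, v)

    rng = np.random.default_rng(0)
    m = rng.standard_normal((5, 5))
    a = m @ m.T + 5.0 * np.eye(5)          # symmetric positive definite
    w, v = clipped_eigh_np(a)
    q = compose_eigh_np(np.sqrt(w), v)     # matrix square root
    assert np.allclose(q @ q, a)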
+{"seq_id":"20295858","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#!/usr/bin/env python\n# coding: utf-8\n\nimport json, math, copy\nfrom geosnap.io import store_ltdb\nfrom geosnap import Community, datasets\nfrom geosnap.io import store_census\nimport pandas as pd\nimport shapely.wkt\nimport shapely.geometry\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom pathlib import Path\nimport urllib.parse\nimport webbrowser\nimport os\nimport pprint\nimport numpy as np\nfrom notebook import notebookapp\nfrom IPython.core.display import display, HTML\nimport geopandas as gpd\n\ndef write_LOG(param):\n #Create a new folder where GEO_CONFIG.js GEO_JSON.js VARIABLES.js will be saved\n oDir = 'ACM_' + param['filename_suffix']\n path = Path(oDir + '/data')\n path.mkdir(parents=True, exist_ok=True)\n \n contents = pprint.pformat(param)\n #print(oDir+\"/data/param.log\")\n #print(contents)\n #write new outfiles: GEO_CONFIG.js GEO_JSON.js VARIABLES.js\n ofile = open(oDir+\"/data/param.log\", \"w\", encoding=\"utf-8\")\n create_at = datetime.now()\n ofile.write('%s %s\\r\\n' % (create_at.strftime('%Y-%m-%d'), create_at.strftime('%H:%M:%S')))\n #ofile.write('\\r\\n\\r\\n')\n ofile.write(' '+contents.replace('\\n', '\\n '))\n ofile.close()\n\ndef write_INDEX_html(param):\n \n #Create a new folder where CONFIG.js GEO_JSON.js VARIABLES.js will be saved\n oDir = 'ACM_' + param['filename_suffix']\n path = Path(oDir + '/data')\n path.mkdir(parents=True, exist_ok=True)\n \n contents = []\n #open Adaptive_Choropleth_Mapper.html (the excutable file for the visualization)\n ifile = open(\"template/Adaptive_Choropleth_Mapper.html\", \"r\", encoding=\"utf-8\")\n contents = ifile.read()\n \n #Replace variables based on the user's selection in each of four files below.\n contents = contents.replace(\"Adaptive Choropleth Mapper\", param['title'])\n contents = contents.replace(\"data/CONFIG.js\", \"data/CONFIG_\"+param['filename_suffix']+\".js\")\n contents = contents.replace(\"data/GEO_JSON.js\", \"data/GEO_JSON_\"+param['filename_suffix']+\".js\")\n contents = contents.replace(\"data/VARIABLES.js\", \"data/VARIABLES_\"+param['filename_suffix']+\".js\")\n \n #write new outfiles: CONFIG.js GEO_JSON.js VARIABLES.js\n ofile = open(oDir+\"/index.html\", \"w\")\n ofile.write(contents)\n ofile.close()\n\n\ndef write_CONFIG_js(param):\n # read CONFIG.js\n ifile = open(\"template/CONFIG.js\", \"r\", encoding=\"utf-8\")\n contents = ifile.read()\n \n # Automatically identify variables for \"NumOfMaps\" and \"InitialLayers\"\n '''when the user selects more than one year among 1970, 1980, 1990, 200 and 2010, \"NumOfMaps\" will be equal to the number of the selected years. However, when the user selects only one year among 5 years, \"NumOfMaps\" will be the number of variables that the user selected. (The maximum number of maps that can be visualized is 15) In this case, when the user selects more than 15 variables, the first 15 maps will be created at the initial view, and the rest of variables will be available in the dropdown box of the top of each map. 
In brief, there is no limit in terms of variables that the user can visualize, but the user can visualize upto 15 maps at the same time.'''\n NumOfMaps = len(param['years'])\n chart = param['chart'] if 'chart' in param else ''\n if (chart == \"Scatter Plot\"): NumOfMaps = 2\n InitialLayers = []\n if (NumOfMaps > 1):\n for i, year in enumerate(param['years']):\n InitialLayers.append(str(year)+' '+param['labels'][0])\n else:\n NumOfMaps = len(param['labels'])\n if ('NumOfMaps' in param): NumOfMaps = param['NumOfMaps']\n if (NumOfMaps > 15): NumOfMaps = 15\n for i, variable in enumerate(param['labels']):\n InitialLayers.append(str(param['years'][0])+' '+variable)\n \n # Automatically set Map_width, Map_height. \n Map_width = \"360px\"\n Map_height = \"360px\"\n if (NumOfMaps <= 4):\n Map_width = \"400px\"\n Map_height = \"400px\"\n if (NumOfMaps <= 3):\n Map_width = \"450px\"\n Map_height = \"450px\"\n if (NumOfMaps <= 2):\n Map_width = \"500px\"\n Map_height = \"500px\"\n if (NumOfMaps <= 1):\n Map_width = \"700px\"\n Map_height = \"700px\"\n \n # replace newly computed \"NumOfMaps\", \"InitialLayers\", \"Map_width\", \"Map_height\" in CONFIG.js. See the example replacement below\n '''\n NumOfMaps : 4 -> 'var NumOfMaps = 4;'\n InitialLayers : [ … ] -> 'var InitialLayers = [\"1980 p_nonhisp_white_persons\", \"1980 p_nonhisp_black_persons\", \"1980 p_hispanic_persons\", … ];'\n Map_width : \"400px\" -> 'var Map_width = \"400px\";'\n Map_height : \"400px\" -> 'var Map_height = \"400px\";'\n '''\n NumOfMaps = \"var NumOfMaps = \" + str(NumOfMaps) + \";\"\n InitialLayers = \"var InitialLayers = \" + json.dumps(InitialLayers) + \";\"\n Map_width = 'var Map_width = \"' + Map_width + '\";'\n Map_height = 'var Map_height = \"' + Map_height + '\";'\n \n contents = contents.replace(\"var NumOfMaps = 1;\", NumOfMaps)\n contents = contents.replace(\"var InitialLayers = [];\", InitialLayers)\n contents = contents.replace('var Map_width = \"400px\";', Map_width)\n contents = contents.replace('var Map_height = \"400px\";', Map_height)\n \n chart = param['chart'] if 'chart' in param else ''\n #print('chart: ' + chart )\n #print(chart == \"Stacked Chart\")\n \n Stacked_Chart = \"var Stacked_Chart = false;\"\n Correlogram = \"var Correlogram = false;\"\n Scatter_Plot = \"var Scatter_Plot = false;\"\n Parallel_Coordinates_Plot = \"var Parallel_Coordinates_Plot = false;\"\n \n if (chart == \"Stacked Chart\"): Stacked_Chart = \"var Stacked_Chart = true;\"\n elif (chart == \"Correlogram\"): Correlogram = \"var Correlogram = true;\"\n elif (chart == \"Scatter Plot\"): Scatter_Plot = \"var Scatter_Plot = true;\"\n elif (chart == \"Parallel Coordinates Plot\"): Parallel_Coordinates_Plot = \"var Parallel_Coordinates_Plot = true;\"\n else: Stacked_Chart = \"var Stacked_Chart = false;\"\n \n contents = contents.replace(\"var Stacked_Chart = false;\", Stacked_Chart)\n contents = contents.replace(\"var Correlogram = false;\", Correlogram)\n contents = contents.replace(\"var Scatter_Plot = false;\", Scatter_Plot)\n contents = contents.replace(\"var Parallel_Coordinates_Plot = false;\", Parallel_Coordinates_Plot)\n\n #Write output including the replacement above\n filename_CONFIG = \"ACM_\" + param['filename_suffix'] + \"/data/CONFIG_\"+param['filename_suffix']+\".js\"\n ofile = open(filename_CONFIG, 'w')\n ofile.write(contents)\n ofile.close()\n\n\ndef write_GEO_JSON_js(community, param):\n # query geometry for each tract\n geoid = community.gdf.columns[0]\n tracts = community.gdf[[geoid, 'geometry']].copy()\n 
tracts.drop_duplicates(subset=geoid, inplace=True) # get unique geoid\n #print(tracts)\n \n # open GEO_JSON.js write heading for geojson format\n filename_GEO_JSON = \"ACM_\" + param['filename_suffix'] + \"/data/GEO_JSON_\"+param['filename_suffix']+\".js\"\n ofile = open(filename_GEO_JSON, 'w')\n ofile.write('var GEO_JSON =\\n')\n ofile.write('{\"type\":\"FeatureCollection\", \"features\": [\\n')\n \n #Convert geometry in GEOJSONP to geojson format\n for tract in tracts.itertuples():\n feature = {\"type\":\"Feature\"}\n if (tract.geometry is None): # check is NaN?\n #print(tract.geometry)\n continue\n feature[\"geometry\"] = shapely.geometry.mapping(tract.geometry)\n #feature[\"properties\"] = {geoid: tract.__getattribute__(geoid), \"tractID\": tract.__getattribute__(geoid)}\n feature[\"properties\"] = {geoid: tract.__getattribute__(geoid)}\n ofile.write(json.dumps(feature)+',\\n')\n # complete the geojosn format by adding parenthesis at the end. \n ofile.write(']}\\n')\n ofile.close()\n\n\ndef write_VARIABLES_js(community, param):\n #print(param) \n geoid = community.gdf.columns[0]\n years = param['years']\n variables = param['variables']\n \n ## filtering by years\n #community.gdf = community.gdf[community.gdf.year.isin(years)]\n #print(community.gdf)\n #selectedCommunity = community.gdf[variables]\n #print(community.gdf)\n #return\n \n #make heading: community.gdf.columns[0] has \"geoid\" (string)\n heading = [geoid]\n for i, year in enumerate(years):\n for j, variable in enumerate(param['labels']):\n heading.append(str(year)+' '+variable)\n \n #Make Dictionary\n mydictionary = {} # key: geoid, value: variables by heading\n h = -1\n selectedColumns = [geoid]\n selectedColumns.extend(variables)\n #print(\"selectedColumns:\", type(selectedColumns), selectedColumns)\n for i, year in enumerate(years):\n aYearDF = community.gdf[community.gdf.year==year][selectedColumns]\n #print(year, type(aYearDF), aYearDF)\n for j, variable in enumerate(variables):\n h += 1\n for index, row in aYearDF.iterrows():\n #print(index, row)\n key = row[geoid]\n val = row[variable]\n if (math.isnan(val)): #converts Nan in GEOSNAP data to -9999\n #print(i, j, key, year, val)\n val = -9999\n if (key in mydictionary):\n value = mydictionary[key]\n value[h] = val\n else:\n value = [-9999] * (len(heading) - 1) \n value[h] = val\n mydictionary[key] = value\n \n #Select keys in the Dictionary and sort\n keys = list(mydictionary.keys())\n keys.sort()\n # use Keys and Dictionary created above and write them VARIABLES.js\n filename_VARIABLES = \"ACM_\" + param['filename_suffix'] + \"/data/VARIABLES_\"+param['filename_suffix']+\".js\"\n ofile = open(filename_VARIABLES, 'w')\n ofile.write('var GEO_VARIABLES =\\n')\n ofile.write('[\\n')\n ofile.write(' '+json.dumps(heading)+',\\n')\n for i, key in enumerate(keys):\n values = mydictionary[key]\n values.insert(0, key)\n #print(key, values)\n ofile.write(' '+json.dumps(values)+',\\n')\n ofile.write(']\\n')\n ofile.close()\n\n\ndef Adaptive_Choropleth_Mapper_viz(param):\n \n # convert year, variable to years, variables in the param\n if ('years' not in param and 'year' in param): param['years'] = [param['year']]\n if ('years' not in param and 'year' not in param and 'periods' in param): param['years'] = param['periods']\n if ('years' not in param and 'year' not in param and 'periods' not in param and 'period' in param): param['years'] = [param['period']]\n if ('variables' not in param and 'variable' in param): param['variables'] = [param['variable']]\n #print(param['years'])\n \n 
# select community by state_fips, msa_fips, county_fips\n community = None\n if ('msa_fips' in param and param['msa_fips']):\n community = Community.from_ltdb(years=param['years'], msa_fips=param['msa_fips'])\n #community = Community.from_ltdb(msa_fips=param['msa_fips'])\n elif ('county_fips' in param and param['county_fips']):\n community = Community.from_ltdb(years=param['years'], county_fips=param['county_fips'])\n elif ('state_fips' in param and param['state_fips']):\n community = Community.from_ltdb(years=param['years'], state_fips=param['state_fips'])\n #print(community.gdf)\n\n# if the user enters CSV and shapefile, use the files from the user\n\n#### This is executed when the user enter attributes in csv file and geometroy in shapefile ###################### \n if (community is None and 'inputCSV' in param):\n community = Community()\n #community.gdf = pd.read_csv(param['inputCSV'], dtype={'geoid':str})\n community.gdf = param[\"inputCSV\"]\n #print(community.gdf)\n geoid = community.gdf.columns[0]\n #community.gdf = community.gdf.astype(str)\n #print(\"inputCSV: \" + community.gdf.geoid) \n community.gdf[community.gdf.columns[0]] = community.gdf[geoid].astype(str)\n #print(\"community.gdf.columns[0]:\", community.gdf.columns[0])\n \n # read shape file to df_shape\n #df_shape = gpd.read_file(param['shapefile'])\n df_shape = param['shapefile']\n df_shape = df_shape.astype(str) \n #print(\"shapefile: \" + df_shape.GEOID10)\n geokey = df_shape.columns[0]\n #print(geokey) \n df_shape = df_shape.set_index(geokey)\n \n # insert geometry to community.gdf\n geometry = []\n for index, row in community.gdf.iterrows():\n tractid = row[geoid]\n try:\n tract = df_shape.loc[tractid]\n geometry.append(shapely.wkt.loads(tract.geometry))\n except KeyError:\n #print(\"Tract ID [{}] is not found in the shape file {}\".format(tractid, param['shapefile']))\n geometry.append(None)\n # print( \"geometry\" in community.gdf ) \n #f hasattr(community.gdf, \"geoemtry\"):\n #if (community.gdf[\"geoemtry\"] is None):\n # pass \n #else:\n if((\"geometry\" in community.gdf) == False):\n community.gdf.insert(len(community.gdf.columns), \"geometry\", geometry)\n community.gdf.rename(columns={'period':'year'}, inplace=True)\n #print(community.gdf)\n################################################################################################################ \n \n community.gdf = community.gdf.replace([np.inf, -np.inf], np.nan)\n # check if geometry is not null for Spatial Clustering \n community.gdf = community.gdf[pd.notnull(community.gdf['geometry'])]\n #print(community.gdf)\n\n codebook = pd.read_csv('template/conversion_table_codebook.csv')\n codebook.set_index(keys='variable', inplace=True)\n labels = copy.deepcopy(param['variables'])\n label = 'short_name' # default\n if (param['label'] == 'variable'): label = 'variable'\n if (param['label'] == 'full_name'): label = 'full_name'\n if (param['label'] == 'short_name'): label = 'short_name'\n if (label != 'variable'):\n for idx, variable in enumerate(param['variables']):\n try:\n codeRec = codebook.loc[variable]\n labels[idx] = codeRec[label]\n except:\n print(\"variable not found in codebook. 
variable:\", variable)\n param['labels'] = labels\n \n write_INDEX_html(param)\n write_CONFIG_js(param)\n write_VARIABLES_js(community, param)\n write_GEO_JSON_js(community, param)\n \n '''\n #Create directory for local machine\n local_dir = os.path.dirname(os.path.realpath(__file__))\n fname =urllib.parse.quote('index.html')\n template_dir = os.path.join(local_dir, 'ACM_' + param['filename_suffix'])\n url = 'file:' + os.path.join(template_dir, fname)\n webbrowser.open(url)\n \n print('Please run ' + '\"ACM_' + param['filename_suffix']+'/index.html\"'+' to your web browser.')\n print('Advanced options are available in ' + '\"ACM_' + param['filename_suffix']+'/data/CONFIG.js\"')\n '''\n\n #Create directory for Visualization \n servers = list(notebookapp.list_running_servers())\n servers1 = 'https://cybergisx.cigi.illinois.edu'+servers[0][\"base_url\"]+ 'view'\n servers2 = 'https://cybergisx.cigi.illinois.edu'+servers[0][\"base_url\"]+ 'edit' \n cwd = os.getcwd()\n prefix_cwd = \"/home/jovyan/work\"\n cwd = cwd.replace(prefix_cwd, \"\")\n \n # This is for Jupyter notebbok installed in your PC\n local_dir1 = cwd\n local_dir2 = cwd \n \n #This is for CyberGISX. Uncomment two command lines below when you run in CyberGIX Environment\n #local_dir1 = servers1 + cwd\n #local_dir2 = servers2 + cwd \n \n \n #print(local_dir)\n fname =urllib.parse.quote('index.html')\n template_dir = os.path.join(local_dir1, 'ACM_' + param['filename_suffix'])\n #url = 'file:' + os.path.join(template_dir, fname)\n url = os.path.join(template_dir, fname) \n webbrowser.open(url)\n print('To see your visualization, click the URL below (or locate the files):')\n print(url) \n print('Advanced options are available in ') \n print(local_dir2 + '/'+ 'ACM_' + param['filename_suffix']+'/data/CONFIG_' + param['filename_suffix']+'.js') \n \n \nif __name__ == '__main__':\n started_datetime = datetime.now()\n dateYYMMDD = started_datetime.strftime('%Y%m%d')\n timeHHMMSS = started_datetime.strftime('%H%M%S')\n print('This program started at %s %s' % (started_datetime.strftime('%Y-%m-%d'), started_datetime.strftime('%H:%M:%S')))\n \n #sample = \"downloads/LTDB_Std_All_Sample.zip\"\n #full = \"downloads/LTDB_Std_All_fullcount.zip\"\n #store_ltdb(sample=sample, fullcount=full)\n #store_census()\n #geosnap.io.store_census()\n\n input_attributes = pd.read_csv(\"attributes/Chicago_1980_1990_2000_2010.csv\", dtype={'geoid':str})\n input_attributes = input_attributes.rename(columns={'geoid': 'tractID'})\n shapefile = gpd.read_file(\"shp/Cook_County_Tract.shp\")\n shapefile = shapefile.rename(columns={'GEOID10': 'tractID'})\n\n param = {\n 'title': \"Adaptive Choropleth Mapper with Scatter Plot\",\n 'filename_suffix': \"Chicago_ACM_Scatter\",\n 'inputCSV': input_attributes, \n 'shapefile': shapefile, \n 'year': 2000,\n 'label': \"short_name\", #Pick variable,short_name,full_name from template/conversion_table_codebook.csv \n 'variables': [ #enter variable names of the column you selected above.\n \"p_nonhisp_white_persons\",\n \"p_nonhisp_black_persons\",\n \"p_hispanic_persons\",\n \"p_asian_persons\",\n \"p_foreign_born_pop\",\n \"p_edu_college_greater\",\n \"p_unemployment_rate\",\n \"p_employed_manufacturing\",\n \"p_poverty_rate\",\n \"p_vacant_housing_units\",\n \"p_owner_occupied_units\",\n \"p_housing_units_multiunit_structures\",\n \"median_home_value\",\n \"p_structures_30_old\",\n \"p_household_recent_move\",\n \"p_persons_under_18\",\n \"p_persons_over_60\", \n ],\n 'chart': \"Scatter Plot\", \n }\n \n input_attributes = 
pd.read_csv(\"attributes/Chicago_1980_1990_2000_2010.csv\", dtype={'geoid':str})\n input_attributes = input_attributes.rename(columns={'geoid': 'tractID'})\n shapefile = gpd.read_file(\"shp/Cook_County_Tract.shp\")\n shapefile = shapefile.rename(columns={'GEOID10': 'tractID'})\n\n param1 = {\n 'title': \"Adaptive Choropleth Mapper with Correlogram\",\n 'filename_suffix': \"Chicago_ACM_Correlogram\",\n 'inputCSV': input_attributes, \n 'shapefile': shapefile,\n 'period': 2010,\n 'NumOfMaps': 4, \n 'label': \"short_name\", #Pick variable,short_name,full_name from template/conversion_table_codebook.csv \n 'variables': [ #enter variable names of the column you selected above.\n \"p_nonhisp_white_persons\",\n \"p_nonhisp_black_persons\",\n \"p_hispanic_persons\",\n \"p_asian_persons\", \n \"p_other_language\",\n \"p_female_headed_families\",\n \"median_income_blackhh\",\n \"median_income_hispanichh\",\n \"median_income_asianhh\",\n \"per_capita_income\", \n ],\n 'chart': \"Correlogram\", \n }\n \n input_attributes = pd.read_csv(\"attributes/Copy of San_Diego_ACS_2010.csv\", dtype={'geoid':str})\n shapefile = gpd.read_file(\"shp/San_Diego2010.shp\")\n \n param2 = {\n 'title': \"Adaptive Choropleth Mapper with Correlogram\",\n 'filename_suffix': \"SD_correlogram\",\n 'state_fips': None,\n 'msa_fips': \"41740\", #For more options: http://osnap.cloud/~suhan/LNE/pick_POI.html\n 'county_fips': None,\n 'year': 2000,\n 'NumOfMaps': 6,\n 'variables': [\n \"p_other_language\",\n \"p_female_headed_families\",\n \"median_income_blackhh\",\n \"median_income_hispanichh\",\n \"median_income_asianhh\",\n \"per_capita_income\", \n ],\n 'chart': \"Correlogram\",\n 'label': \"short_name\", # variable, short_name or full_name\n }\n input_attributes = pd.read_csv(\"attributes/Copy of San_Diego_ACS_2010.csv\", dtype={'geoid':str})\n shapefile = gpd.read_file(\"shp/San_Diego2010.shp\")\n\n param3 = {\n 'title': \"Adaptive Choropleth Mapper with Correlogram\",\n 'filename_suffix': \"SD_correlogram_from_csv\",\n 'inputCSV': input_attributes, \n 'shapefile': shapefile,\n 'year': 2000,\n 'NumOfMaps': 6,\n 'variables': [\n \"p_other_language\",\n \"p_female_headed_families\",\n \"median_income_blackhh\",\n \"median_income_hispanichh\",\n \"median_income_asianhh\",\n \"per_capita_income\", \n ],\n 'label': \"short_name\", # variable, short_name or full_name\n #'chart': \"Stacked Chart\", \n #'chart': \"Correlogram\",\n #'chart': \"Scatter Plot\", \n #'chart': \"Parallel Coordinates Plot\", \n }\n \n Adaptive_Choropleth_Mapper_viz(param)\n \n ended_datetime = datetime.now()\n elapsed = ended_datetime - started_datetime\n total_seconds = int(elapsed.total_seconds())\n hours, remainder = divmod(total_seconds,60*60)\n minutes, seconds = divmod(remainder,60) \n print('This program ended at %s %s Elapsed %02d:%02d:%02d' % (ended_datetime.strftime('%Y-%m-%d'), ended_datetime.strftime('%H:%M:%S'), hours, minutes, seconds))\n ","sub_path":"PYTHON_Quantitative_Data_VIZ/Adaptive_Choropleth_Mapper.py","file_name":"Adaptive_Choropleth_Mapper.py","file_ext":"py","file_size_in_byte":21992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
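A note on write_GEO_JSON_js in the record above: each feature is written as json.dumps(feature) plus a comma, so the generated file carries a trailing comma before the closing ']}'. That is legal in the JavaScript object literal it becomes ('var GEO_JSON = ...') but it is not strict JSON. A hedged sketch of the same geometry-to-GeoJSON conversion built in memory instead; the geoid and the toy Point geometry are illustrative stand-ins, not values from the module:

    import json
    import shapely.geometry
    from shapely.geometry import Point

    features = []
    for geoid, geom in [("001", Point(0, 0).buffer(1.0))]:  # stand-in for tract rows
        if geom is None:                  # skip missing geometry, as the module does
            continue
        features.append({
            "type": "Feature",
            "geometry": shapely.geometry.mapping(geom),
            "properties": {"geoid": geoid},
        })
    geo_json = {"type": "FeatureCollection", "features": features}
    print("var GEO_JSON =", json.dumps(geo_json)[:60], "...")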
+{"seq_id":"127060960","text":"# coding=utf-8\nimport base64\nimport colorsys\nimport json\nimport os\nimport sys\nimport threading\nimport time\nfrom timeit import default_timer as timer\n\nimport cv2\nimport numpy as np\nimport redis\nfrom keras import backend as K\nfrom keras.layers import Input\nfrom keras.models import load_model\nfrom keras.utils import multi_gpu_model\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom yolo3.model import tiny_yolo_body, yolo_body, yolo_eval\nfrom yolo3.utils import letterbox_image\nimport gParam\nfrom utils.utils import setting\n\n# Multi-object detection, YOLOv3 version\nclass Smoking_yolo3():\n    \"\"\"Smoking detection class\n    \"\"\"\n    _defaults = {\n        \"score\" : 0.7,\n        \"iou\" : 0.75,\n        \"model_image_size\" : (416, 416),\n        \"gpu_num\" : 0,\n        \"VOC_LABELS\": {\n            'smoking': (0, 'smoking'),\n            'cigarette': (1, 'cigarette')\n        } \n    }\n\n    @classmethod\n    def get_defaults(cls, n):\n        if n in cls._defaults:\n            return cls._defaults[n]\n        else:\n            return \"Unrecognized attribute name '\" + n + \"'\"\n\n    def __init__(self, h5_file, name='wtx'):\n        \"\"\"\n        args: \n            h5_file: string, path to the weights file\n            name: string, determines the initial redis storage key\n        \n        self.name: string, the key used when storing into the redis cache; a default is set at init and is updated later from messages sent by the robot platform\n        self.period: int, detection period, defaults to 2s at init\n        \"\"\"\n        K.clear_session()\n        self.__dict__.update(self._defaults) # set up default values\n\n        self.setting_path = gParam.Setting_Json\n        self.videosplit_path = gParam.VideoSplit_Path\n        self.smoking_path = gParam.Smoking_Path \n\n        self.classes_path = gParam.Smoking_Classes\n        if 'tiny' in h5_file:\n            self.model_path = gParam.Smoking_Model\n            self.anchors_path = gParam.Smoking_Anchors\n        else:\n            self.model_path = gParam.Smoking_Model\n            self.anchors_path = gParam.Smoking_Anchors\n        self.class_names = self._get_class()\n        self.anchors = self._get_anchors()\n        self.sess = K.get_session()\n        self.boxes, self.scores, self.classes = self.generate()\n\n    def _get_class(self):\n        classes_path = os.path.expanduser(self.classes_path)\n        with open(classes_path) as f:\n            class_names = f.readlines()\n        class_names = [c.strip() for c in class_names]\n        return class_names\n\n    def _get_anchors(self):\n        anchors_path = os.path.expanduser(self.anchors_path)\n        with open(anchors_path) as f:\n            anchors = f.readline()\n        anchors = [float(x) for x in anchors.split(',')]\n        return np.array(anchors).reshape(-1, 2)\n\n    def generate(self):\n        model_path = os.path.expanduser(self.model_path)\n        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n        # Load model, or construct model and load weights.\n        num_anchors = len(self.anchors)\n        num_classes = len(self.class_names)\n        is_tiny_version = num_anchors==6 # default setting\n        try:\n            self.yolo_model = load_model(model_path, compile=False)\n        except:\n            self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n                if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n            self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n        else:\n            assert self.yolo_model.layers[-1].output_shape[-1] == \\\n                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n                'Mismatch between model and given anchor and class sizes'\n\n        print('{} model, anchors, and classes loaded.'.format(model_path))\n\n        # Generate colors for drawing bounding boxes.\n        hsv_tuples = [(x / len(self.class_names), 1., 1.)\n                      for x in range(len(self.class_names))]\n        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n        self.colors = list(\n            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n                self.colors))\n        np.random.seed(10101) # Fixed seed for consistent colors across runs.\n        np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n        np.random.seed(None) # Reset seed to default.\n\n        # Generate output tensor targets for filtered bounding boxes.\n        self.input_image_shape = K.placeholder(shape=(2, ))\n        if self.gpu_num>=2:\n            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n        boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n                len(self.class_names), self.input_image_shape,\n                score_threshold=self.score, iou_threshold=self.iou)\n        return boxes, scores, classes\n\n    def detect_image(self, image):\n        \"\"\"\n        Run recognition on an image with the YOLOv3 network.\n        Note: for cv2, h, w, channel = shape[0], shape[1], shape[2]\n        args:\n            image: ndarray\n        returns:\n            image: ndarray\n            out_boxes: \n            out_scores: \n            out_classes: \n\n        \"\"\"\n        start = timer()\n        if self.model_image_size != (None, None):\n            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n        else:\n            # this branch also had to be adapted\n            # new_image_size = (image.width - (image.width % 32),\n            #                   image.height - (image.height % 32))\n            new_image_size = (image.height-(image.height%32),\n                              image.width-(image.width%32))\n            boxed_image = letterbox_image(image, new_image_size)\n        image_data = np.array(boxed_image, dtype='float32')\n\n        image_data /= 255.\n        image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n        out_boxes, out_scores, out_classes = self.sess.run(\n            [self.boxes, self.scores, self.classes],\n            feed_dict={\n                self.yolo_model.input: image_data,\n                self.input_image_shape: [image.size[1], image.size[0]],\n                # K.learning_phase(): 0\n            })\n        # return out_boxes, out_scores, out_classes\n        font = ImageFont.truetype(font=gParam.Font_Path+'/FiraMono-Medium.otf',\n                    size=np.floor(2e-2 * image.size[1] + 0.5).astype('int32'))\n        thickness = (image.size[0] + image.size[1]) // 600\n        for i, c in reversed(list(enumerate(out_classes))):\n            predicted_class = self.class_names[c]\n            box = out_boxes[i]\n            score = out_scores[i]\n\n            label = '{} {:.2f}'.format(predicted_class, score)\n            draw = ImageDraw.Draw(image)\n            label_size = draw.textsize(label, font)\n\n            top, left, bottom, right = box\n            top = max(0, np.floor(top + 0.5).astype('int32'))\n            left = max(0, np.floor(left + 0.5).astype('int32'))\n            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n\n            if top - label_size[1] >= 0:\n                text_origin = np.array([left, top - label_size[1]])\n            else:\n                text_origin = np.array([left, top + 1])\n\n            # My kingdom for a good redistributable image drawing library.\n            for i in range(thickness):\n                draw.rectangle(\n                    [left + i, top + i, right - i, bottom - i],\n                    outline=self.colors[c])\n            draw.rectangle(\n                [tuple(text_origin), tuple(text_origin + label_size)],\n                fill=self.colors[c])\n            draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n            del draw\n        \n        end = timer()\n        # print('Time:', end-start)\n        return image, out_boxes, out_scores, out_classes\n\n    def getclass(self, index):\n        for label in self.VOC_LABELS:\n            if self.VOC_LABELS[label][0] == index:\n                return label\n\n    def event_smoking(self):\n        \"\"\"The logic of event_smoking:\n        find the newest image in the screenshot directory (videosplit_path), run detection on it, and return the detection result\n        returns:\n            result: dict, empty if no image was detected\n        \"\"\"\n        lists = os.listdir(self.videosplit_path)\n        try:\n            lists.sort(key=lambda fn: os.path.getmtime(self.videosplit_path + \"/\" + fn))\n            frame = os.path.join(self.videosplit_path, lists[-1])\n            frame = cv2.imread(frame)\n            print(\"smokings----image entering detection:\", lists[-1]) \n        except:\n            return 'no image available to detect', {}\n\n        try:\n            frame = np.array(frame).astype(np.int32)\n            image = cv2.cvtColor(frame.astype('uint8'), cv2.COLOR_RGB2BGR)\n            image = Image.fromarray(image).convert('RGB')\n        except:\n            print(\"smoking----bug\")\n\n        image, rbboxes, rscores, rclasses = self.detect_image(image)\n        sw_ret = {}\n\n        for x in range(len(rclasses)):\n            mclass = self.getclass(rclasses[x])\n            if mclass in sw_ret.keys():\n                sw_ret[mclass] = sw_ret[mclass] + 1\n            else:\n                sw_ret[mclass] = 1\n        \n        vfile = self.smoking_path+str(lists[-1])\n        sw_ret['file'] = vfile\n        sw_ret['cam'] = str(lists[-1])\n\n        image.save(vfile)\n        return sw_ret\n","sub_path":"src/smokingins.py","file_name":"smokingins.py","file_ext":"py","file_size_in_byte":9777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
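event_smoking above sorts a directory listing by modification time to pick the newest screenshot. That pattern is worth isolating; a small sketch (newest_file is a hypothetical helper for illustration, not part of the module):

    import os

    def newest_file(directory):
        # return the most recently modified entry in `directory`, or None if empty
        names = os.listdir(directory)
        if not names:
            return None
        names.sort(key=lambda fn: os.path.getmtime(os.path.join(directory, fn)))
        return os.path.join(directory, names[-1])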
+{"seq_id":"239892338","text":"#\n# @lc app=leetcode.cn id=1 lang=python3\n#\n# [1] Two Sum\n#\nfrom typing import List\n\n# @lc code=start\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        d = {}\n        for i, n in enumerate(nums):\n            if target - n in d:\n                return [i, d[target - n]]\n            elif n not in d:\n                d[n] = i\n# @lc code=end\n\n","sub_path":"Week_02/1.两数之和.py","file_name":"1.两数之和.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
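The twoSum record above returns the index pair as [current index, earlier index]. A quick hand-check of that behavior (assumes the Solution class from the record is in scope):

    # nums=[2, 7, 11, 15], target=9: after seeing 2 the dict is {2: 0};
    # at i=1, 9-7=2 is already present, so [1, 0] comes back.
    assert Solution().twoSum([2, 7, 11, 15], 9) == [1, 0]
    assert Solution().twoSum([3, 3], 6) == [1, 0]   # duplicates still resolve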
+{"seq_id":"431118250","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0022_prize_hover_description'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PrizeCompetitionRef',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('prize_type', models.CharField(max_length=2, choices=[(b'rv', b'Random vote'), (b'bp', b'Best photo')])),\n ],\n ),\n migrations.RemoveField(\n model_name='competition',\n name='best_photo_prize',\n ),\n migrations.RemoveField(\n model_name='competition',\n name='prizes',\n ),\n migrations.RemoveField(\n model_name='competition',\n name='random_voter_prize',\n ),\n migrations.AddField(\n model_name='prizecompetitionref',\n name='competition',\n field=models.ForeignKey(to='main.Competition'),\n ),\n migrations.AddField(\n model_name='prizecompetitionref',\n name='prize',\n field=models.ForeignKey(to='main.Prize'),\n ),\n ]\n","sub_path":"main/migrations/0023_auto_20150612_1530.py","file_name":"0023_auto_20150612_1530.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"150221980","text":"\"\"\"\nGiven a binary tree, flatten it to a linked list in-place.\n\nFor example,\nGiven\n\n 1\n / \\\n 2 5\n / \\ \\\n 3 4 6\nThe flattened tree should look like:\n 1\n \\\n 2\n \\\n 3\n \\\n 4\n \\\n 5\n \\\n 6\n\"\"\"\n\nfrom . import TreeNode\n\ndef flatten(root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n if not root:\n return\n flatten_helper(root)\n\n\ndef flatten_helper(root):\n if not root or (not root.left and not root.right):\n return root\n left_last = flatten_helper(root.left)\n right_last = flatten_helper(root.right)\n if left_last:\n left_last.right = root.right\n root.right = root.left\n root.left = None\n return right_last or left_last","sub_path":"leetcode/flatten_binary_tree_to_linked_list.py","file_name":"flatten_binary_tree_to_linked_list.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
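A quick check of the flatten record above on the tree from its docstring. The TreeNode here is a minimal stand-in for the package's own class (imported via 'from . import TreeNode' and not shown), and flatten is assumed in scope:

    class TreeNode:                      # minimal stand-in, assumed shape
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right

    root = TreeNode(1,
                    TreeNode(2, TreeNode(3), TreeNode(4)),
                    TreeNode(5, None, TreeNode(6)))
    flatten(root)
    seen = []
    while root:                          # walk the right-pointer "linked list"
        seen.append(root.val)
        root = root.right
    assert seen == [1, 2, 3, 4, 5, 6]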
+{"seq_id":"200102454","text":"from bot import AbstractChatPlugin\n\n\nclass ChatPlugin(AbstractChatPlugin):\n def call(self, event):\n if event[0] != 4:\n return\n if (event[3] - 2000000000) not in self.bot.config['chat_titles'].keys():\n return\n if 'source_act' not in event[7]:\n return\n if event[7]['source_act'] != 'chat_title_update':\n return\n if int(event[7]['from']) == self.bot.bot_id:\n return\n print(event)\n self.bot.vkapi.messages.editChat(chat_id=event[3] - 2000000000,\n title=self.bot.config['chat_titles'][event[3] - 2000000000])\n","sub_path":"chatplugins/forcechattitle.py","file_name":"forcechattitle.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"218732527","text":"from ..core.aggregate import BaseBar, BaseChoropleth, BaseLine, BaseDataSizeIndicator\n\nimport pandas as pd\nimport numpy as np\nfrom typing import Type\nfrom bokeh import events\nfrom bokeh.plotting import figure\nimport bokeh\nfrom bokeh.models.annotations import Title\nfrom bokeh.models import ColumnDataSource, LinearColorMapper, LogColorMapper, ColorBar, BasicTicker, PrintfTickFormatter, HoverTool, BoxSelectTool\n\nclass Bar(BaseBar):\n \"\"\"\n Description:\n \"\"\"\n reset_event = events.Reset\n data_y_axis = 'top'\n data_x_axis = 'x'\n \n\n def format_source_data(self, source_dict, patch_update=False):\n \"\"\"\n Description:\n format source\n \n Input:\n source_dict = {\n 'X': [],\n 'Y': []\n }\n \n\n Ouput:\n \"\"\"\n range_x_origin = [round(x,4) for x in source_dict['X']]\n range_x = []\n \n if self.max_value < 1:\n \"\"\"\n handling labels in bokeh plots when max value is under 1\n \"\"\"\n range_x = [int(x*100) for x in range_x_origin]\n if self.x_label_map is None:\n temp_mapper_index = list(range(int(round(self.min_value)),int(round(self.max_value))*100+1))\n temp_mapper_value = [str(x/100) for x in temp_mapper_index]\n self.x_label_map = dict(zip(temp_mapper_index, temp_mapper_value))\n else:\n range_x = range_x_origin\n \n if patch_update == False:\n self.source = ColumnDataSource(dict(x=np.array(range_x), top=np.array(source_dict['Y'])))\n self.source_backup = self.source.to_df()\n else:\n patch_dict = {\n self.data_y_axis: [(slice(len(source_dict['Y'])), np.array(source_dict['Y']))],\n }\n self.source.patch(patch_dict)\n\n def get_source_y_axis(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if self.source is not None:\n return self.source.data[self.data_y_axis] #return list\n return self.source\n \n def generate_chart(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n self.chart = figure(title=self.x, tools=\"pan, wheel_zoom, reset\", active_scroll='wheel_zoom', active_drag='pan')\n self.chart.vbar(x=self.data_x_axis, top=self.data_y_axis, width=0.9, source = self.source, color=self.color)\n # self.chart.toolbar.active_drag = 'auto'\n # self.chart.toolbar.active_scroll = 'auto'\n\n\n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n def apply_mappers(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if self.x_label_map is not None:\n self.chart.xaxis.major_label_overrides = self.x_label_map\n if self.y_label_map is not None:\n self.chart.yaxis.major_label_overrides = self.y_label_map\n\n def reload_chart(self, data, patch_update=True):\n \"\"\"\n Description: \n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n self.calculate_source(data, patch_update=patch_update)\n \n\n def reset_chart(self, data:np.array=np.array([])):\n \"\"\"\n Description: \n if len(data) is 0, reset the chart using self.source_backup\n \n Input:\n data = list() --> update self.data_y_axis in self.source\n \n\n Ouput:\n \"\"\"\n if data.size == 0:\n data = self.source_backup[self.data_y_axis] #np array\n \n #verifying length is same as x axis\n x_axis_len = self.source.data[self.data_x_axis].size\n data = data[:x_axis_len]\n\n patch_dict = {\n self.data_y_axis: [(slice(data.size), data)],\n }\n self.source.patch(patch_dict)\n\nclass Line(BaseLine):\n \"\"\"\n Description:\n \"\"\"\n reset_event = events.Reset\n data_y_axis = 
'y'\n data_x_axis = 'x'\n\n\n def format_source_data(self, source_dict, patch_update=False):\n \"\"\"\n Description:\n format source\n \n Input:\n source_dict = {\n 'X': [],\n 'Y': []\n }\n \n\n Ouput:\n \"\"\"\n range_x_origin = [round(x,4) for x in source_dict['X']]\n range_x = []\n \n if self.max_value < 1:\n \"\"\"\n handling labels in bokeh plots when max value is under 1\n \"\"\"\n range_x = [int(x*100) for x in range_x_origin]\n if self.x_label_map is None:\n temp_mapper_index = list(range(int(round(self.min_value)),int(round(self.max_value))*100+1))\n temp_mapper_value = [str(x/100) for x in temp_mapper_index]\n self.x_label_map = dict(zip(temp_mapper_index, temp_mapper_value))\n else:\n range_x = range_x_origin\n \n if patch_update == False:\n self.source = ColumnDataSource(dict(x=np.array(range_x), y=np.array(source_dict['Y'])))\n self.source_backup = self.source.to_df()\n else:\n patch_dict = {\n self.data_y_axis: [(slice(len(source_dict['Y'])), np.array(source_dict['Y']))],\n }\n self.source.patch(patch_dict)\n\n def get_source_y_axis(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if self.source is not None:\n return self.source.data[self.data_y_axis] #return list\n return self.source\n \n def generate_chart(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n\n self.chart = figure(title=self.x, tools=\" pan, wheel_zoom, reset\", active_scroll='wheel_zoom', active_drag='pan')\n self.chart.line(x=self.data_x_axis, y=self.data_y_axis, source = self.source, color=self.color)\n\n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n\n def apply_mappers(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if self.x_label_map is not None:\n self.chart.xaxis.major_label_overrides = self.x_label_map\n if self.y_label_map is not None:\n self.chart.yaxis.major_label_overrides = self.y_label_map\n\n def reload_chart(self, data, patch_update=True):\n \"\"\"\n Description: \n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n self.calculate_source(data, patch_update=patch_update)\n\n\n def reset_chart(self, data:np.array=np.array([])):\n \"\"\"\n Description: \n if len(data) is 0, reset the chart using self.source_backup\n \n Input:\n data = list() --> update self.data_y_axis in self.source\n \n\n Ouput:\n \"\"\"\n if data.size == 0:\n data = self.source_backup[self.data_y_axis] #np array\n \n #verifying length is same as x axis\n x_axis_len = self.source.data[self.data_x_axis].size\n data = data[:x_axis_len]\n\n patch_dict = {\n self.data_y_axis: [(slice(data.size), data)],\n }\n self.source.patch(patch_dict)\n\n\nclass Choropleth(BaseChoropleth):\n reset_event = None #reset event handling not required, as the default behavior unselects all selected points, and that is already taken care of\n data_y_axis = 'rates'\n data_x_axis = 'x'\n\n def format_source_data(self, source_dict, patch_update= False):\n \"\"\"format source\n\n Parameters:\n ---\n source_dict : {\n 'X': [],\n 'Y': []\n }\n \n\n Ouput:\n ---\n \"\"\"\n self.source: Type[ColumnDataSource]\n\n res_df = pd.DataFrame(source_dict)\n \n\n if patch_update == False:\n lats = []\n longs = []\n rates = []\n prop = []\n for i in self.geo_mapper:\n for polygon in self.geo_mapper[i]:\n lat, long = np.array(polygon[0]).T.tolist()\n prop.append(i)\n lats.append(lat)\n longs.append(long)\n if i in 
source_dict['X']:\n rates.append(res_df.loc[res_df['X'] == i, 'Y'].iloc[0])\n else:\n rates.append(np.nan)\n rates = np.array(rates)\n\n self.source = ColumnDataSource({self.data_x_axis:np.array([]), 'xs':np.array([]), 'ys':np.array([]), self.data_y_axis:np.array([])})\n data = {\n self.data_x_axis:np.array(prop),\n 'xs':np.array(lats), 'ys':np.array(longs),\n self.data_y_axis:rates\n }\n self.source.stream(data)\n\n else:\n rates = []\n for i in source_dict['X']:\n if i in self.geo_mapper:\n temp_list = [res_df.loc[res_df['X'] == float(i), 'Y'].iloc[0]]*len(self.geo_mapper[i])\n rates = rates+temp_list\n rates = np.array(rates)\n patch_dict = {\n self.data_y_axis: [(slice(len(rates)), rates)],\n }\n self.source.patch(patch_dict)\n\n def get_source_y_axis(self):\n \"\"\"\n Parameters:\n ---\n \n\n Ouput:\n ---\n \"\"\"\n if self.source is not None:\n unique_x_axis = np.unique(self.source.data[self.data_x_axis]).tolist()\n # unique_y_axis = .unique(self.source.data[self.data_y_axis])\n return_val = np.zeros(self.data_points)\n for index, x in enumerate(unique_x_axis):\n return_val[int(x)] = self.source.data[self.data_y_axis][int(x)]\n return return_val#return np array\n return self.source\n \n def generate_chart(self):\n \"\"\"\n Parameters:\n ---\n \n\n Ouput:\n ---\n \"\"\"\n if self.geo_color_palette is None:\n self.geo_color_palette = bokeh.palettes.Purples9\n\n mapper = LinearColorMapper(palette=self.geo_color_palette, nan_color=self.nan_color, low=np.nanmin(self.source.data[self.data_y_axis]),high=np.nanmax(self.source.data[self.data_y_axis]))\n\n tooltips_r = [\n (self.x,\"@\"+self.data_x_axis),\n (self.data_y_axis, \"@\"+self.data_y_axis)\n ]\n\n self.chart = figure(title=\"Geo Map for \"+self.name, toolbar_location=\"left\", tooltips=tooltips_r, tools=\"hover, pan, wheel_zoom, tap, reset\",\n active_scroll='wheel_zoom', active_drag='pan',\n **self.library_specific_params)\n\n patch = self.chart.patches(xs='xs', ys='ys',source=self.source,\n fill_color={'field':self.data_y_axis, 'transform':mapper})\n\n\n color_bar = ColorBar(color_mapper=mapper, major_label_text_font_size=\"7pt\",\n ticker=BasicTicker(desired_num_ticks=10),\n formatter=PrintfTickFormatter(format=\"%f\"),\n label_standoff=6, border_line_color=None, location=(0, 0))\n \n self.chart.add_layout(color_bar, 'left')\n \n self.source = patch.data_source\n self.source_backup = self.source.data.copy()\n\n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n\n def apply_mappers(self):\n \"\"\"\n Parameters:\n ---\n \n\n Ouput:\n ---\n \"\"\"\n if self.x_label_map is not None:\n self.chart.xaxis.major_label_overrides = self.x_label_map\n if self.y_label_map is not None:\n self.chart.yaxis.major_label_overrides = self.y_label_map\n\n def reload_chart(self, data, patch_update=True):\n \"\"\"\n Parameters:\n ---\n \n\n Ouput:\n ---\n \"\"\"\n self.calculate_source(data, patch_update=patch_update)\n \n\n def reset_chart(self, data:np.array = np.array([])):\n \"\"\"if len(data) is 0, reset the chart using self.source_backup\n\n Parameters:\n ---\n data: list()\n update self.data_y_axis in self.source\n\n Ouput:\n ---\n\n \"\"\"\n if data.size == 0:\n data = self.source_backup[self.data_y_axis].tolist()\n \n #verifying length is same as x axis\n x_axis_len = self.source.data[self.data_x_axis].size\n data = data[:x_axis_len]\n\n rates = []\n for i in 
range(data.size):\n if i in self.geo_mapper:\n temp_list = [data[i]]*len(self.geo_mapper[i])\n rates = rates+temp_list\n rates = np.array(rates)\n patch_dict = {\n self.data_y_axis: [(slice(len(rates)), rates)],\n }\n\n self.source.patch(patch_dict)\n\n def map_indices_to_values(self, indices:list):\n \"\"\"\n Parameters:\n ---\n \n\n Ouput:\n ---\n \"\"\"\n list_final = []\n for n in indices:\n list_final.append(int(self.source.data[self.data_x_axis][n]))\n return list_final\n\n def get_selected_indices(self):\n \"\"\"\n Parameters:\n ---\n \n\n Ouput:\n ---\n \"\"\"\n return self.map_indices_to_values(self.source.selected.indices)\n\n def add_selection_event(self, callback):\n \"\"\"\n Parameters:\n ---\n \n\n Ouput:\n ---\n \"\"\"\n def temp_callback(attr, old, new):\n old = self.map_indices_to_values(old)\n new = self.map_indices_to_values(new)\n callback(old, new)\n \n self.source.selected.on_change('indices', temp_callback)\n\n\nclass DataSizeIndicator(BaseDataSizeIndicator):\n \"\"\"\n Description:\n \"\"\"\n data_y_axis = 'right'\n data_x_axis = 'y'\n\n\n def format_source_data(self, source_dict, patch_update=False):\n \"\"\"\n Description:\n format source\n \n Input:\n source_dict = {\n 'X': [],\n 'Y': []\n }\n \n\n Ouput:\n \"\"\"\n if patch_update == False:\n self.source = ColumnDataSource({self.data_x_axis:np.array(source_dict['X']), self.data_y_axis:np.array(source_dict['Y'])})\n self.source_backup = self.source.to_df()\n else:\n patch_dict = {\n self.data_y_axis: [(slice(len(source_dict['Y'])), np.array(source_dict['Y']))],\n }\n self.source.patch(patch_dict)\n\n def get_source_y_axis(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if self.source is not None:\n return self.source.data[self.data_y_axis] #return list\n return self.source\n \n def generate_chart(self):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n self.chart = figure(height=20, x_range=(0,self.max_value), tools=\"\")\n self.chart.hbar(right=self.data_y_axis, y=self.data_x_axis, height=2.0, source = self.source)\n self.chart.yaxis.visible = False\n self.chart.xaxis.visible = False\n self.chart.ygrid.visible = False\n self.chart.xgrid.visible = False\n self.chart.toolbar.logo = None\n \n def update_dimensions(self, width=None, height=None):\n \"\"\"\n Description:\n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n if width is not None:\n self.chart.plot_width = width\n if height is not None:\n self.chart.plot_height = height\n\n def reload_chart(self, data):\n \"\"\"\n Description: \n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n self.calculate_source(data, patch_update=True)\n self.update_title()\n\n def update_title(self):\n \"\"\"\n Description: \n\n \n Input:\n\n \n\n Ouput:\n \"\"\"\n self.data_points_label.object = ' Points Selected: '+str(self.get_source_y_axis()[0])\n # t = Title()\n # t.text = 'Points Selected: '+str(self.get_source_y_axis()[0])\n # # self.chart.title = t\n # self.chart.js_link('value', self.chart.title, t)\n\n def reset_chart(self, data:np.array=np.array([])):\n \"\"\"\n Description: \n if len(data) is 0, reset the chart using self.source_backup\n \n Input:\n data = list() --> update self.data_y_axis in self.source\n \n\n Ouput:\n \"\"\"\n if data.size == 0:\n data = self.source_backup[self.data_y_axis] #np array\n \n #verifying length is same as x axis\n x_axis_len = self.source.data[self.data_x_axis].size\n data = data[:x_axis_len]\n\n patch_dict = {\n self.data_y_axis: [(slice(data.size), data)],\n }\n self.source.patch(patch_dict)\n 
self.update_title()","sub_path":"python/cuXfilter/charts/bokeh/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":17825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
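Every reset_chart/format_source_data in the cuXfilter record above funnels into ColumnDataSource.patch with a slice index. The mechanics in isolation — the column names here are illustrative, mirroring the Bar chart's 'x'/'top' convention:

    import numpy as np
    from bokeh.models import ColumnDataSource

    source = ColumnDataSource(dict(x=np.arange(4), top=np.zeros(4)))
    new_top = np.array([1.0, 2.0, 3.0, 4.0])
    # patch maps a column name to a list of (index-or-slice, values) pairs
    source.patch({'top': [(slice(new_top.size), new_top)]})
    assert list(source.data['top']) == [1.0, 2.0, 3.0, 4.0]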
+{"seq_id":"435465464","text":"\nfrom swampy.TurtleWorld import *\nimport math\n\n# Refers from 4.7 Refactoring of http://www.greenteapress.com/thinkpython/html/thinkpython005.html#toc51\n# --------start------\ndef polyline(t, n, length, angle):\n for i in range(n):\n fd(t, length)\n lt(t, angle)\n\ndef arc(t, r, angle):\n arc_length = 2 * math.pi * r * angle / 360\n n = int(arc_length / 3) + 1\n step_length = arc_length / n\n step_angle = float(angle) / n\n polyline(t, n, step_length, step_angle)\n\ndef move(t, length):\n pu(t)\n fd(t, length)\n pd(t)\n\n# --------end--------\n\ndef part(t, r, angle):\n arc(t, r, angle)\n lt(t, 180-angle)\n arc(t, r, angle)\n lt(t, 180-angle)\n\ndef flower(t, r, angle, piece):\n for i in range(piece):\n part(t, r, angle)\n lt(t, 360.0/piece)\n\n\nworld = TurtleWorld()\nbob = Turtle()\nbob.delay = 0.01\n\nmove(bob, -100)\nflower(bob, 60, 60, 7)\n\nmove(bob, 100)\nflower(bob, 40, 80, 10)\n\nmove(bob, 100)\nflower(bob, 140, 20, 20)\n\ndie(bob)\n\nwait_for_user()","sub_path":"hw/code/2/ex-code/ex4-2.py","file_name":"ex4-2.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
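The arc function in the turtle record approximates a circular arc with straight steps: arc_length = 2πr·angle/360, split into n = int(arc_length/3)+1 segments of roughly 3 units each. The numbers for the first flower call (r=60, angle=60), computed with the same formulas:

    import math

    r, angle = 60, 60
    arc_length = 2 * math.pi * r * angle / 360   # ~62.8 units
    n = int(arc_length / 3) + 1                  # 21 segments
    step_length = arc_length / n                 # ~3 units per segment
    step_angle = angle / n                       # ~2.9 degrees per turn
    print(n, round(step_length, 2), round(step_angle, 2))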
+{"seq_id":"142964192","text":"import statistics\nimport pandas as pd\n\n\ndef isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n\ndef run_test(test):\n calc = statistics.BasicStatistics()\n result = calc.perform(test[0], test[1])\n for i in range(len(result)):\n for j in range(len(result[i])):\n if not isclose(result[i][j], test[2][i][j]):\n return False\n return True\n\n\ndef run_calculationbasis_test():\n testset = [[pd.DataFrame([[1, 2, 3, 4], [4, 3, 2, 1], [-1, -2, -3, -4], [0, 0, 0, 0], [0, 0, 0, 0]]),\n [],\n [[4, 3, 3, 4], [-1, -2, -3, -4], [4, 3, 2, 1],\n [0.8, 0.6, 0.4, 0.2]]],\n [pd.DataFrame([[1, 2, 3, 4], [4, 3, 2, 1]]),\n [],\n [[4, 3, 3, 4], [1, 2, 2, 1], [5, 5, 5, 5],\n [2.5, 2.5, 2.5, 2.5]]]]\n\n for i in range(len(testset)):\n assert run_test(testset[i])\n","sub_path":"tests/calculationbasis_test.py","file_name":"calculationbasis_test.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
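The isclose helper in the test record above reimplements the PEP 485 formula that ships as math.isclose in Python 3.5+, with the same defaults (rel_tol=1e-09, abs_tol=0.0), so the two should agree. A quick spot check, assuming the record's isclose is in scope:

    import math

    for a, b in [(1.0, 1.0 + 1e-10), (0.0, 1e-12), (100.0, 100.1)]:
        assert isclose(a, b) == math.isclose(a, b)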
+{"seq_id":"444101612","text":"from torch.utils.data import DataLoader\nimport Reader\nfrom torchvision import transforms\n\ndef CreateDataLoader(opt):\n    data_loader = CustomDatasetDataLoader()\n    data_loader.initialize(opt)\n    return data_loader\n\ndef create_dataset(opt):\n    dataset = Reader.myData(opt.path, opt.filename, opt.image_path, transform=comptrans(opt))\n    return dataset\n\ndef comptrans(opt):\n    transform_list = []\n    if opt.flip != 0:\n        transform_list.append(transforms.RandomHorizontalFlip())\n    if opt.rotate_by != 0:\n        transform_list.append(transforms.Pad(4, padding_mode='edge'))\n        transform_list.append(transforms.RandomRotation(opt.rotate_by))\n        transform_list.append(transforms.CenterCrop(32))\n    if opt.crop != 0:\n        transform_list.append(transforms.RandomResizedCrop(32, scale=(0.8, 1.0)))\n    if opt.contrast != 0:\n        transform_list.append(transforms.ColorJitter(brightness=0.2))\n    transform_list.append(transforms.ToTensor())\n    return transforms.Compose(transform_list)\n\nclass CustomDatasetDataLoader(object):\n\n    \"\"\"\n    Creates an iterator over the data to stay memory efficient: files are\n    read when they are used instead of being held in memory.\n    \"\"\"\n\n    def name(self):\n        return 'CustomDatasetDataLoader'\n\n    def initialize(self, opt):\n\n        self.dataset = create_dataset(opt)\n        self.batchsize = opt.batchsize\n        self.dataloader = DataLoader(\n            self.dataset,\n            batch_size=opt.batchsize,\n        )\n\n    def load_data(self):\n        return self\n\n    def __len__(self):\n        return len(self.dataset)\n\n    def __iter__(self):\n        for i, data in enumerate(self.dataloader):\n            if i * self.batchsize >= len(self):\n                break\n            yield data","sub_path":"Create_Dataloader.py","file_name":"Create_Dataloader.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
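comptrans above assembles an augmentation pipeline conditionally from opt flags. Stripped to its core, the pattern is building a list of transforms and handing it to transforms.Compose; a minimal sketch using two of the same transforms (the conditions are elided here):

    from torchvision import transforms

    steps = []
    steps.append(transforms.RandomHorizontalFlip())  # gated on opt.flip != 0 in the module
    steps.append(transforms.ToTensor())              # always last, as in comptrans
    pipeline = transforms.Compose(steps)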
+{"seq_id":"609102633","text":"# Copyright (c) 2013-2017 Uber Technologies, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function, division, absolute_import\n\nimport re\n\nfrom struct import unpack\n\nfrom ..message import BackendMessage\n\n\nclass CommandComplete(BackendMessage):\n message_id = b'C'\n\n def __init__(self, data):\n BackendMessage.__init__(self)\n data = unpack('{0}sx'.format(len(data) - 1), data)[0]\n\n if re.match(b\"INSERT\", data) is not None:\n splitstr = data.split(b' ', 3)\n self.tag = splitstr[0]\n if len(splitstr) >= 2:\n self.oid = int(splitstr[1])\n if len(splitstr) >= 3:\n self.rows = int(splitstr[2])\n elif re.match(b\"(DELETE|UPDATE|MOVE|FETCH|COPY)\", data) is not None:\n splitstr = data.split(b' ', 2)\n self.tag = splitstr[0]\n if len(splitstr) >= 2:\n self.rows = int(splitstr[1])\n else:\n self.tag = data\n\n\nBackendMessage.register(CommandComplete)\n","sub_path":"vertica_python/vertica/messages/backend_messages/command_complete.py","file_name":"command_complete.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
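CommandComplete above strips the trailing NUL with struct.unpack and then splits the command tag on spaces. The same parse applied to a sample INSERT payload — the byte string is an illustrative wire value, not taken from a real session:

    import re
    from struct import unpack

    data = b'INSERT 0 5\x00'                          # hypothetical payload
    body = unpack('{0}sx'.format(len(data) - 1), data)[0]
    assert body == b'INSERT 0 5'
    assert re.match(b"INSERT", body) is not None
    tag, oid, rows = body.split(b' ', 3)
    assert (tag, int(oid), int(rows)) == (b'INSERT', 0, 5)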
+{"seq_id":"398499631","text":"class Cart:\n\n def __init__(self, pos, direction):\n self.i, self.j = pos\n self.dir = direction\n self.intersections = 0\n\n def move(self, track_map):\n if self.dir == \">\":\n self.j += 1\n elif self.dir == \"<\":\n self.j -= 1\n elif self.dir == \"^\":\n self.i -= 1\n elif self.dir == \"v\":\n self.i += 1\n\n ne_sw = {\"<\": \"^\", \"^\": \"<\", \">\": \"v\", \"v\": \">\"}\n nw_se = {\"<\": \"v\", \"v\": \"<\", \">\": \"^\", \"^\": \">\"}\n if track_map[self.i][self.j] == \"\\\\\":\n self.dir = ne_sw[self.dir]\n elif track_map[self.i][self.j] == \"/\":\n self.dir = nw_se[self.dir]\n elif track_map[self.i][self.j] == \"+\":\n if self.intersections % 3 == 1:\n self.dir = self.dir\n elif self.intersections % 3 == 0:\n self.dir = nw_se[self.dir] if self.dir in \"<>\" else ne_sw[self.dir]\n elif self.intersections % 3 == 2:\n self.dir = ne_sw[self.dir] if self.dir in \"<>\" else nw_se[self.dir]\n self.intersections += 1\n return self.i, self.j\n\n def __repr__(self):\n return f\"Cart({self.i}, {self.j}, {self.dir})\"\n\n def __eq__(self, other):\n return (self.i, self.j) == (other.i, other.j)\n\n def __lt__(self, other):\n return (self.i < other.i) or (self.i == other.i and self.j < other.j)\n\n\ndef print_tracks(grid, carts):\n for cart in carts:\n grid[cart.i][cart.j] = cart.dir\n for i, g in enumerate(grid):\n print(\n \"\".join(g)\n )\n\ndef parse_input(filename=\"test_input.txt\"):\n with open(filename) as f:\n return [list(line.strip(\"\\n\")) for line in f]\n\n\ndef initialise_carts(grid):\n carts = []\n for i, g in enumerate(grid):\n for j, c in enumerate(g):\n if c in \"<>^v\":\n carts.append(Cart((i, j), c))\n grid[i][j] = \"-\" if c in \"<>\" else \"|\"\n return grid, sorted(carts)\n\n\nfyle = \"input.txt\"\n\ntrack_map = parse_input(fyle)\ntrack_map, tracks = initialise_carts(track_map)\n\nwhile len(tracks) > 1:\n crashes = set()\n\n for i, cart in enumerate(tracks):\n if (cart.i, cart.j) in crashes:\n continue\n new_pos = cart.move(track_map)\n if cart in tracks[:i]+tracks[i+1:]:\n crashes.add(new_pos)\n\n tracks = [cart for cart in sorted(tracks) if (cart.i, cart.j) not in crashes]\n\nprint(tracks)\n","sub_path":"13/minecart.py","file_name":"minecart.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"607337784","text":"\"\"\"\nV2 Add Opt for 2 thread process\nV3 Using Class for GA\nVisualize Genetic Algorithm to find a maximum point in a function.\nVisit my tutorial website for more: https://morvanzhou.github.io/tutorials/\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\ndef constrained_sum_sample_pos(n, total):\n # Return a randomly chosen list of n positive integers summing to total.\n # Each such list is equally likely to occur\n dividers = sorted(random.sample(range(1, total), n - 1))\n return [a - b for a, b in zip(dividers + [total], [0] + dividers)]\n\n\nn = 10\ntotal = 1000\nTARGET_VECTOR = constrained_sum_sample_pos(n, total)\nTARGET_VECTOR = sorted(TARGET_VECTOR)\nprint(TARGET_VECTOR)\n\nDNA_SIZE = len(TARGET_VECTOR) # DNA length\nPOP_SIZE = pow(2, DNA_SIZE - 4) # population size\nCROSS_RATE = 0.8 # mating probability (DNA crossover)\nMUTATION_RATE = 0.003 # 0.003 # mutation probability\nN_GENERATIONS = 200\n\n\nclass GA(object):\n def __init__(self, DNA_size, cross_rate, mutation_rate, pop_size, ):\n self.DNA_size = DNA_size\n self.cross_rate = cross_rate\n self.mutate_rate = mutation_rate\n self.pop_size = pop_size\n\n self.pop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE)) # initialize the pop DNA\n\n def translateDNA(self, pop, ): # get cities' coord in order\n totalTime = sum(TARGET_VECTOR)\n sumTemp = []\n for elems in pop:\n temp = 0\n for i in range(len(elems)):\n if elems[i] == 1:\n temp += TARGET_VECTOR[i]\n temp2 = totalTime - temp\n sumTemp.append(max(temp, temp2))\n # print(sumTemp)\n sumTemp = [1 / elem for elem in sumTemp]\n return sumTemp\n\n def get_fitness(self, pred):\n temp = np.array(pred)\n return temp\n\n def select(self, fitness):\n idx = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,\n p=fitness / fitness.sum())\n return self.pop[idx]\n\n def crossover(self, parent, pop):\n if np.random.rand() < CROSS_RATE:\n i_ = np.random.randint(0, POP_SIZE, size=1) # select another individual from pop\n cross_points = np.random.randint(0, 2, size=DNA_SIZE).astype(np.bool) # choose crossover points\n parent[cross_points] = pop[i_, cross_points] # mating and produce one child\n return parent\n\n def mutate(self, child):\n for point in range(DNA_SIZE):\n if np.random.rand() < MUTATION_RATE:\n child[point] = 1 if child[point] == 0 else 0\n return child\n\n def evolve(self, fitness):\n pop = self.select(fitness)\n pop_copy = pop.copy()\n for parent in pop: # for every parent\n child = self.crossover(parent, pop_copy)\n child = self.mutate(child)\n parent[:] = child\n self.pop = pop\n\n\nga = GA(DNA_size=DNA_SIZE, cross_rate=CROSS_RATE, mutation_rate=MUTATION_RATE, pop_size=POP_SIZE)\n\nplt.ion() # something about plotting\nplotIdx = 0\nplotX = []\nplotY = []\n# print(translateDNA_v2(pop))\n\nfor _ in range(N_GENERATIONS):\n # compute function value by extracting DNA\n F_values = ga.translateDNA(ga.pop)\n\n # GA part (evolution)\n fitness = ga.get_fitness(F_values)\n print(\"Most fitted DNA: \", ga.pop[np.argmax(fitness), :])\n\n popMostFittedDNA = ga.pop[np.argmax(fitness), :]\n valueTemp = []\n for i in range(len(popMostFittedDNA)):\n if popMostFittedDNA[i] == 1:\n valueTemp.append(TARGET_VECTOR[i])\n print(valueTemp)\n print(int(1 / max(fitness)))\n print(plotIdx)\n print(\"\")\n plt.xlim(0, N_GENERATIONS), plt.ylim(total / 2, total / 2 + 20)\n plotX.append(plotIdx), plotY.append(1 / max(fitness))\n plotIdx += 1\n plt.plot(plotX, plotY, color='black')\n plt.pause(0.05)\n\n 
ga.evolve(fitness)\n\nplt.ioff()\nplt.show()\n","sub_path":"GA.py","file_name":"GA.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"272233057","text":"#_*_coding:utf-8_*_\n\n'''\nCopyright (c) 2021 FranciscoCharles\n\npara informações de licenca consulte o LICENCE.txt.\n'''\nimport time#importa o modulo time.\nimport numpy#importa o modulo numpy\nimport socket#importa o modulo socket.\n\ndef ping(sock, ip_alvo, porta_alvo = 8080, n_repeticoes = 10):#define uma funcao.\n \n intervalos = numpy.zeros((n_repeticoes))#cria um array de zeros de intervalos.\n mensagem = b': mensagem.'#cria uma mensagem em bytes.\n endereco_alvo = (ip_alvo,porta_alvo)#cria uma tupla com o endereco alvo.\n\n for index in range(n_repeticoes+1):#itera n_repeticoes+1 vezes.\n try:\n\n if index == n_repeticoes:#verifica se é a ultima iteracao.\n mensagem = b'sair'#troca a mensagem\n sock.sendto(mensagem, endereco_alvo)#envia a mensagem ao endereco alvo.\n \n if mensagem != b'sair':#verifica se não é para sair.\n t0 = time.perf_counter()#regista o segundos atuais.\n sock.recvfrom(1024)#aguarda por dados.\n intervalo = (time.perf_counter()-t0)*1000#calcula os segundos decorritos e converte para milisegundos.\n print(f'ping:{index:2d} tempo decorrido: {intervalo:.2f}ms')#exibe o ping atual e o intervalo decorrido.\n intervalos[index] = intervalo#armazena o intervalo.\n\n except socket.timeout:#captura a excecao de timeout.\n print('Erro: ops, tempo esgotado...')#exibe uma mensagem.\n except ConnectionResetError:\n print('Erro: ops, a conexao foi cancelada...')#exibe uma mensagem.\n #exibe as informacoes referentes aos intervalos em milisegundos.\n print(f'informacoes de intervalo:')\n print(f'\\tmedio: {intervalos.mean():.2f}ms')\n print(f'\\tdevio padrao: {intervalos.std():.2f}ms')\n print(f'\\tmaximo: {intervalos.max():.2f}ms')\n print(f'\\tminimo: {intervalos.min():.2f}ms')\n\nif __name__ == '__main__':#verica se este programa/arquivo é o arquivo/programa/chamada de execução principal.\n\n IP_SERVIDOR = 'localhost'#ip do servidor.\n\n with socket.socket(type=socket.SOCK_DGRAM) as cliente:#abre um socket do tipo UDP.\n cliente.bind(('localhost',8081))#associa o socket ao a um IP=localhost e Porta=8081.\n cliente.settimeout(10)#seta o timeout em 10s.\n ping(cliente,IP_SERVIDOR)#realiza os pings.","sub_path":"code/client/udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"475764998","text":"####################\n# ES-DOC CIM Questionnaire\n# Copyright (c) 2014 ES-DOC. All rights reserved.\n#\n# University of Colorado, Boulder\n# http://cires.colorado.edu/\n#\n# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].\n####################\n\n__author__ = \"allyn.treshansky\"\n__date__ = \"Dec 01, 2014 3:00:00 PM\"\n\n\"\"\"\n.. module:: views_edit\n\nviews for AJAX\neventually, these will be replaced w/ the views in \"views_api.py\"\n\"\"\"\n\n\nimport json\nfrom django.http import HttpResponse\n\nfrom Q.questionnaire.models.models_customizations import QStandardPropertyCustomization, get_existing_customization_set\nfrom Q.questionnaire.models.models_proxies import get_existing_proxy_set\nfrom Q.questionnaire.models.models_realizations_bak import MetadataStandardProperty, get_new_subrealization_set\nfrom Q.questionnaire.forms.bak.forms_edit_bak import create_new_edit_subforms_from_models, get_data_from_existing_edit_forms\nfrom Q.questionnaire.views.views_realizations_bak import convert_customization_set, convert_proxy_set, get_rid_of_non_displayed_subitems\nfrom Q.questionnaire.q_utils import QError, get_joined_keys_dict\n\n# NEW CODE!\n# DOESN'T LET USERS SELECT EXISTING REALIZATIONS\n# JUST RETURNS FORM DATA FOR A NEW REALIZATION\n# DONE FOR v0.14.0.0\n# WILL BE RENDERED OBSOLETE ONCE ANGULAR & REST ARE IN-PLACE\n\ndef ajax_select_realization(request, **kwargs):\n\n # I can get all of the info I need (version/proxy/project) from the customizer\n # (I still need to check for existing properties (using property_id) to exclude items from the queryset below)\n customizer_id = request.GET.get('c', None)\n standard_property_id = request.GET.get(\"s\", None)\n prefix = request.GET.get(\"p\", None)\n parent_vocabulary_key = request.GET.get(\"p_v_k\", \"\")\n parent_component_key = request.GET.get(\"p_c_k\", \"\")\n n_forms = int(request.GET.get(\"n\", \"0\"))\n realizations_to_exclude = request.GET.get(\"e\", [])\n if realizations_to_exclude:\n realizations_to_exclude = realizations_to_exclude.split(\",\")\n if n_forms > 0:\n n_forms -= 1 # don't forget to take into account the current form being added (it has already been created in the DOM)\n if not customizer_id and prefix:\n msg = \"unable to select realization (no customizer id or form prefix specified)\"\n raise QError(msg)\n if standard_property_id:\n standard_property = MetadataStandardProperty.objects.get(pk=standard_property_id)\n\n parent_standard_property_customizer = QStandardPropertyCustomization.objects.get(pk=customizer_id)\n assert parent_standard_property_customizer.relationship_show_subform\n realization_customizer = parent_standard_property_customizer.relationship_subform_customization\n\n vocabularies = realization_customizer.get_active_vocabularies()\n\n realization_parameters = {\n \"project\": realization_customizer.project,\n \"proxy\": realization_customizer.proxy,\n }\n\n status = 200\n\n customization_set = get_existing_customization_set(\n project=realization_customizer.project,\n ontology=realization_customizer.ontology,\n proxy=realization_customizer.proxy,\n customization_name=realization_customizer.name,\n customization_id=realization_customizer.pk,\n )\n customization_set = convert_customization_set(customization_set)\n customization_set[\"scientific_category_customizers\"] = get_joined_keys_dict(customization_set[\"scientific_category_customizers\"])\n customization_set[\"scientific_property_customizers\"] = 
get_joined_keys_dict(customization_set[\"scientific_property_customizers\"])\n proxy_set = get_existing_proxy_set(\n ontology=realization_customizer.ontology,\n proxy=realization_customizer.proxy,\n vocabularies=vocabularies,\n )\n proxy_set = convert_proxy_set(proxy_set)\n realization_set = get_new_subrealization_set(\n realization_customizer.project, realization_customizer.ontology, realization_customizer.proxy,\n proxy_set[\"standard_property_proxies\"], proxy_set[\"scientific_property_proxies\"],\n customization_set[\"model_customizer\"], vocabularies,\n parent_vocabulary_key, parent_component_key,\n )\n\n get_rid_of_non_displayed_subitems(realization_set, proxy_set, customization_set)\n\n subform_min = int(parent_standard_property_customizer.get_cardinality_min())\n subform_max = parent_standard_property_customizer.get_cardinality_max()\n if subform_max != \"*\":\n subform_max = int(subform_max)\n\n (model_formset, standard_properties_formsets, scientific_properties_formsets) = \\\n create_new_edit_subforms_from_models(\n realization_set[\"models\"], customization_set[\"model_customizer\"],\n realization_set[\"standard_properties\"], customization_set[\"standard_property_customizers\"],\n realization_set[\"scientific_properties\"], customization_set[\"scientific_property_customizers\"],\n subform_prefix=prefix, subform_min=subform_min, subform_max=subform_max, increment_prefix=n_forms,\n )\n\n # b/c I will only be in this function if I clicked add/replace from w/in a loaded subform,\n # these forms must also be loaded (so that I can update things appropriately)\n # by default most forms have \"loaded\" set to \"False\" and then JS sets the loaded field at some point\n # but this situation is different\n for model_form in model_formset.forms:\n model_form.load()\n for standard_property_formset in standard_properties_formsets.values():\n for standard_property_form in standard_property_formset:\n standard_property_form.load()\n for scientific_propery_formset in scientific_properties_formsets.values():\n for scientific_propery_form in scientific_propery_formset:\n scientific_propery_form.load()\n\n # get the data that will be used to populate the form...\n data = get_data_from_existing_edit_forms(model_formset, standard_properties_formsets, scientific_properties_formsets)\n\n # now clean it up a bit...\n\n # no need to use the management form, since I'm only ever adding a single form\n fields_to_remove_from_data = [u\"%s-%s\" % (model_formset.prefix, field_key) for field_key in model_formset.management_form.fields.keys()]\n for field_to_remove_from_data in fields_to_remove_from_data:\n if field_to_remove_from_data in data:\n data.pop(field_to_remove_from_data)\n\n # but do need to pass the prefix to make sure that js updates all added fields appropriately\n adjusted_prefix = model_formset.forms[0].prefix\n data[\"prefix\"] = adjusted_prefix\n data[\"label\"] = u\"%s\" % realization_set[\"models\"][0].get_label()\n\n # ...okay, I'm done cleaning up the data\n\n # finally return a JSON version of all of the fields used in this subform\n json_data = json.dumps(data)\n response = HttpResponse(json_data, content_type=\"text/html\", status=status)\n return response\n\n # # (also get the proxies b/c I'll need them when setting up new properties below)\n # standard_property_proxies = [standard_property_customizer.proxy for standard_property_customizer in standard_property_customizers]\n # scientific_property_proxies = {}\n # scientific_property_customizers = {}\n # for 
vocabulary_key,scientific_property_customizer_dict in nested_scientific_property_customizers.iteritems():\n # for component_key,scientific_property_customizer_list in scientific_property_customizer_dict.iteritems():\n # model_key = u\"%s_%s\" % (vocabulary_key, component_key)\n # # I have to restructure this; in the customizer views it makes sense to store these as a dictionary of dictionary\n # # but here, they should only be one level deep (hence the use of \"nested_\" above\n # scientific_property_customizers[model_key] = scientific_property_customizer_list\n # scientific_property_proxies[model_key] = [scientific_property_customizer.proxy for scientific_property_customizer in scientific_property_customizer_list]\n #\n # # get the full realization set\n # (models, standard_properties, scientific_properties) = \\\n # MetadataModel.get_new_subrealization_set(model_customizer.project, model_customizer.version, model_customizer.proxy, standard_property_proxies, scientific_property_proxies, model_customizer, MetadataVocabulary.objects.none(), parent_vocabulary_key, parent_component_key)\n #\n # # clean it up a bit based on properties that have been customized not to be displayed\n # for i, model in enumerate(models):\n #\n # model_key = model.get_model_key()\n # submodel_key = model.get_model_key() + \"-%s\" % i\n #\n # standard_property_list = standard_properties[submodel_key]\n # standard_properties_to_remove = []\n # for standard_property, standard_property_customizer in zip(standard_property_list,standard_property_customizers):\n # if not standard_property_customizer.displayed:\n # standard_properties_to_remove.append(standard_property)\n # # this list might actually be a queryset, so remove doesn't work\n # # instead, I have to use exclude\n # if standard_properties_to_remove:\n # standard_properties_to_remove_names = [sp.name for sp in standard_properties_to_remove]\n # standard_property_list = [sp for sp in standard_property_list if sp.name not in standard_properties_to_remove_names]\n # # for sp in standard_properties_to_remove:\n # # standard_property_list.remove(sp)\n #\n # # TODO: JUST A LIL HACK UNTIL I CAN FIGURE OUT WHERE TO SETUP THIS LOGIC\n # if submodel_key not in scientific_property_customizers:\n # scientific_property_customizers[submodel_key] = []\n #\n # scientific_property_list = scientific_properties[submodel_key]\n # scientific_properties_to_remove = []\n # for scientific_property, scientific_property_customizer in zip(scientific_property_list,scientific_property_customizers[submodel_key]):\n # if not scientific_property_customizer.displayed:\n # scientific_properties_to_remove.append(scientific_property)\n # # (as above) this list might actually be a queryset, so remove doesn't work\n # # instead, I have to use exclude\n # if scientific_properties_to_remove:\n # scientific_properties_to_remove_names = [sp.name for sp in scientific_properties_to_remove]\n # scientific_property_list = [sp for sp in scientific_property_list if sp.name not in scientific_properties_to_remove_names]\n #\n # subform_min, subform_max = [int(val) if val != \"*\" else val for val in parent_standard_property_customizer.relationship_cardinality.split(\"|\")]\n #\n # (model_formset, standard_properties_formsets, scientific_properties_formsets) = \\\n # create_new_edit_subforms_from_models(models, model_customizer, standard_properties, standard_property_customizers, scientific_properties, scientific_property_customizers, subform_prefix=prefix, subform_min=subform_min, subform_max=subform_max, 
increment_prefix=n_forms)\n #\n # # b/c I will only be in this function if I clicked add/replace from w/in a loaded subform,\n # # these forms must also be loaded (so that I can update things appropriately)\n # # by default most forms have \"loaded\" set to \"False\" and then JS sets the loaded field at some point\n # # but this situation is different\n # for model_form in model_formset.forms:\n # model_form.load()\n # for standard_property_formset in standard_properties_formsets.values():\n # for standard_property_form in standard_property_formset:\n # standard_property_form.load()\n # for scientific_propery_formset in scientific_properties_formsets.values():\n # for scientific_propery_form in scientific_propery_formset:\n # scientific_propery_form.load()\n #\n # # get the data that will be used to populate the form...\n # data = get_data_from_existing_edit_forms(model_formset, standard_properties_formsets, scientific_properties_formsets)\n #\n # # now clean it up a bit...\n #\n # # no need to use the management form, since I'm only ever adding a single form\n # fields_to_remove_from_data = [u\"%s-%s\" % (model_formset.prefix, field_key) for field_key in model_formset.management_form.fields.keys()]\n # for field_to_remove_from_data in fields_to_remove_from_data:\n # if field_to_remove_from_data in data:\n # data.pop(field_to_remove_from_data)\n #\n # # but do need to pass the prefix to make sure that js updates all added fields appropriately\n # adjusted_prefix = model_formset.forms[0].prefix\n # data[\"prefix\"] = adjusted_prefix\n # data[\"label\"] = u\"%s\" % models[0].get_label()\n #\n # # ...okay, I'm done cleaning up the data\n #\n # # finally return a JSON version of all of the fields used in this subform\n # json_data = json.dumps(data)\n # response = HttpResponse(json_data, content_type=\"text/html\", status=status)\n # return response\n #\n #\n","sub_path":"Q/questionnaire/views/views_ajax_bak.py","file_name":"views_ajax_bak.py","file_ext":"py","file_size_in_byte":13113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"468791064","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport pandas as pd\n\nclass Demo3Pipeline:\n def process_item(self, item, spider):\n title = item['title']\n link = item['link']\n releasetime = item['releasetime']\n print(\"hello wxf!!!!!!!!!!!!!!!!!!!!!!!!!\")\n output = f'|{title}|\\t|{link}|\\t|{releasetime}|\\n\\n'\n movie1 = pd.DataFrame(data = output)\n movie1.to_csv('./movie_maoyan.csv', mode='a', encoding='utf8', index=False, header=False)\n return item\n","sub_path":"week01/Demo3/Demo3/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"282413193","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport rospy\nfrom cmoon_msgs.srv import cmoon, cmoonRequest, cmoonResponse\nimport sys\n\n\nclass Main:\n def __init__(self, name):\n rospy.init_node(name, anonymous=True)\n client = rospy.ServiceProxy('add', cmoon)\n client.wait_for_service(timeout=5)\n rospy.loginfo('Request completed.')\n if len(sys.argv) == 3:\n num1 = int(sys.argv[1])\n num2 = int(sys.argv[2])\n response = client.call(num1, num2)\n print(response.sum)\n else:\n while not rospy.is_shutdown():\n num1 = int(input('num1: '))\n num2 = int(input('num2: '))\n response = client.call(num1, num2)\n print(response.sum)\n\n # num = cmoonRequest(1, 2)\n # sum = send_sum(num)\n\n # num = cmoonRequest()\n # num.num1 = 4\n # num.num2 = 5\n # sum = send_sum(num)\n\n\nif __name__ == '__main__':\n try:\n Main('send_num')\n except rospy.ROSInterruptException:\n rospy.loginfo(\"Keyboard interrupt.\")\n","sub_path":"src/remake/src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"195385493","text":"\"\"\"add groups and permissions tables\n\nRevision ID: b4e59ddf3c5f\nRevises: 494020788fe3\nCreate Date: 2018-05-30 21:07:03.377748\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom sqlalchemy import func\nfrom sqlalchemy.dialects.postgresql import UUID, JSON\n\n# revision identifiers, used by Alembic.\nrevision = 'b4e59ddf3c5f'\ndown_revision = '494020788fe3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n 'user_groups',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('id', UUID(), nullable=False),\n sa.Column('parent_id', UUID(), nullable=True),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('is_system_group', sa.Boolean(), nullable=False),\n sa.ForeignKeyConstraint(['parent_id'], ['user_groups.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_groups_created_at'), 'user_groups', ['created_at'], unique=False)\n op.create_index(op.f('ix_user_groups_updated_at'), 'user_groups', ['updated_at'], unique=False)\n op.create_index(op.f('ix_user_groups_name'), 'user_groups', ['name'], unique=False)\n\n op.create_table(\n 'permissions',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('id', UUID(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_permissions_created_at'), 'permissions', ['created_at'], unique=False)\n op.create_index(op.f('ix_permissions_updated_at'), 'permissions', ['updated_at'], unique=False)\n\n op.create_table(\n 'user_group_members',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('user_group_id', UUID(), nullable=False),\n sa.Column('user_id', UUID(), nullable=False),\n sa.ForeignKeyConstraint(['user_group_id'], ['user_groups.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('user_group_id', 'user_id')\n )\n\n op.create_table(\n 'user_group_permissions',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('user_group_id', UUID(), nullable=False),\n sa.Column('permission_id', UUID(), nullable=False),\n sa.ForeignKeyConstraint(['user_group_id'], ['user_groups.id'], ),\n sa.ForeignKeyConstraint(['permission_id'], ['permissions.id'], ),\n sa.PrimaryKeyConstraint('user_group_id', 'permission_id')\n )\n\n\ndef downgrade():\n op.drop_table('user_group_permissions')\n op.drop_table('user_group_members')\n op.drop_table('user_groups')\n op.drop_table('permissions')\n","sub_path":"migrations/versions/20180530_b4e59ddf3c5f_add_groups_and_permissions_tables.py","file_name":"20180530_b4e59ddf3c5f_add_groups_and_permissions_tables.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"611850597","text":"import myModule\r\nfrom myModule import *\r\n\r\nclass Tank(object):\r\n\tdef __init__(self,name,w,barrels,power):\r\n\t\tself.name = name\r\n\t\tself.weight = w\r\n\t\tself.barrels = barrels\r\n\t\tself.power = power*barrels\r\n\t\tself.speed = toFixed(70-(w/power),1)\r\n\r\n\tdef shoot(self):\r\n\t\tdistance = ri(self.power*4,(self.power*6)+ri(-self.power,self.power))\r\n\r\n\t\tprint(f\"Tank {self.name} shot on {distance*1000} meters\")\r\n\r\n\tdef info(self):\r\n\t\tprint(f\"\"\"\r\n\t\t\t<=== {self.name} ===>\r\n\t\t\tWeight: {self.weight} T\r\n\t\t\tBarrels: x{self.barrels}\r\n\t\t\tPower: {self.power} p.\r\n\t\t\tSpeed: {self.speed} Km/h\r\n\t\t\"\"\")\r\n\r\n\r\nzhalizyaka = Tank(\"zhalizyaka\",15,2,3)\r\nzhalizyaka.info()\r\nzhalizyaka.shoot()\r\n\r\ninput()","sub_path":"functionAndClasses/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"107119133","text":"import random\n\nrandom.seed(1)\n\nbank_account = 1000\nbet_amount = 0\nbet_color = None\nbet_number = None\n\ngreen = [0, 37]\nred = [1, 3, 5, 7, 9, 12, 14, 16, 18, 19, 21, 23, 25, 27, 30, 32, 34, 36]\nblack = [2, 4, 6, 8, 10, 11, 13, 15, 17, 20, 22, 24, 26, 28, 29, 31, 33, 35]\n\n\ndef take_bet(color, number, amount):\n bet_color = color\n bet_number = number\n bet_amount = amount\n\n return [bet_color, bet_number, bet_amount]\n\n\ndef roll_ball():\n return random.randint(0, 38)\n\n\ndef check_results(ball_roll, bet_info):\n print(\"The ball number is \" + str(bet_info[2]) + \"...\\n\")\n payout(ball_roll == bet_info[2], bet_info)\n\n\ndef payout(did_win, bet_info):\n if did_win:\n print(\"YOU WON! Your now have $\", str(bank_account + bet_info[2]))\n\n else:\n print(\"YOU LOST!!! HAHAHA! You now have $\",\n str(bank_account - bet_info[2]))\n\n\ndef play_game():\n color = input(\"Choose color\\n\")\n number = 0\n\n if color == \"green\":\n number = input(\"Choose a number from this list: \" + str(green) + \"\\n\")\n\n elif color == \"red\":\n number = input(\"Choose a number from this list: \" + str(red) + \"\\n\")\n\n elif color == \"black\":\n number = input(\"Choose a number from this list: \" + str(black) + \"\\n\")\n\n else:\n print(\"Whoops! That's not a color supported by this game. Try again..\")\n play_game()\n\n check_results(roll_ball(), take_bet(color, int(number), int(input(\"Bet amount?\\n\"))))\n\n\nplay_game()\n","sub_path":"roulette.py","file_name":"roulette.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"238828965","text":"#!/usr/bin/env python\n# coding=utf-8\n\nfrom django.conf.urls import url, include\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.PostIndexView.as_view(), name='index'),\n url(r'^post/(?P[0-9]+)/$', views.PostDetailView.as_view(),\n name='detail'),\n url(r'^archive/(?P[0-9]{4})/(?P[0-9]{1,2})/$',\n views.ArchivesView.as_view(), name='archives'),\n url(r'^category/(?P[0-9]+)/$', views.CategoryView.as_view(),\n name='category'),\n url(r'^tag/(?P[0-9]+)/$', views.TagView.as_view(), name='tag'),\n # url(r'^post_edit/(?P[0-9]+)/$', views.add_or_edit, name='post_edit'),\n url(r'^post_edit/(?P[0-9]+)?$', views.PostAddOrEditView.as_view(),\n name='post_edit'),\n url(r'^post_delete/(?P[0-9]+)/$', views.post_delete,\n name='post_delete'),\n url(r'^search/', include('haystack.urls')),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"238530153","text":"import math\n\ndef read_file(input_file):\n lines = []\n lines.append(input_file)\n with open(input_file) as f:\n for line in f:\n if ' ' in line:\n lines.append(map(int, line.split(' ')))\n else:\n lines.append(int(line))\n return lines\n\ndef write_file(output_file, value):\n with open(output_file, 'w') as f:\n f.write(str(value))\n\ndef main():\n input_file = \"bugtrk.in\"\n output_file = \"bugtrk.out\"\n\n # read data\n lines = read_file(input_file)\n\n N = lines[1][0]\n W = lines[1][1]\n H = lines[1][2]\n\n # find minimum side of the square\n side = max(W, H, int(math.ceil(math.sqrt(N * W * H))))\n\n # test side\n small_side = min(W, H)\n big_side = max(W, H)\n\n while True:\n elements_in_row = (side - side % small_side) / small_side\n number_of_rows = (side - side % big_side) / big_side\n if elements_in_row * number_of_rows >= N:\n break\n else:\n side = min(small_side * (elements_in_row + 1), big_side * (number_of_rows + 1))\n\n result = side\n\n # write data\n write_file(output_file, result)\n\nif __name__ == \"__main__\":\n main()","sub_path":"bugtrk/bugtrk.py","file_name":"bugtrk.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"222618226","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom random import randint\n\n\n\ncolPool = [ '#bd2309', '#bbb12d', '#1480fa', '#14fa2f', '#000000',\\\n '#faf214', '#2edfea', '#ea2ec4', '#ea2e40', '#cdcdcd',\\\n '#577a4d', '#2e46c0', '#f59422', '#219774', '#8086d9' ]\n\ncolToInt = pd.Index(['Occupation_Satisfaction', 'Last_school_grades',\\\n 'Number_of_differnt_parties_voted_for','Number_of_valued_Kneset_members',\\\n 'Num_of_kids_born_last_10_years'])\n\n### Print plot from training set on category dtype###\ndef describeAndPlot(df:pd.DataFrame):\n # df.describe()\n\n #for categorical columns\n catFeat = df.keys()[df.dtypes.map(lambda x: x!=np.number)]\n catFeat = catFeat.drop('Vote')\n catFeat = catFeat.union(colToInt)\n for key in catFeat:\n new_plot = pd.crosstab([df.Vote], df[key])\n new_plot.plot(kind='bar', stacked=True,\\\n color=colPool, grid=False)\n title = \"Distribution of {} in different parties\"\n plt.title(title.format(key))\n plt.xlabel('Name of Party')\n plt.ylabel('Number of Voters')\n plt.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n title += '.png'\n plotName = './plots/' + title.format(key)\n plt.savefig(plotName,bbox_inches=\"tight\")\n plt.clf()\n\n # for numeric columns\n numFeat = df.keys()[df.dtypes.map(lambda x: x == np.number)]\n numFeat = numFeat.difference(colToInt)\n partyMap = {p:i for i,p in enumerate(df['Vote'].unique())}\n indexList = [i for i in partyMap.values()]\n partyList = [p for p in partyMap]\n\n for key in numFeat:\n rows = df[key].notnull()\n x = df.loc[rows,key]\n y = df.loc[rows,'Vote']\n y = y.map(partyMap)\n\n plt.scatter(x,y)\n title = \"Scatter plot of {} in different parties\"\n plt.title(title.format(key))\n plt.xlabel('TBD')\n plt.xlim(np.floor(np.min(x)),np.ceil(np.max(x)))\n plt.ylabel('Name of Party')\n plt.yticks(indexList,partyList)\n title += '.png'\n plotName = './plots/' + title.format(key)\n plt.savefig(plotName, bbox_inches=\"tight\")\n plt.clf()\n\n\n\n### Creates HIST plots for numerical categories ###\n\ndef histForFloat(df:pd.DataFrame):\n numFeat = df.keys()[df.dtypes.map(lambda x: x == np.number)]\n numFeat = numFeat.difference(colToInt)\n partyMap = {p:i for i,p in enumerate(df['Vote'].unique())}\n\n\n for key in numFeat:\n partyList = df['Vote'].unique()\n plt.figure(figsize=(40,30))\n mainTitle = \"Hist plots of {}\"\n plt.suptitle(mainTitle.format(key))\n rows = df[key].notnull()\n maxXValue = np.ceil(np.max(df[key])) ## to assure all subplots will have same x scale\n minXValue = np.floor(np.min(df[key])) ## same as above\n for i,p in enumerate(partyList):\n mask = df.Vote == p\n x = df.loc[mask & rows,key]\n plt.subplot(3,4,i+1)\n n,bins,patches = plt.hist(x=x,bins=20)\n plt.title(p)\n plt.ylabel('Number of Voters')\n plt.xlim(minXValue, maxXValue)\n plt.ylim(0,1+np.max(n).astype(int))\n ## Trying to add line for normal distribution\n # mu = x.mean()\n # sigma = np.std(x.values)\n # print('mean is:',mu,'std is:',sigma)\n # normDis = np.linspace(np.floor(np.min(x)), np.ceil(np.max(x)), bins.shape[0])\n # y = norm.pdf(bins, mu, sigma)\n # plt.plot(bins, y, 'r--')\n plt.plot(bins)\n\n mainTitle += '.png'\n plotName = './plots/' + mainTitle.format(key)\n plt.savefig(plotName, bbox_inches=\"tight\")\n plt.close()\n\n \n### Function for TRAIN DATA that fill nan cells in object categories with mode value ###\ndef fillNAByLabelMode(X:pd.DataFrame,Y:pd.DataFrame,index):\n if X.index.dtype == 'float':\n print('ERROR needs to be a 
discrete category')\n    df = X\n    df['Vote'] = Y.copy().values\n    # df.loc[:, 'Vote'] = Y['Vote']\n    partyList = df['Vote'].unique()\n    df[index + 'FillByMode'] = df[index]\n    for p in partyList:\n        mask = df.Vote == p\n        colByLabel = df[mask]\n        currMode = colByLabel[index].mode().iloc[0] # just the first mode, could be more than 1\n        # print('party',p,'mode is:',currMode) # TODO remove\n        # df.loc[df[df[mask][index].isnull()],index + 'FillByMode'] = currMode\n        # df[mask][index] = df[mask][index].fillna(currMode)\n        df.loc[(mask) & (df[index + 'FillByMode'].isnull()),index + 'FillByMode'] = currMode\n    return df.drop('Vote', axis=1)\n\n\n\n### Function for TEST/VALIDATION DATA that fill nan cells in object categories with mode value ###\ndef fillNATestValMode(X:pd.DataFrame,index):\n    if X.index.dtype == 'float':\n        print('ERROR needs to be a discrete category')\n    df = X\n    df[index + 'FillByMode'] = df[index]\n    currMode = df[index].mode().iloc[0]\n    df.loc[(df[index + 'FillByMode'].isnull()), index + 'FillByMode'] = currMode\n    return df\n\n\n\n### Function for TRAIN DATA that fill nan cells in numeric categories with mean or median value ###\ndef fillNAByLabelMeanMedian(X:pd.DataFrame,Y:pd.DataFrame,index,meanOrMedian):\n    if meanOrMedian not in ('Mean','Median'):\n        print('ERROR should state mean or median only')\n        return X\n    if X.index.dtype == np.number:\n        print('ERROR needs to be a numeric category')\n        return X\n    df = X\n    df['Vote'] = Y.copy().values\n    partyList = df['Vote'].unique()\n    newColName = index + 'FillBy' + meanOrMedian\n    df[newColName] = df[index]\n    for p in partyList:\n        mask = df.Vote == p\n        colByLabel = df[mask]\n        curr = np.nanmean(colByLabel[index]) if meanOrMedian == 'Mean' else np.nanmedian(colByLabel[index])\n        df.loc[(mask) & (df[newColName].isnull()),newColName] = curr\n    return df.drop('Vote', axis=1)\n\n\n### Function for TEST/VALIDATION DATA that fill nan cells in numeric categories with mean or median value ###\ndef fillNATestValMeanMedian(X:pd.DataFrame,index,meanOrMedian):\n    if meanOrMedian not in ('Mean','Median'):\n        print('ERROR should state mean or median only')\n        return X\n    if X.index.dtype == np.number:\n        print('ERROR needs to be a numeric category')\n        return X\n    df = X\n    newColName = index + 'FillBy' + meanOrMedian\n    df[newColName] = df[index]\n    curr = np.nanmean(df[index]) if meanOrMedian == 'Mean' else np.nanmedian(df[index])\n    df.loc[(df[newColName].isnull()),newColName] = curr\n    return df\n\n\ndef distanceBetween2Samples(sam1,sam2):\n    \"\"\"\n    Function that computes the distance between 2 samples from a DataFrame. Should get normalized data, w/o nan values\n    Let x1,x2,...,xN values of N numeric features of sam1\n    and y1,y2,...,yN values of N numeric features of sam2\n    Return: sqrt((x1-y1)^2+(x2-y2)^2+...+(xN-yN)^2)\n    \"\"\"\n    sam1 = sam1.select_dtypes(include=[np.number]).values\n    sam2 = sam2.select_dtypes(include=[np.number]).values\n    res = np.sqrt(np.nansum((sam1-sam2)**2))\n    return res\n\n\n\ndef findNearestHitMiss(X:pd.DataFrame,Y:pd.DataFrame,samIndex,hitMiss='h'):\n    \"\"\"\n    Finds closest sample to sam in the same/different label. 
Uses distanceBetween2Samples(), should get normalized data\n    params: X- copy of DataFrame w/o labels, Y- labels , samIndex- index of the sample in X with iloc (X relative row's index)\n    hitMiss- 'h' for hit(same label), 'm' for miss (closest in other label)\n    Return: index of closest sample in the same/other label, original index use with loc\n    \"\"\"\n    if hitMiss != 'h' and hitMiss != 'm':\n        print('ERROR must state \\'h\\' for hit or \\'m\\' for miss')\n        return -1\n    # merge X+Y\n    df = X\n    df['Vote'] = Y.values\n\n    sampleToCompare = df.iloc[[samIndex]]\n    realSamIndex = df.iloc[[samIndex]].index[0] # because it's easier to iterate over iloc but loc gives exact location\n    # print('samIndex=',samIndex,'but real index is:',realSamIndex)\n\n    label = sampleToCompare['Vote'] # gets sam's label\n    # print(label)\n    label = label.get_values()[0]\n    # print('The label is:',label)\n    if hitMiss == 'h':\n        mask = df.Vote == label\n    else:\n        mask = df.Vote != label\n    rowsByLabel = df[mask]\n    minIndex = -1\n    minScore = np.inf\n\n    for i in range(rowsByLabel.shape[0]): # iterate over rows\n        currIndex = rowsByLabel.iloc[[i]].index[0] # gets the index of the row in the original df\n        # print(currIndex)\n        if realSamIndex == currIndex:\n            continue\n        curr = distanceBetween2Samples(sampleToCompare, rowsByLabel.iloc[[i]])\n        # print(curr)\n        if curr < minScore:\n            minScore = curr\n            minIndex = currIndex\n    return minIndex\n\n\n\ndef heuristicFindNearestHitMiss(X: pd.DataFrame, Y: pd.DataFrame, samIndex, hitMiss='h'):\n    \"\"\"\n    Finds closest sample to sam in the same/different label. Uses distanceBetween2Samples(), should get normalized data\n    params: X- copy of DataFrame w/o labels, Y- labels , samIndex- index of the sample in X with iloc (X relative row's index)\n    hitMiss- 'h' for hit(same label), 'm' for miss (closest in other label)\n    Return: index of closest sample in the same/other label, original index use with loc\n    \"\"\"\n    if hitMiss != 'h' and hitMiss != 'm':\n        print('ERROR must state \\'h\\' for hit or \\'m\\' for miss')\n        return -1\n    # merge X+Y\n    df = X\n    df['Vote'] = Y.values\n\n    sampleToCompare = df.iloc[[samIndex]]\n    realSamIndex = df.iloc[[samIndex]].index[0]  # because it's easier to iterate over iloc but loc gives exact location\n    # print('samIndex=',samIndex,'but real index is:',realSamIndex)\n\n    label = sampleToCompare['Vote']  # gets sam's label\n    # print(label)\n    label = label.get_values()[0]\n    # print('The label is:',label)\n    if hitMiss == 'h':\n        mask = df.Vote == label\n    else:\n        mask = df.Vote != label\n    rowsByLabel = df[mask]\n    minIndex = -1\n    minScore = np.inf\n    # print('shape of label=',rowsByLabel.shape[0])\n    randArray = np.random.randint(rowsByLabel.shape[0],size=100)\n    for i in randArray: # Sample 100 indices\n        # print('index is:',heuristicIndex)\n        currIndex = rowsByLabel.iloc[[i]].index[0] # gets the index of the row in the original df\n        # print(currIndex)\n        if realSamIndex == currIndex:\n            continue\n        curr = distanceBetween2Samples(sampleToCompare, rowsByLabel.iloc[[i]])\n        # print(curr)\n        if curr < minScore:\n            minScore = curr\n            minIndex = currIndex\n    return minIndex\n\n    # return np.min(np.vectorize(\\\n    #     lambda row:distanceBetween2Samples(df.iloc[[samIndex]],row)(rowsByLabel)))\n    # #     if row.index != samIndex else np.inf)(rowsByLabel)))\n\n\ndef fillNanWithOtherColumns(X:pd.DataFrame,Y:pd.DataFrame,listOfColsWithConnection):\n    col2edit = X[listOfColsWithConnection]\n    # for col in listOfColsWithConnection:\n    for i in np.arange(col2edit.shape[0]):\n        if i % 100 == 0:\n            print(i) # TODO remove\n        
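# try up to 3 nearest same-label neighbours as donors for this row's missing values\n        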
counter = 0\n        while col2edit.iloc[i].hasnans and counter < 3:\n            # nearestHit = findNearestHitMiss(col2edit,Y,i,'h') # slower: iterates over all the data\n            # print(col2edit.shape[0]) # TODO remove\n            nearestHit = heuristicFindNearestHitMiss(col2edit, Y, i, 'h') # faster\n            if nearestHit != -1:\n                goodSample = col2edit.loc[nearestHit]\n                col2edit.iloc[i] = col2edit.iloc[i].fillna(goodSample)\n                goodSample = goodSample.fillna(col2edit.iloc[i])\n            counter += 1\n    return col2edit\n\n\n\ndef changeOutlierToMean(X:pd.DataFrame,Y:pd.DataFrame,index,label,lowerBound,upperBound):\n    # merge X+Y\n    df = X\n    df['Vote'] = Y.values\n    mask = df.Vote == label\n    rowsByLabel = df[mask]\n    meanValue = np.nanmean(rowsByLabel[index])\n    # print('mean of',index,'is:',meanValue)\n    if lowerBound is not None:\n        df.loc[(mask) & (df[index] < lowerBound),index] = meanValue\n    if upperBound is not None:\n        df.loc[(mask) & (df[index] > upperBound),index] = meanValue\n    return df.drop('Vote', axis=1)","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":12242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"210104040","text":"#! /usr/bin/env python\r\n\r\n\"\"\"\r\nThis script checks HighGUI's cvGetCaptureProperty functionality for correct return\r\nof the frame width and height of an .avi file containing uncompressed 24bit Bitmap frames.\r\n\"\"\"\r\n\r\n# name if this test and it's requirements\r\nTESTNAME = \"size_bmp24\"\r\nREQUIRED = []\r\n\r\n# needed for sys.exit(int), .works file handling and check routine\r\nimport sys\r\nimport works\r\nimport size_test\r\n\r\n# check requirements and delete old flag file, if it exists\r\nif not works.check_files(REQUIRED,TESTNAME):\r\n\tsys.exit(77)\r\n\r\n# name of file we check here\r\nFILENAME='bmp24.avi'\r\n\r\n# run check routine\r\nresult=size_test.size_ok(FILENAME)\r\n\r\n# create flag file for following tests\r\nworks.set_file(TESTNAME)\r\n\r\n # return result of test routine\r\nsys.exit(result)\r\n","sub_path":"opencv/win/tests/python/highgui/size_bmp24.py","file_name":"size_bmp24.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"79061594","text":"import pytest\n\nfrom . import Pull\n\n\ndef test_missing_type():\n pull = Pull()\n with pytest.raises(AssertionError) as excinfo:\n pull.do({}, \"path\")\n assert \"source must have a type\" in str(excinfo.value)\n\n\ndef test_unknown_type():\n pull = Pull()\n with pytest.raises(ValueError) as excinfo:\n pull.do({\"type\": \"foo\"}, \"path\")\n assert \"Unknown source type: foo\" in str(excinfo.value)\n","sub_path":"worker/jobs/pull/__init__test.py","file_name":"__init__test.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"222639868","text":"\"\"\"Support for Climate devices of (EMEA/EU-based) Honeywell TCC systems.\"\"\"\nfrom datetime import datetime\nimport logging\nfrom typing import Optional, List\n\nimport requests.exceptions\nimport evohomeclient2\n\nfrom homeassistant.components.climate import ClimateDevice\nfrom homeassistant.components.climate.const import (\n HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF,\n PRESET_AWAY, PRESET_ECO, PRESET_HOME,\n SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE)\n\nfrom . import CONF_LOCATION_IDX, _handle_exception, EvoDevice\nfrom .const import (\n DOMAIN, EVO_STRFTIME,\n EVO_RESET, EVO_AUTO, EVO_AUTOECO, EVO_AWAY, EVO_DAYOFF, EVO_CUSTOM,\n EVO_HEATOFF, EVO_FOLLOW, EVO_TEMPOVER, EVO_PERMOVER)\n\n_LOGGER = logging.getLogger(__name__)\n\nPRESET_RESET = 'Reset' # reset all child zones to EVO_FOLLOW\nPRESET_CUSTOM = 'Custom'\n\nHA_HVAC_TO_TCS = {\n HVAC_MODE_OFF: EVO_HEATOFF,\n HVAC_MODE_HEAT: EVO_AUTO,\n}\nHA_PRESET_TO_TCS = {\n PRESET_AWAY: EVO_AWAY,\n PRESET_CUSTOM: EVO_CUSTOM,\n PRESET_ECO: EVO_AUTOECO,\n PRESET_HOME: EVO_DAYOFF,\n PRESET_RESET: EVO_RESET,\n}\nTCS_PRESET_TO_HA = {v: k for k, v in HA_PRESET_TO_TCS.items()}\n\nHA_PRESET_TO_EVO = {\n 'temporary': EVO_TEMPOVER,\n 'permanent': EVO_PERMOVER,\n}\nEVO_PRESET_TO_HA = {v: k for k, v in HA_PRESET_TO_EVO.items()}\n\n\nasync def async_setup_platform(hass, hass_config, async_add_entities,\n discovery_info=None) -> None:\n \"\"\"Create the evohome Controller, and its Zones, if any.\"\"\"\n broker = hass.data[DOMAIN]['broker']\n loc_idx = broker.params[CONF_LOCATION_IDX]\n\n _LOGGER.debug(\n \"Found Controller, id=%s [%s], name=%s (location_idx=%s)\",\n broker.tcs.systemId, broker.tcs.modelType, broker.tcs.location.name,\n loc_idx)\n\n controller = EvoController(broker, broker.tcs)\n\n zones = []\n for zone_idx in broker.tcs.zones:\n evo_zone = broker.tcs.zones[zone_idx]\n _LOGGER.debug(\n \"Found Zone, id=%s [%s], name=%s\",\n evo_zone.zoneId, evo_zone.zone_type, evo_zone.name)\n zones.append(EvoZone(broker, evo_zone))\n\n entities = [controller] + zones\n\n async_add_entities(entities, update_before_add=True)\n\n\nclass EvoClimateDevice(EvoDevice, ClimateDevice):\n \"\"\"Base for a Honeywell evohome Climate device.\"\"\"\n\n def __init__(self, evo_broker, evo_device) -> None:\n \"\"\"Initialize the evohome Climate device.\"\"\"\n super().__init__(evo_broker, evo_device)\n\n self._hvac_modes = self._preset_modes = None\n\n @property\n def hvac_modes(self) -> List[str]:\n \"\"\"Return the list of available hvac operation modes.\"\"\"\n return self._hvac_modes\n\n @property\n def preset_modes(self) -> Optional[List[str]]:\n \"\"\"Return a list of available preset modes.\"\"\"\n return self._preset_modes\n\n\nclass EvoZone(EvoClimateDevice):\n \"\"\"Base for a Honeywell evohome Zone.\"\"\"\n\n def __init__(self, evo_broker, evo_device) -> None:\n \"\"\"Initialize the evohome Zone.\"\"\"\n super().__init__(evo_broker, evo_device)\n\n self._id = evo_device.zoneId\n self._name = evo_device.name\n self._icon = 'mdi:radiator'\n\n self._precision = \\\n self._evo_device.setpointCapabilities['valueResolution']\n self._state_attributes = [\n 'activeFaults', 'setpointStatus', 'temperatureStatus', 'setpoints']\n\n self._supported_features = SUPPORT_PRESET_MODE | \\\n SUPPORT_TARGET_TEMPERATURE\n self._hvac_modes = [HVAC_MODE_OFF, HVAC_MODE_HEAT]\n self._preset_modes = list(HA_PRESET_TO_EVO)\n\n for _zone in evo_broker.config['zones']:\n if _zone['zoneId'] == self._id:\n self._config = _zone\n break\n\n 
@property\n def hvac_mode(self) -> str:\n \"\"\"Return the current operating mode of the evohome Zone.\n\n NB: evohome Zones 'inherit' their operating mode from the controller.\n\n Usually, Zones are in 'FollowSchedule' mode, where their setpoints are\n a function of their schedule, and the Controller's operating_mode, e.g.\n Economy mode is their scheduled setpoint less (usually) 3C.\n\n However, Zones can override these setpoints, either for a specified\n period of time, 'TemporaryOverride', after which they will revert back\n to 'FollowSchedule' mode, or indefinitely, 'PermanentOverride'.\n \"\"\"\n if self._evo_tcs.systemModeStatus['mode'] in [EVO_AWAY, EVO_HEATOFF]:\n return HVAC_MODE_AUTO\n is_off = self.target_temperature <= self.min_temp\n return HVAC_MODE_OFF if is_off else HVAC_MODE_HEAT\n\n @property\n def current_temperature(self) -> Optional[float]:\n \"\"\"Return the current temperature of the evohome Zone.\"\"\"\n return (self._evo_device.temperatureStatus['temperature']\n if self._evo_device.temperatureStatus['isAvailable'] else None)\n\n @property\n def target_temperature(self) -> Optional[float]:\n \"\"\"Return the target temperature of the evohome Zone.\"\"\"\n if self._evo_tcs.systemModeStatus['mode'] == EVO_HEATOFF:\n return self._evo_device.setpointCapabilities['minHeatSetpoint']\n return self._evo_device.setpointStatus['targetHeatTemperature']\n\n @property\n def preset_mode(self) -> Optional[str]:\n \"\"\"Return the current preset mode, e.g., home, away, temp.\"\"\"\n if self._evo_tcs.systemModeStatus['mode'] in [EVO_AWAY, EVO_HEATOFF]:\n return None\n return EVO_PRESET_TO_HA.get(\n self._evo_device.setpointStatus['setpointMode'], 'follow')\n\n @property\n def min_temp(self) -> float:\n \"\"\"Return the minimum target temperature of a evohome Zone.\n\n The default is 5, but is user-configurable within 5-35 (in Celsius).\n \"\"\"\n return self._evo_device.setpointCapabilities['minHeatSetpoint']\n\n @property\n def max_temp(self) -> float:\n \"\"\"Return the maximum target temperature of a evohome Zone.\n\n The default is 35, but is user-configurable within 5-35 (in Celsius).\n \"\"\"\n return self._evo_device.setpointCapabilities['maxHeatSetpoint']\n\n def _set_temperature(self, temperature: float,\n until: Optional[datetime] = None):\n \"\"\"Set a new target temperature for the Zone.\n\n until == None means indefinitely (i.e. 
PermanentOverride)\n \"\"\"\n try:\n self._evo_device.set_temperature(temperature, until)\n except (requests.exceptions.RequestException,\n evohomeclient2.AuthenticationError) as err:\n _handle_exception(err)\n\n def set_temperature(self, **kwargs) -> None:\n \"\"\"Set a new target temperature for an hour.\"\"\"\n until = kwargs.get('until')\n if until:\n until = datetime.strptime(until, EVO_STRFTIME)\n\n self._set_temperature(kwargs['temperature'], until)\n\n def _set_operation_mode(self, op_mode) -> None:\n \"\"\"Set the Zone to one of its native EVO_* operating modes.\"\"\"\n if op_mode == EVO_FOLLOW:\n try:\n self._evo_device.cancel_temp_override()\n except (requests.exceptions.RequestException,\n evohomeclient2.AuthenticationError) as err:\n _handle_exception(err)\n return\n\n self._setpoints = self.get_setpoints()\n temperature = self._evo_device.setpointStatus['targetHeatTemperature']\n\n if op_mode == EVO_TEMPOVER:\n until = self._setpoints['next']['from_datetime']\n until = datetime.strptime(until, EVO_STRFTIME)\n else: # EVO_PERMOVER:\n until = None\n\n self._set_temperature(temperature, until=until)\n\n def set_hvac_mode(self, hvac_mode: str) -> None:\n \"\"\"Set an operating mode for the Zone.\"\"\"\n if hvac_mode == HVAC_MODE_OFF:\n self._set_temperature(self.min_temp, until=None)\n\n else: # HVAC_MODE_HEAT\n self._set_operation_mode(EVO_FOLLOW)\n\n def set_preset_mode(self, preset_mode: str) -> None:\n \"\"\"Set a new preset mode.\n\n If preset_mode is None, then revert to following the schedule.\n \"\"\"\n self._set_operation_mode(HA_PRESET_TO_EVO.get(preset_mode, EVO_FOLLOW))\n\n\nclass EvoController(EvoClimateDevice):\n \"\"\"Base for a Honeywell evohome Controller (hub).\n\n The Controller (aka TCS, temperature control system) is the parent of all\n the child (CH/DHW) devices. 
It is also a Climate device.\n \"\"\"\n\n def __init__(self, evo_broker, evo_device) -> None:\n \"\"\"Initialize the evohome Controller (hub).\"\"\"\n super().__init__(evo_broker, evo_device)\n\n self._id = evo_device.systemId\n self._name = evo_device.location.name\n self._icon = 'mdi:thermostat'\n\n self._precision = None\n self._state_attributes = [\n 'activeFaults', 'systemModeStatus']\n\n self._supported_features = SUPPORT_PRESET_MODE\n self._hvac_modes = list(HA_HVAC_TO_TCS)\n self._preset_modes = list(HA_PRESET_TO_TCS)\n\n self._config = dict(evo_broker.config)\n self._config['zones'] = '...'\n if 'dhw' in self._config:\n self._config['dhw'] = '...'\n\n @property\n def hvac_mode(self) -> str:\n \"\"\"Return the current operating mode of the evohome Controller.\"\"\"\n tcs_mode = self._evo_device.systemModeStatus['mode']\n return HVAC_MODE_OFF if tcs_mode == EVO_HEATOFF else HVAC_MODE_HEAT\n\n @property\n def current_temperature(self) -> Optional[float]:\n \"\"\"Return the average current temperature of the heating Zones.\n\n Controllers do not have a current temp, but one is expected by HA.\n \"\"\"\n temps = [z.temperatureStatus['temperature'] for z in\n self._evo_device._zones if z.temperatureStatus['isAvailable']] # noqa: E501; pylint: disable=protected-access\n return round(sum(temps) / len(temps), 1) if temps else None\n\n @property\n def target_temperature(self) -> Optional[float]:\n \"\"\"Return the average target temperature of the heating Zones.\n\n Controllers do not have a target temp, but one is expected by HA.\n \"\"\"\n temps = [z.setpointStatus['targetHeatTemperature']\n for z in self._evo_device._zones] # noqa: E501; pylint: disable=protected-access\n return round(sum(temps) / len(temps), 1) if temps else None\n\n @property\n def preset_mode(self) -> Optional[str]:\n \"\"\"Return the current preset mode, e.g., home, away, temp.\"\"\"\n return TCS_PRESET_TO_HA.get(self._evo_device.systemModeStatus['mode'])\n\n @property\n def min_temp(self) -> float:\n \"\"\"Return the minimum target temperature of the heating Zones.\n\n Controllers do not have a min target temp, but one is required by HA.\n \"\"\"\n temps = [z.setpointCapabilities['minHeatSetpoint']\n for z in self._evo_device._zones] # noqa: E501; pylint: disable=protected-access\n return min(temps) if temps else 5\n\n @property\n def max_temp(self) -> float:\n \"\"\"Return the maximum target temperature of the heating Zones.\n\n Controllers do not have a max target temp, but one is required by HA.\n \"\"\"\n temps = [z.setpointCapabilities['maxHeatSetpoint']\n for z in self._evo_device._zones] # noqa: E501; pylint: disable=protected-access\n return max(temps) if temps else 35\n\n def _set_operation_mode(self, op_mode) -> None:\n \"\"\"Set the Controller to any of its native EVO_* operating modes.\"\"\"\n try:\n self._evo_device._set_status(op_mode) # noqa: E501; pylint: disable=protected-access\n except (requests.exceptions.RequestException,\n evohomeclient2.AuthenticationError) as err:\n _handle_exception(err)\n\n def set_hvac_mode(self, hvac_mode: str) -> None:\n \"\"\"Set an operating mode for the Controller.\"\"\"\n self._set_operation_mode(HA_HVAC_TO_TCS.get(hvac_mode))\n\n def set_preset_mode(self, preset_mode: str) -> None:\n \"\"\"Set a new preset mode.\n\n If preset_mode is None, then revert to 'Auto' mode.\n \"\"\"\n self._set_operation_mode(HA_PRESET_TO_TCS.get(preset_mode, EVO_AUTO))\n\n def update(self) -> None:\n \"\"\"Get the latest state data.\"\"\"\n 
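# intentionally a no-op: per-entity polling is not needed here, state refreshes are expected to arrive via the shared evohome broker\n        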
pass\n","sub_path":"homeassistant/components/evohome/climate.py","file_name":"climate.py","file_ext":"py","file_size_in_byte":12355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"402061719","text":"import gym\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport os\n\nfrom . import atari_wrappers\nfrom ..bench import Monitor\nfrom ..parallel import SubprocVecEnv\nfrom ..parallel import VecPyTorch\nfrom ..parallel import VecPyTorchFrameStack, TransposeImage\n\nimport cv2\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env):\n \"\"\"\n Warp frames to 84x84 as done in the Nature paper and later work.\n\n :param env: (Gym Environment) the environment\n \"\"\"\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.height, self.width, 3),\n dtype=env.observation_space.dtype)\n\n def observation(self, frame):\n \"\"\"\n returns the current observation from a frame\n\n :param frame: ([int] or [float]) environment frame\n :return: ([int] or [float]) the observation\n \"\"\"\n #frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n return frame\n\n\n\"\"\"\nThese functions need to be adjusted according to the settings\n\"\"\"\n\n\ndef make_atari_env(env_id, seed, rank, log_dir=None):\n # define a temp function call\n def _env_func():\n env = atari_wrappers.make_atari(env_id)\n env.seed(seed + rank)\n\n if log_dir is not None:\n env = Monitor(env, os.path.join(log_dir, str(rank)))\n\n #env = atari_wrappers.wrap_deepmind(env)\n env = WarpFrame(env)\n env = TransposeImage(env)\n return env\n return _env_func\n\n\ndef make_parallel_env(env_name, seed, num_workers, num_frame_stack, device, log_dir=None):\n env = [make_atari_env(env_name, seed, i, log_dir)\n for i in range(num_workers)]\n env = SubprocVecEnv(env)\n env = VecPyTorch(env, device)\n #env = VecPyTorchFrameStack(env, num_frame_stack, device)\n\n return env\n","sub_path":"torchrl/utils/envs.py","file_name":"envs.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"202401215","text":"import cv2 \nimport numpy as np \nimport progressbar\nimport random\n\nclass reader():\n\n\tdef __init__(self,height=720,width=1280,scale_range=[0.8,1.1]):\n\t\t# set class params\n\t\tself.height = height\n\t\tself.width = width\n\t\tself.scale_range = scale_range\n\t\tprint('Loading images...')\n\t\tself.data = []\n\t\t# add a progressbar to make it better look\n\t\tbar = progressbar.ProgressBar(max_value=1000)\n\t\tf = open('annotation.txt')\n\t\tcounter = 0\n\t\tfor i in f:\n\t\t\ti = i.strip().split('\\t')\n\t\t\t# split the line, get the filename and coordinates \n\t\t\tfname = i[0]\n\t\t\tcoord = i[1:]\n\t\t\tcoord = [float(x) for x in coord]\n\t\t\t# split the coordinates \n\t\t\tx = coord[0::5]\n\t\t\ty = coord[1::5]\n\t\t\tw = coord[2::5]\n\t\t\th = coord[3::5]\n\t\t\tcategory = coord[4::5]\n\t\t\t# combine the coordinates \n\t\t\tcoord = list(zip(x,y,w,h,category))\n\t\t\tif len(coord)!=0:\n\t\t\t\t# write into data list\n\t\t\t\t# print(fname)\n\t\t\t\timg = cv2.imread(fname)\n\t\t\t\t#equ= cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n\t\t\t\t#equ[:,:,1] = cv2.equalizeHist(equ[:,:,1])\n\t\t\t\t#img = cv2.cvtColor(equ, cv2.COLOR_HLS2BGR)\n\t\t\t\tif not img is None:\n\t\t\t\t\tself.data.append([img,coord])\n\t\t\t\telse:\n\t\t\t\t\tprint(fname)\n\t\t\t# update the progressbar\n\t\t\tcounter+=1\n\t\t\tbar.update(counter)\n\t\tprint('Finish reading. Total valid data:',len(self.data))\n\n\tdef random_crop(self,img,annot):\n\t\t# right btm corner\n\t\tx2s = [i[0] for i in annot]\n\t\ty2s = [i[1] for i in annot]\n\t\t# left top corner\n\t\tx1s = [i[0]-i[2] for i in annot]\n\t\ty1s = [i[1]-i[3] for i in annot]\n\t\t# get the shift range\n\t\txmin = np.max(np.array(x2s)) - self.width\n\t\txmax = np.min(np.array(x1s))\n\t\tymin = np.max(np.array(y2s)) - self.height\n\t\tymax = np.min(np.array(y1s))\n\t\t# get transform value\n\t\tx_trans = random.random()*(xmax-xmin) + xmin\n\t\ty_trans = random.random()*(ymax-ymin) + ymin\n\t\t# get transformation matrix and do transform\n\t\t# print(xmin,xmax)\n\t\tM = np.float32([[1,0,-x_trans],[0,1,-y_trans]])\n\t\timg_result = img.copy()\n\t\timg_result = cv2.warpAffine(img_result,M,(self.width,self.height))\n\t\t# substract the transformed pixels\n\t\tannot = np.float32(annot) - np.float32([[x_trans,y_trans,0,0,0]])\n\t\t# print(annot)\n\t\treturn img_result,annot\n\n\tdef random_scale(self,img,annot):\n\t\t# set scale range\n\t\tscale_range = self.scale_range\n\t\tannot = np.float32(annot)\n\t\tscale = random.random()*(scale_range[1]-scale_range[0])+scale_range[0]\n\t\t# scaling the annotation and image\n\t\tannot = annot * scale\n\t\tannot[0][4] = annot[0][4]/scale\n\t\timg_result = cv2.resize(img,None,fx=scale,fy=scale)\n\t\treturn img_result,annot\n\n\tdef show_img(self,img,coord):\n\t\timgbuff = img.copy()\n\t\tfor x,y,w,h,category in coord:\n\t\t\tx = int(x)\n\t\t\ty = int(y)\n\t\t\tw = int(w)\n\t\t\th = int(h)\n\t\t\tcv2.rectangle(imgbuff,(x,y),(x-w,y-h),(0,0,255),5)\n\t\tfor i in range(1000):\n#\t\t\tcv2.line(imgbuff, (i*64, 0), (i*64, 768), (255, 0, 0), 1)\n#\t\t\tcv2.line(imgbuff, (0, i*64), (1024, i*64), (255, 0, 0), 1)\n\t\t\tcv2.line(imgbuff, (i*32, 0), (i*32, 768), (255, 0, 0), 1)\n\t\t\tcv2.line(imgbuff, (0, i*32), (1024, i*32), (255, 0, 0), 1)\n\t\tcv2.imshow('img',imgbuff)\n\t\tcv2.waitKey(0)\n\t\tcv2.destroyAllWindows()\n\n\tdef get_mtx(self,imgsize,coord):\n\t\t# lower_bound indicates the log2 of minimum grid size\n\t\t# choose the size of each grid\n\t\tindices = []\n\t\tgrid_sizes = 32\n\t\tcoords = 
[]\n\n\t\t# create dictionary for conf and bias\n\t\t# key: indices, value: [conf,bias]\n\t\tresult_dict = []\n\n\t\theight = int(np.ceil(float(imgsize[0])/grid_sizes))\n\t\twidth = int(np.ceil(float(imgsize[1])/grid_sizes))\n\t\t# if no key in dictionary, create empty conf and bias array\n\n\t\tbias_empty = np.zeros([height,width,4],np.float32)\n\t\tconf_empty = np.zeros([height,width,1],np.float32)\n\t\tcat_empty = np.zeros([height,width,1],np.float32)\n\t\tpixelconf_empty = np.zeros([height,width,1],np.float32)\n\t\tcenterbias_empty = np.zeros([height,width,1],np.float32)\n\t\t# print(imgsize,grid_sizes[i])\n\t\tresult_dict=[conf_empty,bias_empty,cat_empty,pixelconf_empty,centerbias_empty]\n\t\t# get the column number and row number \n\t\tfor x,y,w,h,category in coord:\n\n\n\t\t\txc = x-float(w)//2\n\t\t\tyc = y-float(h)//2\n\t\t\tcol_num = int(xc//float(grid_sizes))\n\t\t\trow_num = int(yc//float(grid_sizes))\n\n\t\t\tif row_num>(imgsize[0]//grid_sizes-1):\n\t\t\t\trow_num=imgsize[0]//grid_sizes-1\n\t\t\tif col_num>(imgsize[1]//grid_sizes-1):\n\t\t\t\tcol_num=imgsize[1]//grid_sizes-1\n\n\t\t\tbr_x = int(x)\n\t\t\tbr_y = int(y)\n\t\t\ttl_x = br_x - w\n\t\t\ttl_y = br_y - h\n\t\t\ttl_col_num = int(tl_x//float(grid_sizes))\n\t\t\ttl_row_num = int(tl_y//float(grid_sizes))\n\t\t\tbr_col_num = int(br_x//float(grid_sizes))\n\t\t\tbr_row_num = int(br_y//float(grid_sizes))\n\n\t\t\t# print(height,width,row_num,col_num)\n\t\t\t# comute the bias_x and bias_y\n\t\t\tgrid_center_x = col_num*grid_sizes+grid_sizes//2\n\t\t\tgrid_center_y = row_num*grid_sizes+grid_sizes//2\n\t\t\tbias_x = (xc - grid_center_x)/grid_sizes\n\t\t\tbias_y = (yc - grid_center_y)/grid_sizes\n\t\t\t# update the bias matrix and conf matrix\n\t\t\tconf_mtx = result_dict[0]\n\t\t\tbias_mtx = result_dict[1]\n\t\t\tcat_mtx = result_dict[2]\n\t\t\tpixelconf_mtx = result_dict[3]\n\t\t\tcenterbias_mtx = result_dict[4]\n\n\t\t\tconf_mtx[row_num][col_num][0] = 1.\n\t\t\tcat_mtx[row_num][col_num][int(category)] = 1.\n\t\t\tbias_mtx[row_num][col_num][0] = bias_x\n\t\t\tbias_mtx[row_num][col_num][1] = bias_y\n\t\t\tbias_mtx[row_num][col_num][2] = w/self.width\n\t\t\tbias_mtx[row_num][col_num][3] = h/self.height\n\n\t\t\tfor i in range(br_row_num-tl_row_num-1):\n\t\t\t\tfor j in range(br_col_num-tl_col_num-1):\n\t\t\t\t\ty_pos=tl_row_num+i\n\t\t\t\t\tx_pos=tl_col_num+j\n\t\t\t\t\tif y_pos>(imgsize[0]//grid_sizes-1) or x_pos>(imgsize[1]//grid_sizes-1) :\n\t\t\t\t\t\tbreak\n\t\t\t\t\teuclidean_distance=((x_pos-col_num)**2+(y_pos-row_num)**2)**0.2\n\t\t\t\t\t#print (1-(euclidean_distance/5))\n\t\t\t\t\tif (euclidean_distance ==0):\n\t\t\t\t\t\teuclidean_distance=0.8\n\t\t\t\t\tcenterbias_mtx[y_pos][x_pos] = 1/(euclidean_distance)\n\t\t\t\t\tpixelconf_mtx[y_pos][x_pos] = 1.\n#\t\tconf_img=cv2.resize(centerbias_mtx,(640,480))\n#\t\tconf_img = np.asarray(conf_img,np.uint8)\n#\t\tconf_img=conf_img*255\n#\t\tcv2.imshow('ad',conf_img)\n#\t\tcv2.waitKey(0)\n#\t\tprint (row_num,col_num,bias_x,bias_y,category,bias_mtx[row_num][col_num][2],bias_mtx[row_num][col_num][3])\n\t\treturn result_dict\n\n\tdef get_img(self):\n\t\t# return one single image\n\t\timg,coord = random.sample(self.data,1)[0]\n\t\timg,coord = self.random_scale(img,coord)\n\t\timg,coord = self.random_crop(img,coord)\n\t\tresult_dict = self.get_mtx(img.shape,coord)\n\t\tself.show_img(img,coord)\n\t\treturn img,result_dict\n\n#while True:\n#\ta = reader()\n#\ta.get_img()\n","sub_path":"computer 
vision/train/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
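The get_mtx routine in the data_reader.py record above assigns each annotated box to a 32-pixel grid cell and stores a confidence flag plus a normalized offset of the box center from the cell center, in the style of YOLO-like detectors. A minimal sketch of just that cell-assignment step, using made-up box values rather than the reader's annotation file:

GRID = 32  # cell size in pixels, matching grid_sizes in get_mtx above

def assign_to_grid(xc, yc, img_h, img_w):
    # Map a box center (xc, yc) to a grid cell and a normalized offset.
    row = min(int(yc // GRID), img_h // GRID - 1)  # clamp to the last row
    col = min(int(xc // GRID), img_w // GRID - 1)  # clamp to the last column
    cell_cx = col * GRID + GRID // 2  # center of the chosen cell
    cell_cy = row * GRID + GRID // 2
    bias_x = (xc - cell_cx) / GRID  # offset, normalized by the grid size
    bias_y = (yc - cell_cy) / GRID
    return row, col, bias_x, bias_y

print(assign_to_grid(100.0, 50.0, 720, 1280))  # -> (1, 3, -0.375, 0.0625)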
+{"seq_id":"394638655","text":"import pandas as pd \n\n# volatility breakout \ndef cal_target(exchange, symbol):\n btc = exchange.fetch_ohlcv(\n symbol=symbol,\n timeframe='4h', \n since=None, \n limit=10\n )\n\n df = pd.DataFrame(data=btc, columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])\n df['datetime'] = pd.to_datetime(df['datetime'], unit='ms')\n df.set_index('datetime', inplace=True)\n\n yesterday = df.iloc[-2]\n today = df.iloc[-1]\n long_target = today['open'] * 1.007\n short_target = today['open'] * 0.993\n return long_target, short_target\n","sub_path":"larry.py","file_name":"larry.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"126618953","text":"#IMPORTANT: This will not work in a Windows OS environment\n\n#This example shows one way you can interwine a timer and socket reads that functions as an echo client (works with EchoServer.py) \n#The socket reads are non-blocking while the timer uses the time.time() function which gets the # of seconds that have elapsed\n#since epoch.\n\n#non-blocking echo client that sends hello1, hello2, hello3,....\n#with a timer that goes off every timeout=3 seconds\n\n#To make socket non-block you need: fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)\n#When reading, you need to catch exceptions (see below)\n\nimport sys\nimport socket\nimport fcntl, os\nimport errno\nimport signal\nimport time\n\n\n\n\t\n#this function is the timeout handler\ndef dealWithTimeout(oldtime,newtime):\n\tprint('timer expired')\n\tprint('timeout amount:', int(newtime-oldtime))\t\n\tprint(time.ctime())\n\t\n\t\n\t\n#main\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect(('130.179.28.127',50000))\t\t\t#change this to match server's port, if necessary\n\nfcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)\t#make socket non-blocking\n\n\n\ntimeout = 3\t\t\t#set timeout interval \ni = 1\t\t\t\t#used to change msg sent to server\n\nmyMsg = b'hello'+str(i).encode()\t#build and send message to server\ns.send(myMsg)\n\noldtime = time.time()\t\t\t\t#get current time\n\nwhile True:\n\t\n\ttry:\n\t\tnewtime= time.time()\n\t\tif newtime - oldtime >= timeout:\t\t#look at the difference betwen the current time and when the timer was started\n\t\t\tdealWithTimeout(oldtime,newtime)\n\t\t\toldtime = time.time()\t\t\t\t\t#reset timer\n\t\t\t\t\n\t\tmsg = s.recv(2048)\t\t\t\t\t\t#non-blocking read\t\n\texcept socket.error as e:\n\t\terr = e.args[0]\n\t\t\n\t\tif err == errno.EAGAIN or err == errno.EWOULDBOCK:\n\t\t\ttime.sleep(1)\n\t\t\tprint ('recv unblocked, no data available')\n\t\t\tcontinue\n\t\telse:\n\t\t\t#some other error occured\n\t\t\tprint (e)\n\t\t\tsys.exit(1)\n\telse: #no exceptions occurred.\n\t\tif len(msg) == 0:\t#server closes connection\n\t\t\tprint ('orderly shutdown on server end')\n\t\t\tsys.exit(0)\n\t\t\t\n # got a message, print it\t\t\n\t\tprint (msg)\n\t\t\n\t\t#send next message to server to be echoed back\n\t\ti = i + 1\n\t\tmyMsg = b'hello'+str(i).encode()\t\t\n\t\ts.send(myMsg)\t\t\n\n\t\t\n\t\t\n","sub_path":"A/a4/non-blocking-echo-client.py","file_name":"non-blocking-echo-client.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"376333541","text":"import random\nimport os\nfrom src.output import output\nfrom matplotlib import pyplot\nfrom matplotlib import pylab\n\nx = [2*random.random() + 1 for i in range(100)]\ny = [3*random.random() + 1 for i in range(100)]\n\nx2 = [random.random() for i in range(100)]\ny2 = [random.random() for i in range(100)]\n\nfigure = pyplot.plot(x,y,'bo',x2,y2,'rx')\n\nclass config():\n def __init__(self):\n self.canvasDirectory = os.getcwd()\n def giveClass(self):\n self.canvasDirectory = os.getcwd()\n return self\n\nimport pdb ; pdb.set_trace()\nconfig_f = config()\ncongfig = config_f.giveClass()\noutput_Func = output(congfig)\noutput_Func.signalNoisePlot('cubaan' , [x,y] , [x2,y2] )\n","sub_path":"toyfactory/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"389835895","text":"import asyncio\nimport discord\nfrom discord.ext import commands\n\ndescription = 'a basic say sound bot'\ntoken = ('Mzk5Mjg4NzQ3MDA0MjY0NDYx.DTK6YA.Z6-pNZTTqTDil3PyzTI9hILGLv4')\nbot = commands.Bot(command_prefix=commands.when_mentioned_or('!'), description=description)\nsoundlist = ['그만', '병신', '앙대', '개소리', '지랄', '거짓말', '좋아', '세계']\nsoundfilename = ['stop', 'idiot', 'no', 'dog', 'retard', 'lie', 'good', 'world']\npath = '\\\\sounds\\\\'\nplaying = False\ndebug = False\n\nif not discord.opus.is_loaded():\n\t# the 'opus' library here is opus.dll on windows\n\t# or libopus.so on linux in the current directory\n\t# you should replace this with the location the\n\t# opus library is located in and with the proper filename.\n\t# note that on windows this DLL is automatically provided for you\n\tdiscord.opus.load_opus('opus')\n\n@bot.event\nasync def on_ready():\n\tprint('Logged in as:\\n{0} (ID: {0.id})'.format(bot.user))\n\tchannel = bot.get_channel('398896214629941252')\n\tglobal voice\n\tvoice = await bot.join_voice_channel(channel)\n\tawait bot.change_presence(game=discord.Game(name='ran🅱om dank 🅱eme soun🅱s'))\n\n@bot.event\nasync def on_message(message):\n\tglobal voice\n\tglobal playing\n\tglobal soundlist\n\tglobal soundfilename\n\tglobal debug\n\tif not message.author == bot.user:\n\t\tif message.content.startswith('!debug'):\n\t\t\tif debug:\n\t\t\t\tdebug = False\n\t\t\telse:\n\t\t\t\tdebug = True\n\t\t\tawait bot.send_message(message.channel, 'debug mode toggled')\n\t\telif message.content.startswith('!list'):\n\t\t\tawait bot.send_message(message.channel, 'all sound list')\n\t\t\tlist_out = ''\n\t\t\ttemp2 = 0\n\t\t\tfor temp in soundlist:\n\t\t\t\tlist_out = list_out + ', ' + temp\n\t\t\t\ttemp2 = temp2 + 1\n\t\t\t\tif temp2 >=5:\n\t\t\t\t\ttemp2 = 0\n\t\t\t\t\tlist_out = list_out + '\\n'\n\t\t\tawait bot.send_message(message.channel, \"```{0}```\".format(list_out))\n\t\t\tawait asyncio.sleep(10)\n\t\t\tbot.delete_message(message)\n\t\telse:\n\t\t\tsearchcnt = 0\n\t\t\tfor temp3 in soundlist:\n\t\t\t\tif message.content.startswith(temp3):\n\t\t\t\t\tif debug : await bot.send_message(message.channel, 'current temp3 var:' + temp3)\n\t\t\t\t\tif not soundfilename[searchcnt] == 'null': temp3 = soundfilename[searchcnt]\n\t\t\t\t\tif debug : await bot.send_message(message.channel, 'current temp3 var:' + temp3)\n\t\t\t\t\tif not playing:\n\t\t\t\t\t\tif debug : await bot.send_message(message.channel, 'play sound attemp:' + path + temp3 + '.m4a')\n\t\t\t\t\t\tif not voice :\n\t\t\t\t\t\t\tvoice = await bot.join_voice_channel(channel)\n\t\t\t\t\t\t\tif debug : await bot.send_message(message.channel, 'wasnt connected to voice channel. reconnected.')\n\t\t\t\t\t\tplayer = voice.create_ffmpeg_player(path + temp3 + '.m4a')\n\t\t\t\t\t\tplayer.start()\n\t\t\t\t\t\tplaying = True\n\t\t\t\t\t\twhile player.is_playing():\n\t\t\t\t\t\t\tawait asyncio.sleep(1)\n\t\t\t\t\t\tplaying = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tawait bot.send_message(message.channel, 'sound blocked to prevent earrape. please wait current playing sound to stop.')\n\t\t\t\t\tawait asyncio.sleep(10)\n\t\t\t\t\tbot.delete_message(message)\n\t\t\t\tsearchcnt = searchcnt + 1\nbot.run(token)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"535172848","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 22 04:16:44 2017\n\n@author: Ayami\n\"\"\"\n\n#Assignment 8.3, 8.4: T-Shirt + Large Shirts\ndef make_shirt(shirt_size = 'L', shirt_message = 'i love python'):\n \"\"\"Summarises the T-Shirt Size and Message to be printed on it.\"\"\"\n print(\"The shirt size should be a \" + shirt_size.upper() + \" and should say ' \" + shirt_message.title() + \" '.\")\n\nmake_shirt()\nmake_shirt('s', 'jump on the bandwagon !')\nmake_shirt(shirt_size = 'xl', shirt_message = 'sodapop fanclub')","sub_path":"Chapter 8/t_shirt.py","file_name":"t_shirt.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"309086126","text":"import datetime\n\n\ndef get_dev_status(ingest):\n \"\"\"\n Get an ingest status that reflects whether we need to pay attention to it.\n For example:\n\n * If the ingest has failed, was it a user error (failed verification) or\n a storage service error (failed replication)?\n * If the ingest is processing or accepted, has it been updated recently,\n or is it stalled?\n\n \"\"\"\n # Success never needs our attention.\n if ingest[\"status\"] == \"succeeded\":\n return \"succeeded\"\n\n elif ingest[\"status\"] == \"failed\":\n # We sort failures into two groups:\n #\n # - a user error is one that means there was something wrong with the\n # bag, e.g. it couldn't be unpacked correctly, it failed verification\n # - an unknown error is one that we can't categorise, and might indicate\n # a storage service error, e.g. a replication failure\n #\n failure_reasons = [\n ev[\"description\"]\n for ev in ingest[\"events\"]\n if \"failed\" in ev[\"description\"]\n ]\n\n if failure_reasons and all(\n reason.startswith(\n (\n \"Verification (pre-replicating to archive storage) failed\",\n \"Detecting bag root failed\",\n # If we can't unpack a bag or assign a version for an\n # unknown reason, we should treat that as a storage service error.\n \"Unpacking failed -\",\n \"Assigning bag version failed -\",\n )\n )\n for reason in failure_reasons\n ):\n return \"failed (user error)\"\n else:\n return \"failed (unknown reason)\"\n\n elif ingest[\"status\"] == \"accepted\":\n # An ingest is in the 'accepted' state until it goes to the bag unpacker.\n # There may be a short delay while the bag unpacker starts up; a delay of\n # more than an hour suggests something is wrong.\n #\n # To allow for timezone slop, look for a delay of two hours.\n delay = datetime.datetime.now() - ingest[\"createdDate\"]\n\n if abs(delay.total_seconds()) > 60 * 60 * 2:\n return \"stalled\"\n else:\n return \"accepted\"\n\n elif ingest[\"status\"] == \"processing\":\n # Ingests should wait up to 5 hours before being retried due to SQS.\n # If an ingest hasn't been updated in more than 5 hours, something is\n # probably wrong.\n #\n # To allow for timezone slop, look for a delay of seven hours. It will\n # be flagged the following day if it's still stalled.\n delay = datetime.datetime.now() - ingest[\"createdDate\"]\n\n if abs(delay.total_seconds()) > 60 * 60 * 7:\n return \"stalled\"\n else:\n return \"processing\"\n","sub_path":"monitoring/daily_reporter/src/ingests.py","file_name":"ingests.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"554357459","text":"import pickle\nimport argparse\n\nimport yaml\nimport torch\nimport numpy as np\nfrom tqdm import tqdm, trange\n\nfrom utils.init_env import init_env\nfrom algorithms.nn.actor_critic import init_actor_critic\nfrom algorithms.agents.base_agent import AgentInference\n\n\ndef _to_infinity():\n i = 0\n while True:\n yield i\n i += 1\n\n\ndef play_episode(\n env, agent,\n deterministic, silent, pause\n):\n episode_reward, episode_len = 0.0, 0.\n observations, actions, rewards = [], [], []\n\n obs, done = env.reset(), False\n observations.append(obs)\n\n if not silent:\n env.render()\n if pause: # useful to start 'Kazam', select window and record video\n input(\"press 'enter' to continue...\")\n while not done:\n # agent always takes observation with [batch, *dim(obs)] size as input\n # and returns action and log-prob with corresponding size\n if type(obs) is dict:\n act_obs = {key: value[None, :] for key, value in obs.items()}\n else:\n act_obs = [obs]\n act_result = agent.act(act_obs, deterministic=deterministic)\n action = act_result['action'][0]\n obs, reward, done, info = env.step(action, render=not silent)\n episode_reward += reward\n episode_len += 1\n\n observations.append(obs)\n actions.append(action)\n rewards.append(reward)\n\n if not silent:\n env.render()\n\n episode = (observations[:-1], actions, rewards)\n\n return episode_reward, episode_len, episode\n\n\ndef play_n_episodes(\n env, agent,\n deterministic,\n n_episodes, silent,\n reward_threshold, save_demo,\n pause_first\n):\n # if 'reward_threshold' is not None, then this function\n # will save #'n_episodes' with episode_reward > reward_threshold\n\n episode_rewards, episode_lengths = [], []\n episodes_to_save, save_ep_reward = [], []\n total_episodes = 0\n\n if reward_threshold is not None:\n p_bar = _to_infinity()\n real_p_bar = tqdm(total=n_episodes, ncols=60)\n silent = True\n else:\n if not silent:\n p_bar = range(n_episodes)\n else:\n p_bar = trange(n_episodes, ncols=60)\n\n for i in p_bar:\n episode_reward, episode_len, episode = play_episode(\n env, agent, deterministic, silent,\n i == 0 and pause_first\n )\n episode_rewards.append(float(episode_reward))\n episode_lengths.append(episode_len)\n total_episodes += 1\n\n if reward_threshold is not None:\n if episode_reward > reward_threshold:\n episodes_to_save.append(episode)\n save_ep_reward.append(episode_reward)\n # noinspection PyUnboundLocalVariable\n real_p_bar.update()\n if len(episodes_to_save) == n_episodes:\n real_p_bar.close()\n with open(save_demo, 'wb') as f:\n pickle.dump(episodes_to_save, f)\n\n print(\n f'done! 
'\n f'Saved {len(episodes_to_save)} episodes with mean reward {np.mean(save_ep_reward)} '\n f'out of {total_episodes} with mean reward {np.mean(episode_rewards)}'\n )\n break\n\n if not silent:\n print(f'episode_{i} done, len = {episode_len}, reward = {episode_reward}')\n\n print(f'mean(reward) = {np.mean(episode_rewards)}, std(reward) = {np.std(episode_rewards)}')\n\n # only for Humanoid:\n # num_fails = sum([1 for i in episode_lengths if i < 1000])\n # max_rewards = [episode_rewards[i] for i in range(n_episodes) if episode_lengths[i] == 1000]\n # print(f'num_fails: {num_fails}, mean_full_reward: {sum(max_rewards) / len(max_rewards)}')\n\n\ndef play_from_folder(\n folder, config_path, checkpoint_path,\n deterministic, silent, pause_first, n_episodes,\n save_gif, reward_threshold, save_demo,\n):\n if save_gif:\n raise ValueError('gif saving is not yet implemented...')\n\n with open(folder + config_path) as f:\n config = yaml.safe_load(f)\n\n test_env_args = config['test_env_args']\n test_env_args['env_num'] = 1\n test_env = init_env(**test_env_args)\n\n device = torch.device('cpu')\n nn_online = init_actor_critic(config['actor_critic_nn_type'], config['actor_critic_nn_args'])\n nn_online.to(device)\n policy = config['policy']\n policy_args = config['policy_args']\n agent = AgentInference(nn_online, device, policy, policy_args)\n agent.load(folder + checkpoint_path, map_location='cpu')\n agent.eval()\n play_n_episodes(\n test_env, agent,\n deterministic, n_episodes, silent,\n reward_threshold, save_demo,\n pause_first\n )\n test_env.close()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n # config + checkpoint part\n parser.add_argument(\n '--folder', '-f',\n help='this will be added before config and checkpoint paths, default \\'\\'',\n default=''\n )\n parser.add_argument(\n '--config', '-c',\n help='path to config which contains agent and environment parameters, default \\'config.yaml\\'',\n default='config.yaml'\n )\n parser.add_argument(\n '--checkpoint', '-p',\n help='path to checkpoint which contains agent weights'\n )\n\n # playing episodes part\n parser.add_argument(\n '--random', '-r',\n help='if True then action will be sampled from the policy instead from taking mean, default False',\n action='store_true'\n )\n parser.add_argument(\n '--silent', '-s',\n help='if True then episodes will not be shown in window, '\n 'and only mean reward will be printed at the end, default False',\n action='store_true'\n )\n parser.add_argument(\n '--pause_first',\n help='if True, pauses the first episode at the first frame until enter press. 
'\n 'It is useful to record video with Kazam or something else, default False',\n action='store_true'\n )\n parser.add_argument(\n '--n_episodes', '-n',\n help='number of episodes to play or save demo, default 5',\n default=5, type=int\n )\n\n # saving results part\n parser.add_argument(\n '--save_gif', '-g',\n help='file name to save gif of played episodes (max 5) into, not yet implemented',\n default=None, type=str, required=False\n )\n parser.add_argument(\n '--reward_threshold', '-t',\n help='if \\'save_demo\\' arg provided, then '\n 'only episodes with reward > \\'reward_threshold\\' will be saved into buffer',\n default=None, type=float, required=False\n )\n parser.add_argument(\n '--save_demo', '-d',\n help='file name to save demo of episodes with reward > \\'reward_threshold\\' into',\n default=None, type=str, required=False\n )\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n play_from_folder(\n args.folder, args.config, args.checkpoint,\n not args.random, args.silent, args.pause_first, args.n_episodes,\n args.save_gif, args.reward_threshold, args.save_demo,\n )\n","sub_path":"train_scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"134196453","text":"import tensorflow as tf\nfrom transformers import ElectraTokenizer, TFElectraModel\nfrom ray import serve\nimport requests\nimport ray\nimport numpy as np\n\n\n\n@tf.autograph.experimental.do_not_convert\ndef create_matrix(samples, targets):\n state_cube = ['Answer.question', 'Answer.answerA', 'Answer.answerB', 'Answer.answerC', 'Answer.answerD', 'Answer.answerE', 'Answer.answerF']\n matrix = []\n for k in state_cube:\n matrix.append(samples[k])\n return matrix, targets\n\n@tf.function\ndef datagen(data):\n # import tensorflow as tf\n qnatext, answers = next(iter(data.take(1)))\n return qnatext, answers\n\nfrom transformers import ElectraTokenizer, TFElectraModel\ntokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator', max_length=128, pad_to_max_length=True)\nmodel = TFElectraModel.from_pretrained('google/electra-small-discriminator')\n\ncolumns = ['Answer.answerA', 'Answer.answerB', 'Answer.answerC', 'Answer.answerD', 'Answer.answerE', 'Answer.answerF', 'Answer.image.label', 'Answer.question']\nbatch_size=1\ndata = tf.data.experimental.make_csv_dataset('train_set.csv', batch_size=batch_size, select_columns=columns, label_name='Answer.image.label', num_epochs=1, prefetch_buffer_size=10, ignore_errors=True)\ndata = data.map(create_matrix, num_parallel_calls=tf.data.experimental.AUTOTUNE)\nqnatext, answers = datagen(data)\n\n\n# store_list= []\n\nseq = [qnatext[0], qnatext[1], qnatext[2], qnatext[3], qnatext[4], qnatext[5], qnatext[6]]\n# for i in range(batch_size):\nmatrix = []\nmatrix_max_size = 10\nfor element_text in seq:\n input_ids = tf.constant(tokenizer.encode(element_text[0].numpy().decode('utf-8'), max_length=128, pad_to_max_length=128))[None, :] # Batch size 1\n outputs = model(input_ids)\n outputs = np.squeeze(outputs)\n matrix.append(outputs)\nmatrix = np.asarray(matrix)\n# matrix = np.squeeze(matrix) \nstate_matrix = np.pad(matrix, [(0, matrix_max_size - matrix.shape[0]), (0,0), (0,0)])\nstate_matrix = np.moveaxis(state_matrix, 0, 2)\n# print(np.shape(state_matrix))\n# store_list.append(state_matrix)\n\nprint(np.shape(state_matrix))\n\n\n# class ServeData:\n# def __init__(self):\n\n\n\nclass ElectraTensors:\n def __init__(self):\n from transformers import ElectraTokenizer, TFElectraModel\n self.tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator', max_length=128, pad_to_max_length=True)\n self.model = TFElectraModel.from_pretrained('google/electra-small-discriminator')\n\n def __call__(self, flask_request):\n import tensorflow as tf\n import json\n self.answer = flask_request.json\n self.input_ids = tf.constant(self.tokenizer.encode(self.answer['text'], max_length=128, pad_to_max_length=128))[None, :] # Batch size 1\n outputs = self.model(self.input_ids)\n results = outputs[0].numpy().tolist()\n return {\n \"Electra_Tensors\": results\n }\n\n\n#connect to ray start --head\n# url=\"34.83.237.208\"\n# url=\"35.233.235.19\"\nurl = \"35.247.47.188\"\nray.init(address= url + \":6000\", _redis_password='5241590000000000')\nclient = serve.start(detached=True, http_host=\"0.0.0.0\")\n\n\nconfig = {\n \"num_replicas\": 6\n}\nclient.create_backend('electra_tensors', ElectraTensors, config=config)\nclient.create_endpoint('ElectraTensors', backend='electra_tensors', route='/electratensors', 
methods=['POST'])\n","sub_path":"test-data-serve.py","file_name":"test-data-serve.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
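Once the ElectraTensors backend above is deployed, each replica expects a JSON body with a 'text' field (that is what flask_request.json is read for in __call__). A hedged client sketch, assuming Ray Serve's HTTP proxy is reachable at a placeholder host on its default port 8000:

import requests

URL = "http://127.0.0.1:8000/electratensors"  # placeholder host; 8000 is serve's default port

response = requests.post(URL, json={"text": "What is the capital of France?"})
payload = response.json()
# The backend wraps the ELECTRA hidden states under the 'Electra_Tensors' key.
print(len(payload["Electra_Tensors"]))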
+{"seq_id":"614410732","text":"#! /usr/bin/python\n\n__author__ = 'Kiamehr & Amin'\n\nimport basictypes\n\nclass MenuInfo:\n\n##############\n\n\tdef __init__(self, pid=-1, host='localhost', port=10000, team=basictypes.Teams.left, tanktype=basictypes.TankTypes.attacker, isAI=False, w=1000, h=400):\n\n\t\tself.pid = pid # player id\n\t\tself.host = host\n\t\tself.port = port\n\t\tself.team = team\n\t\tself.tanktype = tanktype\n\t\tself.isAI = isAI\n\n\t\tself.monitor_width = w\n\t\tself.monitor_height = h","sub_path":"client/menuinfo.py","file_name":"menuinfo.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"270069817","text":"from __future__ import division\nimport numpy as np\nfrom mpmath import meijerg\nfrom scipy import special, interpolate\nfrom numpy import log, exp, sin ,cos, pi, log10, sqrt\n\ncrit_density = 1.3211775*10**-7; \nf = 0.1;\np = 1.9;\nc = 10.0;\nG = 0.0045;\nk = 2;\nMprimary = 10**12;\nT_age = 10**4\n\ndef MaxRadius(M):\n return pow(3*M/(4 * pi * 200 * crit_density), 1/3)\n\ndef DFreeNFW(r, M):\n Rmax = MaxRadius(M)\n Rc = Rmax/c\n if(r < Rmax):\n return 200/3.0 * crit_density / (log(1+c) - c/(1+c))*c**3 * 1/(r/Rc*(1+r/Rc)**2)\n else:\n return 0\n\ndef MFreeNFW(r, M):\n Rmax = MaxRadius(M)\n Rc = Rmax/c\n if(r < Rmax):\n return M*(log(1+r/Rc)-r/(r+Rc))/(log(1+c) - c/(1+c))\n else:\n return M\n\ndef PhiFreeNFW(r, M):\n Rmax = MaxRadius(M)\n Rc = Rmax/c\n if(r < Rmax):\n return -M*G*((Rmax/r * log(1+r/Rc) - log(1+c))/(log(1+c) - c/(1+c)) + 1)/Rmax\n else:\n return -M*G/r\n\ndef TidalRadius(m, R):\n Rt = R*pow(m/(2*MFreeNFW(R, Mprimary)), 1/3)\n return Rt\n \ndef FourierF(k):\n\treturn sqrt(2/pi) * special.kv(0, abs(k))\n\t\ndef FourierIntegral(k):\n\treturn sqrt(pi/2) * (1/k - special.kv(0, k) * special.modstruve(-1,k) - special.kv(0,k) * special.modstruve(0,k))\t \n\ninterpolation_NUM = 1000\n\ninterpolation_points_x = np.logspace(-10, 1, interpolation_NUM)\ninterpolation_points_y = map(lambda k : 1/sqrt(4*pi) * (5.568327996831708 - float(meijerg([[1],[1]],[[1/2,1/2,1/2], [0]],k,1/2)))/k, interpolation_points_x) \ninterpolated_function = interpolate.interp1d(interpolation_points_x, interpolation_points_y, fill_value = 'extrapolate')\n\ndef SqFourierIntegral(k):\n\tif(k < 10):\n\t\treturn interpolated_function(k)\n\telse:\n\t\treturn 0\n\n\n#from matplotlib import pyplot as plt\n#plt.loglog(interpolation_points_x, interpolation_points_y)\n#plt.show()\n\n#x_new = np.logspace(-10, 2, 10**5)\n#plt.loglog(x_new, interpolated_function(x_new))\n#plt.loglog(x_new, map(SqFourierIntegral, x_new))\n#plt.show()\n \ndef trapz2d(z, x = None,y = None):\n ''' Integrates a regularly spaced 2D grid using the composite trapezium rule. \n IN:\n z : 2D array\n x : (optional) grid values for x (1D array)\n y : (optional) grid values for y (1D array)\n dx: if x is not supplied, set it to the x grid interval\n dy: if y is not supplied, set it to the x grid interval\n '''\n \n sum = np.sum\n dx = (x[-1]-x[0])/(np.shape(x)[0]-1)\n dy = (y[-1]-y[0])/(np.shape(y)[0]-1) \n \n s1 = z[0,0] + z[-1,0] + z[0,-1] + z[-1,-1]\n s2 = sum(z[1:-1,0]) + sum(z[1:-1,-1]) + sum(z[0,1:-1]) + sum(z[-1,1:-1])\n s3 = sum(z[1:-1,1:-1])\n \n return 0.25*dx*dy*(s1 + 2*s2 + 4*s3)\n\n","sub_path":"Analytic/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"202505258","text":"import datetime\nimport pandas as pd\n\nfrom ...OptimizerSimulator import OptimizerSimulator\n\nclass OptimizerSimulatorDEDS(OptimizerSimulator):\n\t\n\tdef __init__(self, simulator, bs_simulator, sg):\n\t\tself.sim = simulator\n\t\tself.bs_sim = bs_simulator\n\t\tself.sg = sg\n\n\n\tdef runSimulationGroup(self,\n\t\t\t\t\t\t\tprevol_threshold=[800000],\n\t\t\t\t\t\t\topen_dolar_threshold=[2],\n\t\t\t\t\t\t\tgap_threshold=[0.2],\n\t\t\t\t\t\t\tF_low_threshold=[0],\n\t\t\t\t\t\t\tF_high_threshold=[1],\n\t\t\t\t\t\t\tshort_after1 = [0],\n\t\t\t\t\t\t\tshort_after2 = [0.1],\n\t\t\t\t\t\t\tfirstEntryPct = [0.5],\n\t\t\t\t\t\t\texit_target = [0.3], \n\t\t\t\t\t\t\texit_stop = [0.3],\n\t\t\t\t\t\t\tstart_money = [10000],\n\t\t\t\t\t\t\tallocation=[0.1],\n\t\t\t\t\t\t\tlocate_fee=[0.02],\n\t\t\t\t\t\t\tcommission=[2]):\n\n\t\tparametros = [ # 1) acho que aqui sai uma list of lists, \n\t\t\t[a,b,c,d,e,f,g,h,i,j,k,l,m,n]\n\t\t\tfor a in prevol_threshold \n\t\t\tfor b in open_dolar_threshold\n\t\t\tfor c in gap_threshold\n\t\t\tfor d in F_low_threshold\n\t\t\tfor e in F_high_threshold\n\t\t\tfor f in short_after1\n\t\t\tfor g in short_after2\n\t\t\tfor h in firstEntryPct\n\t\t\tfor i in exit_target\n\t\t\tfor j in exit_stop\n\t\t\tfor k in start_money\n\t\t\tfor l in allocation\n\t\t\tfor m in locate_fee\n\t\t\tfor n in commission\n\t\t]\n\n\t\tparslist = []\n\t\tfor di in parametros: # 2) mas preisamos de uma list of dictionaries\n\t\t pars = {\n\t\t 'prevol_threshold':di[0],\n\t\t 'open_dolar_threshold':di[1],\n\t\t 'gap_threshold':di[2],\n\t\t 'F_low_threshold':di[3],\n\t\t 'F_high_threshold':di[4],\n\t\t 'short_after1':di[5],\n\t\t 'short_after2':di[6],\n\t\t 'firstEntryPct':di[7],\n\t\t 'exit_target':di[8],\n\t\t 'exit_stop':di[9],\n\t\t 'start_money':di[10],\n\t\t 'allocation':di[11],\n\t\t 'locate_fee':di[12],\n\t\t 'commission':di[13]\n\t\t }\n\t\t parslist.append(pars)\n\t\t# parslist\n\n\t\tprint(f\"Simulando {len(parslist)} combinações de parâmetros.\")\n\n\t\tfor p in parslist: # 3) para cada dictionary da list\n\t\t\tself.sim.parameters.setFilterParameters(prevol_threshold=p['prevol_threshold'],\n\t\t\t\t\t\t\t\t\topen_dolar_threshold=p['open_dolar_threshold'],\n\t\t\t\t\t\t\t\t\tgap_threshold=p['gap_threshold'],\n\t\t\t\t\t\t\t\t\tF_low_threshold=p['F_low_threshold'],\n\t\t\t\t\t\t\t\t\tF_high_threshold=p['F_high_threshold'])\n\t\t\tself.sim.runFiltering()\n\t\t\t\t\n\t\t\tself.sim.parameters.setAlgoParameters(short_after1 = p['short_after1'],\n\t\t\t\t\t\t\t\t\tshort_after2 = p['short_after2'],\n\t\t\t\t\t\t\t\t\tfirstEntryPct = p['firstEntryPct'],\n\t\t\t\t\t\t\t\t\texit_target = p['exit_target'],\n\t\t\t\t\t\t\t\t\texit_stop = p['exit_stop'])\n\t\t\tself.sim.parameters.setSimParameters(start_money = p['start_money'],\n\t\t\t\t\t\t\t\tallocation = p['allocation'],\n\t\t\t\t\t\t\t\tlocate_fee=p['locate_fee'],\n\t\t\t\t\t\t\t\tcommission=p['commission'])\n\n\t\t\tnow = datetime.datetime.now()\n\t\t\tnow_str = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\t\t\tprint(\"running another simulation.\", now_str)\n\t\t\tself.sim.runSimulation()\n\t\t\tself.bs_sim.runBootstrap(n_iter=50, replace=False) # we will need data such as meanmax_drawdown, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# maxmax_drawdown, minmax_drawdown\n\t\t\tself.sg.appendSimResults()\n\t\t\t#self.bsresults = 
self.results.append(self.getSimResults(),ignore_index=True)","sub_path":"pynnystock/strategies/doubleentriesdoublestops/OptimizerSimulatorDEDS.py","file_name":"OptimizerSimulatorDEDS.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
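The fourteen nested for-clauses in runSimulationGroup above build the Cartesian product of the parameter lists by hand and then re-key it into dictionaries. itertools.product plus zip collapses both steps; a sketch of the same construction, shortened to three parameters for brevity:

import itertools

param_grid = {
    'prevol_threshold': [800000],
    'gap_threshold': [0.2, 0.3],
    'exit_stop': [0.3, 0.5],
}

# One dict per combination, equivalent to the comprehension + loop above.
parslist = [
    dict(zip(param_grid.keys(), combo))
    for combo in itertools.product(*param_grid.values())
]

print(len(parslist))  # 1 * 2 * 2 = 4 combinations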
+{"seq_id":"438301660","text":"import dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nfrom datetime import date\n\n\nclass GraphTwoComponents:\n\n @staticmethod\n def radio_pred():\n radios = dcc.RadioItems(\n id='time-select-pred',\n options=[\n {'label': 'Hourly', 'value': 'Hour'},\n {'label': 'Daily', 'value': 'Day'},\n {'label': 'Weekly', 'value': 'Week'},\n {'label': 'Monthly', 'value': 'Month'}\n ],\n value='Day',\n labelStyle={'display': 'inline-block',\n 'padding': '4px 12px 0px 5px',\n 'font-size': '.9rem'},\n inputStyle={'margin-right': '4px'}\n )\n return radios\n\n @staticmethod\n def pred_graph_description():\n desc = \"This graph displays the mean values of both Actual data and Predicted data. The data displayed represents energy usage for UNCG from 2020-01-01 to present.\"\n p = html.P(\n desc\n )\n return p\n\n @staticmethod\n def tf_tooltip():\n tt = dbc.Tooltip(\n \"Select to change the timeframe displayed in graph.\",\n target=\"timeframe-graph2\",\n )\n return tt\n\n @staticmethod\n def date_selector_tooltip():\n tt = dbc.Tooltip(\n \"Select the time window you would like to view.\",\n target=\"date-selector\",\n )\n return tt\n\n @staticmethod\n def dd_tooltip():\n tt = dbc.Tooltip(\n \"Select a building to view its average energy consumption.\",\n target=\"drop-down-graph2\",\n )\n return tt\n\n @staticmethod\n def graph_tooltip():\n tt = dbc.Tooltip(\n \"Hover over the graph to view the average and predicted energy usage valeus for the selected timeframe.\",\n target=\"predictive-graph-container\",\n )\n return tt\n\n @staticmethod\n def date_picker():\n date_picker = dcc.DatePickerRange(\n id='date-picker-range',\n min_date_allowed=date(2020, 1, 1),\n # find a way to get latest date\n max_date_allowed=date(2020, 11, 1),\n initial_visible_month=date(2020, 1, 1),\n start_date=date(2020, 1, 1),\n end_date=date(2020, 11, 1)\n\n )\n return date_picker\n","sub_path":"layout/graph_two_components.py","file_name":"graph_two_components.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"583964139","text":"#!/usr/bin/env python\nimport os\n\n# DO NOT CLEAN PDFS\n\naux_ext = ['.aux', '.bbl', '.blg', '.out', '.DS_Store', '.cb', '.cb2', '.log', '.synctex.gz', '.toc']\n\n# Clean root source\nfor doc_filename in os.listdir('.'):\n for aux in aux_ext:\n if doc_filename.endswith(aux):\n os.remove(os.path.join('.', doc_filename)) \n\nfor doc_filename in os.listdir('author_source/coverletter'):\n for aux in aux_ext:\n if doc_filename.endswith(aux):\n os.remove(os.path.join('author_source/coverletter', doc_filename))\n\n\n# Clean tex aux files from each issue\nsrc_dir = 'author_source'\nfor author_dir in os.listdir(src_dir):\n if author_dir.startswith('.'):\n continue\n\n doc_types = ['.', 'openletter', 'article']\n for doc_type in doc_types:\n doc_dir = os.path.join(src_dir, author_dir, doc_type)\n if not os.path.isdir(doc_dir):\n continue\n\n for doc_filename in os.listdir(doc_dir):\n for aux in aux_ext:\n if doc_filename.endswith(aux):\n os.remove(os.path.join(doc_dir, doc_filename))","sub_path":"Journal/Volume1/Number1/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"161132164","text":"#!/usr/bin/env python3\n\nimport fileinput\n\nfrom lilaclib import *\n\nbuild_prefix = 'archlinuxcn-x86_64'\npost_build = aur_post_build\n\npatch = '''\\\n lilyver=$(pacman -Q linux-lily-headers | awk '{print $2}')\n mkdir -p libau/linux\n cp \"/lib/modules/${lilyver}-lily/build/include/uapi/linux/aufs_type.h\" libau/linux\n sed -i 's/__user//g' libau/linux/aufs_type.h\n mkdir -p fhsm/libau/linux\n cp libau/linux/aufs_type.h fhsm/libau/linux\n sed -i 's/-lrt -L. -lfhsm -L.. -lautil/-L. -lfhsm -L.. -lautil -lrt/' fhsm/Makefile\n\n'''\n\ndef pre_build():\n aur_pre_build('aufs3-util')\n with fileinput.input(files=('PKGBUILD',), inplace=True) as f:\n for line in f:\n line = line.rstrip('\\n')\n if line.strip() == 'make':\n line = patch + line\n elif line.startswith('pkgname='):\n line = 'pkgname=aufs3-util-lily-git'\n elif line.startswith('makedepends'):\n line = \"makedepends=('linux-lily-headers' 'git')\"\n print(line)\n\nif __name__ == '__main__':\n single_main()\n","sub_path":"aufs3-util-lily-git/lilac.py","file_name":"lilac.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"90273925","text":"\"\"\"\nName: Main.py\nAuthor: Sid Bishnu\nDetails: This script contains functions for determining numerical solutions of the various test cases along with the \nnumerical error.\n\"\"\"\n\n\nimport numpy as np\nfrom IPython.utils import io\nwith io.capture_output() as captured:\n import CommonRoutines as CR\n import ExactSolutionsAndSourceTerms as ESST\n import DGSEM2DClass\n import TimeSteppingMethods as TSM\n\n\ndef FormatSimulationTime(time,non_integral_seconds=False,display_time=False,\n ProblemType_PlanetaryTopographicRossbyWave=False,ProblemType_NoExactSolution=False):\n years = np.floor(time/(86400.0*365.0))\n remainingtime = np.mod(time,86400.0*365.0)\n days = np.floor(remainingtime/86400.0)\n remainingtime = np.mod(remainingtime,86400.0)\n hours = np.floor(remainingtime/3600.0)\n remainingtime = np.mod(time,3600.0)\n minutes = np.floor(remainingtime/60.0)\n seconds = np.mod(remainingtime,60.0)\n if years <= 1.0:\n years_string = 'Year'\n else:\n years_string = 'Years'\n if days <= 1.0:\n days_string = 'Day'\n else:\n days_string = 'Days'\n if hours <= 1.0:\n hours_string = 'Hour'\n else:\n hours_string = 'Hours'\n if minutes <= 1.0:\n minutes_string = 'Minute'\n else:\n minutes_string = 'Minutes' \n if seconds <= 1.0:\n seconds_string = 'Second'\n else:\n seconds_string = 'Seconds'\n if ProblemType_PlanetaryTopographicRossbyWave or ProblemType_NoExactSolution:\n if time >= 86400.0*365.0:\n SimulationTime = ('%d %s %d %s %2d %s %2d %s' \n %(years,years_string,days,days_string,hours,hours_string,minutes,minutes_string))\n elif time < 86400.0*365.0 and time >= 86400.0:\n SimulationTime = '%d %s %2d %s %2d %s' %(days,days_string,hours,hours_string,minutes,minutes_string)\n elif time < 86400.0 and time >= 3600.0:\n SimulationTime = '%2d %s %2d %s' %(hours,hours_string,minutes,minutes_string)\n elif time < 3600.0:\n SimulationTime = '%2d %s' %(minutes,minutes_string) \n else:\n if time >= 86400.0*365.0:\n if non_integral_seconds:\n SimulationTime = ('%d %s %d %s %2d %s %2d %s %.2g %s' \n %(years,years_string,days,days_string,hours,hours_string,minutes,minutes_string,\n seconds,seconds_string)) \n else:\n SimulationTime = ('%d %s %d %s %2d %s %2d %s %2d %s' \n %(years,years_string,days,days_string,hours,hours_string,minutes,minutes_string,\n seconds,seconds_string))\n elif time < 86400.0*365.0 and time >= 86400.0:\n if non_integral_seconds:\n SimulationTime = ('%d %s %2d %s %2d %s %.2g %s' \n %(days,days_string,hours,hours_string,minutes,minutes_string,seconds,seconds_string))\n else:\n SimulationTime = ('%d %s %2d %s %2d %s %2d %s' \n %(days,days_string,hours,hours_string,minutes,minutes_string,seconds,seconds_string))\n elif time < 86400.0 and time >= 3600.0:\n if non_integral_seconds:\n SimulationTime = ('%2d %s %2d %s %.2g %s' %(hours,hours_string,minutes,minutes_string,seconds,\n seconds_string))\n else:\n SimulationTime = ('%2d %s %2d %s %2d %s' %(hours,hours_string,minutes,minutes_string,seconds,\n seconds_string))\n elif time < 3600.0 and time >= 60.0:\n if non_integral_seconds:\n SimulationTime = '%2d %s %.2g %s' %(minutes,minutes_string,seconds,seconds_string)\n else:\n SimulationTime = '%2d %s %2d %s' %(minutes,minutes_string,seconds,seconds_string)\n elif time < 60.0:\n if non_integral_seconds:\n SimulationTime = '%.2g %s' %(seconds,seconds_string)\n else:\n SimulationTime = '%2d %s' %(seconds,seconds_string) \n if display_time:\n print('The formatted simulation time is %s.' 
%SimulationTime)\n return SimulationTime\n\n\ndef DetermineCourantNumberForGivenTimeStep(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,\n TimeIntegrator,LF_TR_and_LF_AM3_with_FB_Feedback_Type,\n Generalized_FB_with_AB2_AM3_Step_Type,Generalized_FB_with_AB3_AM4_Step_Type,\n nElementsX,nElementsY,nXi,nEta,nXiPlot,nEtaPlot,dt,PrintCourantNumber=False):\n myDGSEM2D = DGSEM2DClass.DGSEM2D(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,TimeIntegrator,\n LF_TR_and_LF_AM3_with_FB_Feedback_Type,Generalized_FB_with_AB2_AM3_Step_Type,\n Generalized_FB_with_AB3_AM4_Step_Type,nElementsX,nElementsY,nXi,nEta,nXiPlot,\n nEtaPlot)\n dx = myDGSEM2D.myNameList.dx\n dy = myDGSEM2D.myNameList.dy\n nXi = myDGSEM2D.myDGSEM2DParameters.nXi\n nEta = myDGSEM2D.myDGSEM2DParameters.nEta\n cX1 = myDGSEM2D.myNameList.myExactSolutionParameters.cX1\n cX2 = myDGSEM2D.myNameList.myExactSolutionParameters.cX2\n cY1 = myDGSEM2D.myNameList.myExactSolutionParameters.cY1\n cY2 = myDGSEM2D.myNameList.myExactSolutionParameters.cY2\n abs_cX = max(abs(cX1),abs(cX2))\n abs_cY = max(abs(cY1),abs(cY2))\n CourantNumber = dt*(abs_cX/(dx/float(nXi**2)) + abs_cY/(dy/float(nEta**2)))\n if PrintCourantNumber:\n print('The Courant number is %.6f.' %CourantNumber)\n return CourantNumber\n\n\ndef DetermineCourantNumberForGivenTimeStepAndCheckItsValue(ProblemType):\n PrintPhaseSpeedOfWaveModes = True\n PrintAmplitudesOfWaveModes = True\n TimeIntegrator = 'WilliamsonLowStorageThirdOrderRungeKuttaMethod'\n LF_TR_and_LF_AM3_with_FB_Feedback_Type = 'ThirdOrderAccurate_MaximumStabilityRange'\n Generalized_FB_with_AB2_AM3_Step_Type = 'ThirdOrderAccurate_WideStabilityRange'\n Generalized_FB_with_AB3_AM4_Step_Type = 'ThirdOrderAccurate_MaximumStabilityRange'\n nElementsX = 5\n nElementsY = 5\n nXi = 10\n nEta = 10\n nXiPlot = 20\n nEtaPlot = 20\n if ProblemType == 'Plane_Gaussian_Wave':\n dt = 7.0*10.0**(-4.0)\n elif ProblemType == 'Coastal_Kelvin_Wave':\n dt = 50.0\n elif ProblemType == 'Inertia_Gravity_Wave':\n dt = 23.0\n elif ProblemType == 'Planetary_Rossby_Wave' or ProblemType == 'Topographic_Rossby_Wave':\n dt = 39000.0\n elif ProblemType == 'Equatorial_Kelvin_Wave':\n dt = 175.0\n elif ProblemType == 'Equatorial_Yanai_Wave':\n dt = 93.0\n elif ProblemType == 'Equatorial_Rossby_Wave':\n dt = 240.0\n elif ProblemType == 'Equatorial_Inertia_Gravity_Wave':\n dt = 57.0\n elif ProblemType == 'Barotropic_Tide':\n dt = 2.4\n elif ProblemType == 'NonLinear_Manufactured_Solution':\n dt = 35.0\n CourantNumber = DetermineCourantNumberForGivenTimeStep(\n ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,TimeIntegrator,\n LF_TR_and_LF_AM3_with_FB_Feedback_Type,Generalized_FB_with_AB2_AM3_Step_Type,Generalized_FB_with_AB3_AM4_Step_Type,\n nElementsX,nElementsY,nXi,nEta,nXiPlot,nEtaPlot,dt,PrintCourantNumber=True)\n myDGSEM2D = DGSEM2DClass.DGSEM2D(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,TimeIntegrator,\n LF_TR_and_LF_AM3_with_FB_Feedback_Type,Generalized_FB_with_AB2_AM3_Step_Type,\n Generalized_FB_with_AB3_AM4_Step_Type,nElementsX,nElementsY,nXi,nEta,nXiPlot,\n nEtaPlot,CourantNumber,UseCourantNumberToDetermineTimeStep=True)\n if ProblemType == 'Inertia_Gravity_Wave' or ProblemType == 'Planetary_Rossby_Wave':\n beta0 = myDGSEM2D.myNameList.myExactSolutionParameters.beta0\n c0 = myDGSEM2D.myNameList.myExactSolutionParameters.c0\n f0 = myDGSEM2D.myNameList.myExactSolutionParameters.f0\n kX1 = myDGSEM2D.myNameList.myExactSolutionParameters.kX1\n kX2 = 
myDGSEM2D.myNameList.myExactSolutionParameters.kX2\n kY1 = myDGSEM2D.myNameList.myExactSolutionParameters.kY1\n kY2 = myDGSEM2D.myNameList.myExactSolutionParameters.kY2\n lY = myDGSEM2D.myNameList.myExactSolutionParameters.lY\n k1 = np.sqrt(kX1**2.0 + kY1**2.0)\n k2 = np.sqrt(kX2**2.0 + kY2**2.0)\n if ProblemType == 'Inertia_Gravity_Wave':\n print('For the first wave mode, the ratio of f0:ck is %.6f.' %(f0/(c0*k1)))\n print('For the second wave mode, the ratio of f0:ck is %.6f.' %(f0/(c0*k2)))\n else:\n print('With the meridional extent being %.3f km, the ratio of beta0*lY:f0 is %.6f << 1.' \n %(lY/1000.0,beta0*lY/f0))\n \n \ndef DetermineNumberOfTimeStepsForSimulation(ProblemType):\n PrintPhaseSpeedOfWaveModes = True\n PrintAmplitudesOfWaveModes = True\n TimeIntegrator = 'WilliamsonLowStorageThirdOrderRungeKuttaMethod'\n LF_TR_and_LF_AM3_with_FB_Feedback_Type = 'ThirdOrderAccurate_MaximumStabilityRange'\n Generalized_FB_with_AB2_AM3_Step_Type = 'ThirdOrderAccurate_WideStabilityRange'\n Generalized_FB_with_AB3_AM4_Step_Type = 'ThirdOrderAccurate_MaximumStabilityRange'\n nElementsX = 5\n nElementsY = 5\n nXi = 10\n nEta = 10\n nXiPlot = 20\n nEtaPlot = 20\n myDGSEM2D = DGSEM2DClass.DGSEM2D(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,TimeIntegrator,\n LF_TR_and_LF_AM3_with_FB_Feedback_Type,Generalized_FB_with_AB2_AM3_Step_Type,\n Generalized_FB_with_AB3_AM4_Step_Type,nElementsX,nElementsY,nXi,nEta,nXiPlot,\n nEtaPlot)\n ProblemType_EquatorialWave = myDGSEM2D.myNameList.ProblemType_EquatorialWave\n dt = myDGSEM2D.myNameList.dt \n lX = myDGSEM2D.myNameList.lX\n lY = myDGSEM2D.myNameList.lY\n cX1 = myDGSEM2D.myNameList.myExactSolutionParameters.cX1\n cX2 = myDGSEM2D.myNameList.myExactSolutionParameters.cX2\n cY1 = myDGSEM2D.myNameList.myExactSolutionParameters.cY1\n cY2 = myDGSEM2D.myNameList.myExactSolutionParameters.cY2\n abs_cX = max(abs(cX1),abs(cX2))\n abs_cY = max(abs(cY1),abs(cY2))\n if abs_cX != 0.0:\n SimulationTime = lX/abs_cX \n else:\n SimulationTime = lY/abs_cY\n # Note that for all two-dimensional dispersive waves, \n # SimulationTime = lX/abs_cX = lX*kX/abs(omega) = lY*kY/abs(omega) = lY/abs_cY\n # where kX and kY are the zonal and meridional wavenumbers of the fast wave mode with omega being its angular \n # frequency.\n if ProblemType == 'Plane_Gaussian_Wave':\n print('The time taken by the wave to traverse half the diagonal extent of the domain is %.3g.' %SimulationTime)\n elif ProblemType == 'Coastal_Kelvin_Wave':\n print('The time taken by the fast wave mode to traverse the meridional extent of the domain is %.3g.' \n %SimulationTime)\n elif ProblemType_EquatorialWave:\n print('The time taken by the fast wave mode to traverse the zonal extent of the domain is %.3g.' \n %SimulationTime)\n elif ProblemType == 'Barotropic_Tide':\n print('The time taken by either component of the first standing wave mode to traverse the zonal extent of the '\n + 'domain is %.3g.' %SimulationTime)\n elif ProblemType == 'NonLinear_Manufactured_Solution':\n print('The time taken by the wave to traverse half the diagonal extent of the domain is %.3g.' %SimulationTime)\n else:\n print('The time taken by the fast wave mode to traverse half the diagonal extent of the domain is %.3g.' 
\n %SimulationTime)\n print('The minimum number of time steps of magnitude %.3g required to constitute this simulation time is %d.'\n %(dt,int(np.ceil(SimulationTime/dt))))\n\n\ndef DetermineExactSolutions(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,TimeIntegrator,\n LF_TR_and_LF_AM3_with_FB_Feedback_Type,Generalized_FB_with_AB2_AM3_Step_Type,\n Generalized_FB_with_AB3_AM4_Step_Type,nElementsX,nElementsY,nXi,nEta,nXiPlot,nEtaPlot,\n CheckStateVariableLimits,PlotFigures):\n myDGSEM2D = DGSEM2DClass.DGSEM2D(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,TimeIntegrator,\n LF_TR_and_LF_AM3_with_FB_Feedback_Type,Generalized_FB_with_AB2_AM3_Step_Type,\n Generalized_FB_with_AB3_AM4_Step_Type,nElementsX,nElementsY,nXi,nEta,nXiPlot,\n nEtaPlot)\n ExactZonalVelocityLimits = myDGSEM2D.myNameList.ExactZonalVelocityLimits\n ExactMeridionalVelocityLimits = myDGSEM2D.myNameList.ExactMeridionalVelocityLimits\n ExactSurfaceElevationLimits = myDGSEM2D.myNameList.ExactSurfaceElevationLimits\n if CheckStateVariableLimits:\n print('The limits of zonal velocity are [%.6f,%.6f].' \n %(ExactZonalVelocityLimits[0],ExactZonalVelocityLimits[1]))\n print('The limits of meridional velocity are [%.6f,%.6f].' \n %(ExactMeridionalVelocityLimits[0],ExactMeridionalVelocityLimits[1]))\n print('The limits of surface elevation are [%.6f,%.6f].' \n %(ExactSurfaceElevationLimits[0],ExactSurfaceElevationLimits[1]))\n return\n nCounters = 2\n dt = myDGSEM2D.myNameList.dt\n nDumpFrequency = myDGSEM2D.myNameList.nDumpFrequency\n nTime = myDGSEM2D.myNameList.nTime\n if ProblemType == 'Planetary_Rossby_Wave' or ProblemType == 'Topographic_Rossby_Wave':\n ProblemType_PlanetaryTopographicRossbyWave = True\n ExactSurfaceElevationMaximumMagnitude = ExactSurfaceElevationLimits[1]\n else:\n ProblemType_PlanetaryTopographicRossbyWave = False\n if myDGSEM2D.myNameList.ProblemType_EquatorialWave and not(ProblemType == 'Equatorial_Kelvin_Wave'):\n HermiteFunctionMaximumAmplitude = (\n ESST.DetermineHermiteFunctionMaximumAmplitudeWithMeridionalLocation(ProblemType,ReturnMeridionalLocation=False))\n etaHat1 = myDGSEM2D.myNameList.myExactSolutionParameters.etaHat1\n etaHat2 = myDGSEM2D.myNameList.myExactSolutionParameters.etaHat2\n VelocityScale = myDGSEM2D.myNameList.myExactSolutionParameters.VelocityScale\n ExactMeridionalVelocityMaximumMagnitude = VelocityScale*HermiteFunctionMaximumAmplitude*(etaHat1 + etaHat2)\n PlotExactZonalVelocity = myDGSEM2D.myNameList.LogicalArrayPlot[0]\n PlotExactMeridionalVelocity = myDGSEM2D.myNameList.LogicalArrayPlot[1]\n PlotExactSurfaceElevation = myDGSEM2D.myNameList.LogicalArrayPlot[2]\n ProblemType_FileName = myDGSEM2D.myNameList.ProblemType_FileName\n for iCounter in range(0,nCounters):\n for iTime in range(0,nTime):\n myDGSEM2D.iTime = iTime\n myDGSEM2D.time = float(iTime)*dt\n if np.mod(iTime,nDumpFrequency) == 0.0:\n if iCounter == 0: \n DGSEM2DClass.DetermineExactSolutionAtInteriorNodes(myDGSEM2D)\n ExactZonalVelocities, ExactMeridionalVelocities, ExactSurfaceElevations = (\n DGSEM2DClass.ExpressStateAtInteriorNodesAsArrays(myDGSEM2D,'Exact'))\n if PlotFigures: \n if not(ProblemType == 'Plane_Gaussian_Wave' or ProblemType == 'Coastal_Kelvin_Wave' \n or ProblemType == 'Equatorial_Kelvin_Wave' or ProblemType == 'Inertia_Gravity_Wave' \n or ProblemType == 'Barotropic_Tide' or ProblemType == 'NonLinear_Manufactured_Solution'):\n if iTime == 0:\n ExactZonalVelocityMinimum = np.min(ExactZonalVelocities)\n ExactZonalVelocityMaximum = np.max(ExactZonalVelocities)\n 
ExactMeridionalVelocityMinimum = np.min(ExactMeridionalVelocities)\n ExactMeridionalVelocityMaximum = np.max(ExactMeridionalVelocities)\n if not(ProblemType_PlanetaryTopographicRossbyWave):\n ExactSurfaceElevationMinimum = np.min(ExactSurfaceElevations)\n ExactSurfaceElevationMaximum = np.max(ExactSurfaceElevations)\n else:\n ExactZonalVelocityMinimum = min(ExactZonalVelocityMinimum,np.min(ExactZonalVelocities))\n ExactZonalVelocityMaximum = max(ExactZonalVelocityMaximum,np.max(ExactZonalVelocities))\n ExactMeridionalVelocityMinimum = (\n min(ExactMeridionalVelocityMinimum,np.min(ExactMeridionalVelocities)))\n ExactMeridionalVelocityMaximum = (\n max(ExactMeridionalVelocityMaximum,np.max(ExactMeridionalVelocities))) \n if not(ProblemType_PlanetaryTopographicRossbyWave):\n ExactSurfaceElevationMinimum = min(ExactSurfaceElevationMinimum,\n np.min(ExactSurfaceElevations))\n ExactSurfaceElevationMaximum = max(ExactSurfaceElevationMaximum,\n np.max(ExactSurfaceElevations))\n if iTime == nTime - 1: \n ExactZonalVelocityMaximumMagnitude = max(abs(ExactZonalVelocityMinimum),\n abs(ExactZonalVelocityMaximum)) \n if not(myDGSEM2D.myNameList.ProblemType_EquatorialWave \n and not(ProblemType == 'Equatorial_Kelvin_Wave')):\n ExactMeridionalVelocityMaximumMagnitude = (\n max(abs(ExactMeridionalVelocityMinimum),abs(ExactMeridionalVelocityMaximum)))\n if not(ProblemType_PlanetaryTopographicRossbyWave):\n ExactSurfaceElevationMaximumMagnitude = max(abs(ExactSurfaceElevationMinimum),\n abs(ExactSurfaceElevationMaximum))\n ExactZonalVelocityLimits = [-ExactZonalVelocityMaximumMagnitude,\n ExactZonalVelocityMaximumMagnitude]\n ExactMeridionalVelocityLimits = [-ExactMeridionalVelocityMaximumMagnitude,\n ExactMeridionalVelocityMaximumMagnitude]\n ExactSurfaceElevationLimits = [-ExactSurfaceElevationMaximumMagnitude,\n ExactSurfaceElevationMaximumMagnitude]\n FileName = ProblemType_FileName + '_ExactSolution_%3.3d' %iTime\n DGSEM2DClass.WriteInterpolatedStateDGSEM2D(myDGSEM2D,FileName,ComputeOnlyExactSolution=True)\n if iTime == nTime - 1:\n if PlotExactZonalVelocity:\n FileName = ProblemType_FileName + '_ExactZonalVelocityLimits'\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,ExactZonalVelocityLimits,\n FileName)\n if PlotExactMeridionalVelocity:\n FileName = ProblemType_FileName + '_ExactMeridionalVelocityLimits'\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,\n ExactMeridionalVelocityLimits,FileName)\n if PlotExactSurfaceElevation:\n FileName = ProblemType_FileName + '_ExactSurfaceElevationLimits'\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,ExactSurfaceElevationLimits,\n FileName) \n else: # if iCounter == 1:\n if PlotFigures:\n FileName = ProblemType_FileName + '_ExactSolution_%3.3d' %iTime + '.tec'\n DataType = 'Structured'\n if ProblemType == 'Plane_Gaussian_Wave' or ProblemType == 'Barotropic_Tide':\n non_integral_seconds = True\n else:\n non_integral_seconds = False\n DisplayTime = FormatSimulationTime(myDGSEM2D.time,non_integral_seconds=non_integral_seconds,\n display_time=False,ProblemType_PlanetaryTopographicRossbyWave\n =ProblemType_PlanetaryTopographicRossbyWave)\n UseGivenColorBarLimits = True\n ComputeOnlyExactSolution = True\n SpecifyDataTypeInPlotFileName = False\n DGSEM2DClass.PythonPlotStateDGSEM2D(myDGSEM2D,FileName,DataType,DisplayTime,\n UseGivenColorBarLimits,ComputeOnlyExactSolution,\n SpecifyDataTypeInPlotFileName)\n \n \ndef DetermineExactAndNumericalSolutions(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,\n 
TimeIntegrator,LF_TR_and_LF_AM3_with_FB_Feedback_Type,\n Generalized_FB_with_AB2_AM3_Step_Type,Generalized_FB_with_AB3_AM4_Step_Type,\n nElementsX,nElementsY,nXi,nEta,nXiPlot,nEtaPlot,CheckStateVariableLimits,\n PlotFigures,ComputeOnlyExactSolution=False,PlotNumericalSolution=False,\n Restart=False,Restart_iTime=0,Restart_FileName='',ReadFromSELFOutputData=False):\n myDGSEM2D = DGSEM2DClass.DGSEM2D(ProblemType,PrintPhaseSpeedOfWaveModes,PrintAmplitudesOfWaveModes,TimeIntegrator,\n LF_TR_and_LF_AM3_with_FB_Feedback_Type,Generalized_FB_with_AB2_AM3_Step_Type,\n Generalized_FB_with_AB3_AM4_Step_Type,nElementsX,nElementsY,nXi,nEta,nXiPlot,\n nEtaPlot,ReadFromSELFOutputData=ReadFromSELFOutputData)\n ProblemType_NoExactSolution = myDGSEM2D.myDGSEM2DParameters.ProblemType_NoExactSolution\n ExactZonalVelocityLimits = myDGSEM2D.myNameList.ExactZonalVelocityLimits\n ExactMeridionalVelocityLimits = myDGSEM2D.myNameList.ExactMeridionalVelocityLimits\n ExactSurfaceElevationLimits = myDGSEM2D.myNameList.ExactSurfaceElevationLimits\n if CheckStateVariableLimits:\n print('The limits of zonal velocity are [%.6f,%.6f].' \n %(ExactZonalVelocityLimits[0],ExactZonalVelocityLimits[1]))\n print('The limits of meridional velocity are [%.6f,%.6f].' \n %(ExactMeridionalVelocityLimits[0],ExactMeridionalVelocityLimits[1]))\n print('The limits of surface elevation are [%.6f,%.6f].' \n %(ExactSurfaceElevationLimits[0],ExactSurfaceElevationLimits[1]))\n return\n nCounters = 2\n dt = myDGSEM2D.myNameList.dt\n nDumpFrequency = myDGSEM2D.myNameList.nDumpFrequency\n nRestartFrequency = myDGSEM2D.myNameList.nRestartFrequency\n nTime = myDGSEM2D.myNameList.nTime\n if ProblemType == 'Planetary_Rossby_Wave' or ProblemType == 'Topographic_Rossby_Wave':\n ProblemType_PlanetaryTopographicRossbyWave = True\n ExactSurfaceElevationMaximumMagnitude = ExactSurfaceElevationLimits[1]\n else:\n ProblemType_PlanetaryTopographicRossbyWave = False\n if myDGSEM2D.myNameList.ProblemType_EquatorialWave and not(ProblemType == 'Equatorial_Kelvin_Wave'):\n HermiteFunctionMaximumAmplitude = (\n ESST.DetermineHermiteFunctionMaximumAmplitudeWithMeridionalLocation(ProblemType,ReturnMeridionalLocation=False))\n etaHat1 = myDGSEM2D.myNameList.myExactSolutionParameters.etaHat1\n etaHat2 = myDGSEM2D.myNameList.myExactSolutionParameters.etaHat2\n VelocityScale = myDGSEM2D.myNameList.myExactSolutionParameters.VelocityScale\n ExactMeridionalVelocityMaximumMagnitude = VelocityScale*HermiteFunctionMaximumAmplitude*(etaHat1 + etaHat2)\n PlotExactZonalVelocity = myDGSEM2D.myNameList.LogicalArrayPlot[0]\n PlotExactMeridionalVelocity = myDGSEM2D.myNameList.LogicalArrayPlot[1]\n PlotExactSurfaceElevation = myDGSEM2D.myNameList.LogicalArrayPlot[2]\n ProblemType_FileName = myDGSEM2D.myNameList.ProblemType_FileName\n TimeIntegratorShortForm = myDGSEM2D.myNameList.myTimeSteppingParameters.TimeIntegratorShortForm\n if ReadFromSELFOutputData:\n iTimeFormat = '%8.8d'\n else:\n iTimeFormat = '%3.3d'\n if Restart:\n iTime_Start = Restart_iTime\n else:\n iTime_Start = 0\n DisplayProgress = True\n for iCounter in range(0,nCounters):\n for iTime in range(iTime_Start,nTime):\n if (iCounter == 0 or (iCounter == 1 and np.mod(iTime,nDumpFrequency) == 0.0)) and DisplayProgress:\n print('Displaying Progress: iCounter = %1d and iTime = %3d.' 
%(iCounter,iTime))\n myDGSEM2D.iTime = iTime\n myDGSEM2D.time = float(iTime)*dt\n if iCounter == 0: \n if np.mod(iTime,nDumpFrequency) == 0.0: \n DGSEM2DClass.DetermineExactSolutionAtInteriorNodes(myDGSEM2D)\n ExactZonalVelocities, ExactMeridionalVelocities, ExactSurfaceElevations = (\n DGSEM2DClass.ExpressStateAtInteriorNodesAsArrays(myDGSEM2D,'Exact'))\n if not(ComputeOnlyExactSolution):\n if iTime == iTime_Start:\n if Restart:\n u, v, eta = DGSEM2DClass.ReadStateDGSEM2D(myDGSEM2D,Restart_FileName+'.tec')\n DGSEM2DClass.SpecifyRestartConditions(myDGSEM2D,u,v,eta)\n else:\n DGSEM2DClass.SpecifyInitialConditions(myDGSEM2D)\n DGSEM2DClass.ComputeError(myDGSEM2D)\n ZonalVelocityError, MeridionalVelocityError, SurfaceElevationError = (\n DGSEM2DClass.ExpressStateAtInteriorNodesAsArrays(myDGSEM2D,'Error'))\n if PlotFigures: \n if not(ProblemType == 'Plane_Gaussian_Wave' or ProblemType == 'Coastal_Kelvin_Wave' \n or ProblemType == 'Equatorial_Kelvin_Wave' or ProblemType == 'Inertia_Gravity_Wave' \n or ProblemType == 'Barotropic_Tide' or ProblemType == 'NonLinear_Manufactured_Solution'):\n if iTime == iTime_Start:\n ExactZonalVelocityMinimum = np.min(ExactZonalVelocities)\n ExactZonalVelocityMaximum = np.max(ExactZonalVelocities)\n ExactMeridionalVelocityMinimum = np.min(ExactMeridionalVelocities)\n ExactMeridionalVelocityMaximum = np.max(ExactMeridionalVelocities)\n if not(ProblemType_PlanetaryTopographicRossbyWave):\n ExactSurfaceElevationMinimum = np.min(ExactSurfaceElevations)\n ExactSurfaceElevationMaximum = np.max(ExactSurfaceElevations)\n else:\n ExactZonalVelocityMinimum = min(ExactZonalVelocityMinimum,np.min(ExactZonalVelocities))\n ExactZonalVelocityMaximum = max(ExactZonalVelocityMaximum,np.max(ExactZonalVelocities))\n ExactMeridionalVelocityMinimum = min(ExactMeridionalVelocityMinimum,\n np.min(ExactMeridionalVelocities))\n ExactMeridionalVelocityMaximum = max(ExactMeridionalVelocityMaximum,\n np.max(ExactMeridionalVelocities)) \n if not(ProblemType_PlanetaryTopographicRossbyWave):\n ExactSurfaceElevationMinimum = min(ExactSurfaceElevationMinimum,\n np.min(ExactSurfaceElevations))\n ExactSurfaceElevationMaximum = max(ExactSurfaceElevationMaximum,\n np.max(ExactSurfaceElevations))\n if iTime == nTime - 1: \n ExactZonalVelocityMaximumMagnitude = max(abs(ExactZonalVelocityMinimum),\n abs(ExactZonalVelocityMaximum)) \n if not(myDGSEM2D.myNameList.ProblemType_EquatorialWave \n and not(ProblemType == 'Equatorial_Kelvin_Wave')):\n ExactMeridionalVelocityMaximumMagnitude = max(abs(ExactMeridionalVelocityMinimum),\n abs(ExactMeridionalVelocityMaximum))\n if not(ProblemType_PlanetaryTopographicRossbyWave):\n ExactSurfaceElevationMaximumMagnitude = max(abs(ExactSurfaceElevationMinimum),\n abs(ExactSurfaceElevationMaximum)) \n if ProblemType_NoExactSolution:\n ExactZonalVelocityLimits = [ExactZonalVelocityMinimum,ExactZonalVelocityMaximum]\n ExactMeridionalVelocityLimits = [ExactMeridionalVelocityMinimum,\n ExactMeridionalVelocityMaximum]\n ExactSurfaceElevationLimits = [ExactSurfaceElevationMinimum,\n ExactSurfaceElevationMaximum] \n else:\n ExactZonalVelocityLimits = [-ExactZonalVelocityMaximumMagnitude,\n ExactZonalVelocityMaximumMagnitude]\n ExactMeridionalVelocityLimits = [-ExactMeridionalVelocityMaximumMagnitude,\n ExactMeridionalVelocityMaximumMagnitude]\n ExactSurfaceElevationLimits = [-ExactSurfaceElevationMaximumMagnitude,\n ExactSurfaceElevationMaximumMagnitude]\n if not(ComputeOnlyExactSolution):\n if iTime == iTime_Start:\n ZonalVelocityErrorMinimum = np.min(ZonalVelocityError)\n 
ZonalVelocityErrorMaximum = np.max(ZonalVelocityError)\n MeridionalVelocityErrorMinimum = np.min(MeridionalVelocityError)\n MeridionalVelocityErrorMaximum = np.max(MeridionalVelocityError)\n SurfaceElevationErrorMinimum = np.min(SurfaceElevationError)\n SurfaceElevationErrorMaximum = np.max(SurfaceElevationError)\n else:\n ZonalVelocityErrorMinimum = min(ZonalVelocityErrorMinimum,\n np.min(ZonalVelocityError))\n ZonalVelocityErrorMaximum = max(ZonalVelocityErrorMaximum,\n np.max(ZonalVelocityError))\n MeridionalVelocityErrorMinimum = min(MeridionalVelocityErrorMinimum,\n np.min(MeridionalVelocityError))\n MeridionalVelocityErrorMaximum = max(MeridionalVelocityErrorMaximum,\n np.max(MeridionalVelocityError))\n SurfaceElevationErrorMinimum = min(SurfaceElevationErrorMinimum,\n np.min(SurfaceElevationError))\n SurfaceElevationErrorMaximum = max(SurfaceElevationErrorMaximum,\n np.max(SurfaceElevationError))\n if iTime == nTime - 1:\n ZonalVelocityErrorMaximumMagnitude = max(abs(ZonalVelocityErrorMinimum),\n ZonalVelocityErrorMaximum)\n MeridionalVelocityErrorMaximumMagnitude = max(abs(MeridionalVelocityErrorMinimum),\n MeridionalVelocityErrorMaximum)\n SurfaceElevationErrorMaximumMagnitude = max(abs(SurfaceElevationErrorMinimum),\n SurfaceElevationErrorMaximum)\n if ProblemType_NoExactSolution:\n ZonalVelocityErrorLimits = [ZonalVelocityErrorMinimum,ZonalVelocityErrorMaximum]\n MeridionalVelocityErrorLimits = [MeridionalVelocityErrorMinimum,\n MeridionalVelocityErrorMaximum]\n SurfaceElevationErrorLimits = [SurfaceElevationErrorMinimum,\n SurfaceElevationErrorMaximum] \n else:\n ZonalVelocityErrorLimits = [-ZonalVelocityErrorMaximumMagnitude,\n ZonalVelocityErrorMaximumMagnitude]\n MeridionalVelocityErrorLimits = [-MeridionalVelocityErrorMaximumMagnitude,\n MeridionalVelocityErrorMaximumMagnitude]\n SurfaceElevationErrorLimits = [-SurfaceElevationErrorMaximumMagnitude,\n SurfaceElevationErrorMaximumMagnitude]\n if ComputeOnlyExactSolution:\n FileName = ProblemType_FileName + '_State_' + iTimeFormat %iTime\n else:\n FileName = (ProblemType_FileName + '_' + TimeIntegratorShortForm + '_State_' \n + iTimeFormat %iTime)\n DGSEM2DClass.WriteInterpolatedStateDGSEM2D(myDGSEM2D,FileName,ComputeOnlyExactSolution)\n if not(ComputeOnlyExactSolution) and np.mod(iTime,nRestartFrequency) == 0.0:\n FileName = (ProblemType_FileName + '_' + TimeIntegratorShortForm \n + '_RestartState_' + iTimeFormat %iTime)\n DGSEM2DClass.WriteStateDGSEM2D(myDGSEM2D,FileName)\n if iTime == nTime - 1:\n if PlotExactZonalVelocity:\n FileName = ProblemType_FileName + '_ExactZonalVelocityLimits'\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,ExactZonalVelocityLimits,\n FileName)\n if not(ComputeOnlyExactSolution):\n FileName = (ProblemType_FileName + '_' + TimeIntegratorShortForm \n + '_ZonalVelocityErrorLimits')\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,\n ZonalVelocityErrorLimits,FileName)\n if PlotExactMeridionalVelocity:\n FileName = ProblemType_FileName + '_ExactMeridionalVelocityLimits'\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,\n ExactMeridionalVelocityLimits,FileName)\n if not(ComputeOnlyExactSolution):\n FileName = (ProblemType_FileName + '_' + TimeIntegratorShortForm\n + '_MeridionalVelocityErrorLimits')\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,\n MeridionalVelocityErrorLimits,FileName)\n if PlotExactSurfaceElevation:\n FileName = ProblemType_FileName + '_ExactSurfaceElevationLimits'\n 
CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,ExactSurfaceElevationLimits,\n FileName) \n if not(ComputeOnlyExactSolution):\n FileName = (ProblemType_FileName + '_' + TimeIntegratorShortForm \n + '_SurfaceElevationErrorLimits')\n CR.WriteStateVariableLimitsToFile(myDGSEM2D.OutputDirectory,\n SurfaceElevationErrorLimits,FileName)\n if not(ComputeOnlyExactSolution) and iTime < nTime - 1:\n TSM.TimeIntegration(myDGSEM2D)\n else: # if iCounter == 1:\n if np.mod(iTime,nDumpFrequency) == 0.0:\n if PlotFigures:\n if ComputeOnlyExactSolution:\n FileName = ProblemType_FileName + '_State_' + iTimeFormat %iTime + '.tec'\n else:\n FileName = (ProblemType_FileName + '_' + TimeIntegratorShortForm + '_State_' \n + iTimeFormat %iTime + '.tec')\n if ReadFromSELFOutputData:\n DataType = 'Unstructured'\n else:\n DataType = 'Structured'\n if ProblemType == 'Plane_Gaussian_Wave' or ProblemType == 'Barotropic_Tide':\n non_integral_seconds = True\n else:\n non_integral_seconds = False\n DisplayTime = FormatSimulationTime(myDGSEM2D.time,non_integral_seconds=non_integral_seconds,\n display_time=False,ProblemType_PlanetaryTopographicRossbyWave\n =ProblemType_PlanetaryTopographicRossbyWave,\n ProblemType_NoExactSolution=ProblemType_NoExactSolution)\n UseGivenColorBarLimits = True\n SpecifyDataTypeInPlotFileName = False\n DGSEM2DClass.PythonPlotStateDGSEM2D(myDGSEM2D,FileName,DataType,DisplayTime,\n UseGivenColorBarLimits,ComputeOnlyExactSolution,\n SpecifyDataTypeInPlotFileName,PlotNumericalSolution)","sub_path":"src/DGSEM_Rotating_Shallow_Water/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":40662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
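The DGSEM driver record above makes two passes over the time loop: pass 0 advances the solution and writes state and restart files, pass 1 re-reads the dumps and plots them. A minimal sketch of the dump cadence the driver keys on; the print calls are illustrative stand-ins for the module's writers, not its API:

    nTime, nDumpFrequency, nRestartFrequency = 20, 4, 10

    for iTime in range(nTime):
        if iTime % nDumpFrequency == 0:
            print(f"step {iTime}: write state file")    # stand-in for WriteInterpolatedStateDGSEM2D
        if iTime % nRestartFrequency == 0:
            print(f"step {iTime}: write restart file")  # stand-in for WriteStateDGSEM2D

With integer operands, plain % is equivalent to the np.mod(iTime, nDumpFrequency) == 0.0 test used in the record.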
+{"seq_id":"149170966","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 12 10:50:56 2019\r\n\r\n@author: Chris Havenstein, David Stroud\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import KFold # EDIT: I had to import KFold\r\n\r\n# adapt this to run\r\n\r\n# Recommend to be done before live class 2\r\n# 1. Write a function to take a list or dictionary of clfs and hypers ie use\r\n# logistic regression, each with 3 different sets of hyper parameters for each\r\n\r\n# Recommend to be done before live class 3\r\n# 2. expand to include larger number of classifiers and hyperparameter settings\r\n# 3. find some simple data\r\n# 4. generate matplotlib plots that will assist in identifying the optimal clf\r\n# and parameters settings\r\n\r\n# Recommend to be done before live class 4\r\n# 5. Please set up your code to be run and save the results to the\r\n# directory that its executed from\r\n# 6. Investigate grid search function\r\n\r\n\r\n# EDIT: array M includes the X's\r\nM = np.array([[1, 2], [3, 4], [4, 5], [4, 5], [4, 5], [4, 5], [4, 5], [4, 5], [\r\n 4, 5], [4, 5], [4, 5], [4, 5], [4, 5], [4, 5], [4, 5], [4, 5]])\r\nprint(M)\r\n# EDIT: array L includes the Y's, they're all ones and as such is only for\r\n# example (an ML algorithm would always predict 1).\r\nL = np.ones(M.shape[0])\r\nprint(L)\r\n\r\n# EDIT: a single value, 5, to use for 5-fold (k-fold) cross validation\r\nn_folds = 5\r\n\r\n# EDIT: pack the arrays together into \"data\"\r\ndata = (M, L, n_folds)\r\n\r\n# EDIT: Let's see what we have.\r\nprint(data)\r\n\r\n\r\n# data expanded\r\nM, L, n_folds = data\r\n# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html\r\nkf = KFold(n_splits=n_folds)\r\n\r\nprint(kf)\r\n\r\n# if you want to see all values in NumPy arrays\r\n# np.set_printoptions(threshold=np.inf)\r\n\r\n# EDIT: Show what is kf.split doing\r\nfor ids, (train_index, test_index) in enumerate(kf.split(M, L)):\r\n print(\"k fold = \", ids)\r\n print(\" train indexes\", train_index)\r\n print(\" test indexes\", test_index)\r\n\r\n# EDIT: A function, \"run\", to run all our classifiers against our data.\r\n\r\n\r\ndef run(a_clf, data, clf_hyper={}):\r\n M, L, n_folds = data # EDIT: unpack the \"data\" container of arrays\r\n kf = KFold(n_splits=n_folds) # JS: Establish the cross validation\r\n ret = {} # JS: classic explicaiton of results\r\n\r\n # EDIT: We're interating through train and test indexes by using kf.split\r\n for ids, (train_index, test_index) in enumerate(kf.split(M, L)):\r\n # from M and L.\r\n # We're simply splitting rows into train and test rows\r\n # for our five folds.\r\n\r\n # JS: unpack paramters into clf if they exist #EDIT: this gives all keyword arguments except\r\n clf = a_clf(**clf_hyper)\r\n # for those corresponding to a formal parameter\r\n # in a dictionary.\r\n\r\n # EDIT: First param, M when subset by \"train_index\",\r\n clf.fit(M[train_index], L[train_index])\r\n # includes training X's.\r\n # Second param, L when subset by \"train_index\",\r\n # includes training Y.\r\n\r\n # EDIT: Using M -our X's- subset by the test_indexes,\r\n pred = clf.predict(M[test_index])\r\n # predict the Y's for the test rows.\r\n\r\n ret[ids] = {'clf': clf, # EDIT: Create arrays of\r\n 'train_index': train_index,\r\n 'test_index': test_index,\r\n 'accuracy': accuracy_score(L[test_index], pred)}\r\n return ret\r\n\r\n\r\n# Use run 
function\r\nresults = run(RandomForestClassifier, data, clf_hyper={})\r\n\r\nprint(results)\r\n\r\n\r\n# After explaining.... talk about lists and dictionaries.\r\n# https://docs.python.org/3/tutorial/\r\n\r\n# Also... Here's your clfs\r\n# https://scikit-learn.org/stable/supervised_learning.html\r\n\r\n# Go through examples in this order:\r\n# ** operator\r\n# list1,\r\n# dictionary,\r\n# list2\r\n","sub_path":"HW2/DeathToGridSearch-commented.py","file_name":"DeathToGridSearch-commented.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
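The TODO comments in the record above ask for a function that takes a list or dictionary of classifiers and hyperparameters. A hedged sketch of that exercise built on the record's own run(), data, and imports; the classifier and hyperparameter choices here are illustrative, not the authors':

    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import RandomForestClassifier

    clfs_and_hypers = {
        LogisticRegression: [{'C': 0.1}, {'C': 1.0}, {'C': 10.0}],
        RandomForestClassifier: [{'n_estimators': 10}, {'n_estimators': 50}, {'n_estimators': 100}],
    }

    all_results = {}
    for clf, hyper_list in clfs_and_hypers.items():
        for hyper in hyper_list:
            # run() performs k-fold CV and returns per-fold accuracies
            all_results[(clf.__name__, str(hyper))] = run(clf, data, clf_hyper=hyper)

    print(all_results)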
+{"seq_id":"121817048","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'event'\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^new_event$', views.new_event, name='new_event'),\n url(r'^(?P[0-9]+)/$', views.event_detail, name='event_detail'),\n #url(r'^(?P[0-9]+)/(?P[0-9]+)$', views.delete_name, name='del_name'),\n url(r'^(?P[0-9]+)/delete$', views.delete_name, name='del'),\n url(r'^login_page$', views.login_page, name='login_page'),\n url(r'^login$', views.login_r, name='login'),\n url(r'^logout$', views.logout_page, name='logout'),\n url(r'^adminhome$', views.admin_home, name='adminhome'),\n url(r'^delete_e$', views.delete_event, name='del_event'),\n url(r'^about$', views.about, name='about'),\n\n]\n","sub_path":"event/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"258299150","text":"from random import seed\nimport matplotlib.pyplot as plt\nfrom random import random, shuffle\nimport numpy as np\nfrom math import acos, sqrt\n\n\ndef create_abeille(T):\n bees = []\n min_x = min([i[0] for i in T])\n max_x = max([i[0] for i in T])\n min_y = min([i[1] for i in T])\n max_y = max([i[1] for i in T])\n for i in range(len(T)-2):\n bees.append([min_x + random()*(max_x-min_x), min_y + random()*(max_y-min_y)])\n return bees\n\ndef sonar2(bee, bees, T):\n nearest = []\n\n X = bees + T\n # calcul carre des normes\n normes= []\n for i in X:\n normes.append(abs((bee[0]-i[0])**2 + (bee[1]-i[1])**2))\n for i in range(3):\n idx = normes.index(min(normes))\n normes.pop(idx)\n nearest.append(X.pop(idx))\n return nearest\n\ndef sonar(bee, T):\n nearest = []\n #X=bees+T\n X=T\n # calcul carre des normes\n normes= []\n for i in X:\n normes.append( sqrt((bee[0]-i[0])**2 + (bee[1]-i[1])**2) )\n\n for i in range(3):\n idx = normes.index(min(normes))\n normes.pop(idx)\n nearest.append(X[idx])\n X.pop(idx)\n \n return nearest\n\ndef fobj(bee,proche):\n\ts=0\n\tfor p in proche :\n\t\ts+= (p[0]-bee[0])**2 + (p[1]-bee[1])**2\n\treturn s\n\ndef gradobj(bee,proche):\n\ts = [0, 0]\n\tfor p in proche :\n\t\ts[0]+=2*(bee[0]-p[0])\n\t\ts[1]+=2*(bee[1]-p[1])\n\treturn s\n\ndef angles(proche): # p0-p1 = l2// p0-p2=l1 //p1-p2=l0\n\n l0=sqrt( (proche[2][0]-proche[1][0])**2 + (proche[2][1]-proche[1][1])**2 )\n l1=sqrt( (proche[0][0]-proche[2][0])**2 + (proche[0][1]-proche[2][1])**2 )\n l2=sqrt( (proche[0][0]-proche[1][0])**2 + (proche[0][1]-proche[1][1])**2 )\n eps=0.0001\n if (l0==0 or l1==0 or l2==0) :\n return -1\n if ( (abs(l0-l1) <=eps) and (abs(l1-l2)<=eps) and (abs(l0-l2)<=eps) ):\n return -2\n A=float(l1**2 + l2**2 - l0**2)/ float(2.0*l1*l2)\n B=float(l0**2 + l2**2 - l1**2)/ float(2.0*l0*l2)\n C=(l1**2 + l0**2 - l2**2)/ (2.0*l1*l0)\n Z=[A,B,C]\n for i in Z:\n if abs(float(i))<=1.0:\n if float(i)<=-1:\n i=-1.0\n else:\n i=1.0 \n a0= acos(Z[0])\n a1= acos(Z[1])\n a2= acos(Z[2])\n \n if (a0 >=2*np.pi/3):\n return 0\n if (a1 >=2*np.pi/3):\n return 1\n if (a2 >=2*np.pi/3):\n return 2\n return -1\n\ndef deplacement2(bee, proche):\n #teste angle >=120\n ind=angles(proche)\n if (ind >-1) :\n bee=proche[ind]\n return bee\n #teste triangle equilateral\n if (ind==-2) :\n return deplacement(bee,proche)\n # sinon descente gradient \n eps=0.000001\n alpha=0.0002\n grad=gradobj(bee,proche)\n f=5000\n f2=fobj(bee,proche)\n while abs(f-f2)>=eps:\n bee[0]=bee[0] - alpha*grad[0]\n bee[1]=bee[1] - alpha*grad[1]\n grad=gradobj(bee,proche)\n f=f2\n f2=fobj(bee,proche)\n return bee\n\ndef deplacement(bee, proche):\n bee = [sum([i[0] for i in proche])/3.0, sum([i[1] for i in proche])/3.0]\n return bee\n\ndef affichage(bees, T, links):\n plt.scatter([i[0] for i in bees], [i[1] for i in bees], s = 50, c = 'red')\n plt.scatter([i[0] for i in T], [i[1] for i in T], s = 80, c = 'blue')\n for idx, lien in enumerate(links):\n X0 = bees[idx]\n for X1 in lien:\n if X1 in T or X1 in bees :\n plt.plot([X0[0], X1[0]], [X0[1], X1[1]], c='green')\n plt.show()\n\nif __name__ == '__main__':\n seed(1)\n #T = [[0.0,1.0], [0.0,0.], [0.5, 0.5], [0.4, 0.8], [0.7,0.7], [0.2,0.1], [ 0.3,0.7]]\n #T = [[0.0,5.0], [0.0,0.], [2.5, 2.5], [2.0, 5*0.8], [3.5,3.5], [1.0,0.5], [ 1.5,3.5]]\n T = [[0.0,0.0], [0.0,1.0], [1.0,1.0], [1.0,0.0]]\n bees = create_abeille(T)\n links = [[] for i in range(len(bees))]\n for i in range(8):\n #affichage(bees, T, links)\n\n for j, bee in enumerate(bees):\n #T = [[0.0,1.0], [0.0,0.], [0.5, 0.5], 
[0.4, 0.8], [0.7,0.7], [0.2,0.1], [ 0.3,0.7]]\n T = [[0.0,0.0], [0.0,1.0], [1.0,1.0], [1.0,0.0]]\n nearest = sonar2(bee, [a for a in bees if a!=bee], T)\n #nearest = sonar(bee, T)\n T = [[0.0,0.0], [0.0,1.0], [1.0,1.0], [1.0,0.0]]\n #T = [[0.0,1.0], [0.0,0.], [0.5, 0.5], [0.4, 0.8], [0.7,0.7], [0.2,0.1], [ 0.3,0.7]] \n bees[j] = deplacement2(bee, nearest)\n links[j] = nearest\n \n affichage(bees, T, links)\n print(bees)","sub_path":"abeille2.py","file_name":"abeille2.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
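The case analysis in deplacement2 above mirrors the classical Fermat-point construction for three sites: stop at a vertex whose angle is at least 120 degrees, take the centroid for an equilateral triangle, and otherwise descend on the squared-distance objective. For comparison, a minimal Weiszfeld iteration for the geometric median; this is an alternative to the gradient loop, not part of the script:

    from math import hypot

    def weiszfeld(points, iters=100, eps=1e-9):
        # start at the centroid, then iterate the distance-weighted average
        x = sum(p[0] for p in points) / len(points)
        y = sum(p[1] for p in points) / len(points)
        for _ in range(iters):
            num_x = num_y = denom = 0.0
            for px, py in points:
                d = hypot(x - px, y - py)
                if d < eps:          # sitting on a site: that site is the median
                    return [px, py]
                num_x += px / d
                num_y += py / d
                denom += 1.0 / d
            x, y = num_x / denom, num_y / denom
        return [x, y]

    print(weiszfeld([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]]))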
+{"seq_id":"130861852","text":"def func(l) -> list:\n n = len(l)\n for i in range(1,n):\n preindex = i-1\n cur = l[i]\n while preindex>=0 and l[preindex] > cur:\n l[preindex+1] = l[preindex]\n preindex -= 1\n l[preindex+1] = cur\n return l\n\n\n\nif __name__ == '__main__':\n \"\"\"\n 平均时间复杂度:O(n^2)\n 最好情况:O(n)\n 最坏情况:O(n^2)\n 空间复杂度:O(1)\n 排序方式:In-place\n 稳定性:稳定\n \"\"\"\n print()\n l = [2,3,5,6,3,2,1,3,4,5,6,7,8,5,4,1,3,4]\n print('unsorted: {}'.format(l))\n print()\n print('sorted: {}'.format(func(l)))","sub_path":"Solution/Sorting Methods/InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"66492925","text":"import os, cv2, numpy as np\n\ndef clrHistogramOfImage(img):\n\tchannels = cv2.split(img)\n\tcolors = ('b', 'g', 'r')\n\tfeatures = []\n\tfeature_data = \"\"\n\tctr = 0\n\tfor (channel, color) in zip(channels, colors):\n\t\tctr += 1\n\t\thistogram = cv2.calcHist([channel], [0], None, [256], [0, 256])\n\t\tfeatures.extend(histogram)\n\t\t\n\t\tprev = None\n\t\tl = []\n\t\tii=0\n\t\twhile(ii<50):\n\t\t\tif(prev!=None):\n\t\t\t\telem = np.argmax(histogram)\n\t\t\t\tif(abs(prev-elem)>=2):\n\t\t\t\t\tl.append(elem)\n\t\t\t\t\tprev=elem; histogram[elem] = [-200]\n\t\t\t\t\tii+=1\n\t\t\t\telse:\n\t\t\t\t\thistogram[elem] = [-200]\n\t\t\telse:\n\t\t\t\tl.append(np.argmax(histogram))\n\t\t\t\tprev=l[-1]; histogram[l[-1]] = [-200]\n\t\t\t\tii+=1\n\n\t\tif(ctr==1):\n\t\t\tblue = l\n\t\telif(ctr==2):\n\t\t\tgreen = l\n\t\telif(ctr==3):\n\t\t\tred = l\n\t\t\tfor i in range(50):\n\t\t\t\tfeature_data += \"{},{},{}\\n\".format(red[i],green[i],blue[i])\n\t\t\tfeature_data = feature_data.strip()\n\t\n\twith open(\"colorDetectionProg/img_clr_data.csv\", 'w') as F:\n\t\tF.write(feature_data)\n\ndef clrHistogramForTrainingImage(img):\n\tdataSRC = img.split(\"/\")[-2]\n\timage = cv2.imread(img)\n\n\tchannels = cv2.split(image)\n\tcolors = ('b', 'g', 'r')\n\tfeatures = []\n\tfeature_data = ''\n\tctr = 0\n\tfor (channel, color) in zip(channels, colors):\n\t\tctr += 1\n\t\thistogram = cv2.calcHist([channel], [0], None, [256], [0, 256])\n\t\tfeatures.extend(histogram)\n\n\t\telem = np.argmax(histogram)\n\t\t\n\t\tif(ctr==1):\n\t\t\tblue = elem\n\t\telif(ctr==2):\n\t\t\tgreen = elem\n\t\telif(ctr==3):\n\t\t\tred = elem\n\t\t\tfeature_data = \"{},{},{},{}\\n\".format(red,green,blue,dataSRC)\n\n\twith open(\"colorDetectionProg/training_clr_data.csv\", 'a') as F:\n\t\tF.write(feature_data)\n\n\ndef Training():\n\tpath=os.getcwd()+\"/trainingSet\"\n\tfor root,_,files in os.walk(path):\n\t\tfor f in files:\n\t\t\tclrHistogramForTrainingImage(root+\"/\"+f)\n","sub_path":"Task-Instance-5057924276158464/colorDetectionProg/clrHistogramExtraction.py","file_name":"clrHistogramExtraction.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"421910368","text":"from operator import mul\nfrom functools import reduce\n\ndef cmb(n,r):\n r = min(n-r,r)\n if r == 0: return 1\n over = reduce(mul, range(n, n - r, -1))\n under = reduce(mul, range(1,r + 1))\n return over // under\n\nans = 0\nN, M = map(int,input().split())\na = []\nfor _ in range(M):\n a.append(int(input()))\n\nprint(N,M,a)\n\na = N\nb = 0\ntotal = 0\n\nwhile( a >= b ):\n total += cmb(a,b)\n a -= 1\n b += 1\n\nprint(total)\n\n# 突破できず\n\n\n\n","sub_path":"ABC/ABC129/C-ABC129.py","file_name":"C-ABC129.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"277466137","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 27 00:29:01 2021\n\n@author: guang\n\"\"\"\n\n# Azadeh: wrote ScrapeClass.py\n# This script is written using different methods for converting html to pd then save as CSV\n# Purpose is to save scraped html tables as CSV for dataframes and plotting\n\nimport json\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport os\nimport codecs\n#import lxml\n#import html5lib\nimport csv\nimport requests\nimport urllib.error\nimport urllib.parse\n\nclass scrapping:\n def __init__(self):\n self.current_dir = os.getcwd()\n self.day = self.get_date()\n self.local_html = os.path.join('local_html', self.get_filename())\n self.file_dir = os.path.join(self.current_dir, self.local_html)\n self.scrapped = self.scrape_html()\n self.rows = self.get_rows()\n self.df = self.get_df()\n \n def get_date(self):\n response = int(input(\"Enter day only (1-31):\\n\"))\n return response\n \n def get_filename(self):\n # missing 2021-03-21 data\n if self.day>=15 and self.day<=17:\n return 'local_page2021-03-17.html'\n elif self.day >=18 and self.day <=20:\n return 'local_page2021-03-20.html'\n elif self.day >= 22 and self.day <=23:\n return 'local_page2021-03-24.html'\n elif self.day >=24 and self.day <=26:\n return 'local_page2021-03-26.html'\n elif self.day >=27 and self.day <=29:\n return 'local_page2021-03-29.html'\n else:\n print(\"Sorry, the day you enterred is not in the database\")\n \n def scrape_html(self):\n file = codecs.open(self.file_dir, \"r\",\"utf-8\")\n html = file.read()\n scrapped = BeautifulSoup(html, 'html.parser')\n return scrapped\n \n def get_tableid(self):\n if self.day == 17 or self.day == 20 or self.day==26 or self.day==29:\n return 'main_table_countries_today'\n elif self.day == 16 or self.day == 19 or self.day==25 or self.day==28 or self.day==23:\n return 'main_table_countries_yesterday'\n elif self.day == 15 or self.day == 18 or self.day==24 or self.day==27 or self.day==22:\n return 'main_table_countries_yesterday2'\n else:\n print(\"Interested date is not in stored in database\")\n \n def get_rows(self):\n table = self.scrapped.find(id=self.get_tableid())\n rows = table.find_all('tr')\n return rows\n \n def get_df(self):\n try:\n table_data = []\n for row in self.rows:\n row_data = []\n for cell in row.findAll('td'):\n row_data.append(cell.text)\n if(len(row_data) > 0):\n data_item = {\"Country\": row_data[1],\n \"TotalCases\": row_data[2],\n \"NewCases\": row_data[3],\n \"TotalDeaths\": row_data[4],\n \"NewDeaths\": row_data[5],\n \"TotalRecovered\": row_data[6],\n \"NewRecovered\": row_data[7],\n \"ActiveCases\": row_data[8],\n \"CriticalCases\": row_data[9],\n \"Totcase1M\": row_data[10],\n \"Totdeath1M\": row_data[11],\n \"TotalTests\": row_data[12],\n \"Tottest1M\": row_data[13],\n \"Population\": row_data[14]\n }\n table_data.append(data_item)\n df = pd.DataFrame(table_data)\n df = df.drop(df[df.Population==''].index)\n df['NewCases']=df['NewCases'].str.strip('+')\n #df['NewCases']=df['NewCases'].replace(',','', regex=True).astype(float)\n #df['NewDeaths']=df['NewDeaths'].replace(',','', regex=True).astype(float)\n #df['NewRecovered']=df['NewRecovered'].replace(',','', regex=True).astype(float)\n #df['Totdeath1M']=df['Totdeath1M'].replace(',','', regex=True).astype(float)\n return df\n except:\n print('Error for converting to DataFrame')\n \n def save_df(self):\n csvName = 'corona2021-03-'+str(self.day)+'.csv'\n csvPath = os.path.join(self.current_dir, 
'corona_tables')\n csv_dir = os.path.join(csvPath, csvName)\n self.df.to_csv(csv_dir, index=True)\n print(\"Saved to local csv file\")\n \n \ndef main_save_localhtml(url, htmlname):\n response = requests.get(url)\n html = response.content\n html_path = os.path.join(os.getcwd(), 'local_html')\n html_name = os.path.join(html_path, htmlname)\n f = open(html_name, 'wb')\n f.write(html)\n f.close\n \n\n","sub_path":"scrape_module.py","file_name":"scrape_module.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"653581630","text":"from collections import Counter\n\ndef is_isogram(string):\n #input = string\n # count the number of ocurrence of each element\n #frequency = Counter(input)\n #print(type(frequency))\n #print(str(frequency))\n # if no character appear more than once - isogram\n\n\n # if a character appears more than once - and they are not space or hypens - not isogram\n #\n # if they are space or hypens - isogram\n #\n # Convert the word or sentence in lower case letters.\n clean_string = string.lower()\n\n\n # Make an empty list to append unique letters\n letter_list = []\n\n for letter in clean_string:\n\n # If letter is an alphabet then only check\n if letter.isalpha():\n if letter in letter_list:\n return False\n letter_list.append(letter)\n\n return True\n","sub_path":"isogram/isogram.py","file_name":"isogram.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"37913928","text":"fields = None\n\ndef input():\n global fields\n input_data = list(filter(None, open('day_16/input.txt').read().split('\\n\\n')))\n fields = list(filter(None, input_data[0].split('\\n')))\n fields = list(map(parse_field, fields))\n your_ticket = parse_ticket(input_data[1].split('\\n')[1])\n nearby_tickets = list(map(parse_ticket, list(filter(None, input_data[2].split('\\n')))[1:]))\n return fields, your_ticket, nearby_tickets\n\ndef parse_ticket(ticket):\n fields = filter(None, ticket.split(','))\n return list(map(int, fields))\n\ndef parse_field(field):\n name, ranges = field.split(':')\n ranges = ranges.split(' or ')\n ranges = list(map(lambda r: r.split('-'), ranges))\n ranges = list(map(lambda r: (int(r[0].strip()), int(r[1].strip())), ranges))\n return name, ranges\n\ndef valid_for_ranges(field, ranges):\n for r in ranges:\n if r[0] <= field <= r[1]:\n return True\n return False\n \ndef valid_field(field):\n for name, ranges in fields:\n if valid_for_ranges(field, ranges):\n return True\n return False\n\ndef extract_invalid_fields(ticket):\n invalids = []\n for field in ticket:\n if not valid_field(field):\n invalids.append(field)\n return invalids\n\ndef sum_invalid_fields(ticket):\n return sum(extract_invalid_fields(ticket))\n\ndef part_1(input_data):\n nearby_tickets = input_data[2]\n return sum(map(sum_invalid_fields, nearby_tickets))\n\ndef valid_ticket(ticket):\n return all(map(valid_field, ticket))\n\ndef nonify_invalid_ticket(ticket):\n return ticket if valid_ticket(ticket) else None\n\ndef valid_for_all_tickets(ranges, index, tickets):\n for ticket in tickets:\n if not valid_for_ranges(ticket[index], ranges):\n return False\n return True\n\ndef part_2(input_data):\n fields, your_ticket, nearby_tickets = input_data\n valid_nearby_tickets = list(filter(None, map(nonify_invalid_ticket, nearby_tickets)))\n posible_field_ids = {}\n for name, ranges in fields:\n possible_indices = set()\n for ticket_field_index in range(0, len(your_ticket)):\n if valid_for_all_tickets(ranges, ticket_field_index, valid_nearby_tickets):\n possible_indices.add(ticket_field_index)\n posible_field_ids[name] = possible_indices\n\n field_ids = {}\n while True:\n taken_ids = set()\n for name, possible_indices in posible_field_ids.items():\n if len(possible_indices) == 1:\n id = possible_indices.pop()\n field_ids[name] = id\n taken_ids.add(id)\n if not taken_ids:\n break\n for id in taken_ids:\n for name, possible_indices in posible_field_ids.items():\n if id in possible_indices:\n possible_indices.remove(id)\n\n total = 1\n for name, id in field_ids.items():\n if name.startswith('departure'):\n total *= your_ticket[id]\n\n return total\n\nif __name__ == '__main__':\n print(part_1(input()))\n print(part_2(input()))\n","sub_path":"day_16/day_16.py","file_name":"day_16.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"30018734","text":"class DoubleLinkedListNode(object):\n \n\tdef __init__(self, value, nxt, prev):\n\t\tself.value = value\n\t\tself.next = nxt\n\t\tself.prev = prev\n\n\tdef __repr__(self):\n\t\tnval = self.next and self.next.value or None\n\t\tpval = self.prev and self.prev.value or None\n\t\treturn f\"[{self.value}, {repr(nval)}, {repr(pval)}]\"\n\nclass DoubleLinkedList(object):\n\n\tdef __init__(self):\n\t\tself.begin = None\n\t\tself.end = None\n\t\tself.counter = 0\n\n\tdef push(self, obj):\n\t\t\"\"\"Append to the end of the list\"\"\"\n\t\tself._invariant()\n\t\tnode = DoubleLinkedListNode(obj, None, self.end)\n\t\tif self.counter == 0:\n\t\t\tself.begin = self.end = node\n\t\telse:\n\t\t\tself.end.next = node\n\t\t\tself.end = node\n\t\tself.counter +=1\n\n\tdef pop(self):\n\t\t\"\"\"Remove last item and return\"\"\"\n\t\tself._invariant()\n\t\tif not self.end:\n\t\t\treturn None\n\t\tif self.begin == self.end:\n\t\t\tpopped = self.end.value\n\t\t\tself.begin = None\n\t\t\tself.end = None\n\t\t\tself.counter = 0\n\t\t\treturn popped\n\t\telse:\n\t\t\tpopped = self.end\n\t\t\tself.end = self.end.prev\n\t\t\tself.end.next = None\n\t\t\tself.counter -= 1\n\t\t\treturn popped.value\n\n\tdef shift(self, obj):\n\t\t\"\"\"Append to the begining of a list\"\"\"\n\t\tself._invariant()\n\t\tnode = DoubleLinkedListNode(obj, self.begin, None)\n\t\tif self.counter == 0:\n\t\t\tself.end = node\n\t\telse:\n\t\t\tself.begin.prev = node\n\t\tself.counter += 1\n\t\tself.begin = node\n\n\tdef unshift(self):\n\t\t\"\"\"Removes the first item (from begin) and returns it.\"\"\"\n\t\tself._invariant()\n\t\tif not self.begin:\n\t\t\treturn None\n\t\tif self.begin == self.end:\n\t\t\tshifted = self.begin.value\n\t\t\tself.begin = None\n\t\t\tself.end = None\n\t\t\tself.counter = 0\n\t\t\treturn shifted\n\t\telse:\n\t\t\tshifted = self.begin.value\n\t\t\tself.begin = self.begin.next\n\t\t\tself.begin.prev = None\n\t\t\tself.counter -= 1\n\t\t\treturn shifted\n\n\tdef detach_node(self, node):\n\t\t\"\"\"You'll need to use this operation sometimes, but mostly\n\t\t\tinside remove(). 
It should take a node, and detach it from the\n\t\t\tlist, whether the node is at the front, end, or in the middle.\"\"\"\n\t\tself._invariant()\n\t\tif node == self.begin == self.end:\n\t\t\tself.begin = None\n\t\t\tself.end = None\n\t\telif node == self.begin:\n\t\t\tnode.next.prev = None\n\t\t\tself.begin = node.next\n\t\telif node == self.end:\n\t\t\tnode.prev.next = None\n\t\t\tself.end = node.prev\n\t\telse:\n\t\t\tnode.next.prev = node.prev\n\t\t\tnode.prev.next = node.next\n\t\tself.counter -= 1\n\n\tdef remove(self, obj):\n\t\t\"\"\"Finds a matching item and removes it from the list.\"\"\"\n\t\tself._invariant()\n\t\tif not self.begin:\n\t\t\treturn None\n\t\tnode = self.begin\n\t\tindex = 0\n\t\twhile node.value != obj:\n\t\t\tif node == self.end:\n\t\t\t\treturn None\n\t\t\tnode = node.next\n\t\t\tindex +=1\n\t\tself.detach_node(node)\n\t\treturn index\n\n\tdef first(self):\n\t\t\"\"\"Returns a *reference* to the first item, does not remove.\"\"\"\n\t\tself._invariant()\n\t\tif not self.begin:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self.begin.value\n\n\tdef last(self):\n\t\t\"\"\"Returns a reference to the last item, does not remove.\"\"\"\n\t\tself._invariant()\n\t\tif not self.end:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self.end.value\n\n\tdef count(self):\n\t\t\"\"\"Counts the number of elements in the list.\"\"\"\n\t\treturn self.counter\n\n\tdef get(self, index):\n\t\t\"\"\"Get the value at index. (1 based indexing)\"\"\"\n\t\tself._invariant()\n\t\tif not self.begin:\n\t\t\treturn None\n\t\tif index > self.count():\n\t\t\treturn None\n\t\tif index < self.count()/2:\n\t\t\tnode = self.begin\n\t\t\tcounter = 1\n\t\t\twhile counter < index:\n\t\t\t\tnode = node.next\n\t\t\t\tcounter +=1\n\t\t\treturn node.value\n\t\telse:\n\t\t\tnode = self.end\n\t\t\tcounter = self.count()\n\t\t\twhile counter > index:\n\t\t\t\tnode = node.prev\n\t\t\t\tcounter -=1\n\t\t\treturn node.value\n\n\tdef dump(self, mark):\n\t\t\"\"\"Debugging function that dumps the contents of the list.\"\"\"\n\t\tself._invariant()\n\t\tif not self.begin:\n\t\t\tprint(mark)\n\t\t\tprint(\"Empty\")\n\t\telse:\n\t\t\tprint(mark)\n\t\t\tnode = self.begin\n\t\t\tprint(node)\n\t\t\twhile node != self.end:\n\t\t\t\tnode = node.next\n\t\t\t\tprint(node)\n\t\t\t\t\n# 1. Are there zero elements? Then self.begin and self.end need to be None.\n\n# 2. If there is one element, then self.begin and self.end have to be equal (point at same node).\n\n# 3. The first element must always have a prev that is None.\n\n# 4. The last element must always have a next that is None.\n\n\n\tdef _invariant(self):\n\t\tif self.count() == 0:\n\t\t\tassert self.begin == self.end == None\n\t\telif self.count() == 1:\n\t\t\tassert self.begin == self.end\n\t\t\tassert self.begin.prev == None\n\t\t\tassert self.end.next == None\n\t\telse:\n\t\t\tassert self.begin.prev == None\n\t\t\tassert self.end.next == None\n","sub_path":"ex14.py","file_name":"ex14.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
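A short exercise of the list, assuming the classes above; every operation re-checks _invariant() internally, so this doubles as a smoke test:

    dll = DoubleLinkedList()
    dll.push(1)
    dll.push(2)
    dll.shift(0)                  # list is now 0, 1, 2
    assert dll.count() == 3
    assert dll.first() == 0 and dll.last() == 2
    assert dll.get(2) == 1        # get() uses 1-based indexing per its docstring
    assert dll.unshift() == 0
    assert dll.pop() == 2
    assert dll.remove(1) == 0     # remove() returns the index where it found the value
    assert dll.count() == 0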
+{"seq_id":"418253003","text":"from time import sleep\n\nfrom django.shortcuts import (render, redirect, HttpResponse, \nHttpResponseRedirect, get_object_or_404)\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\n#from django.contrib import messages\n\nfrom .models import Post, Collection\nfrom .forms import HomePageLoginForm, EditPostForm, SaveToCollectionForm\n\ndef country_cleaned():\n\tcountry_sel = Post()\n\tcountry = country_sel.COUNTRIES\n\treturn country\n\ndef home(request):\n\tif request.method == \"POST\":\n\t\tform = HomePageLoginForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcd = form.cleaned_data\n\t\t\tuser = authenticate(username=cd['username'],\n\t\t\tpassword=cd['password'])\n\t\t\tif user is not None:\n\t\t\t\tif user.is_active:\n\t\t\t\t\tlogin(request, user)\n\t\t\t\t\treturn HttpResponseRedirect(\"/dashboard/verified/\")\n\t\t\t\telse:\n\t\t\t\t\treturn HttpResponse(\"Disabled account.\")\n\t\t\telse:\n\t\t\t\treturn HttpResponse(\"Invalid Login\")\n\telse:\n\t\tform = HomePageLoginForm()\n\tcontext = {\"form\": form}\n\treturn render(request, \"login.html\", context)\n\n@login_required\t\ndef index(request):\n\tposts = Post.objects.all()\n\tchecked_post = request.POST.getlist('checkbox')\n\tif 'checkbox' in request.POST:\n\t\tif 'submit' in request.POST:\n\t\t\tvalues = []\n\t\t\tfor i in checked_post:\n\t\t\t\tvalues.append(int(i))\n\t\t\tcollections = []\n\t\t\tfor i in values:\n\t\t\t\tselected_post = Post.objects.get(pk=i)\n\t\t\t\tcollections.append(selected_post)\n\n\t\tcontext = {\"collections\": collections}\n\t\treturn render(request, \"dashboard/ppost.html\", context)\n\telse:\n\t\tform = SaveToCollectionForm(request.POST)\n\t\tif request.method == \"POST\":\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\t\t\n\t\tcontext = {\"posts\": posts, \"countries\": country_cleaned(), \"form\": form}\n\t\treturn render(request, \"dashboard/collections.html\", context)\n\t\t\ndef verified(request):\n\tsleep(0.5)\n\treturn redirect(\"/dashboard/index/\")\n\tindex(request)\n\ndef logout_view(request):\n\tlogout(request)\n\treturn home(request)\n\n\ndef post_detail_edit(request, post):\n\tpost = get_object_or_404(Post, slug=post)\n\n\tif request.method == \"POST\":\n\t\tform = EditPostForm(request.POST, instance=post)\n\t\tif form.is_valid():\n\t\t\t# cd = form.cleaned_data\n\t\t\t# summary = cd[\"summary\"]\n\t\t\t# summary.save()\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect(\"/dashboard/index\")\n\telse:\n\t\tform = EditPostForm(instance=post)\n\n\tcontext = {\n\t\t\"form\": form,\n\t\t\"post\": post\n\t\t}\n\treturn render(request, \"dashboard/edit.html\", context)","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"163239019","text":"\"\"\"\nsetup.py\n\"\"\"\n \nfrom distutils.core import setup, Extension\n \n \nTradeX_module = Extension('_TradeX',\n sources=['TradeX_wrap.cxx', ],\n\t\t\t\t\t\t include_dirs=['../TradeX-dev'],\n library_dirs=['../TradeX-dev'],\n libraries=['TradeX'],\n )\n \nsetup (name = 'TradeX',\n version = '0.1',\n author = \"newgu8@163.com\",\n description = \"\"\"TradeX-Python27-API\"\"\",\n ext_modules = [TradeX_module],\n py_modules = [\"TradeX\"],\n\t url = \"https://github.com/huichou/TradeX-API\",\n )\n","sub_path":"Py27/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"302937245","text":"# coding: utf-8\n\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport codecs\nimport tensorflow as tf\n\nimport plot\nimport data_preprocess\nfrom Dataset import DataSet\n\nimport keras\nfrom keras.layers import Input, Dense, Dropout\nfrom keras.models import Model\nfrom keras import regularizers\nfrom keras import constraints\nfrom keras import backend as K\nfrom tensorflow.contrib import distributions\n\nimport utils\nimport shutil\nimport time\nfrom datetime import datetime\nimport shutil\nimport utils\nimport argparse\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation, rc\nimport seaborn as sns\nfrom IPython.display import HTML\n\nseed = 42\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\ndef linear(input, output_dim, scope=None, stddev=1.0):\n with tf.variable_scope(scope or 'linear'):\n w = tf.get_variable(\n 'w',\n [input.get_shape()[1], output_dim],\n initializer=tf.random_normal_initializer(stddev=stddev)\n )\n b = tf.get_variable(\n 'b',\n [output_dim],\n initializer=tf.random_normal_initializer(stddev=stddev)\n )\n return tf.matmul(input, w) + b\n\n\ndef encoder(input, feature_num, dropout):\n with tf.variable_scope(\"encoder\"):\n out = Dense(feature_num // 4, activation=\"relu\")(input)\n if dropout > 0.0:\n out = keras.layers.Dropout(dropout)(out)\n out = Dense(feature_num // 16, activation=\"relu\")(out)\n out = Dense(feature_num // 32)(out)\n out = keras.layers.advanced_activations.PReLU(alpha_initializer=\"zero\", weights=None)(out)\n return out\n\n\ndef decoder(input, feature_num, dropout):\n with tf.variable_scope(\"decoder\") as D:\n # out = Dropout(0.2)(input)\n out = Dense(feature_num // 16, activation=\"relu\")(input)\n out = Dense(feature_num // 4, activation=\"relu\")(out)\n # out = Dense(self.feature_num, kernel_constraint=constraints.non_neg, bias_constraint=constraints.non_neg)(out)\n\n if dropout > 0.0:\n out = keras.layers.Dropout(dropout)(out)\n out = Dense(self.feature_num, kernel_regularizer=regularizers.l2(0.01))(out)\n out = keras.layers.advanced_activations.PReLU(weights=None, alpha_initializer=\"zero\")(out)\n return out\n\ndef mlp(input, h_dim):\n init_const = tf.constant_initializer(0.0)\n init_norm = tf.random_normal_initializer()\n w0 = tf.get_variable('w0', [input.get_shape()[1], h_dim], initializer=init_norm)\n b0 = tf.get_variable('b0', [h_dim], initializer=init_const)\n w1 = tf.get_variable('w1', [h_dim, h_dim], initializer=init_norm)\n b1 = tf.get_variable('b1', [h_dim], initializer=init_const)\n h0 = tf.tanh(tf.matmul(input, w0) + b0)\n h1 = tf.tanh(tf.matmul(h0, w1) + b1)\n return h1, [w0, b0, w1, b1]\n\n\ndef generator(input, h_dim, feature_nums):\n transform, params = mlp(input, h_dim)\n init_const = tf.constant_initializer(0.0)\n init_norm = tf.random_normal_initializer()\n w = tf.get_variable('g_w', [h_dim, feature_nums], initializer=init_norm)\n b = tf.get_variable('g_b', [feature_nums], initializer=init_const)\n h = tf.matmul(transform, w) + b\n # s = tf.sigmoid(h)\n s = tf.tanh(h)\n return s, params + [w, b]\n\n\ndef minibatch(input, num_kernels=5, kernel_dim=3):\n x = linear(input, num_kernels * kernel_dim, scope='minibatch', stddev=0.02)\n activation = tf.reshape(x, (-1, num_kernels, kernel_dim))\n diffs = tf.expand_dims(activation, 3) - \\\n tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)\n abs_diffs = tf.reduce_sum(tf.abs(diffs), 2)\n minibatch_features = tf.reduce_sum(tf.exp(-abs_diffs), 2)\n return tf.concat([input, 
minibatch_features], 1)\n\n\n'''\ndef discriminator(input, h_dim, minibatch_layer=False):\n h0 = tf.nn.relu(linear(input, h_dim * 2, 'd0'))\n h1 = tf.nn.relu(linear(h0, h_dim * 2, 'd1'))\n\n # without the minibatch layer, the discriminator needs an additional layer\n # to have enough capacity to separate the two distributions correctly\n if minibatch_layer:\n h2 = minibatch(h1)\n else:\n h2 = tf.nn.relu(linear(h1, h_dim * 2, scope='d2'))\n\n h3 = tf.sigmoid(linear(h2, 1, scope='d3'))\n return h3\n'''\n\n\ndef discriminator(input, h_dim):\n transform, params = mlp(input, h_dim)\n init_const = tf.constant_initializer(0.0)\n init_norm = tf.random_normal_initializer()\n w = tf.get_variable('d_w', [h_dim, 1], initializer=init_norm)\n b = tf.get_variable('d_b', [1], initializer=init_const)\n h_logits = tf.matmul(transform, w) + b\n h_prob = tf.sigmoid(h_logits)\n return h_prob, h_logits, params + [w, b]\n\n\n# In[16]:\n\ndef optimizer(loss, var_list, num_decay_steps=1000):\n initial_learning_rate = 0.01\n decay = 0.95\n batch = tf.Variable(0)\n learning_rate = tf.train.exponential_decay(\n initial_learning_rate,\n batch,\n num_decay_steps,\n decay,\n staircase=True\n )\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n loss,\n global_step=batch,\n var_list=var_list\n )\n return optimizer\n\n\nanim_frames = []\n\n\ndef plot_distributions(GAN, session, loss_d, loss_g):\n num_points = 100000\n num_bins = 100\n xs = np.linspace(-GAN.gen.range, GAN.gen.range, num_points)\n bins = np.linspace(-GAN.gen.range, GAN.gen.range, num_bins)\n\n # p(data)\n d_sample = GAN.data.sample(num_points)\n\n # decision boundary\n ds = np.zeros((num_points, 1)) # decision surface\n for i in range(num_points // GAN.batch_size):\n ds[GAN.batch_size * i:GAN.batch_size * (i + 1)] = session.run(GAN.D1, {\n GAN.x: np.reshape(xs[GAN.batch_size * i:GAN.batch_size * (i + 1)], (GAN.batch_size, 1))\n })\n\n # p(generator)\n zs = np.linspace(-GAN.gen.range, GAN.gen.range, num_points)\n gs = np.zeros((num_points, 1)) # generator function\n for i in range(num_points // GAN.batch_size):\n gs[GAN.batch_size * i:GAN.batch_size * (i + 1)] = session.run(GAN.G, {\n GAN.z: np.reshape(\n zs[GAN.batch_size * i:GAN.batch_size * (i + 1)],\n (GAN.batch_size, 1)\n )\n })\n\n anim_frames.append((d_sample, ds, gs, loss_d, loss_g))\n\n\nclass AEGan(object):\n def __init__(self, feature_nums, model_name=\"AEGAN.model\", mlp_hidden_size=10000, lam=0.1):\n\n self.feature_nums = feature_nums\n self.log_every = 10\n # self.mlp_hidden_size = mlp_hidden_size\n self.mlp_hidden_size = feature_nums // 2\n self.lam = lam\n self.model_name = model_name\n self._create_model()\n\n def _create_model(self):\n\n self.is_training = tf.placeholder(tf.bool, name=\"is_training\")\n # This defines the generator network - it takes samples from a noise\n # distribution as input, and passes them through an MLP.\n with tf.variable_scope('G'):\n self.z = tf.placeholder(tf.float32, shape=(None, self.feature_nums))\n self.G, self.theta_g = generator(self.z, self.mlp_hidden_size, self.feature_nums)\n self.z_sum = tf.summary.histogram(\"z\", self.z)\n\n # The discriminator tries to tell the difference between samples from the\n # true data distribution (self.x) and the generated samples (self.z).\n #\n # Here we create two copies of the discriminator network (that share parameters),\n # as you cannot use the same network with different inputs in TensorFlow.\n with tf.variable_scope('D') as scope:\n self.x = tf.placeholder(tf.float32, shape=(None, 
self.feature_nums))\n self.D1_prob, self.D1_logits, self.theta_d1 = discriminator(self.x, self.mlp_hidden_size)\n scope.reuse_variables()\n self.D2_prob, self.D2_logits, self.theta_d2 = discriminator(self.G, self.mlp_hidden_size)\n\n self.d_sum = tf.summary.histogram(\"d1\", self.D1_prob)\n self.d__sum = tf.summary.histogram(\"d_\", self.D2_prob)\n self.G_sum = tf.summary.histogram(\"G\", self.G)\n\n # Define the loss for discriminator and generator networks (see the original\n # paper for details), and create optimizers for both\n self.d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D1_logits, labels=tf.ones_like(self.D1_logits)))\n self.d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D2_logits, labels=tf.zeros_like(self.D2_logits)))\n\n self.d_loss_real_sum = tf.summary.scalar(\"d_loss_real\", self.d_loss_real)\n self.d_loss_fake_sum = tf.summary.scalar(\"d_loss_fake\", self.d_loss_fake)\n\n self.loss_d = self.d_loss_real + self.d_loss_fake\n self.loss_g = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D2_logits, labels=tf.ones_like(self.D2_logits)))\n\n self.g_loss_sum = tf.summary.scalar(\"g_loss\", self.loss_g)\n self.d_loss_sum = tf.summary.scalar(\"d_loss\", self.loss_d)\n\n self.opt_d = optimizer(self.loss_d, self.theta_d2)\n self.opt_g = optimizer(self.loss_g, self.theta_g)\n\n self.saver = tf.train.Saver(max_to_keep=1)\n\n # Completion.\n self.mask = tf.placeholder(tf.float32, [None, self.feature_nums], name='mask')\n self.contextual_loss = tf.reduce_sum(\n tf.contrib.layers.flatten(\n tf.abs(tf.multiply(self.mask, self.G) - tf.multiply(self.mask, self.x))), 1)\n self.perceptual_loss = self.loss_g\n self.complete_loss = self.contextual_loss + self.lam * self.perceptual_loss\n self.grad_complete_loss = tf.gradients(self.complete_loss, self.z)\n\n def train(self, config):\n\n dataset = DataSet(config.train_datapath, config.batch_size)\n\n steps = dataset.steps * config.epoch\n samples = np.random.normal(-1, 1, (config.batch_size, self.feature_nums))\n sample_dirs = os.path.join(\"samples\", self.model_name)\n if os.path.exists(sample_dirs) == False:\n os.makedirs(sample_dirs)\n\n with tf.Session() as session:\n\n if config.load_checkpoint and os.path.exists(config.checkpoint_dir):\n self.load(session, config.checkpoint_dir)\n elif os.path.exists(config.checkpoint_dir):\n shutil.rmtree(config.checkpoint_dir)\n\n tf.global_variables_initializer().run()\n\n self.g_sum = tf.summary.merge(\n [self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])\n self.d_sum = tf.summary.merge(\n [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])\n\n logs_dir = os.path.join(\"./logs\", self.model_name)\n if os.path.exists(logs_dir) == False:\n os.makedirs(logs_dir)\n self.writer = tf.summary.FileWriter(logs_dir, session.graph)\n\n for step in range(steps):\n\n batch_data = dataset.next()\n\n sz = len(batch_data)\n\n random_data = np.random.normal(-1, 1, (sz, self.feature_nums))\n\n loss_d, _, d_summary_str = session.run([self.loss_d, self.opt_d, self.d_sum], {\n self.x: batch_data,\n self.z: random_data\n })\n\n self.writer.add_summary(d_summary_str, steps)\n\n # update generator\n loss_g, _, g_summary_str = session.run([self.loss_g, self.opt_g, self.g_sum], {\n self.z: random_data\n })\n self.writer.add_summary(g_summary_str, step)\n\n if step % self.log_every == 0:\n print('{}: {}\\t{}'.format(step, loss_d, loss_g))\n\n if step % config.sample_steps == 0:\n sample_gen = 
session.run(self.G, {\n self.z: samples\n })\n sample_path = os.path.join(sample_dirs, \"{}-{}.csv\".format(self.model_name, str(step)))\n pd.DataFrame(sample_gen).to_csv(sample_path, index=False)\n\n if step % config.save_freq_steps == 0:\n save_dir = os.path.join(config.checkpoint_dir, self.model_name)\n self.save(session, save_dir, step)\n\n def complete(self, config):\n\n dataset = DataSet(config.infer_complete_datapath, batch_size=config.batch_size, onepass=True)\n\n missing_val = config.missing_val\n\n complete_datas = []\n feature_nums = dataset.feature_nums\n\n with tf.Session() as sess:\n\n load_model_dir = os.path.join(config.checkpoint_dir, self.model_name)\n isLoaded = self.load(sess, load_model_dir)\n assert (isLoaded)\n\n try:\n tf.global_variables_initializer().run()\n except:\n tf.initialize_all_variables().run()\n\n while (1):\n batch_data = dataset.next()\n if batch_data is None:\n break\n data_shape = np.shape(batch_data)\n sample_size, feature_nums = data_shape\n\n batch_mask = utils.MaskData(batch_data, missing_val)\n mask_data = np.multiply(batch_data, batch_mask)\n zhats = np.random.uniform(0, 1, size=data_shape)\n completed = batch_data\n\n m = 0\n v = 0\n G_data = None\n\n for i in range(config.nIter):\n fd = {\n self.z: zhats,\n self.mask: batch_mask,\n self.x: batch_data,\n self.is_training: False\n }\n run = [self.complete_loss, self.grad_complete_loss, self.G]\n loss, g, G_data = sess.run(run, feed_dict=fd)\n\n if config.approach == 'adam':\n # Optimize single completion with Adam\n m_prev = np.copy(m)\n v_prev = np.copy(v)\n m = config.beta1 * m_prev + (1 - config.beta1) * g[0]\n v = config.beta2 * v_prev + (1 - config.beta2) * np.multiply(g[0], g[0])\n m_hat = m / (1 - config.beta1 ** (i + 1))\n v_hat = v / (1 - config.beta2 ** (i + 1))\n zhats += - np.true_divide(config.lr * m_hat, (np.sqrt(v_hat) + config.eps))\n zhats = np.clip(zhats, -1, 1)\n\n elif config.approach == 'hmc':\n # Sample example completions with HMC (not in paper)\n zhats_old = np.copy(zhats)\n loss_old = np.copy(loss)\n v = np.random.randn(sample_size, feature_nums)\n v_old = np.copy(v)\n\n for steps in range(config.hmcL):\n v -= config.hmcEps / 2 * config.hmcBeta * g[0]\n zhats += config.hmcEps * v\n np.copyto(zhats, np.clip(zhats, -1, 1))\n loss, g, _, _ = sess.run(run, feed_dict=fd)\n v -= config.hmcEps / 2 * config.hmcBeta * g[0]\n\n for i in range(sample_size):\n logprob_old = config.hmcBeta * loss_old[i] + np.sum(v_old[i] ** 2) / 2\n logprob = config.hmcBeta * loss[i] + np.sum(v[i] ** 2) / 2\n accept = np.exp(logprob_old - logprob)\n if accept < 1 and np.random.uniform() > accept:\n np.copyto(zhats[i], zhats_old[i])\n\n config.hmcBeta *= config.hmcAnneal\n\n inv_masked_hat_data = np.multiply(G_data, 1.0 - batch_mask)\n completed = mask_data + inv_masked_hat_data\n complete_datas.append(completed)\n\n complete_datas = np.reshape(np.concatenate(complete_datas, axis=0), (-1, feature_nums))\n df = pd.DataFrame(complete_datas)\n if os.path.exists(config.outDir) == False:\n os.makedirs(config.outDir)\n outPath = os.path.join(config.outDir, \"infer.complete\")\n df.to_csv(outPath, index=None)\n print(\"save complete data from {} to {}\".format(config.infer_complete_datapath, outPath))\n\n def save(self, sess, save_dir, step):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n self.saver.save(sess,\n os.path.join(save_dir, self.model_name),\n global_step=step)\n\n def load(self, sess, checkpoint_dir):\n print(\" [*] Reading checkpoints...\")\n\n ckpt = 
tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n self.saver.restore(sess, ckpt.model_checkpoint_path)\n return True\n else:\n return False","sub_path":"ae-gan.py","file_name":"ae-gan.py","file_ext":"py","file_size_in_byte":16853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
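The 'adam' branch of complete() above is ordinary Adam applied to the latent codes zhats rather than to network weights: the gradient of the contextual-plus-perceptual loss with respect to z drives the update, and the result is clipped back into the prior's support. A minimal NumPy sketch of that update, detached from TensorFlow, with a stand-in gradient:

    import numpy as np

    def adam_step_on_z(z, grad, m, v, t, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
        # one bias-corrected Adam update of the latent code z given dL/dz
        m = beta1 * m + (1 - beta1) * grad
        v = beta2 * v + (1 - beta2) * grad * grad
        m_hat = m / (1 - beta1 ** t)
        v_hat = v / (1 - beta2 ** t)
        z = np.clip(z - lr * m_hat / (np.sqrt(v_hat) + eps), -1, 1)
        return z, m, v

    z = np.random.uniform(-1, 1, size=(4, 8))
    m, v = np.zeros_like(z), np.zeros_like(z)
    for t in range(1, 6):
        grad = 2 * z                   # stand-in gradient: minimizes ||z||^2
        z, m, v = adam_step_on_z(z, grad, m, v, t)
    print(np.abs(z).mean())            # shrinks toward 0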
+{"seq_id":"583885459","text":"import pygame\r\n\r\n\r\n\r\nWIDTH = 700\r\nscreen = pygame.display.set_mode((WIDTH , WIDTH))\r\npygame.display.set_caption(\"BFS Algorithm Visualizer\")\r\n\r\nLIGHT_GREEN = (128, 255, 128)\r\nDARK_GREEN = (0, 153, 51)\r\nBLUE = (0, 255, 0)\r\nYELLOW = (255, 255, 0)\r\nWHITE = (255, 255, 255)\r\nOBSCOLOR = (0, 38, 51)\r\nPURPLE = (255, 77, 196)\r\nORANGE_RED = (255, 69, 0)\r\nGREY = (128, 128, 128)\r\nDODGER_BLUE = (30, 144, 255)\r\n\r\nclass spot:\r\n def __init__(self,row,col,width,total_row):\r\n self.row=row\r\n self.col=col\r\n self.width=width\r\n self.x=width*row\r\n self.y=width*col\r\n self.total_row=total_row\r\n self.color=WHITE\r\n self.neighbours=[]\r\n\r\n\r\n def get_pos(self):\r\n return self.row,self.col\r\n def is_start(self):\r\n return self.color == ORANGE_RED\r\n def is_end(self):\r\n return self.color == DODGER_BLUE\r\n def is_barrier(self):\r\n return self.color == OBSCOLOR\r\n def is_closed(self):\r\n return self.color == LIGHT_GREEN\r\n def is_open(self):\r\n return self.color == DARK_GREEN\r\n\r\n\r\n def reset(self):\r\n self.color=WHITE\r\n\r\n def make_start(self):\r\n self.color=ORANGE_RED\r\n def make_end(self):\r\n self.color=DODGER_BLUE\r\n def make_barrier(self):\r\n self.color=OBSCOLOR\r\n def make_path(self):\r\n self.color=PURPLE\r\n def make_open(self):\r\n self.color=DARK_GREEN\r\n def make_closed(self):\r\n self.color=LIGHT_GREEN\r\n\r\n def draw(self,screen):\r\n pygame.draw.rect(screen,self.color,(self.x,self.y,self.width,self.width))\r\n def update_neighbours(self,grid):\r\n self.neighbours=[]\r\n if self.row0 and not grid[self.row-1][self.col].is_barrier():#up\r\n self.neighbours.append(grid[self.row-1][self.col])\r\n\r\n if self.col>0 and not grid[self.row][self.col-1].is_barrier():#left\r\n self.neighbours.append(grid[self.row][self.col-1])\r\n\r\n if self.col\", __file__)\n \n from sys import path\n from os.path import dirname as dir\n print(\"existing path:\\n\", path)\n\n # adding FastAPI-Spotify to the path\n # everything under FastAPI-Spotify such as \"appdir\" would be recognized\n path.append(dir(dir(c_dir)))\n\n print(\"expanded system path:\\n\", path)\n __package__ = \"appdir.tests\"\n\n\nfrom fastapi.testclient import TestClient\nfrom appdir.main import app\nclient = TestClient(app)\n\n\ndef test_docs():\n \"\"\"Return HTML docs for root route.\"\"\"\n response = client.get('/')\n assert response.status_code == 200\n assert response.headers['content-type'].startswith('text/html')\n\n\ndef test_predict():\n \"\"\"Test the webs server response and the \n returned data type on predict method\"\"\"\n response = client.get('/predict/07j5RLJHwsm4cUb3GGoW3w')\n assert response.status_code == 200\n assert response.headers['content-type'].startswith('application/json')\n\nif __name__ == '__main__':\n test_docs()\n test_predict()","sub_path":"appdir/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"407972877","text":"#!/usr/bin/env python3\n\n# DATE_STR = \"2 Oct 2020\"\n# VERSION = \"1_i\"\n# AUTHOR = \"Oliver Bonham-Carter\"\n# AUTHORMAIL = \"obonhamcarter@allegheny.edu\"\n\nimport sqlite3\n\ndbFilename_str = \"myCampusDB.sqlite3\" #establish the DB file\nconn = sqlite3.connect(dbFilename_str) # open connection to the DB\n\nmyTable_str = \"Instructor\"\nattribute1_str = \"name\"\nattribute2_str = \"deptName\"\nattribute3_str = \"salary\"\n\nprint(f\"\\n\\n\\t Running query in table : \\\"{myTable_str}\\\"\")\n\nmyQuery_str = f\"SELECT {attribute1_str},{attribute2_str}, {attribute3_str} FROM {myTable_str} WHERE {attribute3_str} > 99000;\"\nresult = conn.execute(myQuery_str) # run the query\ntables = result.fetchall() # collect query for processing\nprint(\"\\t \"+myQuery_str)\nprint(\"\\t [+] Results: \")\nfor i in tables:\n\tprint(f\"\\t {i}\") # show results of query\n\nconn.close() # close the database connection\n","sub_path":"lessons/06_week_pythonAndSQL/sandbox/src/simpleQuery1.py","file_name":"simpleQuery1.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"635821562","text":"# convert 10 patients data into ground truth images, and numpy array.\n\nimport os\nimport glob as glob\nimport sys\nimport SimpleITK as sitk\nimport json\nimport matplotlib.pyplot as plt\nfrom utilities import getSurfacesArray, scaleDownMatrix, get3PointSmoothMatrix\n\nimport numpy as np\n\nextractIndexs = (0, 1, 3, 5, 6, 10) # extracted surface indexes from original 11 surfaces.\nsurfaceNames = (\"ILM\", \"RNFL-GCL\", \"IPL-INL\", \"OPL-ONL\", \"BMEIS\", \"OB_RPE\")\npltColors = ('tab:cyan', 'tab:orange', 'tab:red', 'tab:green', 'tab:pink', 'tab:brown')\nneedLegend = True\n\nH = 1024\nN = len(extractIndexs)\nW = 200 # target image width\n\n# output Dir:\noutputImageDir = \"/home/hxie1/data/Ophthalmology/thinRetina/rawGT\"\noutputNumpyParentDir = \"/home/hxie1/data/Ophthalmology/thinRetina/numpy\"\noutputTrainNumpyDir = os.path.join(outputNumpyParentDir, \"training\")\noutputTestNumpyDir = os.path.join(outputNumpyParentDir, \"test\")\n\n# original patientDirList\ntrainPatientDirList= [ #8 patients\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Graph_Search/PVIP2-4060_Macular_200x200_8-25-2009_11-55-11_OD_sn16334_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Graph_Search/PVIP2-4073_Macular_200x200_1-3-2013_15-52-39_OS_sn10938_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Graph_Search/PVIP2-4084_Macular_512x128_5-14-2012_14-35-40_OD_sn26743_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Graph_Search/PVIP2-4081_Macular_512x128_11-11-2010_12-42-15_OS_sn14530_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Manual_Correction/PVIP2-4004_Macular_200x200_10-10-2012_12-17-24_OD_sn11266_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Manual_Correction/PVIP2-4074_Macular_200x200_11-7-2013_8-14-8_OD_sn26558_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Manual_Correction/PVIP2-4088_Macular_512x128_12-4-2012_9-48-42_OD_sn12365_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Manual_Correction/PVIP2-4045_Macular_512x128_4-20-2010_14-18-22_OD_sn12908_cube_z\",\n]\n\ntestPatientDirList=[ # 2 patients\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Graph_Search/PVIP2-4068_Macular_200x200_10-18-2012_12-10-55_OS_sn14463_cube_z\",\n\"/home/hxie1/data/garvinlab/Data/IOWA_VIP_25_Subjects_Thin_Retina/Manual_Correction/PVIP2-4083_Macular_200x200_10-24-2012_10-24-46_OS_sn14353_cube_z\",\n]\n\ntrainingCase = True\n\nif trainingCase:\n patientDirList = trainPatientDirList\n outputNumpyDir = outputTrainNumpyDir\n totalSlices = 4*(200+128)\nelse:\n patientDirList = testPatientDirList\n outputNumpyDir = outputTestNumpyDir\n totalSlices = 2*200\n\noutputNumpyImagesPath = os.path.join(outputNumpyDir, f\"images.npy\")\noutputNumpySurfacesPath = os.path.join(outputNumpyDir, f\"surfaces.npy\")\noutputPatientIDPath = os.path.join(outputNumpyDir, \"patientID.json\")\n\nallPatientsImageArray = np.empty((totalSlices , H, W), dtype=float)\nallPatientsSurfaceArray = np.empty((totalSlices, N, W), dtype=float) # the ground truth of JHU data is float\npatientIDDict = {}\n\n\nprint(f\"Program is outputing raw_GT images in {outputImageDir}, please wait ......\")\ns = 0 # initial slice index\nfor patientDir in patientDirList:\n # get volumePath and surfacesXmlPath\n octVolumeFileList = glob.glob(patientDir + f\"/*_OCT_Iowa.mhd\")\n assert 
len(octVolumeFileList) == 1\n    octVolumePath = octVolumeFileList[0]\n    dirname = os.path.dirname(octVolumePath)\n    basename = os.path.basename(octVolumePath)\n    basename = basename[0:basename.rfind(\"_OCT_Iowa.mhd\")]\n    surfacesXmlPath = os.path.join(dirname, basename+\"_Surfaces_Iowa_Ray.xml\")\n    if not os.path.isfile(surfacesXmlPath):\n        surfacesXmlPath = os.path.join(dirname, basename+\"_Surfaces_Iowa.xml\")\n        if not os.path.isfile(surfacesXmlPath):\n            print(\"Error: cannot find surface xml file\")\n            assert False\n\n    # convert Ray's special raw format to standard BxHxW for image, and BxSxW format for surface.\n    # Ray mhd format is in BxHxW dimension, but it flips the H and W dimensions.\n    # for 200x1024x200 image, and 128x1024x512 in BxHxW direction.\n    itkImage = sitk.ReadImage(octVolumePath)\n    npImage = sitk.GetArrayFromImage(itkImage).astype(float) # in BxHxW dimension\n    npImage = np.flip(npImage, (1, 2)) # as Ray's format flips the H and W dimensions.\n    B,curH,curW = npImage.shape\n    assert H == curH\n\n    surfaces = getSurfacesArray(surfacesXmlPath) # size: SxNxW, where N is number of surfaces.\n    surfaces = surfaces[:, extractIndexs, :] # extract 6 surfaces (0, 1, 3, 5, 6, 10)\n    # its surface names: [\"ILM\", \"RNFL-GCL\", \"IPL-INL\", \"OPL-ONL\", \"BMEIS\", \"OB_RPE\"]\n    B1, curN, _ = surfaces.shape\n    assert N == curN\n    assert B == B1\n\n    # scale down image and surfaces if the original width is 512.\n    if npImage.shape == (128, 1024, 512): # scale image to 1024x200.\n        scaleM = scaleDownMatrix(B, curW, W)\n        npImage = np.matmul(npImage, scaleM)\n        surfaces = np.matmul(surfaces, scaleM)\n    else:\n        assert curW == W\n\n    # flip all OS eyes into OD eyes\n    if \"_OS_\" in basename:\n        npImage = np.flip(npImage, 2)\n        surfaces = np.flip(surfaces, 2)\n\n    # Make sure all surfaces do not interleave; especially, the top surface of GCIPL (i.e., surface_1) must NOT be above ILM (surface_0)\n    for i in range(1, N):\n        surfaces[:, i, :] = np.where(surfaces[:, i, :] < surfaces[:, i - 1, :], surfaces[:, i - 1, :],\n                                     surfaces[:, i, :])\n\n    # slightly smooth the ground truth before use:\n    # A \"very gentle\" 3D smoothing process (or thin-plate-spline) should be applied to reduce the manual tracing artifact\n    # Check the smoothing results again in the images to make sure they still look reasonable\n    smoothM = get3PointSmoothMatrix(B,W)\n    surfaces = np.matmul(surfaces, smoothM)\n\n    # output numpy array.\n    allPatientsImageArray[s:s+B,:,:] = npImage\n    allPatientsSurfaceArray[s:s+B, :, :] = surfaces\n    for i in range(B):\n        # basename: PVIP2-4074_Macular_200x200_11-7-2013_8-14-8_OD_sn26558_cube_z\n        patientIDDict[str(s+i)] = basename + f\"_s{i:03d}\"\n    s += B\n\n    # output raw_GT images\n    for i in range(B):\n        f = plt.figure(frameon=False)\n        DPI = 100\n        rowSubplot = 1\n        colSubplot = 2\n        f.set_size_inches(W * colSubplot / float(DPI), H * rowSubplot / float(DPI))\n\n        plt.margins(0)\n        plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0) # very important for erasing unnecessary margins.\n\n        subplot1 = plt.subplot(rowSubplot, colSubplot, 1)\n        subplot1.imshow(npImage[i, :, :], cmap='gray')\n        subplot1.axis('off')\n\n        subplot2 = plt.subplot(rowSubplot, colSubplot, 2)\n        subplot2.imshow(npImage[i, :, :], cmap='gray')\n        for n in range(0, N):\n            subplot2.plot(range(0, W), surfaces[i, n, :], pltColors[n], linewidth=1.5)\n        if needLegend:\n            subplot2.legend(surfaceNames, loc='lower left', ncol=2, fontsize='x-small')\n        subplot2.axis('off')\n\n        curImagePath = os.path.join(outputImageDir, basename+f\"_s{i:03d}_raw_GT.png\")\n\n        plt.savefig(curImagePath, 
dpi='figure', bbox_inches='tight', pad_inches=0)\n plt.close()\n\n# after reading all patients, save numpy array\nnp.save(outputNumpyImagesPath, allPatientsImageArray)\nnp.save(outputNumpySurfacesPath, allPatientsSurfaceArray)\nwith open(outputPatientIDPath, 'w') as fp:\n json.dump(patientIDDict, fp)\n\nprint(f\"===========END of Convert data==============\")\n","sub_path":"OCTSegTool/thinRetina/dataPrepare_VIP/convertData.py","file_name":"convertData.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"410354418","text":"from django.shortcuts import render, redirect,get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom .forms import UserRegisterForm, User, UserUpdateForm, ProfileUpdateForm\nfrom QnA.models import comment,question\nfrom django.views.generic import ListView,DetailView\nfrom .models import Reward\n# tells backend how to show template and process info from template\ndef register(request):\n    if request.method == \"POST\":\n        form = UserRegisterForm(request.POST)#tell django what form to use\n        if form.is_valid():#check validity of form\n            form.save() #adds user to database\n            username = form.cleaned_data.get('username')\n            messages.success(request, f'Successfully created account: {username}. Please login and add a Profile Picture!')\n            return redirect(\"profile_update\")\n    else:\n        form = UserRegisterForm() \n    return render(request, 'users/register.html', {'form': form})\n\n\nclass ProfileListView(ListView):\n    model = question #choose database\n    template_name = 'users/profile.html'  # assignment (not a bare annotation), so ListView actually uses this template\n    context_object_name = 'question'\n    ordering = ['-date_published']\n    paginate_by = 5\n\nclass RewardView(DetailView):\n    model = Reward \n\n@login_required\ndef profile(request):\n    if request.method == 'POST':\n        u_form = UserUpdateForm(request.POST, instance=request.user)#request user data \n        p_form = ProfileUpdateForm(request.POST,\n                                   request.FILES,\n                                   instance=request.user.profile)\n        if u_form.is_valid() and p_form.is_valid():\n            u_form.save()\n            p_form.save()\n            messages.success(request, 'Your account has been updated!')\n            return redirect('profile')\n\n    else:\n        u_form = UserUpdateForm(instance=request.user)\n        p_form = ProfileUpdateForm(instance=request.user.profile)\n\n    context = {\n        'u_form': u_form,#define forms to use in template\n        'p_form': p_form\n    }\n\n    return render(request, 'users/update.html', context)\n","sub_path":"MondayBlur/mondayblur/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"19976723","text":"from flask import Flask, jsonify, request\nimport mysql.connector\nfrom util import gatherRecipeData\n\napp = Flask(__name__)\n\ndbconfig = {\n    'user': \"root\",\n    'password': \"root\",\n    'database': \"recipes\",\n    'host': \"localhost\"\n}\n\n\n@app.route('/search')\ndef search():\n    query = request.args.get('q', default='', type=str)\n    recipe_ids = []\n    con = mysql.connector.connect(**dbconfig)\n    cursor = con.cursor()\n\n    # Gather all valid recipe_ids\n    cursor.execute((\n        \"SELECT DISTINCT recipe_id \"\n        \"FROM Recipes \"\n        \"WHERE name LIKE %s\"\n    ), (\"%\" + query + \"%\",))\n    for recipe_id in cursor:\n        recipe_ids.append(recipe_id[0])\n\n    cursor.execute((\n        \"SELECT DISTINCT ri.recipe_id \"\n        \"FROM RecipeIngredients ri, Ingredients i \"\n        \"WHERE ri.ingredient_id = i.ingredient_id \"\n        \"AND i.name LIKE %s\"\n    ), (\"%\" + query + \"%\",))\n    for recipe_id in cursor:\n        recipe_ids.append(recipe_id[0])\n    \n    # Gather all relevant data for the filtered recipe_ids,\n    # then release the cursor and connection before returning\n    result = gatherRecipeData(recipe_ids, cursor)\n    cursor.close()\n    con.close()\n    return jsonify(result)\n\n\n@app.route('/recipes')\ndef recipes():\n    con = mysql.connector.connect(**dbconfig)\n    cursor = con.cursor()\n    cursor.execute(\"SELECT * FROM Recipes;\")\n\n    recipes = []\n    for recipe_id, name, source, url in cursor:\n        recipes.append({\n            'recipe_id': recipe_id,\n            'name': name,\n            'source': source,\n            'url': url\n        })\n\n    cursor.close()\n    con.close()\n    return jsonify(recipes)\n\n\n@app.route('/')\ndef hello_world():\n    return \"hello, world!\"\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=80)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"331196736","text":"import time  # time.time() is used below; import it explicitly instead of relying on the wildcard import\nfrom models.StandardFactory import StandardFactory\nfrom models.ImportTools import *\nfrom models.Utils import parse_args\nimport pandas as pd\n\ndef process_all_emb(data_dic=None, cores=10, save=False): \n    EmbedFactory = StandardFactory.get_factory(\"embedding\")\n    m2vemb = EmbedFactory.get_instance('m2v')\n    d3emb = EmbedFactory.get_instance('d3')\n    strucemb = EmbedFactory.get_instance('struc')\n    result = {}\n    for k,v in data_dic.items():\n        result_tmp = []\n        m2v_t = m2vemb(method='map', data=v.copy(), save=save, cores=cores, prefix=k) \n        d3_t = d3emb(method='map', data=v.copy(), save=save, cores=cores, prefix=k) \n        struc_t = strucemb(method='map', data=v.copy(), save=save, cores=cores, prefix=k) \n        result_tmp.append(m2v_t.get_result())\n        result_tmp.append(d3_t.get_result())\n        result_tmp.append(struc_t.get_result())\n        result[k] = result_tmp\n    return result\n\ndef process_all_score(data_dic=None, cores=10, save=False): \n    ScoreFactory = StandardFactory.get_factory(\"scoring\")\n    m2vscore = ScoreFactory.get_instance('m2v')\n    d3score = ScoreFactory.get_instance('d3')\n    strucscore = ScoreFactory.get_instance('struc')\n    result = {}\n    for k,v in data_dic.items():\n        result_tmp = []\n        m2v_s = m2vscore(method='map', target=v[0][0], data=v[1][0], save=save, cores=cores, prefix=k)\n        d3_s = d3score(method='map', target=v[0][1], data=v[1][1], save=save, cores=cores, prefix=k)\n        struc_s = strucscore(method='map', target=v[0][2], data=v[1][2], save=save, cores=cores, prefix=k)\n        result_tmp.append(m2v_s.get_result())\n        result_tmp.append(d3_s.get_result())\n        result_tmp.append(struc_s.get_result())\n        result[k] = result_tmp\n    return result\n\n\ndef emb_filter(data_dic=None):\n    result = {}\n    for k,v in data_dic.items():\n        tmp_lst = []\n        for df_tmp in v:\n            col = [f for f in df_tmp.columns if f not in [\"ID\", \"SMILES\"]][0]\n            tmp_lst.append(df_tmp[df_tmp[col] != -1].reset_index(drop=True))\n        result[k] = tmp_lst\n    return result\n    \ndef get_similarity(input_path=\"\", candidate_file=\"\", target_file=\"\", cores=20):\n    \n    df_c = pd.read_csv(f\"{input_path}/{candidate_file}\")\n    df_t = pd.read_csv(f\"{input_path}/{target_file}\")\n\n    emb_dic = {}\n    emb_dic['test_c'] = df_c\n    emb_dic['test_t'] = df_t\n    emb_result = emb_filter(data_dic = process_all_emb(data_dic = emb_dic, cores=cores)) \n    result_c = emb_result['test_c']\n    result_t = emb_result['test_t']\n    score_dic = {}\n    score_dic['test_score'] = [result_t, result_c]\n    score_result = process_all_score(data_dic = score_dic, cores=cores)\n    df_result = df_c[['ID']]\n    for df_ in score_result['test_score']:\n        df_result = df_result.merge(df_, on='ID', how='left')\n    return df_result\n\n\n\nif __name__ == \"__main__\":\n    start = time.time()\n    args = parse_args()\n    print(f\"file path: {args.path}\")\n    print(f\"candidate file: {args.candidate_file}\")\n    print(f\"target file: {args.target_file}\")\n    print(f\"output file: {args.output_file}\")\n    print(f\"computing cores: {args.cores}\")\n    print(f\"save result: {args.save}\")\n    result = get_similarity(input_path=args.path, candidate_file=args.candidate_file, target_file=args.target_file, cores=args.cores)\n    if args.save:\n        result.to_csv(f'{args.path}/{args.output_file}.csv', index=False)\n    print(result.head())\n    end = time.time()\n    print('Running time: %s 
Seconds'%(end-start))\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"461611444","text":"import ROOT\nimport StatisticalTest\nimport numpy\nfrom MathFunctions import poissonPVal, poissonConvGammaPVal\nimport HistWrapper\n\nclass BumpHunter(StatisticalTest.StatisticalTest) :\n\n def __init__(self) :\n StatisticalTest.StatisticalTest.__init__(self)\n self.allowDeficit = False\n self.useSidebands = False\n self.minBinsInBump = 2\n self.maxBinsInBump = 1e5\n self.nBinsInSideband = 1\n self.doErr = False\n self.tomography = None\n self.excludeWindow = False\n self.firstBinToExclude = -1\n self.lastBinToExclude = -1\n\n def doTest(self, dataHist, bkgHist, firstBinToUse, lastBinToUse) :\n\n dataCore = dataHist.histogram\n bkgCore = bkgHist.histogram\n \n assert dataCore.GetNbinsX() == bkgCore.GetNbinsX()\n \n # Find first and last bins with data\n # If reasonable, overwrite with user's choice\n firstBin = dataHist.firstBinWithData\n lastBin = dataHist.lastBinWithData\n if firstBinToUse>0 and firstBinToUse > firstBin and firstBinToUse < lastBin : firstBin = firstBinToUse\n if lastBinToUse > firstBinToUse and lastBinToUse>0 and lastBinToUse > firstBin and lastBinToUse < lastBin :\n lastBin = lastBinToUse\n \n regionsDef = []\n if self.excludeWindow :\n regionsDef.append([firstBin,self.firstBinToExclude-1])\n regionsDef.append([self.lastBinToExclude+1,lastBin])\n else :\n regionsDef.append([firstBin,lastBin])\n\n self.mostInterestingDict = {\"binlow\" : 0, \"binhigh\" : 0, \"prob\" : 1.0}\n\n for region in regionsDef :\n \n nBins = region[1] - region[0] + 1\n minWidth = max(self.minBinsInBump,1)\n maxWidth = min(self.maxBinsInBump,int(nBins/2.0))\n\n self.doCalculationCore(dataCore,bkgCore,minWidth,maxWidth,region[0],region[1])\n \n self.tomography = ROOT.TGraphErrors()\n index = -1\n for windowDict in self.bumpInfoList :\n index = index+1\n self.tomography.SetPoint(index,(windowDict[\"binlow\"]+windowDict[\"binhigh\"])/2.0,windowDict[\"prob\"])\n self.tomography.SetPointError(index,(windowDict[\"binhigh\"]-windowDict[\"binlow\"])/2.0,0)\n \n if self.mostInterestingDict[\"prob\"] == 0 :\n self.findBumpInCaseOfIncalculable(dataCore,bkgCore,firstBin,lastBin)\n\n return -numpy.log(self.mostInterestingDict[\"prob\"])\n\n def doCalculationCore(self, dataHist, bkgHist, minWidth, maxWidth, firstBin, lastBin) :\n\n self.bumpInfoList = []\n self.mostInterestingDict = {\"binlow\" : 0, \"binhigh\" : 0, \"prob\" : 1.0}\n\n for width in range(minWidth,maxWidth+1) :\n\n # Sideband width needs to be something sensible\n if self.nBinsInSideband > 1 :\n sidebandWidth = self.nBinsInSideband\n else : sidebandWidth = max(1,int(width/2.0))\n \n smallestPForWidth = 1.0\n \n if self.useSidebands :\n minBinL = firstBin + sidebandWidth\n maxBinL = lastBin - width - sidebandWidth + 1\n else :\n minBinL = firstBin\n maxBinL = lastBin - width + 1\n\n # Loop over left edges possible with this bin width\n for windowLeft in range(minBinL, maxBinL+1) :\n\n # Other limits on window & sidebands\n windowRight = windowLeft + width - 1\n sidebandLeft = windowLeft - sidebandWidth\n sidebandRight = windowRight + sidebandWidth\n\n data, dataErr, bkg, bkgErr = self.getEffectiveBandContentsWithError(dataHist, bkgHist, windowLeft, windowRight)\n \n # Don't care about deficits unless otherwise specified\n if not self.allowDeficit and data < bkg :\n continue\n \n # Use uncertainty convolution if specified\n if self.doErr :\n probability = poissonConvGammaPVal(data, bkg, bkgErr)\n else :\n probability = poissonPVal(data, bkg)\n\n # Get probabilities for sidebands if desired.\n # If we have a big 
discrepancy in the sidebands then we do not\n                # keep considering this window.\n                if self.useSidebands :\n                    LSdata, LSdataErr, LSbkg, LSbkgErr = self.getEffectiveBandContentsWithError(dataHist, bkgHist, sidebandLeft, windowLeft - 1)\n                    RSdata, RSdataErr, RSbkg, RSbkgErr = self.getEffectiveBandContentsWithError(dataHist, bkgHist, windowRight + 1, sidebandRight)\n                    if self.doErr :\n                        probLeftSideband = poissonConvGammaPVal(LSdata, LSbkg, LSbkgErr)\n                        probRightSideband = poissonConvGammaPVal(RSdata, RSbkg, RSbkgErr)\n                    else :\n                        # poissonPVal takes (data, bkg) only; the uncertainty argument\n                        # belongs to the convolution variant above\n                        probLeftSideband = poissonPVal(LSdata, LSbkg)\n                        probRightSideband = poissonPVal(RSdata, RSbkg)\n\n                    if probLeftSideband < 1E-3 or probRightSideband < 1E-3 :\n                        continue\n                \n                # Save information on this window for the tomography plot\n                windowDict = {\"binlow\" : windowLeft, \"binhigh\" : windowRight, \"prob\" : probability}\n                self.bumpInfoList.append(windowDict)\n                if probability < self.mostInterestingDict[\"prob\"] :\n                    self.mostInterestingDict = windowDict\n\n\n    def getEffectiveBandContentsWithError(self,data, bkg, firstBin, lastBin) :\n\n        dataInt = dataErr = bkgInt = bkgErr = 0.0\n        for bin in range(firstBin, lastBin+1) :\n            dataInt = dataInt + data.GetBinContent(bin)\n            dataErr = dataErr + data.GetBinError(bin)\n            bkgInt = bkgInt + bkg.GetBinContent(bin)\n            bkgErr = bkgErr + bkg.GetBinError(bin)\n        \n        return dataInt, dataErr, bkgInt, bkgErr\n\n    def findBumpInCaseOfIncalculable(self, data, bkg, firstBin, lastBin) :\n\n        lastWasInf = False\n        allInfsConsecutive = True\n        singlebinsinf = []\n        for bin in range(firstBin, lastBin+1) :\n            D = data.GetBinContent(bin)\n            B = bkg.GetBinContent(bin)\n            thisbinpval = poissonPVal(D,B)\n            if thisbinpval==0 and D>B :\n                if len(singlebinsinf)>0 and lastWasInf==False :\n                    allInfsConsecutive = False\n                singlebinsinf.append(bin)\n                lastWasInf = True\n            else : lastWasInf = False\n        \n        if len(singlebinsinf) > 0 and allInfsConsecutive :\n            windowDict = {\"binlow\" : singlebinsinf[0],\\\n                          \"binhigh\" : singlebinsinf[-1],\n                          \"prob\" : 0.0}\n            self.mostInterestingDict = windowDict\n\n    def getFurtherInformation(self) :\n        return self.mostInterestingDict[\"binlow\"], self.mostInterestingDict[\"binhigh\"], self.tomography\n\n","sub_path":"BumpHunter.py","file_name":"BumpHunter.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"90517315","text":"#:coding=utf-8:\n\nfrom django.template import Library\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\n\nregister = Library()\n\ndef stripentities(value):\n \"\"\"Strips all HTML entities\"\"\"\n from django.utils.html import strip_entities\n return strip_entities(value)\nstripentities.is_safe = True\nstripentities = stringfilter(stripentities)\nregister.filter(stripentities)\n\n@register.filter\ndef to_anchor(text, autoescape=None):\n from beproud.utils.html import urlize\n return mark_safe(urlize(text, attrs={\"rel\": \"nofollow\", \"target\": \"_blank\"}, autoescape=autoescape))\nto_anchor.is_safe=True\nto_anchor.needs_autoescape = True\nto_anchor = stringfilter(to_anchor)\n\n@register.filter\ndef to_anchortrunc(text, limit, autoescape=None):\n from beproud.utils.html import urlize\n return mark_safe(urlize(text, attrs={\"rel\": \"nofollow\", \"target\": \"_blank\"}, \n trim_url_limit=limit, autoescape=autoescape))\nto_anchortrunc.is_safe=True\nto_anchortrunc.needs_autoescape = True\nto_anchortrunc = stringfilter(to_anchortrunc)\n\n@register.filter\ndef force_js(value, type=None):\n from beproud.utils.javascript import force_js\n return mark_safe(force_js(value, type))\n","sub_path":"beproud/django/commons/templatetags/html_tags.py","file_name":"html_tags.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"160878075","text":"import smtplib\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEBase import MIMEBase\nfrom email import Encoders\n\n\nSUBJECT = \"NewsMeme Video\"\n\nEMAIL_SERVER = 'smtp.gmail.com:587'\nEMAIL_FROM = 'stomatrix@gmail.com'\nEMAIL_TO = 'abhishekskmr44@gmail.com,saurav@newsmeme.in'\n\nmsg = MIMEMultipart()\nmsg['Subject'] = SUBJECT \nmsg['From'] = EMAIL_FROM\nmsg['To'] = EMAIL_TO\n\npart = MIMEBase('application', \"octet-stream\")\npart.set_payload(open(\"watermark.png\", \"rb\").read())\nEncoders.encode_base64(part)\n\npart.add_header('Content-Disposition', 'attachment; filename=\"watermark.png\"')\n\nmsg.attach(part)\n\nserver = smtplib.SMTP(EMAIL_SERVER)\nserver.sendmail(EMAIL_FROM, EMAIL_TO, msg.as_string())","sub_path":"app/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"245647749","text":"class Solution(object):\n\n    # We could use a hash table to track the number of times an element appears,\n    # or, for this specific question, a set (add the element if it is absent,\n    # remove it if it is present; the answer is the only element left in the set).\n    # But XOR is much simpler:\n    # it's based on the fact that XOR-ing a number with itself is zero,\n    # and the XOR operation is both commutative and associative,\n    # so simply XOR-ing all the numbers together yields the result.\n    def singleNumber(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n\n        result = 0\n        for num in nums:\n            result ^= num\n\n        return result\n","sub_path":"Code/Single Number/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"396091325","text":"from random import randint\n\nfrom sklearn.base import BaseEstimator, ClusterMixin\n\nimport geomstats.backend as gs\nfrom geomstats.learning._template import TransformerMixin\n\n\nclass RiemannianKMeans(TransformerMixin, ClusterMixin, BaseEstimator):\n\n    def __init__(self, riemannian_metric, n_clusters=8, init='random',\n                 tol=1e-2, mean_method='default', verbose=0):\n        \"\"\" K-Means algorithm on Riemannian manifolds\n\n        Parameters\n        ----------\n        n_clusters : Number of clusters (k value of the k-means)\n\n        riemannian_metric : The geomstats Riemannian metric associated with\n            the space used\n\n        init : How to initialize centroids at the beginning of the algorithm.\n            'random' : will select training points uniformly at random as\n            initial centroids\n\n        tol : convergence factor. The algorithm stops when the mean distance\n            between the centroids of two successive steps is lower than tol\n\n        verbose : if verbose > 0, information will be printed during learning\n\n        Returns\n        -------\n        self : object\n            Returns the instance itself.\n        \"\"\"\n        self.n_clusters = n_clusters\n        self.init = init\n        self.riemannian_metric = riemannian_metric\n        self.tol = tol\n        self.verbose = verbose\n        self.mean_method = mean_method\n\n    def fit(self, X, max_iter=100):\n        \"\"\"Compute the k centroids: iteratively assign each point to its\n        closest centroid and update each centroid as the Riemannian mean\n        of its cluster\n\n        Parameters\n        ----------\n        X : array-like, shape=[n_samples, n_features]\n            Training data, where n_samples is the number of samples\n            and n_features is the number of features.\n\n        max_iter : Maximum number of iterations\n\n        Returns\n        -------\n        centroids : array-like\n            The centroids array\n        \"\"\"\n\n        n_samples = X.shape[0]\n        belongs = gs.zeros(n_samples)\n        self.centroids = [gs.expand_dims(X[randint(0, n_samples - 1)], 0)\n                          for i in range(self.n_clusters)]\n        self.centroids = gs.concatenate(self.centroids)\n        index = 0\n        while index < max_iter:\n            index += 1\n\n            dists = [gs.to_ndarray(\n                self.riemannian_metric.dist(self.centroids[i], X), 2, 1)\n                for i in range(self.n_clusters)]\n            dists = gs.hstack(dists)\n            belongs = gs.argmin(dists, 1)\n            old_centroids = gs.copy(self.centroids)\n            for i in range(self.n_clusters):\n                fold = gs.squeeze(X[belongs == i])\n\n                if len(fold) > 0:\n\n                    self.centroids[i] = self.riemannian_metric.mean(\n                        fold,\n                        mean_method=self.mean_method,\n                        n_max_iterations=150)\n\n                else:\n                    self.centroids[i] = X[randint(0, n_samples - 1)]\n\n            centroids_distances = self.riemannian_metric.dist(old_centroids,\n                                                              self.centroids)\n\n            if gs.mean(centroids_distances) < self.tol:\n                if self.verbose > 0:\n                    print(\"Convergence reached after \", index, \" iterations\")\n\n                return gs.copy(self.centroids)\n\n        if index == max_iter:\n            print('K-means maximum number of iterations {} reached. '\n                  'The mean may be inaccurate'.format(max_iter))\n\n        return gs.copy(self.centroids)\n\n    def predict(self, X):\n\n        \"\"\"Predict for each data point the closest center in terms of\n        riemannian_metric distance\n\n        Parameters\n        ----------\n        X : array-like, shape=[n_samples, n_features]\n            data, where n_samples is the number of samples\n            and n_features is the number of features.\n\n        Returns\n        -------\n        belongs : array-like\n            Array containing, for each point, the index of the closest cluster\n        \"\"\"\n        dists = gs.hstack([self.riemannian_metric.dist(self.centroids[i], X)\n                           for i in range(self.n_clusters)])\n        belongs = gs.argmin(dists, -1)\n        return 
belongs\n","sub_path":"geomstats/learning/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"195636027","text":"# Copyright (c) 2021-2023, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\n\nimport pytest\nimport networkx as nx\n\nimport cudf\nimport cugraph\nfrom cugraph.testing import utils, UNDIRECTED_DATASETS\nfrom cugraph.datasets import netscience\nfrom cugraph.experimental import sorensen as exp_sorensen\nfrom cudf.testing import assert_series_equal, assert_frame_equal\n\n\nprint(\"Networkx version : {} \".format(nx.__version__))\n\n\n# =============================================================================\n# Pytest Setup / Teardown - called for each test function\n# =============================================================================\ndef setup_function():\n gc.collect()\n\n\n# =============================================================================\n# Helper functions\n# =============================================================================\ndef compare_sorensen_two_hop(G, Gnx, edgevals=False):\n \"\"\"\n Compute both cugraph and nx sorensen after extracting the two hop neighbors\n from G and compare both results\n \"\"\"\n pairs = (\n G.get_two_hop_neighbors()\n .sort_values([\"first\", \"second\"])\n .reset_index(drop=True)\n )\n nx_pairs = []\n nx_pairs = list(pairs.to_records(index=False))\n preds = nx.jaccard_coefficient(Gnx, nx_pairs)\n nx_coeff = []\n for u, v, p in preds:\n # FIXME: Use known correct values of Sorensen for few graphs,\n # hardcode it and compare to Cugraph Sorensen to get a more robust test\n\n # Conversion from Networkx Jaccard to Sorensen\n # No networkX equivalent\n nx_coeff.append((2 * p) / (1 + p))\n df = cugraph.sorensen(G, pairs)\n df = df.sort_values(by=[\"first\", \"second\"]).reset_index(drop=True)\n if not edgevals:\n # experimental sorensen currently only supports unweighted graphs\n df_exp = exp_sorensen(G, pairs)\n df_exp = df_exp.sort_values(by=[\"first\", \"second\"]).reset_index(drop=True)\n assert_frame_equal(df, df_exp, check_dtype=False, check_like=True)\n assert len(nx_coeff) == len(df)\n for i in range(len(df)):\n diff = abs(nx_coeff[i] - df[\"sorensen_coeff\"].iloc[i])\n assert diff < 1.0e-6\n\n\ndef cugraph_call(benchmark_callable, graph_file, edgevals=False, input_df=None):\n G = cugraph.Graph()\n G = graph_file.get_graph(ignore_weights=not edgevals)\n\n # If no vertex_pair is passed as input, 'cugraph.sorensen' will\n # compute the 'sorensen_similarity' with the two_hop_neighbor of the\n # entire graph while nx compute with the one_hop_neighbor. 
For better\n    # comparison, get the one_hop_neighbor of the entire graph for 'cugraph.sorensen'\n    # and pass it as vertex_pair\n    vertex_pair = input_df.rename(columns={\"0\": \"first\", \"1\": \"second\"})\n    vertex_pair = vertex_pair[[\"first\", \"second\"]]\n\n    # cugraph Sorensen Call\n    df = benchmark_callable(cugraph.sorensen, G, vertex_pair=vertex_pair)\n\n    df = df.sort_values([\"first\", \"second\"]).reset_index(drop=True)\n\n    return (\n        df[\"first\"].to_numpy(),\n        df[\"second\"].to_numpy(),\n        df[\"sorensen_coeff\"].to_numpy(),\n    )\n\n\ndef networkx_call(M, benchmark_callable=None):\n\n    sources = M[\"0\"]\n    destinations = M[\"1\"]\n    edges = []\n    for i in range(len(M)):\n        edges.append((sources[i], destinations[i]))\n        edges.append((destinations[i], sources[i]))\n    edges = list(dict.fromkeys(edges))\n    edges = sorted(edges)\n    # in NVGRAPH tests we read as CSR and feed as CSC, so here we are doing this\n    # explicitly\n    print(\"Format conversion ... \")\n\n    Gnx = nx.from_pandas_edgelist(\n        M, source=\"0\", target=\"1\", edge_attr=\"weight\", create_using=nx.Graph()\n    )\n\n    # Networkx Jaccard Call\n    print(\"Solving... \")\n    if benchmark_callable is not None:\n        preds = benchmark_callable(nx.jaccard_coefficient, Gnx, edges)\n    else:\n        preds = nx.jaccard_coefficient(Gnx, edges)\n\n    src = []\n    dst = []\n    coeff = []\n    for u, v, p in preds:\n        src.append(u)\n        dst.append(v)\n        # Conversion from Networkx Jaccard to Sorensen\n        # No networkX equivalent\n        coeff.append((2 * p) / (1 + p))\n    return src, dst, coeff\n\n\n# =============================================================================\n# Pytest Fixtures\n# =============================================================================\n@pytest.fixture(scope=\"module\", params=UNDIRECTED_DATASETS)\ndef read_csv(request):\n    \"\"\"\n    Read csv file for both networkx and cugraph\n    \"\"\"\n    graph_file = request.param\n    dataset_path = graph_file.get_path()\n    M = utils.read_csv_for_nx(dataset_path)\n    M_cu = utils.read_csv_file(dataset_path)\n\n    return M_cu, M, graph_file\n\n\n@pytest.mark.sg\ndef test_sorensen(gpubenchmark, read_csv):\n\n    M_cu, M, graph_file = read_csv\n    cu_src, cu_dst, cu_coeff = cugraph_call(gpubenchmark, graph_file, input_df=M_cu)\n    nx_src, nx_dst, nx_coeff = networkx_call(M)\n\n    # Calculating mismatch\n    err = 0\n    tol = 1.0e-06\n\n    assert len(cu_coeff) == len(nx_coeff)\n    for i in range(len(cu_coeff)):\n        if abs(cu_coeff[i] - nx_coeff[i]) > tol * 1.1:\n            err += 1\n\n    print(\"Mismatches: %d\" % err)\n    assert err == 0\n\n\n@pytest.mark.sg\ndef test_nx_sorensen_time(gpubenchmark, read_csv):\n\n    _, M, _ = read_csv\n    nx_src, nx_dst, nx_coeff = networkx_call(M, gpubenchmark)\n\n\n@pytest.mark.sg\n@pytest.mark.parametrize(\"graph_file\", [netscience])\n@pytest.mark.skip(reason=\"Skipping because this dataset is unrenumbered\")\ndef test_sorensen_edgevals(gpubenchmark, graph_file):\n    dataset_path = netscience.get_path()\n    M = utils.read_csv_for_nx(dataset_path)\n    M_cu = utils.read_csv_file(dataset_path)\n    cu_src, cu_dst, cu_coeff = cugraph_call(\n        gpubenchmark, netscience, edgevals=True, input_df=M_cu\n    )\n    nx_src, nx_dst, nx_coeff = networkx_call(M)\n\n    # Calculating mismatch\n    err = 0\n    tol = 1.0e-06\n\n    assert len(cu_coeff) == len(nx_coeff)\n    for i in range(len(cu_coeff)):\n        if abs(cu_coeff[i] - nx_coeff[i]) > tol * 1.1:\n            err += 1\n\n    print(\"Mismatches: %d\" % err)\n    assert err == 0\n\n\n@pytest.mark.sg\ndef test_sorensen_two_hop(read_csv):\n\n    _, M, graph_file = read_csv\n\n    Gnx = nx.from_pandas_edgelist(M, source=\"0\", target=\"1\", 
create_using=nx.Graph())\n G = graph_file.get_graph(ignore_weights=True)\n\n compare_sorensen_two_hop(G, Gnx)\n\n\n@pytest.mark.sg\ndef test_sorensen_two_hop_edge_vals(read_csv):\n\n _, M, graph_file = read_csv\n\n Gnx = nx.from_pandas_edgelist(\n M, source=\"0\", target=\"1\", edge_attr=\"weight\", create_using=nx.Graph()\n )\n\n G = graph_file.get_graph()\n\n compare_sorensen_two_hop(G, Gnx, edgevals=True)\n\n\n@pytest.mark.sg\ndef test_sorensen_multi_column(read_csv):\n\n _, M, _ = read_csv\n\n cu_M = cudf.DataFrame()\n cu_M[\"src_0\"] = cudf.Series(M[\"0\"])\n cu_M[\"dst_0\"] = cudf.Series(M[\"1\"])\n cu_M[\"src_1\"] = cu_M[\"src_0\"] + 1000\n cu_M[\"dst_1\"] = cu_M[\"dst_0\"] + 1000\n G1 = cugraph.Graph()\n G1.from_cudf_edgelist(\n cu_M, source=[\"src_0\", \"src_1\"], destination=[\"dst_0\", \"dst_1\"]\n )\n\n vertex_pair = cu_M[[\"src_0\", \"src_1\", \"dst_0\", \"dst_1\"]]\n vertex_pair = vertex_pair[:5]\n\n df_res = cugraph.sorensen(G1, vertex_pair)\n df_plc_exp = exp_sorensen(G1, vertex_pair)\n\n df_plc_exp = df_plc_exp.rename(\n columns={\n \"0_src\": \"0_source\",\n \"0_dst\": \"0_destination\",\n \"1_src\": \"1_source\",\n \"1_dst\": \"1_destination\",\n }\n )\n sorensen_res = df_res[\"sorensen_coeff\"].sort_values().reset_index(drop=True)\n sorensen_plc_exp = df_plc_exp[\"sorensen_coeff\"].sort_values().reset_index(drop=True)\n assert_series_equal(sorensen_res, sorensen_plc_exp)\n\n G2 = cugraph.Graph()\n G2.from_cudf_edgelist(cu_M, source=\"src_0\", destination=\"dst_0\")\n df_exp = cugraph.sorensen(G2, vertex_pair[[\"src_0\", \"dst_0\"]])\n\n # Calculating mismatch\n actual = df_res.sort_values(\"0_first\").reset_index()\n expected = df_exp.sort_values(\"first\").reset_index()\n assert_series_equal(actual[\"sorensen_coeff\"], expected[\"sorensen_coeff\"])\n\n\n@pytest.mark.sg\ndef test_weighted_exp_sorensen():\n karate = UNDIRECTED_DATASETS[0]\n G = karate.get_graph()\n with pytest.raises(ValueError):\n exp_sorensen(G)\n\n G = karate.get_graph(ignore_weights=True)\n use_weight = True\n with pytest.raises(ValueError):\n exp_sorensen(G, use_weight=use_weight)\n\n\n@pytest.mark.sg\ndef test_invalid_datasets_sorensen():\n karate = UNDIRECTED_DATASETS[0]\n df = karate.get_edgelist()\n df = df.add(1)\n G = cugraph.Graph(directed=False)\n G.from_cudf_edgelist(df, source=\"src\", destination=\"dst\")\n with pytest.raises(ValueError):\n cugraph.sorensen(G)\n","sub_path":"python/cugraph/cugraph/tests/link_prediction/test_sorensen.py","file_name":"test_sorensen.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"248277070","text":"from __future__ import print_function\nfrom redis_monitor.redis_monitor import RedisStats\nfrom redis_monitor.argsparse import parse_cli\nimport json\nimport redis_monitor.cli as monitor\nimport time\nimport sys\n\n\ndef json_output(r, options):\n if options.raw:\n data = r.get_raw_stats()\n elif options.mem:\n data = r.get_memory_stats()\n elif options.sys:\n data = r.get_system_stats()\n elif options.perf:\n data = r.get_performance_stats()\n elif options.conn:\n data = r.get_connection_stats()\n elif options.dbinstance:\n if options.dbinstance == \"SENT\":\n data = r.get_instances_summary_stats()\n else:\n data = r.get_instance_stats(options.dbinstance)\n else:\n data = r.get_full_summary_stats()\n # if debug change json output format\n if options.debug:\n print(json.dumps(data, indent=2))\n else:\n print(json.dumps(data))\n\n\ndef console_output(r, options):\n screen = None\n try:\n data = r.get_full_summary_stats()\n screen = monitor.monitor_active()\n while monitor.monitor_watch(screen, data):\n data = r.get_full_summary_stats()\n time.sleep(options.watch)\n except Exception as e:\n print(e)\n finally:\n if screen is not None:\n monitor.monitor_deactivate(screen)\n\n\nif __name__ == \"__main__\":\n options = parse_cli()\n r = RedisStats(host=options.host,\n port=options.port,\n password=options.pwd)\n if options.json:\n json_output(r, options)\n else:\n console_output(r, options)\n","sub_path":"redis_monitor.py","file_name":"redis_monitor.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"166173558","text":"from matplotlib.pyplot import * \nimport numpy as np  # np is used below; import it explicitly rather than relying on the pyplot star import\n\nbohr = 0.5291772105638411\n\npyeff = np.loadtxt('BE.dat')\n\nE_atom_pyeff = -6.08004291199\n\n\nfig1 = figure(1) \n\nplot(pyeff[:,0],pyeff[:,1],'o-',label=r'pyeff Li$_{2}$')\nplot([pyeff[1,0],pyeff[-1,0]],[2*E_atom_pyeff,2*E_atom_pyeff],label='pyeff 2 x Li atom')\nxlabel('Distance r$_{Li-Li}$ [$\\AA$]')\nylabel('Total energy [Hartree]')\nlegend()\nshow()\n\n","sub_path":"tests/Li2_BE/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"370717543","text":"import utils\nimport skimage\nimport skimage.morphology\nimport numpy as np\nfrom task3a import remove_noise\n\n\ndef distance_transform(im: np.ndarray) -> np.ndarray:\n \"\"\"\n A function that computes the distance to the closest boundary pixel.\n\n args:\n im: np.ndarray of shape (H, W) with boolean values (dtype=np.bool)\n return:\n (np.ndarray) of shape (H, W). dtype=np.int32\n \"\"\"\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n # You can also define other helper functions\n assert im.dtype == np.bool\n\n structuring_element = np.array([\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]\n ], dtype=bool)\n\n result = im.astype(np.int32)\n\n previous_image = im.copy()\n \n intensity = 0\n\n # Loop until entire image is black. When pixels disappear, we have their intensity.\n while True:\n if np.sum(im) == 0:\n break\n\n previous_image = im\n\n # Perform binary erosion.\n im = skimage.morphology.binary_erosion(im, selem=structuring_element)\n\n # Check which pixels disappeared.\n for row in range(im.shape[0]):\n for col in range(im.shape[1]):\n # Pixel exists in previous iteration, but disappeared now.\n if im[row, col] == False and previous_image[row, col] == True:\n # Set output intensity.\n result[row, col] = intensity\n\n intensity += 1\n\n return result\n\n ### END YOUR CODE HERE ### \n\n\n\nif __name__ == \"__main__\":\n im = utils.read_image(\"noisy.png\")\n binary_image = (im != 0)\n noise_free_image = remove_noise(binary_image)\n distance = distance_transform(noise_free_image)\n\n assert im.shape == distance.shape, \\\n \"Expected image shape ({}) to be same as resulting image shape ({})\".format(\n im.shape, distance.shape)\n assert distance.dtype == np.int32, \\\n \"Expected resulting image dtype to be np.int32. Was: {}\".format(\n distance.dtype)\n\n distance = utils.to_uint8(distance)\n utils.save_im(\"noisy-distance.png\", distance)\n\n \n \n\n\n\n","sub_path":"assignment3/task3b.py","file_name":"task3b.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"295742682","text":"import os\r\nfrom telegram.ext import Updater, CommandHandler, CallbackQueryHandler\r\nimport telegram\r\nimport RuBot, BusBot, CalendarBot\r\n\r\nruBot = RuBot.RuBot()\r\nbusBot = BusBot.BusBot()\r\ncalendarBot = CalendarBot.CalendarBot()\r\n\r\ndef showStartMenu(bot, update):\r\n msgToSend = 'Olá!\\nSelecione uma opção para continuar...'\r\n\r\n keyboard = [\r\n [\r\n telegram.InlineKeyboardButton('Cardápio RU', callback_data = 'cardapio-ru'),\r\n telegram.InlineKeyboardButton('Horário ônibus', callback_data = 'onibus')\r\n ],\r\n [\r\n telegram.InlineKeyboardButton('Calendário acadêmico', callback_data = 'academic-calendar')\r\n ]\r\n ]\r\n\r\n reply_markup = telegram.InlineKeyboardMarkup(keyboard)\r\n\r\n bot.send_message(\r\n chat_id = update.message.chat_id,\r\n text = msgToSend,\r\n reply_markup = reply_markup\r\n )\r\n\r\ndef callHandler(bot, update):\r\n if update.callback_query.data == 'cardapio-ru':\r\n ruBot.selectCampus(bot, update)\r\n elif update.callback_query.data[:2] == 'RU':\r\n ruBot.showCardapio(bot, update, update.callback_query.data[3:])\r\n elif update.callback_query.data == 'onibus':\r\n busBot.selectCampus(bot, update)\r\n elif update.callback_query.data[:3] == 'bus':\r\n busBot.selectStartPoint(bot, update, update.callback_query.data[4:])\r\n elif update.callback_query.data[:13] == 'startPointBus':\r\n busBot.showSchedule(bot, update, update.callback_query.data[14:])\r\n elif update.callback_query.data == 'academic-calendar':\r\n calendarBot.getCalendar(bot, update)\r\n\r\ndef main():\r\n updater = Updater(os.environ['telegramToken'])\r\n dp = updater.dispatcher\r\n dp.add_handler(CommandHandler('start', showStartMenu))\r\n dp.add_handler(CommandHandler('cal_academico', calendarBot.getCalendar))\r\n dp.add_handler(CallbackQueryHandler(callHandler))\r\n updater.start_polling()\r\n updater.idle() \r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"src/uffsBot.py","file_name":"uffsBot.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"6057318","text":"def resultado(act_cor,act_no_cor,patri_neto,pasivo_cor,pasivo_no_cor,roa,roe,\r\nact_cor_1,act_no_cor_1,patri_neto_1,pasivo_cor_1,pasivo_no_cor_1,roa_1,roe_1,\r\nact_cor_2,act_no_cor_2,patri_neto_2,pasivo_cor_2,pasivo_no_cor_2,roa_2,roe_2,\r\nact_cor_3,act_no_cor_3,patri_neto_3,pasivo_cor_3,pasivo_no_cor_3,roa_3,roe_3):\r\n\r\n Fondo_de_maniobra = act_cor - pasivo_cor\r\n ratio_de_liquidez = act_cor/pasivo_cor\r\n Ratio_de_endeudamiento = (pasivo_no_cor+ pasivo_cor)/patri_neto\r\n Ratio_de_endeudamiento_a_largo_plazo = pasivo_no_cor/patri_neto\r\n Ratio_de_deuda = (pasivo_no_cor + pasivo_cor)/(act_cor+act_no_cor)\r\n Ratio_de_apalancamiento_financiero = (act_cor+act_no_cor)/patri_neto\r\n ROA = roa\r\n ROE= roe\r\n Fondo_de_maniobra_1 = act_cor_1 - pasivo_cor_1\r\n ratio_de_liquidez_1 = act_cor_1/pasivo_cor_1\r\n Ratio_de_endeudamiento_1 = (pasivo_no_cor_1+ pasivo_cor_1)/patri_neto_1\r\n Ratio_de_endeudamiento_a_largo_plazo_1 = pasivo_no_cor_1/patri_neto_1\r\n Ratio_de_deuda_1 = (pasivo_no_cor_1 + pasivo_cor_1)/(act_cor_1+act_no_cor_1)\r\n Ratio_de_apalancamiento_financiero_1 = (act_cor_1+act_no_cor_1)/patri_neto_1\r\n ROA_1 = roa_1\r\n ROE_1= roe_1\r\n Fondo_de_maniobra_2 = act_cor_2 - pasivo_cor_2\r\n ratio_de_liquidez_2 = act_cor_2/pasivo_cor_2\r\n Ratio_de_endeudamiento_2 = (pasivo_no_cor_2+ pasivo_cor_2)/patri_neto_2\r\n Ratio_de_endeudamiento_a_largo_plazo_2 = pasivo_no_cor_2/patri_neto_2\r\n Ratio_de_deuda_2 = (pasivo_no_cor_2 + pasivo_cor_2)/(act_cor_2+act_no_cor_2)\r\n Ratio_de_apalancamiento_financiero_2 = (act_cor_2+act_no_cor_2)/patri_neto_2\r\n ROA_2 = roa_2\r\n ROE_2= roe_2\r\n Fondo_de_maniobra_3 = act_cor_3 - pasivo_cor_3\r\n ratio_de_liquidez_3 = act_cor_3/pasivo_cor_3\r\n Ratio_de_endeudamiento_3 = (pasivo_no_cor_3+ pasivo_cor_3)/patri_neto_3\r\n Ratio_de_endeudamiento_a_largo_plazo_3 = pasivo_no_cor_3/patri_neto_3\r\n Ratio_de_deuda_3 = (pasivo_no_cor_3 + pasivo_cor_3)/(act_cor_3+act_no_cor_3)\r\n Ratio_de_apalancamiento_financiero_3 = (act_cor_3+act_no_cor_3)/patri_neto_3\r\n ROA_3 = roa_3\r\n ROE_3 = roe_3\r\n\r\n return(Fondo_de_maniobra,Fondo_de_maniobra_1,Fondo_de_maniobra_2,Fondo_de_maniobra_3,\r\n ROA,ROA_1,ROA_2,ROA_3,\r\n ROE,ROE_1,ROE_2,ROE_3,\r\n Ratio_de_apalancamiento_financiero,Ratio_de_apalancamiento_financiero_1,Ratio_de_apalancamiento_financiero_2,Ratio_de_apalancamiento_financiero_3,\r\n Ratio_de_deuda,Ratio_de_deuda_1,Ratio_de_deuda_2,Ratio_de_deuda_3,\r\n Ratio_de_endeudamiento,Ratio_de_endeudamiento_1,Ratio_de_endeudamiento_2,Ratio_de_endeudamiento_3,\r\n Ratio_de_endeudamiento_a_largo_plazo,Ratio_de_endeudamiento_a_largo_plazo_1,Ratio_de_endeudamiento_a_largo_plazo_2,Ratio_de_endeudamiento_a_largo_plazo_3,\r\n ratio_de_liquidez,ratio_de_liquidez_1,ratio_de_liquidez_2,ratio_de_liquidez_3)\r\n\r\n\r\n\r\n\r\n","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"325628305","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n\"\"\"\n\"\"\"\n\n\nval = input(\"Please input temperature with label(e.g. 32C): \")\n\nif val[-1] in ['C', 'c']:\n f = 1.8 * float(val[0:-1]) + 32\n print(\"The trans temperature is: %.2fF\" % f)\nelif val[-1] in ['F', 'f']:\n c = (float(val[0:-1]) - 32) / 1.8\n print(\"The trans temperature is %0.2fC\" % c)\nelse:\n print(\"Input error...\")\n\n","sub_path":"python/BIT-268001/code/tempconvert.py","file_name":"tempconvert.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"4598496","text":"# -*- coding: utf-8 -*-\nimport shutil\nimport glob\nimport re\n\n\ndef numericalSort(value):\n    numbers = re.compile(r'(\\d+)')\n    parts = numbers.split(value)\n    parts[1::2] = map(int, parts[1::2])\n    return parts\n\nchild_dir = 'task4/'\nfile_list = sorted(glob.glob('task4/*.csv'), key=numericalSort)\n\nind = 0\nfor file_name in file_list:\n    #print(file_name)\n    new_name = 'yoshida_task4_' + str(ind) + '.csv'\n    print(new_name)\n    shutil.move(file_name, child_dir + new_name)\n    ind += 1\n","sub_path":"gaze_ana/src/evalGIBresult/rename2 copy.py","file_name":"rename2 copy.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"90701637","text":"# coding:utf-8\nimport requests\nimport time\nfrom datetime import datetime\n\n'''\nScrape job postings from Zhaopin (zhaopin.com).\n'''\n\ndef getJsonData(keyword, page, city):\n\n    url = \"https://fe-api.zhaopin.com/c/i/sou\"\n    params = {\n        'start': f'{(page - 1) * 90}',\n        'pageSize': '90',\n        'cityId': f'{(530 + city)}',\n        'salary': '0,0',\n        'workExperience': '-1',\n        'education': '-1',\n        'companyType': '-1',\n        'employmentType': '-1',\n        'jobWelfareTag': '-1',\n        'kw': f'{keyword}',\n        'kt': '3',\n        '_v': '0.02270441',\n        'x-zp-page-request-id': 'a9819f27e6be4eeb867e2e61a068255c-1573447961742-743855',\n        'x-zp-client-id': '06a1d1b2-c25b-4359-c5eb-42dd0ba470f3',\n    }\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n    }\n\n    try:\n        response = requests.get(url, headers=headers, params=params, timeout=5)\n        if response.status_code == 200:\n            response.encoding = 'utf-8'\n            return response.json()\n    except:\n        print(\"Request timed out!\")\n\ndef parseJsonData(jsonData):\n    data = []\n    for dat in jsonData['data']['results']:\n        # columns: job title, company name, work location, post date, salary\n        data.append([dat['jobName'], dat['company']['name'], dat['city']['items'][0]['name'], str(datetime.strptime(dat['updateDate'], \"%Y-%m-%d %H:%M:%S\").date()), dat['salary']])\n    return data\n\ndef saveData(data):\n    file = open('111.txt','a',encoding='UTF-8')\n    for link in data:\n        for s in link:\n            file.write(s + \",\")\n\n        file.write(\"\\n\")\n\n    file.close()\n\nif __name__ == '__main__':\n    keyword = input(\"Enter the job title to search for: \")\n\n    city = 0\n    # for city in range(0, 300):\n    while True:\n        print(\"City id: \" + str(530 + city))\n        # print(parseJsonData(getJsonData(keyword, page, city)))\n        page = 1\n        error = 0\n\n        while True:\n            if error <= 3:\n                try:\n                    result = getJsonData(keyword, page, city)\n                    # print(parseJsonData(getJsonData(keyword, page, city)))\n                    if result['data']['results'] == []:\n                        print(\"Finished collecting postings for the current city!\")\n                        break\n                    else:\n                        print(\"Crawling page \" + str(page))\n                        data = parseJsonData(result)\n                        saveData(data)\n                        time.sleep(5)\n                        page += 1\n\n                except:\n                    print(\"Error!\")\n                    error += 1\n            else:\n                break\n\n        city += 1\n\n","sub_path":"PythonSpider/Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"594744704","text":"import json\r\nfrom ADT.adt import MatchList\r\n\r\n\r\nclass League:\r\n \"\"\"Represents a league class\"\"\"\r\n def __init__(self, code):\r\n self.code = code\r\n self._standings = self.__get_standings()\r\n self.matches = self.__get_matches()\r\n self.teams = self.__get_teams()\r\n\r\n def __get_teams(self):\r\n \"\"\"\r\n Returns a dict\r\n key = team name\r\n item = Team object\r\n :return: dict\r\n \"\"\"\r\n teams = {}\r\n with open(\"data/\" + self.code + \"teams.json\", \"r\", encoding=\"utf-8\") as file:\r\n teams_js = json.load(file)\r\n self.matchday = teams_js['season']['currentMatchday'] - 1\r\n for team in teams_js['teams']:\r\n teams[team['name']] = Team(team['name'], team['id'], self.matches, self.matchday, self._standings)\r\n return teams\r\n\r\n def __get_standings(self):\r\n \"\"\"\r\n Returns a dictionary where key is team name and value is the team`s position\r\n :return: dict\r\n \"\"\"\r\n standings = {}\r\n with open(\"data/\" + self.code + \"standings.json\", \"r\", encoding=\"utf-8\") as file:\r\n standings_js = json.load(file)\r\n for team in standings_js['standings'][0]['table']:\r\n standings[team['team']['name']] = team['position']\r\n return standings\r\n\r\n def print_standings(self):\r\n \"\"\"\r\n Prints the standings\r\n :return: None\r\n \"\"\"\r\n with open(\"data/\" + self.code + \"standings.json\", \"r\", encoding=\"utf-8\") as file:\r\n standings_js = json.load(file)\r\n print(standings_js['competition']['name'])\r\n print(\"№ \" + \"Name\" + (24*\" \") + \"G W D L GF GA GD P\")\r\n for team in standings_js['standings'][0]['table']:\r\n print((2-len(str(team['position'])))*\" \" + str(team['position']) + \" \" + team['team']['name'] +\r\n (27 - len(team['team']['name']))*\" \" + \" \"*(2-len(str(team['playedGames']))) +\r\n str(team['playedGames']) + \" \" * (3 - len(str(team['won']))) + str(team['won']) +\r\n \" \" * (3 - len(str(team['draw']))) + str(team['draw']) +\r\n \" \" * (3 - len(str(team['lost']))) + str(team['lost']) + \" \" * (4 - len(str(team['goalsFor']))) +\r\n str(team['goalsFor']) + \" \" * (4 - len(str(team['goalsAgainst']))) + str(team['goalsAgainst']) +\r\n \" \" * (4 - len(str(team['goalDifference']))) + str(team['goalDifference']) +\r\n (\" \" * (4 - len(str(team['points']))) + str(team['points'])))\r\n\r\n def print_scorers(self):\r\n \"\"\"\r\n Prints a list of top 10 scorers of the league\r\n :return: None\r\n \"\"\"\r\n with open(\"data/\" + self.code + \"scorers.json\", \"r\", encoding=\"utf-8\") as file:\r\n scorers = json.load(file)\r\n print(\"Top Scorers:\")\r\n num = 1\r\n for player in scorers['scorers']:\r\n print(\" \"*(2 - len(str(num))) + str(num) + \".\" + player['player']['name'] +\r\n \" \"*(26-len(player['player']['name'])) +\r\n player['team']['name'] + \" \"*(26-len(player['team']['name'])) +\r\n str(player['numberOfGoals']))\r\n num += 1\r\n\r\n def __get_matches(self):\r\n \"\"\"\r\n Returns all league matches\r\n :return: MatchList object\r\n \"\"\"\r\n with open(\"data/\" + self.code + \"matches.json\", \"r\", encoding=\"utf-8\") as file:\r\n matches_js = json.load(file)\r\n lst = matches_js['matches']\r\n matches = MatchList()\r\n for match in lst:\r\n matches.append((match['status'], match['matchday'], match['homeTeam']['name'],\r\n match['awayTeam']['name'], match['score']['fullTime']))\r\n return matches\r\n\r\n def print_matchday(self, num):\r\n \"\"\"\r\n Prints all matches of given matchday\r\n :param num: matchday number\r\n :return: None\r\n \"\"\"\r\n matches 
= \"\"\r\n        for match in self.matches.get_matchday(num):\r\n            matches += \"{} {}-{} {}\".format(match[2], match[4]['homeTeam'], match[4]['awayTeam'], match[3]) + \"\\n\"\r\n        print(\"MatchDay {}\".format(num))\r\n        print(matches)\r\n\r\n\r\nclass Team:\r\n    \"\"\"Represents one football team\"\"\"\r\n    def __init__(self, name, team_id, match_list, matchday, standings):\r\n        self.name = name\r\n        self.id = team_id\r\n        self.match_list = match_list\r\n        self._matchday = matchday\r\n        self.position = standings[self.name]\r\n        self._standings = standings\r\n        self._all_matches = None\r\n        self._stat = None\r\n        self._stat_percentage = None\r\n        self._games_num = None\r\n        self._goals = None\r\n        self._home_matches = None\r\n        self._away_matches = None\r\n        self._home_stat = None\r\n        self._away_stat = None\r\n        self._home_stat_percentage = None\r\n        self._away_stat_percentage = None\r\n        self._home_goals = None\r\n        self._away_goals = None\r\n        self._avg_goals_scored = None\r\n        self._avg_goals_missed = None\r\n        self._home_goals_avg_scored = None\r\n        self._home_goals_avg_missed = None\r\n        self._away_goals_avg_scored = None\r\n        self._away_goals_avg_missed = None\r\n        self._top6_stat = None\r\n        self.get_stats()\r\n        self._form = self.get_form(5)\r\n\r\n    def get_stats(self):\r\n        \"\"\"\r\n        Computes the stats, assigns the values to the attributes, and returns a summary\r\n        :return: dict with the team's summary statistics\r\n        \"\"\"\r\n        self._all_matches = self.match_list.get_team_matches(self.name)\r\n        self._stat = self.match_list.get_stats(self.name)\r\n        self._stat_percentage = (round((self._stat[1]/self._stat[0])*100, 1),\r\n                                 round((self._stat[2]/self._stat[0])*100, 1),\r\n                                 round((self._stat[3]/self._stat[0])*100, 1))\r\n        self._games_num = self._stat[0]\r\n        self._goals = self.match_list.get_goals(self.name)\r\n        self._home_matches = self.match_list.get_home_matches(self.name)\r\n        self._away_matches = self.match_list.get_away_matches(self.name)\r\n        self._home_stat = self._home_matches.get_stats(self.name)\r\n        self._away_stat = self._away_matches.get_stats(self.name)\r\n        self._home_stat_percentage = (round((self._home_stat[1] / self._home_stat[0])*100, 1),\r\n                                      round((self._home_stat[2] / self._home_stat[0])*100, 1),\r\n                                      round((self._home_stat[3] / self._home_stat[0])*100, 1))\r\n        self._away_stat_percentage = (round((self._away_stat[1] / self._away_stat[0])*100, 1),\r\n                                      round((self._away_stat[2] / self._away_stat[0])*100, 1),\r\n                                      round((self._away_stat[3] / self._away_stat[0])*100, 1))\r\n\r\n        self._home_goals = self._home_matches.get_goals(self.name)\r\n        self._away_goals = self._away_matches.get_goals(self.name)\r\n        self._avg_goals_scored = round(self._goals[0] / self._games_num, 2)\r\n        self._avg_goals_missed = round(self._goals[1] / self._games_num, 2)\r\n        self._home_goals_avg_scored = round(self._home_goals[0] / (self._home_stat[0]), 2)\r\n        self._home_goals_avg_missed = round(self._home_goals[1] / (self._home_stat[0]), 2)\r\n        self._away_goals_avg_scored = round(self._away_goals[0] / (self._away_stat[0]), 2)\r\n        self._away_goals_avg_missed = round(self._away_goals[1] / (self._away_stat[0]), 2)\r\n        self._top6_stat = self.against_top_6()\r\n        return {\"stat\": self._stat, \"percentage\": self._stat_percentage, \"goals\": self._goals,\r\n                \"home_stat\": self._home_stat, \"home_percentage\": self._home_stat_percentage,\r\n                \"away_stat\": self._away_stat, \"away_percentage\": self._away_stat_percentage,\r\n                \"avg_scored\": self._avg_goals_scored, \"avg_missed\": self._avg_goals_missed\r\n                }\r\n    \r\n    def get_form(self, num):\r\n        \"\"\"\r\n        Returns a MatchList class object that contains last 
number(num) of matches\r\n :param num: number of last matches\r\n :return: MatchList object\r\n \"\"\"\r\n return self._all_matches[self._matchday-num:self._matchday]\r\n\r\n def against_top_6(self):\r\n \"\"\"\r\n Returns a MatchList class object with matches only against top 6 teams in league\r\n :return: MatchList object with all matches against top 6 teams\r\n \"\"\"\r\n against_top6 = MatchList()\r\n for match in self._all_matches:\r\n if (self._standings[match[2]] <= 6 and match[2] != self.name) or \\\r\n (self._standings[match[3]] <= 6 and match[3] != self.name):\r\n against_top6.append(match)\r\n return against_top6.get_stats(self.name)\r\n\r\n def print_info(self):\r\n print('''\r\nTeam name: {}\r\nPosition: {}\r\nOVERALL statistics:\r\nWins - {}({}%) Draws - {}({}%) Loses - {}({}%)\r\nGoals scored - {}\r\nGoals missed - {}\r\nGoals per game scored(average) - {}\r\nGoals per game missed(average) - {}\r\nStatistics against top 6 teams:\r\nWins - {} Draws - {} Loses - {}\r\n---------------------------------------------\r\nHOME statistics:\r\nWins - {}({}%) Draws - {}({}%) Loses - {}({}%)\r\nGoals scored - {}\r\nGoals missed - {}\r\nGoals per game scored(average) - {}\r\nGoals per game missed(average) - {}\r\n---------------------------------------------\r\nAWAY statistics:\r\nWins - {}({}%) Draws - {}({}%) Loses - {}({}%)\r\nGoals scored - {}\r\nGoals missed - {}\r\nGoals per game scored(average) - {}\r\nGoals per game missed(average) - {}\r\n---------------------------------------------\r\nFORM(Last 5 games):\r\n{} {}-{} {}\r\n{} {}-{} {}\r\n{} {}-{} {}\r\n{} {}-{} {}\r\n{} {}-{} {}\r\n'''.format(self.name, self.position, self._stat[1], self._stat_percentage[0], self._stat[2], self._stat_percentage[1],\r\n self._stat[3], self._stat_percentage[2], self._goals[0], self._goals[1], self._avg_goals_scored,\r\n self._avg_goals_missed, self._top6_stat[1], self._top6_stat[2], self._top6_stat[3],\r\n self._home_stat[1], self._home_stat_percentage[0], self._home_stat[2], self._home_stat_percentage[1],\r\n self._home_stat[3], self._home_stat_percentage[2], self._home_goals[0], self._home_goals[1],\r\n self._home_goals_avg_scored, self._home_goals_avg_missed,\r\n self._away_stat[1], self._away_stat_percentage[0], self._away_stat[2], self._away_stat_percentage[1],\r\n self._away_stat[3], self._away_stat_percentage[2], self._away_goals[0], self._away_goals[1],\r\n self._away_goals_avg_scored, self._away_goals_avg_missed,\r\n self._form[0][2], self._form[0][4]['homeTeam'], self._form[0][4]['awayTeam'], self._form[0][3],\r\n self._form[1][2], self._form[1][4]['homeTeam'], self._form[1][4]['awayTeam'], self._form[1][3],\r\n self._form[2][2], self._form[2][4]['homeTeam'], self._form[2][4]['awayTeam'], self._form[2][3],\r\n self._form[3][2], self._form[3][4]['homeTeam'], self._form[3][4]['awayTeam'], self._form[3][3],\r\n self._form[4][2], self._form[4][4]['homeTeam'], self._form[4][4]['awayTeam'] , self._form[4][3]))\r\n\r\n def print_head_to_head(self, other_team):\r\n \"\"\"\r\n Prints comparison of two teams and their previous games against each other\r\n :param other_team: name of team to compare with\r\n :return: None\r\n \"\"\"\r\n head_to_head_matches = self._all_matches.get_team_matches(other_team.name)\r\n matches = \"\"\r\n for match in head_to_head_matches:\r\n matches += \"{} {}-{} {}\".format(match[2], match[4]['homeTeam'], match[4]['awayTeam'], match[3]) + \"\\n\"\r\n other = other_team.get_stats()\r\n 
print(\"\"\"\\t\\t{}\\t\\t\\t{}\r\n\\t\\t\\t{}\\tposition\\t{}\r\n{}({}%)-{}({}%)-{}({}%)\\tstat\\t\\t{}({}%)-{}({}%)-{}({}%)\r\n{}({}%)-{}({}%)-{}({}%)\\thome stat\\t{}({}%)-{}({}%)-{}({}%)\\t\r\n{}({}%)-{}({}%)-{}({}%)\\taway stat\\t{}({}%)-{}({}%)-{}({}%)\\t\r\n\\t\\t\\t{}\\tgoals scored\\t{}\r\n\\t\\t\\t{}\\tgoals missed\\t{}\r\n\\t\\t\\t{}\\tavg scored\\t{}\r\n\\t\\t\\t{}\\tavg missed\\t{}\r\n\"\"\".format(self.name, other_team.name, self.position, other_team.position,\r\n self._stat[1], self._stat_percentage[0], self._stat[2], self._stat_percentage[1],\r\n self._stat[3], self._stat_percentage[2], other['stat'][1], other['percentage'][0], other['stat'][2],\r\n other['percentage'][1], other['stat'][3], other['percentage'][2],\r\n self._home_stat[1], self._home_stat_percentage[0], self._home_stat[2], self._home_stat_percentage[1],\r\n self._home_stat[3], self._home_stat_percentage[2], other['home_stat'][1],\r\n other['home_percentage'][0], other['home_stat'][2], other['home_percentage'][1],\r\n other['home_stat'][3], other['home_percentage'][2],\r\n self._away_stat[1], self._away_stat_percentage[0], self._away_stat[2], self._away_stat_percentage[1],\r\n self._away_stat[3], self._away_stat_percentage[2], other['away_stat'][1],\r\n other['away_percentage'][0], other['away_stat'][2], other['away_percentage'][1],\r\n other['away_stat'][3], other['away_percentage'][2],\r\n self._goals[0], other['goals'][0], self._goals[1], other['goals'][1],\r\n self._avg_goals_scored, other['avg_scored'], self._avg_goals_missed, other['avg_missed']))\r\n print(\"Previous matches:\\n\" + matches)\r\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":13267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"405996218","text":"#!/usr/bin/env python\n\nimport sys\nfrom argparse import ArgumentParser\nfrom mglib import AUTH_LIST, VERSION, API_URL, get_auth_token, urlencode, stdout_from_url\n\nprehelp = \"\"\"\nNAME\n mg-get-similarity-for-taxon\n\nVERSION\n %s\n\nSYNOPSIS\n mg-get-similarity-for-taxon [ --help, --user , --passwd , --token , --id , --name , --level , --source , --evalue , --identity , --length ]\n\nDESCRIPTION\n Retrieve taxa-annotated sequences for a metagenome, filtered by taxa whose name contains the given name.\n\"\"\"\n\nposthelp = \"\"\"\nOutput\n BLAST m8 format - tab-delimited list of: query sequence id, hit m5nr id, percentage identity, alignment length, number of mismatches, number of gap openings, query start, query end, hit start, hit end, e-value, bit score, semicolon separated list of annotations\n\nEXAMPLES\n mg-get-similarity-for-taxon --id \"mgm4441680.3\" --name Lachnospiraceae --level family --source RefSeq --evalue 8\n\nSEE ALSO\n -\n\nAUTHORS\n %s\n\"\"\"\n\ndef main(args):\n ArgumentParser.format_description = lambda self, formatter: self.description\n ArgumentParser.format_epilog = lambda self, formatter: self.epilog\n parser = ArgumentParser(usage='', description=prehelp%VERSION, epilog=posthelp%AUTH_LIST)\n parser.add_argument(\"--id\", dest=\"id\", default=None, help=\"KBase Metagenome ID\")\n parser.add_argument(\"--url\", dest=\"url\", default=API_URL, help=\"communities API url\")\n parser.add_argument(\"--user\", dest=\"user\", default=None, help=\"OAuth username\")\n parser.add_argument(\"--passwd\", dest=\"passwd\", default=None, help=\"OAuth password\")\n parser.add_argument(\"--token\", dest=\"token\", default=None, help=\"OAuth token\")\n parser.add_argument(\"--name\", dest=\"name\", default=None, help=\"taxon name to filter by\")\n parser.add_argument(\"--level\", dest=\"level\", default=None, help=\"taxon level to filter by\")\n parser.add_argument(\"--source\", dest=\"source\", default='SEED', help=\"datasource to filter results by, default is SEED\")\n parser.add_argument(\"--evalue\", dest=\"evalue\", default=5, help=\"negative exponent value for maximum e-value cutoff, default is 5\")\n parser.add_argument(\"--identity\", dest=\"identity\", default=60, help=\"percent value for minimum %% identity cutoff, default is 60\")\n parser.add_argument(\"--length\", dest=\"length\", default=15, help=\"value for minimum alignment length cutoff, default is 15\")\n \n # get inputs\n opts = parser.parse_args()\n if not opts.id:\n sys.stderr.write(\"ERROR: id required\\n\")\n return 1\n \n # get auth\n token = get_auth_token(opts)\n \n # build url\n params = [ ('source', opts.source),\n ('evalue', opts.evalue),\n ('identity', opts.identity),\n ('length', opts.length),\n ('type', 'organism') ]\n if opts.name:\n params.append(('filter', opts.name))\n if opts.level:\n params.append(('filter_level', opts.level))\n url = opts.url+'/annotation/similarity/'+opts.id+'?'+urlencode(params, True)\n \n # output data\n stdout_from_url(url, auth=token)\n \n return 0\n \n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"scripts/mg-get-similarity-for-taxon.py","file_name":"mg-get-similarity-for-taxon.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"271223234","text":"import cv2\nimport numpy as np\nimg = cv2.imread(\"1*mk1-6aYaf_Bes1E3Imhc0A.jpeg\")\nlayer = img.copy()\ngp = [layer]\n\nfor i in range(6):\n layer = cv2.pyrDown(layer)\n gp.append(layer)\n cv2.imshow(str(i), layer)\n\nlayer = gp[5]\n\ncv2.imshow(\"Original Image\",img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"OpenCV/image_Pyramid_1.py","file_name":"image_Pyramid_1.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"18350833","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 17 22:39:17 2011\n\n@author: alex\n\"\"\"\nimport Image\nimport pylab as pb\nfrom numpy import array, ones\nfrom scipy.ndimage import gaussian_filter, convolve, sobel, \\\n maximum_filter, binary_dilation, \\\n median_filter\nfrom numpy.random import randn\n\nim = array(Image.open('10.jpg').convert(\"L\"))/255.\npb.clf()\npb.set_cmap(pb.cm.gray)\npb.imshow(im)\npb.savefig('000.png')\n\ndef build_pyramid(im, nlevels, method='gauss'):\n pim = im\n for i in xrange(nlevels):\n yield(im)\n if method == 'gauss':\n im = gaussian_filter(im, 1.4)\n else:\n im = convolve(im, ones((5,5))/25.)\n im = im[::2,::2] \n\n# Build the pyramid\nimp = list(build_pyramid(im, 6))\nfor i in xrange(6):\n pb.clf()\n pb.imshow(imp[i], interpolation = 'nearest')\n pb.title('Pyramid level %d' % i)\n pb.savefig('pyramid_level_%d.png' % i, dpi = 100)\n \n# Sobel filter\nfilter_x = [[-1, 0, 1],\n [-2, 0, 2],\n [-1,0,1]]\nfilter_x = array(filter_x)\nfilter_y = filter_x.transpose()\n\nimp = list(build_pyramid(im, 6))\nthreshold = 0.28\n\nfor i in xrange(6):\n edge_x = convolve(imp[i], filter_x) > threshold\n edge_y = convolve(imp[i], filter_y) > threshold\n edge = edge_x + edge_y\n\n pb.clf()\n pb.title('x+y edges level %d' % i)\n pb.imshow(edge, interpolation = 'nearest')\n pb.savefig('sobel_edge_%d.png' % i, dpi = 100)\n\n\n# Canny\nimport numpy\nfrom scipy import ndimage\n# Filter kernels for calculating the value of neighbors in several directions\n_N = numpy.array([[0, 1, 0],\n [0, 0, 0],\n [0, 1, 0]],\n dtype=bool)\n_NE = numpy.array([[0, 0, 1],\n [0, 0, 0],\n [1, 0, 0]],\n dtype=bool)\n_W = numpy.array([[0, 0, 0],\n [1, 0, 1],\n [0, 0, 0]],\n dtype=bool)\n_NW = numpy.array([[1, 0, 0],\n [0, 0, 0],\n [0, 0, 1]],\n dtype=bool)\n\n# After quantizing the angles, vertical (north-south) edges get values of 3,\n# northwest-southeast edges get values of 2, and so on, as below:\n_NE_d = 0\n_W_d = 1\n_NW_d = 2\n_N_d = 3\n\ndef canny(image, high_threshold, low_threshold):\n grad_x = ndimage.sobel(image, 0)\n grad_y = ndimage.sobel(image, 1)\n grad_mag = numpy.sqrt(grad_x**2+grad_y**2)\n grad_angle = numpy.arctan2(grad_y, grad_x)\n # next, scale the angles in the range [0, 3] and then round to quantize\n quantized_angle = numpy.around(3 * (grad_angle + numpy.pi) / (numpy.pi * 2))\n # Non-maximal suppression: an edge pixel is only good if its magnitude is\n # greater than its neighbors normal to the edge direction. 
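This keeps only the local maxima along the gradient direction. 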
We quantize\n # edge direction into four angles, so we only need to look at four\n # sets of neighbors\n NE = ndimage.maximum_filter(grad_mag, footprint=_NE)\n W = ndimage.maximum_filter(grad_mag, footprint=_W)\n NW = ndimage.maximum_filter(grad_mag, footprint=_NW)\n N = ndimage.maximum_filter(grad_mag, footprint=_N)\n thinned = (((grad_mag > W) & (quantized_angle == _N_d )) |\n ((grad_mag > N) & (quantized_angle == _W_d )) |\n ((grad_mag > NW) & (quantized_angle == _NE_d)) |\n ((grad_mag > NE) & (quantized_angle == _NW_d)) )\n thinned_grad = thinned * grad_mag\n # Now, hysteresis thresholding: find seeds above a high threshold, then\n # expand out until we go below the low threshold\n high = thinned_grad > high_threshold\n low = thinned_grad > low_threshold\n canny_edges = ndimage.binary_dilation(high, iterations=-1, mask=low)\n return grad_mag, thinned_grad, canny_edges\n\nimp = list(build_pyramid(im, 6))\nlow = 0.28\nhigh = 0.65\n\nfor i in xrange(6):\n pb.clf()\n grad_mag, thinned_grad, canny_edges = canny(imp[i], low, high)\n pb.imshow(grad_mag, interpolation = 'nearest')\n pb.title('Gradient magnitude level %d' % i)\n pb.savefig('grad_%d.png' % i, dpi = 100)\n pb.clf()\n pb.imshow(thinned_grad, interpolation = 'nearest')\n pb.title('Thinned gradient level %d' % i)\n pb.savefig('thinned_grad_%d.png' % i, dpi = 100)\n pb.clf()\n pb.imshow(canny_edges, interpolation = 'nearest')\n pb.title('Canny edges level %d' % i)\n pb.savefig('canny_%d.png' % i, dpi = 100)\n\n# Add Gaussian noise with sigma = 0.1 + 0.2*10/20\nsigma = 0.1 + 0.2*10/20\n\nimpp = im + randn(*im.shape) * sigma\npb.clf()\npb.imshow(impp)\npb.title('Image with gauss noise')\npb.savefig('gauss_noise.png')\n\n# Remove the noise with a Gaussian filter\npb.title('Gauss filter')\npb.imshow(gaussian_filter(impp, 1.5))\npb.savefig('noise_gaussfilter.png')\n\n# Remove the noise with a median (rank) filter\npb.title('Median filter')\npb.imshow(median_filter(impp, 4.5))\npb.savefig('noise_rangfilter.png')\n\n# Add \"salt and pepper\" noise\ndef salt_and_pepper_noise(image):\n noise = randn(*image.shape)\n res = array(image)\n res[noise > 2] = 1\n res[noise < -2] = 0\n return res\n\nim1 = salt_and_pepper_noise(im)\npb.clf()\npb.title('Salt and pepper')\npb.imshow(im1)\npb.savefig('salt_and_pepper.png')\n\n# Remove the noise with a Gaussian filter\npb.title('Gauss filter')\npb.imshow(gaussian_filter(im1, 1.8))\npb.savefig('sap_gaussfilter.png')\n\n# Remove the noise with a median (rank) filter\npb.title('Median filter')\npb.imshow(median_filter(im1, 4.5))\npb.savefig('sap_rangfilter.png')\n","sub_path":"turborufus/lab3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"433418856","text":"#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n\nimport os\nimport sys\nimport argparse\n\n\"\"\"\nA python script template.\n\n\"\"\"\n\n#version string\n__version__ = \"0.0.1\"\n\ndef main():\n \n parser = argparse.ArgumentParser(description=__doc__, \n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('-a', '--abacad', help='An option')\n\n args = parser.parse_args()\n print(args)\n \nif __name__ == '__main__':\n sys.exit(main())\n\n","sub_path":"sge/test_argparse_shell/ap.py","file_name":"ap.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"269192874","text":"from pathlib import Path\n\nfrom shapely.geometry import Point\nimport geopandas as gpd\n\nfrom dublin_building_stock.spatial_operations import get_geometries_within\n\n\ndata_dir = Path(\"../data\")\n\n\ndublin_boundary = gpd.read_file(data_dir / \"dublin_boundary.geojson\", driver=\"GeoJSON\")\n\ndublin_routing_key_boundaries = gpd.read_file(\n data_dir / \"dublin_routing_key_boundaries.geojson\",\n driver=\"GeoJSON\",\n)\n\ndublin_local_authority_boundaries = gpd.read_file(\n data_dir / \"dublin_local_authority_boundaries.geojson\", driver=\"GeoJSON\"\n).rename(columns={\"COUNTYNAME\": \"local_authority\"})\n\nuse_columns = [\"SMALL_AREA\", \"EDNAME\", \"geometry\"]\nireland_small_area_boundaries = gpd.read_file(\n data_dir / \"Census2011_Small_Areas_generalised20m\"\n)[use_columns]\n\ntemple_bar_location = Point(715643, 734177)\nm_to_km = 1 / 1000\ndublin_small_area_boundaries = (\n ireland_small_area_boundaries.to_crs(epsg=2157)\n .pipe(get_geometries_within, dublin_boundary.to_crs(epsg=2157))\n .pipe(\n get_geometries_within,\n dublin_routing_key_boundaries.drop(columns=\"local_authority\").to_crs(epsg=2157),\n )\n .pipe(get_geometries_within, dublin_local_authority_boundaries.to_crs(epsg=2157))\n .assign(\n distance_to_city_centre_in_km=lambda gdf: gdf.geometry.representative_point()\n .distance(temple_bar_location)\n .multiply(m_to_km)\n .round(2)\n )\n)\n\ndublin_small_area_boundaries.to_file(\n data_dir / \"dublin_small_area_boundaries_2011.geojson\", driver=\"GeoJSON\"\n)","sub_path":"notebooks/wrangle_small_area_boundaries_2011.py","file_name":"wrangle_small_area_boundaries_2011.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"547459996","text":"import copy\nimport numpy as np\nfrom scipy.special import softmax\nimport tensorflow_probability as tfp\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\nclass np_nn_softmax_out:\n def __init__(self, inp=2, h1=256, h2=256, out=3, init_weights=None):\n if init_weights:\n self.init_weights(init_weights)\n else:\n self.weights = {\n 'w1': self.xavier_init(inp, h1),\n 'b1': np.zeros((1, h1)),\n 'w2': self.xavier_init(h1, h2),\n 'b2': np.zeros((1, h2)),\n 'w3': self.xavier_init(h2, out),\n 'b3': np.zeros((1, out))\n }\n\n @staticmethod\n def xavier_init(h1, h2):\n glorot = 1.0 * np.sqrt(6.0 / (h1 + h2))\n size = (h1, h2)\n return np.random.uniform(-glorot, glorot, size)\n\n @staticmethod\n def relu(l):\n return np.where(l < 0, 0, l)\n\n @staticmethod\n def softmax(l):\n e_x = np.exp(l - np.max(l))\n return e_x / e_x.sum(axis=-1)\n\n def init_weights(self, init_weights):\n self.weights = copy.deepcopy(init_weights)\n\n def forward(self, inp):\n w1 = self.weights['w1']\n b1 = self.weights['b1']\n w2 = self.weights['w2']\n b2 = self.weights['b2']\n w3 = self.weights['w3']\n b3 = self.weights['b3']\n\n l1 = self.relu(inp @ w1 + b1)\n l2 = self.relu(l1 @ w2 + b2)\n out = self.softmax(l2 @ w3 + b3)\n\n return out\n\n\nclass tf_nn_softmax_out:\n def __init__(self, *args, **kwargs):\n self.build_model(*args, **kwargs)\n\n def build_model(self, inp=2, h1=256, h2=256, out=3, init_weights=None):\n self.inp = inp\n self.out = out\n\n inp = keras.Input(shape=(inp, ))\n x = keras.layers.Dense(\n h1, activation='relu', use_bias=True, kernel_initializer='glorot_uniform')(inp)\n x = keras.layers.Dense(\n h2, activation='relu', use_bias=True, kernel_initializer='glorot_uniform')(x)\n outp = keras.layers.Dense(\n out, activation='softmax', use_bias=True, kernel_initializer='glorot_uniform')(x)\n\n self.lr = keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate=1e-2, decay_steps=10000, decay_rate=0.95)\n # self.lr = 1e-2\n\n self.optimizer = keras.optimizers.SGD(learning_rate=self.lr)\n self.model = keras.Model(inputs=inp, outputs=outp)\n\n self.weights = self.model.trainable_weights\n\n def predict(self, inp):\n if not isinstance(inp, tf.Tensor):\n inp = tf.convert_to_tensor(np.array(inp).reshape(1, -1))\n return np.argmax(self.model(inp))\n\n def forward(self, inp):\n if not isinstance(inp, tf.Tensor):\n inp = tf.convert_to_tensor(np.array(inp).reshape(-1, self.inp))\n probs = self.model(inp)\n return probs.numpy()\n\n def distributions(self, inp):\n if not isinstance(inp, tf.Tensor):\n inp = tf.convert_to_tensor(np.array(inp).reshape(1, -1))\n probs = self.model(inp)\n return tfp.distributions.Categorical(probs=probs)\n\n def update_params(self, grads):\n self.optimizer.apply_gradients(\n zip(grads, self.model.trainable_weights))\n\n\nclass tf_nn_linear_out:\n def __init__(self, *args, **kwargs):\n self.build_model(*args, **kwargs)\n\n def build_model(self, inp=4, h1=256, h2=256, out=1, init_weights=None):\n self.inp = inp\n self.out = out\n\n inp = keras.Input(shape=(inp, ))\n x = keras.layers.Dense(\n h1, activation='relu', use_bias=True, kernel_initializer='glorot_uniform')(inp)\n x = keras.layers.Dense(\n h2, activation='relu', use_bias=True, kernel_initializer='glorot_uniform')(x)\n outp = keras.layers.Dense(\n out, activation='linear', use_bias=True, kernel_initializer='glorot_uniform')(x)\n\n self.lr = 1e-3\n self.optimizer = keras.optimizers.Adam(learning_rate=self.lr)\n self.model = keras.Model(inputs=inp, outputs=outp)\n\n self.weights = 
self.model.trainable_weights\n\n def forward(self, inp):\n if not isinstance(inp, tf.Tensor):\n inp = tf.convert_to_tensor(np.array(inp).reshape(-1, self.inp))\n return self.model(inp)\n\n def predict(self, inp):\n if not isinstance(inp, tf.Tensor):\n inp = tf.convert_to_tensor(np.array(inp).reshape(-1, self.inp))\n return self.model(inp).numpy()\n\n def update_params(self, grads):\n self.optimizer.apply_gradients(\n zip(grads, self.model.trainable_weights))\n","sub_path":"rl_research/algorithms/neural_networks.py","file_name":"neural_networks.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"248913021","text":"'''\n@Description: \n@version: \n@Author: Liang Anqing\n@Date: 2020-07-03 21:01:53\n@LastEditors: Liang Anqing\n@LastEditTime: 2020-07-03 21:02:41\n'''\n'''\n1299. Replace Elements with Greatest Element on Right Side\n\nGiven an array arr, replace every element with the greatest element among the elements to its right, and replace the last element with -1.\n\nAfter doing all the replacements, return the array.\n\nExample:\n\nInput: arr = [17,18,5,4,6,1]\nOutput: [18,6,6,6,1,-1]\n'''\ndef replaceElements(arr):\n \"\"\"\n :type arr: List[int]\n :rtype: List[int]\n \"\"\"\n t_max=arr[len(arr)-1]\n for i in range(len(arr)-1,-1,-1):\n if i==len(arr)-1:\n arr[i]=-1\n continue\n prev=arr[i]\n arr[i]=t_max\n t_max=max(t_max,prev)\n return arr","sub_path":"1299.将每个元素替换为右侧最大元素.py","file_name":"1299.将每个元素替换为右侧最大元素.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"600530133","text":"def welcome_message():\n printNow(\"Welcome to Bitspice Island!\")\n \ndef help_message():\n printNow(\"\"\"In each room you will be told which directions you can go\nYou'll be able to go north, south, east or west by typing that direction\nType help to redisplay this introduction\nType exit to quit at any time\"\"\")\ndef command_parser(command):\n if command == \"help\":\n help_message()\n return ('help', None)\n elif command == \"exit\":\n return ('exit', None)\n elif command.startswith(\"go\"):\n direction = command.split(\" \")[1]\n if direction == None:\n return ('move', (0,0))\n else:\n if direction == \"north\":\n return ('move', (0, -1))\n elif direction == \"south\":\n return ('move', (0, 1))\n elif direction == \"west\":\n return ('move', (-1, 0))\n elif direction == \"east\":\n return ('move', (1, 0))\n else:\n return ('move', (0,0))\n \nclass Room(object):\n \"\"\"base room class for all the rooms in the game\"\"\"\n def __init__(self, room_name, room_description, row, column, total_rows, total_columns):\n self.room_name = room_name\n self.room_description = room_description\n self.row = row\n self.column = column\n self.total_rows = total_rows\n self.total_columns = total_columns\n \n def get_move_directions(self):\n move_directions = []\n if self.column >= 0 and self.column < self.total_columns - 1:\n move_directions = move_directions + [\"east\"]\n if self.column < self.total_columns and self.column != 0:\n move_directions = move_directions + [\"west\"]\n if self.row >= 0 and self.row < self.total_rows - 1:\n move_directions = move_directions + [\"south\"]\n if self.row < self.total_rows and self.row != 0:\n move_directions = move_directions + [\"north\"]\n return move_directions\n \n def get_description(self):\n return self.room_description\n \n def get_name(self):\n return self.room_name\n \n def get_position(self):\n return (self.column, self.row)\n \n# Rooms with descriptions\nclass EntranceRoom(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(EntranceRoom, self).__init__(\"Entrance Room (\" + str(column) + \",\" + str(row) +\")\", \"This is the entrance to the room, you see nothing of particular value in this room.\", row, column, total_rows, total_columns)\nclass Lobby(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(Lobby, self).__init__(\"Lobby (\" + str(column) + \",\" + str(row) +\")\", \"This is the lobby, you see expensive furniture all around and two doors. A corner of this room is filled with broken chairs. You can go east or north.\", row, column, total_rows, total_columns)\nclass DiningRoom(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(DiningRoom, self).__init__(\"Dining Room (\" + str(column) + \",\" + str(row) +\")\", \"This is the dining room, you see a huge table. There are very expensive paintings on the wall and two doors. You can go east or west.\", row, column, total_rows, total_columns)\nclass DarkRoom(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(DarkRoom, self).__init__(\"Dark Room (\" + str(column) + \",\" + str(row) +\")\", \"This is the dark room, you see a large collection of photos of previous visitors. 
There are two doors, you can go north and south.\", row, column, total_rows, total_columns)\nclass WineCellar(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(WineCellar, self).__init__(\"Wine Cellar (\" + str(column) + \",\" + str(row) +\")\", \"This is the wine cellar, the walls look like a medieval castle. On the east wall, you spot a key wrapped around an old bottle. You can go west or north.\", row, column, total_rows, total_columns)\nclass Basement(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(Basement, self).__init__(\"Basement (\" + str(column) + \",\" + str(row) +\")\", \"This is the basement, you see empty boxes and cages. There is dust and a weird smell in the air. There is an old door to the west that seems to be locked and another door on the east that is unlocked.\", row, column, total_rows, total_columns)\nclass Dungeon(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(Dungeon, self).__init__(\"Dungeon (\" + str(column) + \",\" + str(row) +\")\", \"You took a wrong turn and wound up in the dungeon. GAME OVER\", row, column, total_rows, total_columns)\nclass HiddenRoom(Room):\n def __init__(self, row, column, total_rows, total_columns):\n super(HiddenRoom, self).__init__(\"Hidden Room (\" + str(column) + \",\" + str(row) +\")\", \"Congratulations! You found the hidden room. You see diamonds and gold on a wooden table. YOU WIN!\", row, column, total_rows, total_columns)\n\ndef create_game(rows, columns):\n map = {}\n starting_column = 0\n starting_row = 0\n for row in range(0,rows):\n for column in range(0, columns):\n ### TODO: We need to randomly choose from a list of predefined rooms.\n ### Possible solution: Make a list of pre-created rooms, and slowly fill the map with them.\n ### We can even randomize the map by choosing a random location for each. If we do so\n ### We need to set the starting_column and starting_row to that of wherever the entrance room is.\n ### Ex: if entrance_room, set starting_column = column, starting_row = row\n ### If you want to attempt random, use randint and import 'from random import randint' at the top\n ### in order to not overwrite rooms, make sure you check if a room exists there already, if it does, run random again until you get an empty slot.\n ### Alternative method: Have a list of all possible room slots, randomly pick one for a room, and remove it from the list. Could be faster and less error prone.\n room = EntranceRoom(row, column, rows, columns)\n if map.get(column) == None:\n map[column] = {}\n map[column][row] = room\n return (map, (starting_column, starting_row))\n \n# Game Loop\ndone = False\ngame_map, starting_room = create_game(3,3)\ncurrent_room = game_map[starting_room[0]][starting_room[1]]\nwelcome_message()\nhelp_message()\nwhile(True):\n printNow(\"You have entered the '\" + current_room.get_name() + \"'\")\n printNow(current_room.get_description())\n command = requestString(\"What do you want to do? (Valid Directions: \" + \", \".join(current_room.get_move_directions()) + \")\").strip()\n result = command_parser(str(command))\n if result[0] == 'move':\n new_room_position = tuple([i1+i2 for i1, i2 in zip(current_room.get_position(), result[1])])\n ### TODO: Need to verify the direction is valid before moving. 
compare against get_move_directions.\n ### Maybe make a custom function which has hybrid capability between get_move_directions and the command parser\n ### for the north/south/east/west movement.\n current_room = game_map[new_room_position[0]][new_room_position[1]]\n if result[0] == 'exit':\n printNow(\"Thanks for playing!\")\n break\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"569175236","text":"import time\nfrom functools import wraps\n\n\nclass MeasureRuntime:\n def __init__(self, active_state):\n self.measure_active = active_state\n\n def __call__(self, func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if self.measure_active is False:\n return func(*args, **kwargs)\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n print(\n f\"'{func.__name__}' function running time {round((end - start), 2)}s\")\n return result\n return wrapper\n\n\n@MeasureRuntime(True)\ndef active_worker(delay_time):\n time.sleep(delay_time)\n\n\n@MeasureRuntime(False)\ndef non_active_worker(delay_time):\n time.sleep(delay_time)\n\n\nif __name__ == \"__main__\":\n active_worker(3)\n non_active_worker(3)\n","sub_path":"decorator/class_decorator_parameter.py","file_name":"class_decorator_parameter.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"28940844","text":"import logging\nimport pkgutil\nimport re\nimport typing\nfrom abc import ABCMeta, abstractmethod\nfrom functools import lru_cache\n\nfrom google.cloud import resource_manager\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\n\nfrom util.config_utils import is_copying_labels_from_project, iris_prefix\nfrom util.utils import cls_by_name, shorten, methods\n\nPLUGINS_MODULE = \"plugins\"\n\n\nclass Plugin(object, metaclass=ABCMeta):\n __proj_regex = re.compile(r\"[a-z]([-a-z0-9]*[a-z0-9])?\")\n # Underlying API max is 1000; avoid off-by-one errors\n # We send a batch when _BATCH_SIZE or more tasks are in it.\n _BATCH_SIZE = 990\n\n # For a class to know its subclasses is generally bad.\n # Here, the Plugin class also serves as a manager of its subclasses.\n # We could create a separate PluginManager but let's not get too Java-ish.\n subclasses = []\n\n def __init__(self):\n self._google_client = discovery.build(*self.discovery_api())\n self.__init_batch_req()\n\n @classmethod\n @abstractmethod\n def discovery_api(cls) -> typing.Tuple[str, str]:\n pass\n\n @classmethod\n def is_labeled_on_creation(cls) -> bool:\n \"\"\"\n Only a few classes are labeled on creation, and these classes should override this method.\n \"\"\"\n return True\n\n @lru_cache(maxsize=256)\n def _project_labels(self, project_id) -> typing.Dict:\n\n assert self.__proj_regex.match(\n project_id\n ), f\"Project ID is illegal: {project_id}\"\n try:\n client = resource_manager.Client()\n proj = client.fetch_project(project_id)\n labels = proj.labels or {} # Will be {} if empty, but playing it safe\n return labels\n except errors.HttpError as e:\n logging.exception(f\"Failed to get labels for project {project_id}: {e}\")\n return {}\n\n def __iris_labels(self, gcp_object) -> typing.Dict[str, str]:\n pfx = \"_gcp_\"\n\n def legalize_value(s):\n \"\"\"\n Only hyphens (-), underscores (_), lowercase characters,\n and numbers are allowed in label values. International characters are allowed.\n \"\"\"\n label_chars = re.compile(r\"[\\w\\d_-]\") # cached\n return \"\".join(c if label_chars.match(c) else \"_\" for c in s).lower()[:62]\n\n def value(func, gcp_obj):\n return legalize_value(func(gcp_obj))\n\n def key(func) -> str:\n return iris_prefix() + \"_\" + func.__name__[len(pfx) :]\n\n ret = {key(f): value(f, gcp_object) for f in methods(self, pfx)}\n\n return ret\n\n def __batch_callback(self, request_id, response, exception):\n\n if exception is not None:\n logging.error(\n \"in __batch_callback(), %s\",\n exception,\n )\n\n def do_batch(self):\n \"\"\"In do_label, we loop over all objects. 
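Batching groups many label operations into a single HTTP request. 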
But for efficiency, we do not process\n them all at once, but rather gather objects and process them in batches of\n self._BATCH_SIZE as we loop; then process the remaining ones at the end of the loop\"\"\"\n try:\n self._batch.execute()\n except Exception as e:\n logging.exception(e)\n\n self.__init_batch_req()\n\n @abstractmethod\n def do_label(self, project_id):\n \"\"\"Label all objects of a type in a given project\"\"\"\n pass\n\n @abstractmethod\n def get_gcp_object(self, log_data):\n \"\"\"Parse logging data to get a GCP object\"\"\"\n pass\n\n @abstractmethod\n def label_one(self, gcp_object: typing.Dict, project_id: str):\n \"\"\"Tag a single new object based on its description that comes from a log line\"\"\"\n pass\n\n @abstractmethod\n def api_name(self):\n pass\n\n @abstractmethod\n def method_names(self):\n pass\n\n @classmethod\n def init(cls):\n def load_plugin_class(name):\n module_name = PLUGINS_MODULE + \".\" + name\n __import__(module_name)\n assert name == name.lower(), name\n plugin_cls = cls_by_name(PLUGINS_MODULE + \".\" + name + \".\" + name.title())\n return plugin_cls\n\n for _, module, _ in pkgutil.iter_modules([PLUGINS_MODULE]):\n plugin_class = load_plugin_class(module)\n Plugin.subclasses.append(plugin_class)\n\n assert Plugin.subclasses, \"No plugins defined\"\n\n @staticmethod\n def create_plugin(plugin_name: str) -> \"Plugin\":\n cls = cls_by_name(\n PLUGINS_MODULE + \".\" + plugin_name.lower() + \".\" + plugin_name\n )\n plugin = cls()\n return plugin\n\n def _build_labels(self, gcp_object, project_id):\n \"\"\"\n :return dict including original labels, project labels (if the system is configured to add those)\n and new labels. But if that would result in no change, return None\n \"\"\"\n\n original_labels = gcp_object[\"labels\"] if \"labels\" in gcp_object else {}\n project_labels = (\n self._project_labels(project_id) if is_copying_labels_from_project() else {}\n )\n iris_labels = self.__iris_labels(gcp_object)\n all_labels = {**iris_labels, **project_labels, **original_labels}\n if \"goog-gke-node\" in original_labels:\n # We do not label GKE resources. (TODO This is really just instances and disks, and so should be pushed to a hook method)\n logging.info(\n f\"{self.__class__.__name__}, skip labeling GKE object {gcp_object.get('name')}\"\n )\n return None\n elif all_labels == original_labels:\n # Skip labeling because no change\n return None\n else:\n labels = {\"labels\": all_labels}\n fingerprint = gcp_object.get(\"labelFingerprint\", \"\")\n if fingerprint:\n labels[\"labelFingerprint\"] = fingerprint\n\n return labels\n\n def _name_after_slash(self, gcp_object):\n return self.__name(gcp_object, separator=\"/\")\n\n def _name_no_separator(self, gcp_object):\n return self.__name(gcp_object, separator=\"\")\n\n def __name(self, gcp_object, separator=\"\"):\n try:\n name = gcp_object[\"name\"]\n if separator:\n index = name.rfind(separator)\n name = name[index + 1 :]\n return name\n except KeyError as e:\n logging.exception(e)\n return None\n\n def __init_batch_req(self):\n self.counter = 0\n self._batch = self._google_client.new_batch_http_request(\n callback=self.__batch_callback\n )\n","sub_path":"plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"76198374","text":"#Imported necessary packages.\r\nimport os\r\nimport codecs\r\nimport pandas as pd\r\nimport numpy as np\r\nimport spacy \r\nimport pickle\r\nimport itertools as it\r\nimport seaborn as sns\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nfrom gensim.models import Phrases\r\nfrom gensim.models.word2vec import LineSentence\r\nimport os\r\nfrom gensim.corpora import Dictionary, MmCorpus\r\nfrom gensim.models.ldamulticore import LdaMulticore\r\nimport pyLDAvis\r\nimport pyLDAvis.gensim\r\nimport warnings\r\nfrom gensim.models import Word2Vec\r\nimport datetime\r\n\r\n#Read in Yelp reviews\r\nyelp = pd.read_csv('yelp.csv')\r\nyelp['business_categories'] = yelp['business_categories'].fillna('remove')\r\ndf = yelp[yelp['business_categories'].str.contains('Restaurants')]\r\n\r\n#Set working directory to where the data and files are stored.\r\ndirectory = 'C:\\\\Users\\\\e2slp2f\\\\.spyder-py3\\\\new_text_project\\\\'\r\n\r\n#Load 'spacy' in to read words in English.\r\nnlp = spacy.load('en')\r\n\r\n#Define function to split data into training and test data.\r\ndef test_vs_train(df,col):\r\n split = int(len(df[col])*.70)\r\n train = df[:split]\r\n test = df[split:]\r\n split_data = {'train':train,'test':test}\r\n return split_data\r\n\r\n#Split the data using previously defined function.\r\nsplit_data = test_vs_train(df,'text')\r\ntraining = split_data['train']\r\ntest = split_data['test']\r\n\r\n\r\n#Define function in order to identify punctuation and spaces.\r\ndef punct_space_removal(token):\r\n return token.is_punct or token.is_space\r\n\r\n#Define a function that normalizes basic text, removes punctuation, spaces and \r\n#stem of words. Function also tuples the normalized data with the original \r\n#data as to continue carrying forward the stars, original text, and business \r\n#name.Return normalized data frame.\r\ndef normalize(df,col,col2,col3):\r\n norms = []\r\n for a,b,c in zip(df[col],df[col2],df[col3]):\r\n try:\r\n parsed = nlp(str(a))\r\n except:\r\n pass\r\n for num,sentence in enumerate(parsed.sents):\r\n norms.append(tuple([u' '.join([token.lemma_ for token in sentence\r\n if not punct_space_removal(token)]),b,c]))\r\n norms_df = pd.DataFrame(norms)\r\n norms_df.columns = ['Normalized_Text','Stars','Business_Name']\r\n return norms_df\r\n\r\n#Read in a 'pickled' file. \r\ntrain_open = open(os.path.join(directory,\"normalize.pickle\"),\"rb\")\r\ntrain_new = pickle.load(train_open)\r\ntrain_open.close()\r\n\r\n\r\ntext = train_new['Normalized_Text']\r\n\r\n#Creates a file containing first part of phrasing.\r\nunigram_sent_path = os.path.join(directory,'unigram.txt')\r\n\r\nif 0 ==1:\r\n#Make this if statement true (0 == 0) if you want to run code.\r\n with codecs.open(unigram_sent_path,'w',encoding='utf-8') as f:\r\n for sentence in text:\r\n f.write(sentence + '\\n')\r\n \r\n#Reading in more 'pickle' files.\r\nuni_open = open(os.path.join(directory,\"unigram.pickle\"), \"rb\")\r\nuni_sentence = pickle.load(uni_open)\r\nuni_open.close()\r\n\r\nbigrm_mdl_path = os.path.join(directory,'bigram.txt')\r\n\r\nif 0 == 1:\r\n#Make this if statement true (0 == 0) if you want to run code.\r\n bigram_model = Phrases(uni_sentence)\r\n bigram_model.save(bigrm_mdl_path)\r\n \r\n#More pickles! 
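Reload the saved bigram Phrases model from disk.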
\r\nbigram_open = open(os.path.join(directory,\"bigram.pickle\"),\"rb\")\r\nbigram_model = pickle.load(bigram_open)\r\nbigram_open.close()\r\n\r\nbigrm_sentences_fp = os.path.join(directory,'bigrm_sentences_all.txt')\r\n\r\nif 0 == 1:\r\n#Make this if statement true (0 == 0) if you want to run code.\r\n with codecs.open(bigrm_sentences_fp,'w',encoding='utf_8')as f:\r\n for uni_sent in uni_sentence:\r\n bigram_sentence = u' '.join(bigram_model[uni_sent])\r\n f.write(bigram_sentence + '\\n')\r\n\r\n#Apply 'LineSentence' to bigrm_sentences_fp to break each review into individual\r\n#sentences.\r\nbigram_sentences = LineSentence(bigrm_sentences_fp)\r\n\r\n\r\ntrigram_model_pth = os.path.join(directory,'trigram_model_all.txt')\r\n\r\nif 0 == 1:\r\n \r\n#Make this if statement true (0 == 0) if you want to run code.\r\n trigram_model = Phrases(bigram_sentences)\r\n trigram_model.save(trigram_model_pth)\r\n \r\n\r\ntrigram_open = open(os.path.join(directory,\"trigram.pickle\"),\"rb\")\r\ntrigram_model = pickle.load(trigram_open)\r\ntrigram_open.close()\r\n\r\ntrigram_sentences_pth = os.path.join(directory,'trigram_sentences_all.txt')\r\n\r\nif 0 == 1:\r\n \r\n#Make this if statement true (0 == 0) if you want to run code\r\n with codecs.open(trigram_sentences_pth, 'w',encoding='utf_8') as f:\r\n for bigram_sentence in bigram_sentences:\r\n trigram_sentence = u' '.join(trigram_model[bigram_sentence])\r\n f.write(trigram_sentence +'\\n')\r\n\r\n#Apply 'LineSentence' to trigram_sentences_pth to break each review into \r\n#individual sentences.\r\ntrigram_sentences = LineSentence(trigram_sentences_pth)\r\n\r\n#Define function that merges three previously created dataframes with the \r\n#original training data. Shows the difference in the uni,bi and trigram words \r\n#compared to the original text\r\ndef normal_frame_check(uni,bi,tri,df):\r\n u,br,t = [],[],[]\r\n for a,b,c in zip(uni,bi,tri):\r\n u.append(u' '.join(a))\r\n br.append(u' '.join(b))\r\n t.append(u' '.join(c))\r\n mrg_df = pd.merge(pd.merge(pd.DataFrame(u),pd.DataFrame(br),\r\n left_index=True,right_index=True),pd.DataFrame(t),\r\n left_index=True,right_index=True)\r\n \r\n mrg_df.columns = ['Unigram_Sent','Bigram_Sent','Trigram_Sent']\r\n final_df = pd.merge(df,mrg_df,left_index=True,right_index=True)\r\n return final_df\r\nframe_comp = normal_frame_check(uni_sentence,\r\n bigram_sentences,\r\n trigram_sentences,\r\n train_new)\r\n\r\n#Defined function to identify \"stop words\"\r\ndef stop_words(token):\r\n return token.is_stop\r\n\r\n#Applying normalization to entire review as opposed to each sentence\r\ntrigram_reviews_path = os.path.join(directory,'trigram_reviews.txt')\r\n\r\n#Define a function that applies normalization technique used before on the \r\n#entire review\r\ndef review_data(df,col):\r\n if 0 == 1:\r\n with codecs.open(trigram_reviews_path,'w',encoding='utf_8')as f:\r\n for a in df[col]:\r\n parsed=nlp(a)\r\n uni_review = [token.lemma_ for token in parsed\r\n if not punct_space_removal(token)]\r\n \r\n bi_review = bigram_model[uni_review]\r\n tri_review = trigram_model[bi_review]\r\n tri_review = [t for t in tri_review\r\n if t not in set(('and','or','not','but','to'))]\r\n \r\n tri_review = u' '.join(tri_review)\r\n f.write(tri_review + '\\n')\r\n \r\nprint('part 1 complete')","sub_path":"analysis_part_1_clean.py","file_name":"analysis_part_1_clean.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"244314200","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.http import HttpResponseBadRequest, JsonResponse\nfrom django.core.urlresolvers import reverse_lazy\n\nimport json\n\nfrom datetime import date\nfrom calendar import monthrange\n\nfrom .models import ThirdParty, PaymentMode, Expense\n\nfrom .forms import ExpenseForm, ThirdPartyForm, PaymentModeForm\n\nfrom django.http import HttpResponse\n\nfrom reportlab.lib import colors\nfrom reportlab.platypus import SimpleDocTemplate, Table, TableStyle\nfrom reportlab.lib.pagesizes import letter\n\ndef quarter_range():\n \"\"\"\n return the start date and the end date for the current quarter of the year\n \"\"\"\n quarter = [[1,3],[4,6],[7,9],[10,12]]\n \n start_date = date(date.today().year,quarter[(date.today().month-1)//3][0],1)\n \n end_date = date(date.today().year, quarter[(date.today().month-1)//3][1], monthrange(date.today().year,quarter[(date.today().month-1)//3][1])[1])\n\n return start_date, end_date\n\n\nclass AjaxableResponseMixin(object):\n \"\"\"\n Mixin to add AJAX support to a form.\n Must be used with an object-based FormView (e.g. CreateView)\n \"\"\"\n def form_invalid(self, form):\n response = super(AjaxableResponseMixin, self).form_invalid(form)\n if self.request.is_ajax():\n return JsonResponse(form.errors, status=400)\n else:\n return response\n\n def form_valid(self, form):\n # We make sure to call the parent's form_valid() method because\n # it might do some processing (in the case of CreateView, it will\n # call form.save() for example).\n response = super(AjaxableResponseMixin, self).form_valid(form)\n if self.request.is_ajax():\n data = {\n 'pk': self.object.pk,\n }\n return JsonResponse(data)\n else:\n return response\n\nclass WithNameAjaxableResponseMixin(object):\n \"\"\"\n Mixin to add AJAX support to a form.\n Must be used with an object-based FormView (e.g. 
CreateView)\n \"\"\"\n def form_invalid(self, form):\n response = super(WithNameAjaxableResponseMixin, self).form_invalid(form)\n if self.request.is_ajax():\n return JsonResponse(form.errors, status=400)\n else:\n return response\n\n def form_valid(self, form):\n # We make sure to call the parent's form_valid() method because\n # it might do some processing (in the case of CreateView, it will\n # call form.save() for example).\n response = super(WithNameAjaxableResponseMixin, self).form_valid(form)\n if self.request.is_ajax():\n data = {\n 'pk': self.object.pk,\n 'name': self.object.name,\n }\n return JsonResponse(data)\n else:\n return response\n\nclass ThirdPartyCreate(WithNameAjaxableResponseMixin,CreateView):\n \n model = ThirdParty\n form_class = ThirdPartyForm\n template_name = 'simplecost/thirdparty_create_form.html'\n \n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ThirdPartyCreate, self).dispatch(*args, **kwargs)\n\nclass PaymentModeCreate(WithNameAjaxableResponseMixin,CreateView):\n \n model = PaymentMode\n form_class = PaymentModeForm\n template_name = 'simplecost/paymentmode_create_form.html'\n \n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(PaymentModeCreate, self).dispatch(*args, **kwargs)\n\nclass ExpenseListView(ListView):\n \"\"\"\n Display a list of expenses for the requesting user\n The default queryset is all the values\n The user can filter values, their choice is stored in a session variable\n \"\"\"\n \n model = Expense\n context_object_name = 'expenses'\n \n def get_queryset(self, *args, **kwargs):\n \n if not 'filterexpense' in self.request.session:\n self.request.session['filterexpense'] = 'All'\n self.request.session['filterexpensemonth'] = date.today().month\n self.request.session['filterexpenseyear'] = date.today().year\n \n if not self.request.GET.get(\"filter\"):\n \n filter_expense = self.request.session['filterexpense']\n \n else:\n \n filter_expense = self.request.GET.get(\"filter\")\n \n if filter_expense == \"All\":\n \n queryset = Expense.objects.filter(property_of=self.request.user).order_by('-date_expense')\n self.request.session['filterexpense'] = 'All'\n self.request.session['filterexpensemonth'] = date.today().month\n self.request.session['filterexpenseyear'] = date.today().year\n \n elif filter_expense == \"This month\":\n \n queryset = Expense.objects.filter(property_of=self.request.user,date_expense__year=date.today().year,date_expense__month=date.today().month).order_by('-date_expense')\n self.request.session['filterexpense'] = 'This month'\n self.request.session['filterexpensemonth'] = date.today().month\n self.request.session['filterexpenseyear'] = date.today().year\n \n elif filter_expense == \"This quarter\":\n \n queryset = Expense.objects.filter(property_of=self.request.user,date_expense__range=quarter_range()).order_by('-date_expense')\n self.request.session['filterexpense'] = 'This quarter'\n self.request.session['filterexpensemonth'] = date.today().month\n self.request.session['filterexpenseyear'] = date.today().year\n \n elif filter_expense == \"Previous month\":\n \n if not self.request.is_ajax():\n \n request_year = self.request.session['filterexpenseyear']\n request_month = self.request.session['filterexpensemonth']\n \n elif self.request.session['filterexpensemonth'] == 1:\n \n request_month = 12\n request_year = self.request.session['filterexpenseyear'] - 1\n \n else:\n \n request_year = self.request.session['filterexpenseyear']\n request_month = 
self.request.session['filterexpensemonth'] - 1\n \n queryset = Expense.objects.filter(property_of=self.request.user,date_expense__year=request_year,date_expense__month=request_month).order_by('-date_expense')\n self.request.session['filterexpense'] = 'Previous month'\n self.request.session['filterexpenseyear'] = request_year\n self.request.session['filterexpensemonth'] = request_month\n \n \n elif filter_expense == \"Next month\":\n \n if not self.request.is_ajax():\n \n request_year = self.request.session['filterexpenseyear']\n request_month = self.request.session['filterexpensemonth']\n \n elif self.request.session['filterexpensemonth'] == 12:\n \n request_month = 1\n request_year = self.request.session['filterexpenseyear'] + 1\n \n else:\n \n request_year = self.request.session['filterexpenseyear']\n request_month = self.request.session['filterexpensemonth'] + 1\n \n queryset = Expense.objects.filter(property_of=self.request.user,date_expense__year=request_year,date_expense__month=request_month).order_by('-date_expense')\n self.request.session['filterexpense'] = 'Next month'\n self.request.session['filterexpenseyear'] = request_year\n self.request.session['filterexpensemonth'] = request_month\n \n return queryset\n \n def get_template_names(self, *args, **kwargs):\n \n if self.request.method == 'GET' and self.request.is_ajax():\n \n template_name = 'simplecost/expense_list_table.html'\n\n else:\n \n template_name = 'simplecost/expense_list.html'\n \n return template_name\n \n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ExpenseListView, self).dispatch(*args, **kwargs)\n\nclass ExpenseCreate(AjaxableResponseMixin, CreateView):\n \"\"\"\n View to create an expense. Works with ajax.\n \"\"\"\n \n model = Expense\n form_class = ExpenseForm\n template_name = 'simplecost/expense_create_form.html'\n \n def form_valid(self, form):\n form.instance.property_of = self.request.user\n return super(ExpenseCreate, self).form_valid(form)\n \n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ExpenseCreate, self).dispatch(*args, **kwargs)\n\nclass ExpenseUpdate(AjaxableResponseMixin, UpdateView):\n \"\"\"\n View to update an Expense. Works with ajax.\n Use default template expense_form.html\n \"\"\"\n \n model = Expense\n form_class = ExpenseForm\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ExpenseUpdate, self).dispatch(*args, **kwargs)\n \nclass ExpenseDelete(AjaxableResponseMixin, DeleteView):\n \"\"\"\n View to delete an Expense. 
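Restricted to logged-in users. 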
Works with ajax.\n Use default template expense_confirm_delete.html\n \"\"\"\n \n model = Expense\n success_url = reverse_lazy('expense-list')\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ExpenseDelete, self).dispatch(*args, **kwargs)\n \ndef print_it(request):\n \"\"\"\n Create a pdf file with the list of expenses,\n ordered by date and filtered according to the view's filter settings\n \"\"\"\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"expenses.pdf\"'\n\n # Create the PDF object, using the response object as its \"file.\"\n p = SimpleDocTemplate(response, pagesize=letter)\n \n if request.GET.get(\"filter\") == \"This month\":\n \n expenses = Expense.objects.filter(property_of=request.user,date_expense__year=date.today().year,date_expense__month=date.today().month).order_by('-date_expense')\n \n elif request.GET.get(\"filter\") == \"This quarter\":\n \n expenses = Expense.objects.filter(property_of=request.user,date_expense__range=quarter_range()).order_by('-date_expense')\n \n elif request.GET.get(\"filter\") == \"Previous month\" or request.GET.get(\"filter\") == \"Next month\":\n \n request_year = request.session['filterexpenseyear']\n request_month = request.session['filterexpensemonth']\n \n expenses = Expense.objects.filter(property_of=request.user,date_expense__year=request_year,date_expense__month=request_month).order_by('-date_expense')\n \n \n else:\n \n expenses = Expense.objects.filter(property_of=request.user).order_by('date_expense')\n \n # container for the 'Flowable' objects\n elements = []\n \n # container for the table content\n table_data = []\n \n table_data.append(['Date', 'Third party', 'Amount','Payment mode','Notes'])\n \n for i, expense in enumerate(expenses):\n # Add a row to the table\n table_data.append([expense.date_expense, expense.third_party, str(expense.amount) + ' €',expense.payment_mode,expense.notes])\n \n # Create the table\n expense_table = Table(table_data)\n \n # Add grid and font for the table \n expense_table.setStyle(TableStyle([('ALIGN',(0,0),(-1,-1),'CENTER'),\n ('VALIGN',(0,0),(-1,-1),'MIDDLE'),\n ('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),\n ('BOX', (0,0), (-1,-1), 0.25, colors.black),\n ('FONTNAME',(0,0),(-1,-1),'Courier'),\n ]))\n \n elements.append(expense_table)\n \n p.build(elements)\n \n return response","sub_path":"simplecost/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"286858587","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\nimport os\nimport sys\nfrom flask import Flask, request, redirect, url_for\nfrom utils import *\nfrom price_interfaces import RetUtil, PriceInterfaces\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\n\n# gevent\nfrom gevent import monkey\nfrom gevent.pywsgi import WSGIServer\nmonkey.patch_all()\n# gevent end\n\n\napp = Flask(__name__)\nlogger = None\nprice_if = None\n\n\n# get hotel prices\n@app.route('/get_hotel_prices', methods=['GET', 'POST'])\ndef get_hotel_prices():\n resp = RetUtil.return_json(RetUtil.unknown_error)\n begin = time.time()\n supplier_id = 0\n hotel_id = 0\n try:\n # get request param\n\n request_param = json.loads(request.data)\n # print(request_param)\n # logger.info(request_param)\n supplier_id = request_param['supplier_id']\n hotel_id = request_param['hotel_id']\n resp = price_if.get_hotel_prices(request_param)\n except:\n logger.error(traceback.format_exc())\n finally:\n end = time.time()\n tmp_data = json.loads(resp)\n logger.info('get_hotel_prices: {:>5}--{:<12} '\n 'cost--[{:<5.1f}]s {}'.format(supplier_id, hotel_id,\n (end - begin),\n tmp_data['desc']))\n return resp\n\n\n# Invalid request\n@app.errorhandler(404)\ndef page_not_found(e):\n return RetUtil.return_json(RetUtil.invalid_request, e)\n\n\n# Internal server error\n@app.errorhandler(500)\ndef internal_server_error(e):\n return RetUtil.return_json(RetUtil.inner_error, e)\n\n\ndef __main__():\n # Initialize the logger\n global logger\n global price_if\n logger = ProjectUtil.get_project_logger(\"price_server\")\n if not logger:\n print(\"init logger failed\")\n sys.exit(1)\n price_if = PriceInterfaces(logger)\n\n # Start the service\n try:\n # Write the pid file\n pid_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'bin/price_server.pid')\n fp = open(pid_file, 'wb')\n pid_str = str(os.getpid())\n print(pid_str)\n fp.write(pid_str.encode('utf-8'))\n fp.close()\n logger.info(\"write pid[%s] into file[%s]\" % (pid_str, pid_file))\n # Start the server only after the pid has been written\n http_server = WSGIServer(('0.0.0.0', 3722), app)\n http_server.serve_forever()\n #app.run(host='0.0.0.0', port=3722, debug=False)\n except:\n logger.error(traceback.format_exc())\n sys.exit(traceback.format_exc())\n\n\nif __name__ == '__main__':\n __main__()","sub_path":"crawlers/price_server/price_server.py","file_name":"price_server.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"64997620","text":"# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration\n\nfrom __future__ import print_function\n\nimport os.path\nimport subprocess\nimport re\nimport shlex\n\ndef ELG_prun(sample) :\n\n try:\n from pandatools import PandaToolsPkgInfo # noqa: F401\n except ImportError:\n print (\"prun needs additional setup, try:\")\n print (\" lsetup panda\")\n return 99\n\n cmd = [\"prun\"]\n\n #These are options that can be set by the user\n opts = ['destSE',\n 'site',\n 'cloud',\n 'rootVer',\n 'cmtConfig',\n 'excludedSite',\n 'nGBPerJob',\n 'memory',\n 'maxCpuCount',\n 'nFiles',\n 'nFilesPerJob',\n 'nJobs',\n 'maxFileSize',\n 'maxNFilesPerJob',\n 'addNthFieldOfInDSToLFN',\n 'cpuTimePerEvent',\n 'maxWalltime',\n 'voms',\n 'workingGroup',\n 'tmpDir']\n\n #These are options that can be set by the user\n switches = ['useChirpServer',\n 'express',\n 'noSubmit',\n 'skipScout',\n 'disableAutoRetry',\n 'useNewCode',\n 'official',\n 'mergeOutput',\n 'useRootCore',\n 'useAthenaPackages',\n 'useContElementBoundary']\n\n for opt in opts :\n arg = sample.getMetaDouble('nc_' + opt, -1) \n if abs(arg + 1) > 1e-6 :\n cmd += [\"--\" + opt + \"=\" + str(int(round(arg)))]\n else :\n arg = sample.getMetaString('nc_' + opt)\n if len(arg) :\n cmd += [\"--\" + opt + \"=\" + arg]\n \n for switch in switches :\n arg = sample.getMetaDouble('nc_' + switch, 0)\n if arg != 0 :\n cmd += [\"--\" + switch]\n else :\n arg = sample.getMetaString('nc_' + switch)\n if len(arg) :\n if arg != \"False\" and arg != \"false\" and arg != \"FALSE\" :\n cmd += [\"--\" + switch]\n\n #These options should normally not be touched by the user\n internalOpts = ['exec',\n 'inDS',\n 'outDS',\n 'outputs',\n 'writeInputToTxt',\n 'match'] \n\n for opt in internalOpts :\n cmd += [\"--\" + opt + \"=\" + sample.getMetaString('nc_' + opt)]\n\n if sample.getMetaDouble('nc_mergeOutput', 1) == 0 or sample.getMetaString('nc_mergeOutput').upper() == 'FALSE' : \n #don't set merge script \n pass\n else :\n cmd += [\"--mergeScript=\" + sample.getMetaString('nc_mergeScript')]\n\n if len(sample.getMetaString('nc_EventLoop_SubmitFlags')) :\n cmd += shlex.split (sample.getMetaString('nc_EventLoop_SubmitFlags'))\n\n if sample.getMetaDouble('nc_showCmd', 0) != 0 :\n print (cmd)\n\n if not os.path.isfile('jobcontents.tgz') : \n import copy\n dummycmd = copy.deepcopy(cmd)\n dummycmd += [\"--outTarBall=jobcontents.tgz\"]\n dummycmd += [\"--extFile=jobdef.root,runjob.sh\"]\n dummycmd += [\"--noSubmit\"]\n\n try:\n out = subprocess.check_output(dummycmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e: \n print (\"Command:\")\n print (e.cmd)\n print (\"failed with return code \" , e.returncode)\n print (\"output was:\")\n print (e.output)\n return 1\n\n cmd += [\"--inTarBall=jobcontents.tgz\"]\n\n out = \"\"\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e: \n print (\"Command:\")\n print (e.cmd)\n print (\"failed with return code \", e.returncode)\n print (\"output was:\")\n print (e.output)\n return 2\n\n jediTaskID = 0\n try:\n line = re.findall(r'TaskID=\\d+', out)[0]\n jediTaskID = int(re.findall(r'\\d+', line)[0])\n except IndexError:\n print (out)\n return 3\n\n return jediTaskID\n","sub_path":"PhysicsAnalysis/D3PDTools/EventLoopGrid/data/ELG_prun.py","file_name":"ELG_prun.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"506223626","text":"import clr\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\n\ndef GetFamilyName(item):\n\tif hasattr(item, \"FamilyName\"): return item.FamilyName\n\telse: return None\n\nitems = UnwrapElement(IN[0])\n\nif isinstance(IN[0], list): OUT = [GetFamilyName(x) for x in items]\nelse: OUT = GetFamilyName(items)","sub_path":"nodes/2.x/python/FamilyType.FamilyName.py","file_name":"FamilyType.FamilyName.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"479461009","text":"\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\n'''tf keras 高层API单机模式示例代码'''\ndata = np.random.random((1000, 32))\nlabels = np.random.random((1000, 10))\n\nval_data = np.random.random((100, 32))\nval_labels = np.random.random((100, 10))\n\n# 定义常量,用于创建数据流图\nflags = tf.app.flags\n# 因网络问题,这里将数据手动下载到项目指定目录下\nflags.DEFINE_string(\"data_dir\", \"../data/mnist\",\n \"Directory for storing mnist data\")\nFLAGS = flags.FLAGS\n\n# 构建一个简单的全连接网络\nmodel = keras.Sequential()\n# Adds a densely-connected layer with 64 units to the model:\n# tf.keras.layers 用于构建一层\n# activation:设置层的激活函数。此参数由内置函数的名称指定,或指定为可调用对象。默认情况下,系统不会应用任何激活函数。\n# kernel_initializer 和 bias_initializer:创建层权重(核和偏差)的初始化方案。此参数是一个名称或可调用对象,默认为 \"Glorot uniform\" 初始化器。\n# kernel_regularizer 和 bias_regularizer:应用层权重(核和偏差)的正则化方案,例如 L1 或 L2 正则化。默认情况下,系统不会应用正则化函数。\nmodel.add(keras.layers.Dense(64, activation='relu'))\n# Add another:\nmodel.add(keras.layers.Dense(64, activation='relu'))\n# Add a softmax layer with 10 output units:\nmodel.add(keras.layers.Dense(10, activation='softmax'))\n\n# 配置模型的学习流程\nmodel.compile(optimizer=tf.train.AdamOptimizer(0.001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n# 模型训练,设置训练集,迭代次数,测试集等参数\nmodel.fit(x=data, y=labels, epochs=10, validation_data=(val_data, val_labels), steps_per_epoch=3, batch_size=32)\n\n# 保存模型至h5文件\nmodel.save(\"my_model.h5\")\nkeras.models.load_model(\"my_model.h5\")\n# 使用模型进行评估和预测\n# model.evaluate()\n# model.predict()\n","sub_path":"tf_local/tf-local-keras.py","file_name":"tf-local-keras.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"330591095","text":"import math\nimport random\nn=int(input(\"Cuantos clientes tienes o cuantos clientes llegaron?:\"))\nl=int(input(\"Cual es la media estadistica?:\"))\nsuma=0\nfor i in range(n):\n x = (-1/l)\n a = random.random()*0.9;\n xx = math.log(a)\n xxx = x * xx\n suma=suma+xxx\n print(\"tiempo de cada uno\",i,\"=\",\"-\",\"1/\",l,\"LN\",\"(\",a,\")\",\"=\",xxx,\"minutos\")\npromedio=suma/n\nprint(\"Tiempo total de operacion:\",suma,\"minutos\")\nprint(\"Tiempo promedio de atencion:\",promedio,\"minutos\")\n","sub_path":"distribucionexponencial.py","file_name":"distribucionexponencial.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"402922297","text":"import textwrap\n\nfrom async_pydevd.async_eval import async_eval\nfrom pytest import fixture, mark, raises\n\nfrom .utils import MyException, ctxmanager, generator, raise_exc, regular # noqa # isort:skip\n\ntry:\n import contextvars\nexcept ImportError:\n contextvars = None\n\n\npytestmark = mark.asyncio\n\n\n@mark.parametrize(\n \"expr,result\",\n [\n (\"10\", 10),\n (\"regular\", regular),\n (\"await regular()\", 10),\n (\"[i async for i in generator()]\", [*range(10)]),\n (\"async with ctxmanager():\\n 10\", 10),\n (\"await regular()\\nawait regular() * 2\", 20),\n (\"async for i in generator():\\n i * 2\", None),\n ],\n ids=[\n \"literal\",\n \"not-async\",\n \"await\",\n \"async-comprehension\",\n \"async-with\",\n \"multiline\",\n \"async-for\",\n ],\n)\nasync def test_async_eval(expr, result):\n assert async_eval(expr) == result\n\n\n@mark.parametrize(\n \"expr,result\",\n [\n (\"a = 20\", 20),\n (\"a = regular\", regular),\n (\"a = await regular()\", 10),\n (\"a = [i async for i in generator()]\", [*range(10)]),\n (\"async with ctxmanager():\\n a = 10\", 10),\n (\"async for i in generator():\\n a = i\", 9),\n ],\n ids=[\n \"literal\",\n \"not-async\",\n \"await\",\n \"async-comprehension\",\n \"async-with\",\n \"async-for\",\n ],\n)\nasync def test_async_eval_modify_locals(expr, result):\n a = None\n async_eval(expr)\n assert a == result\n\n\nasync def test_eval_raise_exc():\n with raises(MyException):\n async_eval(\"await raise_exc()\")\n\n\nasync def test_async_eval_dont_leak_internal_vars():\n _globals = _locals = {}\n async_eval(\"10\", _globals, _locals)\n\n assert not _globals\n assert not _locals\n\n\nif contextvars:\n ctx_var = contextvars.ContextVar(\"ctx_var\")\n\n\n@mark.skipif(\n contextvars is None,\n reason=\"contextvars is not available\",\n)\nclass TestContextVars:\n @fixture(autouse=True)\n def reset_var(self):\n ctx_var.set(0)\n\n def test_ctx_get(self):\n assert async_eval(\"ctx_var.get()\") == 0\n\n def test_ctx_set(self):\n async_eval(\"ctx_var.set(10)\")\n assert ctx_var.get() == 10\n\n # issue #7\n def test_ctx_var_reset(self):\n # fmt: off\n async_eval(textwrap.dedent(\"\"\"\n from asyncio import sleep\n token = ctx_var.set(10)\n await sleep(0) # switch to different task\n ctx_var.reset(token)\n \"\"\"))\n # fmt: on\n\n assert ctx_var.get() == 0\n","sub_path":"async-pydevd/tests/test_async_eval.py","file_name":"test_async_eval.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"161766980","text":"from kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import ObjectProperty, ListProperty\nfrom kivy.uix.popup import Popup\nfrom popups import ChoicePopup\nfrom functools import partial\n\n\nclass Root(BoxLayout):\n _selected_class = ObjectProperty()\n _selected_skill_set = ObjectProperty()\n active_students = ListProperty()\n active_skills = ListProperty()\n choice_popup = ObjectProperty()\n\n def __init__(self, students, classes, skills, skill_sets, **kwargs):\n super().__init__(**kwargs)\n self.student_list = students\n self.class_list = classes\n self.skill_list = skills\n self.skill_set_list = skill_sets\n self.selected_class = self.class_list[0]\n self.selected_skill_set = self.skill_set_list[0]\n self.screen_list = ['Accueil', 'Evaluation', 'Comportement']\n self.menu.drop_down.bind(on_select=self.select_screen)\n\n @property\n def selected_class(self):\n return self._selected_class\n\n @selected_class.setter\n def selected_class(self, value):\n self.active_students = [s for s in self.student_list\n if s.class_ == value.class_]\n self.start_screen.active_class_label.text = \"Classe : {}\".format(value.class_)\n self._selected_class = value\n\n @property\n def selected_skill_set(self):\n return self._selected_skill_set\n\n @selected_skill_set.setter\n def selected_skill_set(self, value):\n self.active_skills = [s for s in self.skill_list\n if s.set_name == value.set_name]\n self.start_screen.active_skill_set_label.text = \"Competences : {}\".format(value.set_name)\n self._selected_skill_set = value\n\n def _on_choice(self, item, attribute, item_list, callback,\n popup_inst, choice, btn_inst):\n self.choice_popup.dismiss()\n for x in item_list:\n if getattr(x, attribute) == choice:\n item = x\n return callback(item)\n\n def select_screen(self, instance, value):\n self.screen_manager.current = value\n self.menu.update_menu(instance, value, self.screen_list)\n\n def open_choice_popup(self, item, item_list, attribute, callback,\n title='Choisissez', size_hint_y=None):\n labels = [getattr(x, attribute) for x in item_list]\n content = ChoicePopup(labels=labels)\n __on_choice = partial(self._on_choice, item, attribute,\n item_list, callback)\n content.bind(on_choice=__on_choice)\n self.choice_popup = Popup(title=title, content=content,\n size_hint_y=size_hint_y, auto_dismiss=False)\n self.choice_popup.open()\n\n def clear_display_label(self):\n self.start_screen.display_label.clear_widgets()\n self.start_screen.display_header.clear_widgets()\n","sub_path":"root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"473259594","text":"# coding: utf-8\n# Benjamin Chetioui / Loïc Laisné / Clément Schreiner\n\nimport math\nimport unittest\nimport kmeans as myKm\nimport numpy as npy\nimport mds\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.K = 2\n self.dataset = [[1, 2, 3], [2, 2, 2],\n [10, 10, 10], [17, 15, 8]]\n self.expectedDimensions = 3\n self.result = myKm.kmeansDo(self.dataset, self.K)\n self.ordered = myKm.orderClusters(self.result[1])\n # Percentage of anomalies we want to get\n self.anomaliesP = 0.1\n self.anomalies = myKm.getAnomalies(self.ordered, self.anomaliesP)\n self.m = mds.lle(self.result[1], 1, 2)\n\n def tearDown(self):\n pass\n\n # Are all of our points LLE'd?\n def testLLE(self):\n self.assertEqual(len(self.m), self.K)\n result = 0\n for elem in self.m:\n result += len(elem)\n self.assertEqual(result, len(self.dataset))\n\n # Do we have the right number of centroids/clusters?\n def testClustersNumber(self):\n self.assertEqual(len(self.result[0]), self.K)\n\n # Do all the centroids contain the right number of dimensions?\n def testDimension(self):\n for centroid in self.result[0]:\n self.assertEqual(len(centroid), self.expectedDimensions)\n\n # Is the result what was expected for a simple dataset?\n def testResult(self):\n self.assertEqual(\n sorted(self.result[0]), [(1.5, 2.0, 2.5), (13.5, 12.5, 9.0)]\n )\n\n # Is each point associated to its nearest centroid? \n def testCorrectAssociation(self):\n for centroid in self.result[0]:\n for point in self.result[1][centroid]:\n # Initializing mini to the smallest safe value\n mini = float(\"inf\")\n current = npy.linalg.norm(\n npy.array(centroid)-npy.array(point)\n )\n for c_dest in self.result[0]:\n # DAYUM, THREE FORs => TRIFORCE\n mini = min(\n mini,\n npy.linalg.norm(\n npy.array(c_dest)-npy.array(point)\n )\n )\n # Finally checking if our algorithm works, fo' shizzle\n self.assertEqual(current, mini)\n\n # Are all the points assigned to a centroid?\n def testPoints(self):\n nb_points = 0\n for centroid in self.result[0]:\n for point in self.result[1][centroid]:\n nb_points = nb_points + 1\n self.assertEqual(nb_points, len(self.dataset))\n\n # Do we have the right number of anomalies?\n def testAnomalies(self):\n for centroid in self.result[0]:\n nb_points = 0\n for point in self.anomalies[centroid]:\n nb_points = nb_points + 1\n self.assertEqual(\n nb_points,\n int(\n math.ceil(\n len(\n self.result[1][centroid]\n )*self.anomaliesP\n )\n )\n )\n","sub_path":"psychic_spice/lib/test_units.py","file_name":"test_units.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"208989144","text":"# Autor: Ronaldo Estefano Lira Buendia\r\n# Programa que divide por medio de restas y de una lista encontrar el mayor numero.\r\n\r\ndef probarDivisiones(dividendo, divisor):\r\n div = 0\r\n residuo = dividendo\r\n while residuo >= divisor:\r\n residuo -= divisor\r\n div += 1\r\n x = (dividendo, \"/\", divisor, '=', div, \"sobra\", residuo)\r\n return x\r\n\r\n\r\ndef encontrarMayor():\r\n x = 0\r\n y = 0\r\n x = int(input(\"introduce tu valor[coloca -1 para salir]:\"))\r\n y = x\r\n while x != (-1):\r\n if x < 0 and x != -1:\r\n b =(\"tus numeros deben de ser positivos\")\r\n elif x > y:\r\n y = x\r\n x = int(input(\"introduce tu valor[coloca -1 para salir]:\"))\r\n if y == -1:\r\n b = (\"No hay numeros mayores\")\r\n else:\r\n b = (\"El mayor es\" ,y, \"\")\r\n return b\r\n\r\n\r\ndef main():\r\n opc = 1\r\n while opc != 0:\r\n print(\"\"\"Mision 07. Ciclos White \r\n Autor: Ronaldo Estefano Lira Buendia.\r\n Matricula: A01748428.\r\n 1.-Calcular divisores\r\n 2.-Encontrar el mayor')\r\n 3.-Salir\"\"\")\r\n opc = int(input(\"Teclea tu opcion: \"))\r\n if opc > 0 and opc < 4:\r\n if opc == 1:\r\n dividendo = int(input(\"Introduce tu dividendo: \"))\r\n divisor = int(input(\"Introduce tu divisor: \"))\r\n x = probarDivisiones(dividendo, divisor)\r\n print (x)\r\n else:\r\n if opc == 2:\r\n a = encontrarMayor()\r\n print(a)\r\n else:\r\n if opc==3:\r\n print(\"Hasta luego\")\r\n else:\r\n print(\"ERROR\")\r\n\r\nmain ()","sub_path":"Mision7..py","file_name":"Mision7..py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"220966438","text":"\nimport argparse, os, re\nimport socket\n\n\nsupported_jtag_hardware=['auto']\ntry:\n for file in os.listdir(\"assets/adapters\"):\n m=re.match(r\"^(.+)\\.cfg$\", file)\n if m:\n supported_jtag_hardware.append(m.group(1))\nexcept:\n pass\n\n\nparser = argparse.ArgumentParser(prog='sudo python .',\n description='Network recovery for your Toon.')\n\nparser.add_argument('--serial-port',\n metavar='PATH',\n help='The path of the serial port to use',\n default='/dev/serial0')\n\nparser.add_argument('--serverip',\n metavar='IP',\n help='The NFS server IP where the recovery image is located. Default is the IP adddress of the server running this script.',\n default=None)\n\nparser.add_argument('--gatewayip',\n metavar='IP',\n help='The gateway IP if DHCP does not work',\n default=None)\n\nparser.add_argument('--output-level',\n metavar='INFO|DEBUG',\n help='The level of output to print to the console',\n default=\"INFO\")\n\nparser.add_argument('--jtag-available', action='store_true', help='Indicates you have a JTAG debugger connected to your Toon\\'s JTAG headers')\nparser.add_argument('--jtag-hardware',\n metavar='TYPE',\n help='The JTAG debugger type that we\\'re working with. The default is to autodetect the JTAG debugger (which currently only works on Raspberry Pi). Supported values are: {}'.format(', '.join(supported_jtag_hardware)),\n default=\"auto\")\n\nparser.add_argument('--dont-check-uboot', action='store_true', help='Don\\'t check whether we can access the installer version of U-Boot before using JTAG to start up the custom one.')\n\nparser.add_argument('--boot-only', action='store_true', help='Don\\'t start recovery, just boot into the serial console')\n\n\nargs = parser.parse_args()\n\nimport logging\nlogging.basicConfig(level={\n \"INFO\":logging.INFO,\n \"DEBUG\":logging.DEBUG,\n}[args.output_level])\nlog = logging.getLogger(__name__)\n\ndef get_cpuinfo():\n info = {}\n with open('/proc/cpuinfo') as fo:\n for line in fo:\n name_value = [s.strip() for s in line.split(':', 1)]\n if len(name_value) != 2:\n continue\n name, value = name_value\n if name not in info:\n info[name]=[]\n info[name].append(value)\n return info\ndef find_rpi_version():\n try:\n revision = get_cpuinfo()['Revision'][0]\n return {\n \"Beta\": \"rpi1\",\n \"0002\": \"rpi1\",\n \"0003\": \"rpi1\",\n \"0004\": \"rpi1\",\n \"0005\": \"rpi1\",\n \"0006\": \"rpi1\",\n \"0007\": \"rpi1\",\n \"0008\": \"rpi1\",\n \"0009\": \"rpi1\",\n \"000d\": \"rpi1\",\n \"000e\": \"rpi1\",\n \"000f\": \"rpi1\",\n \"0010\": \"rpi1\",\n \"0011\": \"rpi1\",\n \"0012\": \"rpi1\",\n \"0013\": \"rpi1\",\n \"0014\": \"rpi1\",\n \"0015\": \"rpi1\",\n \"a01040\": \"rpi2\",\n \"a01041\": \"rpi2\",\n \"a21041\": \"rpi2\",\n \"a22042\": \"rpi2\",\n \"900021\": \"rpi1\",\n \"900032\": \"rpi1\",\n \"900092\": \"rpi1\",\n \"900093\": \"rpi1\",\n \"920093\": \"rpi1\",\n \"9000c1\": \"rpi1\",\n \"a02082\": \"rpi3\",\n \"a020a0\": \"rpi3\",\n \"a22082\": \"rpi3\",\n \"a32082\": \"rpi3\",\n \"a020d3\": \"rpi3\",\n\t \"a03111\": \"rpi4\",\n \"b03111\": \"rpi4\",\n \"c03111\": \"rpi4\",\n }[revision]\n except:\n pass\n return None\n\ndef detect_jtag_hardware():\n hardware=find_rpi_version()# or detect_usb_device() or detect_something_else()\n #TODO: implement more checks here\n if not hardware:\n raise Exception(\"Cannot autodetect jtag hardware\")\n return hardware\n\ndef get_ip_address():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n return s.getsockname()[0]\n\ndef main():\n\n 
log.info(\"Starting up...\")\n\n import recovery\n\n serial_path = args.serial_port\n jtag_available = args.jtag_available\n jtag_hardware = args.jtag_hardware\n check_current_bootloader = not args.dont_check_uboot\n boot_only = args.boot_only\n gatewayip = args.gatewayip\n serverip = args.serverip\n\n if jtag_hardware == \"auto\":\n jtag_hardware = detect_jtag_hardware()\n log.info(\"Detected JTAG hardware '{}'\".format(jtag_hardware))\n\n if serverip is None:\n serverip = get_ip_address()\n log.info(\"Setting server ip to {}\".format(serverip))\n\n import json\n params = {\n \"port\" : serial_path,\n \"has_jtag\" : jtag_available,\n \"check_uboot\" : check_current_bootloader,\n \"jtag_hardware\" : jtag_hardware,\n\t\"boot_only\" : boot_only,\n\t\"gatewayip\" : gatewayip,\n\t\"serverip\" : serverip \n }\n log.debug(json.dumps(params))\n recovery.Recover(**params).run()\n\nif __name__ == '__main__' :\n try:\n main()\n except Exception as e:\n if args.output_level==\"DEBUG\":\n raise\n else:\n log.fatal(str(e))\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"519205904","text":"#!/usr/bin/python3\nimport os\n# initialize asebamedulla in background and wait 0.3s to let\n# asebamedulla startup\nos.system(\"(asebamedulla ser:name=Thymio-II &) && sleep 0.3\")\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nfrom picamera import PiCamera\nfrom time import sleep\nimport dbus\nimport dbus.mainloop.glib\nfrom adafruit_rplidar import RPLidar\nfrom math import cos, sin, pi, floor\nimport threading\n\n\nprint(\"Starting robot\")\n\n#-----------------------init script--------------------------\ncamera = PiCamera()\n\ndef dbusError(self, e):\n # dbus errors can be handled here.\n # Currently only the error is logged. Maybe interrupt the mainloop here\n print('dbus error: %s' % str(e))\n\n\n# init the dbus main loop\ndbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n \n# get stub of the aseba network\nbus = dbus.SessionBus()\nasebaNetworkObject = bus.get_object('ch.epfl.mobots.Aseba', '/')\n \n# prepare interface\nasebaNetwork = dbus.Interface(\n asebaNetworkObject,\n dbus_interface='ch.epfl.mobots.AsebaNetwork'\n)\n \n# load the file which is run on the thymio\nasebaNetwork.LoadScripts(\n 'thympi.aesl',\n reply_handler=dbusError,\n error_handler=dbusError\n)\n\n#signal scanning thread to exit\nexit_now = False\n\n# Setup the RPLidar\nPORT_NAME = '/dev/ttyUSB0'\nlidar = RPLidar(None, PORT_NAME)\n#This is where we store the lidar readings\nscan_data = [0]*360\n#--------------------- init script end -------------------------\n\ndef testCamera():\n print(\"Camera test\")\n camera.start_preview()\n sleep(5)\n #we capture to openCV compatible format\n #you might want to increase resolution\n camera.resolution = (320, 240)\n camera.framerate = 24\n sleep(2)\n image = np.empty((240, 320, 3), dtype=np.uint8)\n camera.capture(image, 'bgr')\n cv2.imwrite('out.png', image) \n camera.stop_preview()\n print(\"saved image to out.png\")\n\ndef testThymio():\n left_wheel = 20\n right_wheel = 200\n asebaNetwork.SendEventName(\n 'motor.target',\n [left_wheel, right_wheel]\n )\n print(\"motor should be running now\")\n sleep(5)\n asebaNetwork.SendEventName(\n 'motor.target',\n [0, 0]\n )\n\n\n#NOTE: if you get adafruit_rplidar.RPLidarException: Incorrect descriptor starting bytes\n# try disconnecting the usb cable and reconnect again. That should fix the issue\ndef lidarScan():\n print(\"Starting background lidar scanning\")\n for scan in lidar.iter_scans():\n if(exit_now):\n return\n for (_, angle, distance) in scan:\n scan_data[min([359, floor(angle)])] = distance\n\nscanner_thread = threading.Thread(target=lidarScan)\nscanner_thread.daemon = True\nscanner_thread.start()\n\ndef testLidar():\n print(scan_data)\n\n#------------------ Main loop here -------------------------\n\ndef mainLoop():\n #do stuff\n print(scan_data) \n\n#------------------- Main loop end ------------------------\n\nif __name__ == '__main__':\n #testCamera()\n #testThymio()\n testLidar()\n try:\n while True:\n mainLoop()\n except KeyboardInterrupt:\n print(\"Stopping robot\")\n exit_now = True\n sleep(1)\n lidar.stop()\n lidar.disconnect()\n os.system(\"pkill -n asebamedulla\")\n print(\"asebamodulla killed\")\n","sub_path":"jonasTestFile.py","file_name":"jonasTestFile.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"211366784","text":"import hashlib\nimport os\nimport tempfile\nfrom zipfile import ZipFile\n\nfrom tqdm import tqdm\n\ntry:\n from urllib.request import urlretrieve\nexcept ImportError:\n # python 2\n from urllib import urlretrieve\n\nELEPHANT_TMP_DIR = os.path.join(tempfile.gettempdir(), \"elephant\")\n\n\nclass TqdmUpTo(tqdm):\n \"\"\"\n Provides `update_to(n)` which uses `tqdm.update(delta_n)`.\n Original implementation:\n https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py\n \"\"\"\n\n def update_to(self, b=1, bsize=1, tsize=None):\n \"\"\"\n b : int, optional\n Number of blocks transferred so far [default: 1].\n bsize : int, optional\n Size of each block (in tqdm units) [default: 1].\n tsize : int, optional\n Total size (in tqdm units). If [default: None] remains unchanged.\n \"\"\"\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n) # will also set self.n = b * bsize\n\n\ndef calculate_md5(fpath, chunk_size=1024 * 1024):\n md5 = hashlib.md5()\n with open(fpath, 'rb') as f:\n for chunk in iter(lambda: f.read(chunk_size), b''):\n md5.update(chunk)\n return md5.hexdigest()\n\n\ndef check_integrity(fpath, md5):\n if not os.path.exists(fpath) or md5 is None:\n return False\n return calculate_md5(fpath) == md5\n\n\ndef download(url, filepath=None, checksum=None, verbose=True):\n if filepath is None:\n filename = url.split('/')[-1]\n filepath = os.path.join(ELEPHANT_TMP_DIR, filename)\n if check_integrity(filepath, md5=checksum):\n return filepath\n folder = os.path.dirname(os.path.abspath(filepath))\n if not os.path.exists(folder):\n os.mkdir(folder)\n desc = \"Downloading '{url}' to '{filepath}'\".format(url=url,\n filepath=filepath)\n with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,\n desc=desc, disable=not verbose) as t:\n urlretrieve(url, filename=filepath, reporthook=t.update_to)\n return filepath\n\n\ndef unzip(filepath, outdir=ELEPHANT_TMP_DIR, verbose=True):\n with ZipFile(filepath) as zfile:\n zfile.extractall(path=outdir)\n if verbose:\n print(\"Extracted {filepath} to {outdir}\".format(filepath=filepath,\n outdir=outdir))\n","sub_path":"elephant/test/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"23219870","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom blog.forms import SignupForm\n\n\nclass SignUpTests(TestCase):\n def setUp(self):\n url = reverse('signup')\n self.url = url\n self.response = self.client.get(url)\n\n # def test_csrf(self):\n # self.assertContains(self.response, 'csrfmiddlewaretoken')\n\n def test_signup_invalid_post_data_empty_fields(self):\n response = self.client.post(self.url, {})\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '手机号不能为空')\n self.assertContains(response, '验证码不能为空')\n\n def test_signup_invalid_post_data(self):\n data = {'phone': 10876356745}\n response = self.client.post(self.url, data)\n form = response.context.get('form')\n self.assertContains(response, '请输入正确的手机号')\n self.assertTrue(form.errors)\n\n def test_contain_signup_form(self):\n form = self.response.context.get('form')\n self.assertIsInstance(form, SignupForm)","sub_path":"blog/tests/test_view_signup.py","file_name":"test_view_signup.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"507990129","text":"\"\"\"\nFile name:\t\tsieve.py\n\nDescription:\tImplementation of the Sieve of Eratosthenes\n\nAuthor:\t\t\tEli Howey\n\t\t\t\tWritten for Final Project\n\t\t\t\tMATH 56, Spring 2014\n\n\"\"\"\n\nfrom math import ceil, sqrt, log\nimport matplotlib.pyplot as plt\nimport time\n\n\n\ndef sieve(n):\n\t\"\"\"\n\tsieve(n)\n\tReturns a list of all primes less than n, collected using the Sieve of\n\t\tEratosthenes\n\t\"\"\"\n\t# Sieve runs from 2 to n, but more convenient from 0 to n\n\tsieve = [True] * n\n\tsieve[0] = sieve[1] = False\n\n\t# Run through elements in the sieve, crossing off multiples\n\tmax = int(ceil(sqrt(n))) + 1\n\tfor i in range(max):\n\t\tif sieve[i]:\n\t\t\tj = 2 * i\n\t\t\twhile j < n:\n\t\t\t\tsieve[j] = False\n\t\t\t\tj += i\n\n\t# Collect the primes from the sieve\n\tprimes = []\n\tfor i in range(n):\n\t\tif sieve[i]:\n\t\t\tprimes.append(i)\n\n\treturn primes\n\n\n\ndef factor_sieve(n):\n\t\"\"\"\n\tfactor_sieve(n)\n\tReturns a list called sieve, where sieve[i] is:\n\t\t1 if i is prime\n\t\tthe smallest prime factor of i if i is composite\n\t\t0 if i = 0,1 (neither prime nor composite)\n\n\tCan be used to factor any integer less than n through repeated division\n\t\tby the relevant entries in sieve\n\t\"\"\"\n\tn = int(n)\n\tsieve = [1] * n\t\t\t\t# Assume all numbers prime\n\tsieve[0] = sieve[1] = 0\n\n\tmax = int(ceil(sqrt(n))) + 1\n\tfor i in range(max):\n\t\t# If number is prime, record that prime as the smallest factor of its multiples (if applicable)\n\t\tif sieve[i] == 1:\n\t\t\tj = 2 * i\n\t\t\twhile j < n:\n\t\t\t\t# If a multiple does not have a smaller factor,\n\t\t\t\t# record the factor\n\t\t\t\tif sieve[j] == 1:\n\t\t\t\t\tsieve[j] = i\n\t\t\t\tj += i\n\n\treturn sieve\n\n\n\ndef trial_divide(n, sieve):\n\t\"\"\"\n\ttrial_divide(n, sieve)\n\tReturns a list containing the prime factorization of n, constructed\n\t\tby trial division\n\n\tNote: Having the factor sieve as an input allows for the sieve to be\n\t\tcalculated only once, instead of every trial division. The sieve must\n\t\trange from 1 to n.\n\t\"\"\"\n\tn = int(n)\n\tif len(sieve) <= n:\n\t\treturn \"Error: trial_divide: sieve does not contain n\"\n\n\tfactors = []\t\t\t\t# Factorization of n\n\n\t# Record the smallest prime factor of n, and divide n by that factor,\n\t# until n is prime\n\twhile sieve[n] != 1:\n\t\tf = sieve[n]\n\t\tfactors.append(f)\n\t\tn /= f\n\tfactors.append(n)\t\t# Append that prime factor\n\n\treturn (sorted(factors), len(factors))\n\n\n\ndef main():\n\tN = [pow(10,i) for i in range(1,7)]\t\t# Upper bounds\n\tT = []\t\t\t\t\t\t\t\t\t# Runtimes for each bound\n\n\tfor n in N:\n\t\tt = time.time()\n\t\tsieve(n)\n\t\tT.append(time.time() - t)\n\n\tplt.loglog(N,T)\n\tplt.xlabel('n')\n\tplt.ylabel('Runtime (s)')\n\tplt.show()\n\n\nmain()\n\n","sub_path":"Final Project/Code/sieve.py","file_name":"sieve.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"7948702","text":"import mb as monopoly\nfrom timer import *\nimport cProfile\nfrom random import shuffle, randint, uniform\n\n\ndef random_ordering():\n all_groups = [\"Brown\", \"Light Blue\", \"Pink\", \"Orange\",\n \"Red\", \"Yellow\", \"Green\", \"Dark Blue\",\n \"Utility\", \"Railroad\"]\n shuffle(all_groups)\n return tuple(all_groups)\n\n\ndef random_value():\n return randint(1, 600)\n\n\ndef random_values():\n return {\"Brown\": random_value(),\n \"Light Blue\": random_value(),\n \"Pink\": random_value(),\n \"Orange\": random_value(),\n \"Red\": random_value(),\n \"Yellow\": random_value(),\n \"Green\": random_value(),\n \"Dark Blue\": random_value(),\n \"Utility\": random_value(),\n \"Railroad\": random_value()}\n\n\ndef main(games_in_a_set=5000):\n game0 = monopoly.Game(cutoff=1000, trading_enabled=True)\n for j in range(40):\n thresh = j / 200\n winners = [0, 0, 0]\n for i in range(games_in_a_set):\n # Play game.\n player1 = monopoly.Player(1, buying_threshold=thresh, group_ordering=random_ordering())\n player2 = monopoly.Player(2, buying_threshold=uniform(0, 1), group_ordering=random_ordering())\n\n game0.new_players([player1, player2])\n results = game0.play()\n\n # Store length.\n winners[results['winner']] += 1\n\n print(winners, thresh)\n\n\ndef main2(games_in_a_set=100):\n game0 = monopoly.Game(cutoff=1000, trading_enabled=True)\n\n winners = [0, 0, 0]\n for i in range(games_in_a_set):\n # Play game.\n player1 = monopoly.Player(1, buying_threshold=500, group_ordering=random_ordering(), static_threshold=True)\n # player2 = monopoly.Player(2, buying_threshold=randint(1, 500), group_ordering=random_ordering(),static_threshold=True)\n player2 = monopoly.Player(2, buying_threshold=500, group_ordering=random_ordering(), static_threshold=True)\n\n game0.new_players([player1, player2])\n results = game0.play()\n\n # Store length.\n winners[results['winner']] += 1\n\n print(winners)\n\n\ndef go_record(games_in_a_set=1000):\n game0 = monopoly.Game(cutoff=1000, trading_enabled=False)\n go_record = []\n\n for i in range(games_in_a_set):\n # Play game.\n player1 = monopoly.Player(1, buying_threshold=100)\n player2 = monopoly.Player(2, buying_threshold=100)\n game0.new_players([player1, player2])\n results = game0.play()\n\n # Store length.\n go_record.extend(player1.go_record)\n\n print(sum(go_record) / len(go_record))\n\n\ndef main3(games_in_a_set=1000):\n for m1 in range(1, 21):\n for m2 in range(1, 21):\n game0 = monopoly.Game(cutoff=1000, trading_enabled=True, image_exporting=0, matrix1=m1, matrix2=m2)\n trade_count = []\n winners = [0, 0, 0]\n for i in range(games_in_a_set):\n # Play game.\n player1 = monopoly.Player(1, buying_threshold=100,\n # group_ordering=random_ordering(),\n dynamic_ordering=True,\n static_threshold=False\n )\n player2 = monopoly.Player(2, buying_threshold=100,\n group_ordering=[\"Railroad\", \"Light Blue\", \"Orange\", \"Pink\", \"Red\", \"Yellow\",\n \"Green\", \"Dark Blue\", \"Utility\", \"Brown\"],\n # group_ordering=random_ordering(),\n static_threshold=True)\n\n game0.new_players([player1, player2])\n results = game0.play()\n\n # Store length.\n winners[results['winner']] += 1\n trade_count.append(results['trade count'])\n\n print(winners, m1, m2)\n\n '''print(\"**\")\n print(\"trades:\",results['trade count'])\n for trade_pair in game0.trades:\n print(trade_pair[0].name, \"--\", trade_pair[1].name)'''\n\n '''print(\"avg. 
trades\", sum(trade_count) / games_in_a_set)\n print(\"max trades\", max(trade_count))\n print(\"min trades\", min(trade_count))'''\n\n\ndef best_ordering():\n return tuple([\"Railroad\", \"Light Blue\", \"Orange\", \"Pink\", \"Red\",\n \"Yellow\", \"Green\", \"Dark Blue\", \"Utility\", \"Brown\"])\n\n\ndef optimize(games_in_a_set=10000):\n game0 = monopoly.Game(cutoff=1000, trading_enabled=True, image_exporting=0)\n for c in range(-1000, 1, 100):\n trade_count = []\n winners = [0, 0, 0]\n for i in range(games_in_a_set):\n # Play game.\n player1 = monopoly.Player(1,\n dynamic_ordering=True,\n # group_ordering=random_ordering(),\n c=c,\n n=6,\n )\n player2 = monopoly.Player(2,\n group_ordering=best_ordering(),\n step_threshold=True,\n buying_threshold=1000\n )\n\n game0.new_players([player1, player2])\n results = game0.play()\n\n # Store length.\n winners[results['winner']] += 1\n trade_count.append(results['trade count'])\n\n print(winners, c, sum(trade_count) / games_in_a_set)\n # print(\"avg. trades\", sum(trade_count) / games_in_a_set)\n # print(\"max trades\", max(trade_count))\n #print(\"min trades\", min(trade_count))\n\n\nif __name__ == '__main__':\n timer()\n optimize()\n # cProfile.run('main2()', sort=1)\n timer()","sub_path":"testScript.py","file_name":"testScript.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"574091186","text":"# Write your code here\nfrom random import choice\nimport string\n\nword_list = [\"python\", \"java\", \"kotlin\", \"javascript\"]\nrandom_word = choice(word_list)\nlives = 8\nmsg = list(\"-\"*len(random_word))\nattempts = set()\ncontrol = \"play\"\n\nprint(\"H A N G M A N\")\nwhile control != \"exit\":\n control = input('Type \"play\" to play the game, \"exit\" to quit: ')\n if control == \"exit\":\n break\n\n while lives and \"-\" in msg:\n print()\n print(\"\".join(msg))\n letter = input(\"Input a letter: \")\n\n if len(letter) > 1:\n print(\"You should print a single letter\")\n\n elif letter not in string.ascii_lowercase:\n print(\"It is not an ASCII lowercase letter\")\n\n elif letter in attempts:\n print(\"You already typed this letter\")\n\n else:\n attempts.add(letter)\n if letter in set(random_word):\n if letter not in set(msg):\n for pos, char in enumerate(random_word):\n if char == letter:\n msg[pos] = letter\n else:\n print(\"No improvements\")\n lives -= 1\n else:\n print(\"No such letter in the word\")\n lives -= 1\n\n if control == \"play\":\n if \"-\" not in msg:\n print()\n print(\"\".join(msg))\n print(\"You guessed the word!\")\n print(\"You survived!\")\n\n else:\n print(\"You are hanged!\")\n","sub_path":"task/hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"235483774","text":"from typing import Any, Dict, cast\n\nfrom pytest import mark, param, raises\n\nfrom graphql.type import (\n GraphQLArgument,\n GraphQLDirective,\n GraphQLEnumType,\n GraphQLEnumValue,\n GraphQLField,\n GraphQLInputField,\n GraphQLInputObjectType,\n GraphQLInterfaceType,\n GraphQLObjectType,\n GraphQLScalarType,\n GraphQLSchema,\n GraphQLUnionType,\n)\n\ndummy_type = GraphQLScalarType(\"DummyScalar\")\n\nbad_extensions = [param([], id=\"list\"), param({1: \"ext\"}, id=\"non_string_key\")]\n\n\ndef bad_extensions_msg(name: str) -> str:\n return f\"{name} extensions must be a dictionary with string keys.\"\n\n\ndef describe_type_system_extensions():\n def describe_graphql_scalar_type():\n def without_extensions():\n some_scalar = GraphQLScalarType(\"SomeScalar\")\n assert some_scalar.extensions is None\n assert some_scalar.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n scalar_extensions = {\"SomeScalarExt\": \"scalar\"}\n some_scalar = GraphQLScalarType(\"SomeScalar\", extensions=scalar_extensions)\n\n assert some_scalar.extensions is scalar_extensions\n assert some_scalar.to_kwargs()[\"extensions\"] is scalar_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, match=bad_extensions_msg(\"SomeScalar\")):\n # noinspection PyTypeChecker\n GraphQLScalarType(\"SomeScalar\", extensions=extensions)\n\n def describe_graphql_object_type():\n def without_extensions():\n some_object = GraphQLObjectType(\n \"SomeObject\",\n {\n \"someField\": GraphQLField(\n dummy_type, {\"someArg\": GraphQLArgument(dummy_type)}\n )\n },\n )\n\n assert some_object.extensions is None\n some_field = some_object.fields[\"someField\"]\n assert some_field.extensions is None\n some_arg = some_field.args[\"someArg\"]\n assert some_arg.extensions is None\n\n assert some_object.to_kwargs()[\"extensions\"] is None\n assert some_field.to_kwargs()[\"extensions\"] is None\n assert some_arg.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n object_extensions = {\"SomeObjectExt\": \"object\"}\n field_extensions = {\"SomeFieldExt\": \"field\"}\n arg_extensions = {\"SomeArgExt\": \"arg\"}\n\n some_object = GraphQLObjectType(\n \"SomeObject\",\n {\n \"someField\": GraphQLField(\n dummy_type,\n {\n \"someArg\": GraphQLArgument(\n dummy_type, extensions=arg_extensions\n )\n },\n extensions=field_extensions,\n )\n },\n extensions=object_extensions,\n )\n\n assert some_object.extensions is object_extensions\n some_field = some_object.fields[\"someField\"]\n assert some_field.extensions is field_extensions\n some_arg = some_field.args[\"someArg\"]\n assert some_arg.extensions is arg_extensions\n\n assert some_object.to_kwargs()[\"extensions\"] is object_extensions\n assert some_field.to_kwargs()[\"extensions\"] is field_extensions\n assert some_arg.to_kwargs()[\"extensions\"] is arg_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, match=bad_extensions_msg(\"SomeObject\")):\n # noinspection PyTypeChecker\n GraphQLObjectType(\"SomeObject\", {}, extensions=extensions)\n with raises(TypeError, match=bad_extensions_msg(\"Field\")):\n # noinspection PyTypeChecker\n GraphQLField(dummy_type, extensions=extensions)\n with raises(TypeError, match=bad_extensions_msg(\"Argument\")):\n # noinspection PyTypeChecker\n GraphQLArgument(dummy_type, extensions=extensions)\n\n def describe_graphql_interface_type():\n def without_extensions():\n 
some_interface = GraphQLInterfaceType(\n \"SomeInterface\",\n {\n \"someField\": GraphQLField(\n dummy_type, {\"someArg\": GraphQLArgument(dummy_type)}\n )\n },\n )\n\n assert some_interface.extensions is None\n some_field = some_interface.fields[\"someField\"]\n assert some_field.extensions is None\n some_arg = some_field.args[\"someArg\"]\n assert some_arg.extensions is None\n\n assert some_interface.to_kwargs()[\"extensions\"] is None\n assert some_field.to_kwargs()[\"extensions\"] is None\n assert some_arg.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n interface_extensions = {\"SomeInterfaceExt\": \"interface\"}\n field_extensions = {\"SomeFieldExt\": \"field\"}\n arg_extensions = {\"SomeArgExt\": \"arg\"}\n\n some_interface = GraphQLInterfaceType(\n \"SomeInterface\",\n {\n \"someField\": GraphQLField(\n dummy_type,\n {\n \"someArg\": GraphQLArgument(\n dummy_type, extensions=arg_extensions\n )\n },\n extensions=field_extensions,\n )\n },\n extensions=interface_extensions,\n )\n\n assert some_interface.extensions is interface_extensions\n some_field = some_interface.fields[\"someField\"]\n assert some_field.extensions is field_extensions\n some_arg = some_field.args[\"someArg\"]\n assert some_arg.extensions is arg_extensions\n\n assert some_interface.to_kwargs()[\"extensions\"] is interface_extensions\n assert some_field.to_kwargs()[\"extensions\"] is field_extensions\n assert some_arg.to_kwargs()[\"extensions\"] is arg_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, match=bad_extensions_msg(\"SomeInterface\")):\n # noinspection PyTypeChecker\n GraphQLInterfaceType(\"SomeInterface\", {}, extensions=extensions)\n\n def describe_graphql_union_type():\n def without_extensions():\n some_union = GraphQLUnionType(\"SomeUnion\", [])\n\n assert some_union.extensions is None\n\n assert some_union.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n union_extensions = {\"SomeUnionExt\": \"union\"}\n\n some_union = GraphQLUnionType(\"SomeUnion\", [], extensions=union_extensions)\n\n assert some_union.extensions is union_extensions\n\n assert some_union.to_kwargs()[\"extensions\"] is union_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, match=bad_extensions_msg(\"SomeUnion\")):\n # noinspection PyTypeChecker\n GraphQLUnionType(\"SomeUnion\", [], extensions=extensions)\n\n def describe_graphql_enum_type():\n def without_extensions():\n some_enum = GraphQLEnumType(\"SomeEnum\", {\"SOME_VALUE\": None})\n\n assert some_enum.extensions is None\n some_value = some_enum.values[\"SOME_VALUE\"]\n assert some_value.extensions is None\n\n assert some_enum.to_kwargs()[\"extensions\"] is None\n assert some_value.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n enum_extensions = {\"SomeEnumExt\": \"enum\"}\n value_extensions = {\"SomeValueExt\": \"value\"}\n\n some_enum = GraphQLEnumType(\n \"SomeEnum\",\n {\"SOME_VALUE\": GraphQLEnumValue(extensions=value_extensions)},\n extensions=enum_extensions,\n )\n\n assert some_enum.extensions is enum_extensions\n some_value = some_enum.values[\"SOME_VALUE\"]\n assert some_value.extensions is value_extensions\n\n assert some_enum.to_kwargs()[\"extensions\"] is enum_extensions\n assert some_value.to_kwargs()[\"extensions\"] is value_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, 
match=bad_extensions_msg(\"SomeEnum\")):\n # noinspection PyTypeChecker\n GraphQLEnumType(\n \"SomeEnum\", cast(Dict[str, Any], {}), extensions=extensions\n )\n with raises(TypeError, match=bad_extensions_msg(\"Enum value\")):\n # noinspection PyTypeChecker\n GraphQLEnumValue(extensions=extensions)\n\n def describe_graphql_input_object_type():\n def without_extensions():\n some_input_object = GraphQLInputObjectType(\n \"SomeInputObject\", {\"someInputField\": GraphQLInputField(dummy_type)}\n )\n\n assert some_input_object.extensions is None\n some_input_field = some_input_object.fields[\"someInputField\"]\n assert some_input_field.extensions is None\n\n assert some_input_object.to_kwargs()[\"extensions\"] is None\n assert some_input_field.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n input_object_extensions = {\"SomeInputObjectExt\": \"inputObject\"}\n input_field_extensions = {\"SomeInputFieldExt\": \"inputField\"}\n\n some_input_object = GraphQLInputObjectType(\n \"SomeInputObject\",\n {\n \"someInputField\": GraphQLInputField(\n dummy_type, extensions=input_field_extensions\n )\n },\n extensions=input_object_extensions,\n )\n\n assert some_input_object.extensions is input_object_extensions\n some_input_field = some_input_object.fields[\"someInputField\"]\n assert some_input_field.extensions is input_field_extensions\n\n assert (\n some_input_object.to_kwargs()[\"extensions\"] is input_object_extensions\n )\n assert some_input_field.to_kwargs()[\"extensions\"] is input_field_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, match=bad_extensions_msg(\"SomeInputObject\")):\n # noinspection PyTypeChecker\n GraphQLInputObjectType(\"SomeInputObject\", {}, extensions=extensions)\n with raises(TypeError, match=bad_extensions_msg(\"Input field\")):\n # noinspection PyTypeChecker\n GraphQLInputField(dummy_type, extensions=extensions)\n\n def describe_graphql_directive():\n def without_extensions():\n some_directive = GraphQLDirective(\n \"SomeDirective\", [], {\"someArg\": GraphQLArgument(dummy_type)}\n )\n\n assert some_directive.extensions is None\n some_arg = some_directive.args[\"someArg\"]\n assert some_arg.extensions is None\n\n assert some_directive.to_kwargs()[\"extensions\"] is None\n assert some_arg.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n directive_extensions = {\"SomeDirectiveExt\": \"directive\"}\n arg_extensions = {\"SomeArgExt\": \"arg\"}\n\n some_directive = GraphQLDirective(\n \"SomeDirective\",\n [],\n {\"someArg\": GraphQLArgument(dummy_type, extensions=arg_extensions)},\n extensions=directive_extensions,\n )\n\n assert some_directive.extensions is directive_extensions\n some_arg = some_directive.args[\"someArg\"]\n assert some_arg.extensions is arg_extensions\n\n assert some_directive.to_kwargs()[\"extensions\"] is directive_extensions\n assert some_arg.to_kwargs()[\"extensions\"] is arg_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, match=bad_extensions_msg(\"Directive\")):\n # noinspection PyTypeChecker\n GraphQLDirective(\"SomeDirective\", [], extensions=extensions)\n\n def describe_graphql_schema():\n def without_extensions():\n schema = GraphQLSchema()\n\n assert schema.extensions is None\n assert schema.to_kwargs()[\"extensions\"] is None\n\n def with_extensions():\n schema_extensions = {\"schemaExtension\": \"schema\"}\n\n schema = 
GraphQLSchema(extensions=schema_extensions)\n\n assert schema.extensions is schema_extensions\n\n assert schema.to_kwargs()[\"extensions\"] is schema_extensions\n\n @mark.parametrize(\"extensions\", bad_extensions)\n def with_bad_extensions(extensions):\n with raises(TypeError, match=bad_extensions_msg(\"Schema\")):\n # noinspection PyTypeChecker\n GraphQLSchema(extensions=extensions)\n","sub_path":"tests/type/test_extensions.py","file_name":"test_extensions.py","file_ext":"py","file_size_in_byte":13483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"60745940","text":"import argparse\nfrom datetime import datetime\nimport struct\nimport sys\nimport time\nimport traceback\nfrom uuid import uuid4\n\nimport pigpio\nfrom nrf24 import *\n\n#\n# A simple NRF24L receiver that connects to a PIGPIO instance on a hostname and port, default \"localhost\" and 8888, and\n# starts receiving data on the address specified sending a continiously increasing integer as acknowledgement payload. \n# Use the companion program \"ack-sender.py\" to send data to it from a different Raspberry Pi.\n#\nif __name__ == \"__main__\":\n\n print(\"Python NRF24 Receiver with Acknowledgement Payload Example.\")\n \n # Parse command line argument.\n parser = argparse.ArgumentParser(prog=\"ack-receiver.py\", description=\"Simple NRF24 Receiver with Acknowledgement Payload.\")\n parser.add_argument('-n', '--hostname', type=str, default='localhost', help=\"Hostname for the Raspberry running the pigpio daemon.\")\n parser.add_argument('-p', '--port', type=int, default=8888, help=\"Port number of the pigpio daemon.\")\n parser.add_argument('address', type=str, nargs='?', default='1ACKS', help=\"Address to listen to (3 to 5 ASCII characters).\")\n\n args = parser.parse_args()\n hostname = args.hostname\n port = args.port\n address = args.address\n\n # Verify that address is between 3 and 5 characters.\n if not (2 < len(address) < 6):\n print(f'Invalid address {address}. Addresses must be between 3 and 5 ASCII characters.')\n sys.exit(1)\n \n # Connect to pigpiod\n print(f'Connecting to GPIO daemon on {hostname}:{port} ...')\n pi = pigpio.pi(hostname, port)\n if not pi.connected:\n print(\"Not connected to Raspberry Pi ... goodbye.\")\n exit()\n\n # Create NRF24 object.\n # PLEASE NOTE: PA level is set to MIN, because test sender/receivers are often close to each other, and then MIN works better.\n nrf = NRF24(pi, ce=25, payload_size=RF24_PAYLOAD.ACK, channel=100, data_rate=RF24_DATA_RATE.RATE_250KBPS, pa_level=RF24_PA.MIN)\n nrf.set_address_bytes(len(address))\n\n # Listen on the address specified as parameter\n nrf.open_reading_pipe(RF24_RX_ADDR.P1, address)\n \n # Display the content of NRF24L01 device registers.\n nrf.show_registers()\n\n # Set the UUID that will be the payload of the next acknowledgement.\n next_id = 1\n nrf.ack_payload(RF24_RX_ADDR.P1, struct.pack(' m_overlap)[0]\n w,f,v = np.append(wb[gb],wr[gr]),np.append(fb[gb],fr[gr]),np.append(vb[gb],vr[gr])\n if smooth == None:\n ss.plot_spectrum_array(w,f,title=title,clear=True)\n else:\n if output:\n w,f,v = ss.smooth_boxcar(None,smooth,varwt=True,title=title,line=line,output=output,clear=True,w_in=w,f_in=f,v_in=v)\n else:\n ss.smooth_boxcar(None,smooth,varwt=True,title=title,line=line,output=False,clear=True,w_in=w,f_in=f,v_in=v)\n plt.xlim(np.min(wb)-0.025*(np.max(wr)-np.min(wb)),np.max(wr)+0.025*(np.max(wr)-np.min(wb)))\n if output:\n return w,f,v\n else:\n return\n","sub_path":"Plot_RB_comb.py","file_name":"Plot_RB_comb.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"5695274","text":"import datetime\nimport logging\nimport os\nimport sys\n\nfrom hero.entities.high_score import HighScore\nfrom hero.adapters.data_gateway import (\n get_user,\n get_channel,\n get_claims_after_start,\n channel_exist,\n game_is_running,\n)\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef try_to_get_high_score(channel_id):\n LOGGER.info(\"Try to get high score from channel: %s\", channel_id)\n if not channel_exist(channel_id):\n LOGGER.info(\"No such channel exist %s\", channel_exist)\n return\n if not game_is_running(channel_id):\n LOGGER.info(\"No game running in %s\", channel_exist)\n return\n claims = get_claims_after_start(channel_id)\n high_score = HighScore()\n for claim in claims:\n high_score.add(claim.user)\n high_score.sort()\n\n return high_score\n","sub_path":"src/hero/usecases/high_score.py","file_name":"high_score.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"373810927","text":"from django.urls import path\nfrom . import views\n\n#欢迎页,首页,用户个人信息\napp_name = 'users'\nurlpatterns = [\n path('', views.index),\n path('community/', views.community),\n path('id=', views.userspace, name = 'user_space')\n]","sub_path":"back_end/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"455405916","text":"#-*- coding: utf-8 -*-\nimport slamBase\ndef transformPC(src,T):\n pointcloud = []\n for item in src:\n a = list(item)\n a.append(1)\n a = np.matrix(a)\n a= a.reshape((-1,1))\n temp = T * a\n temp = temp.reshape((-1,1))\n\n","sub_path":"old/joinCloud.py","file_name":"joinCloud.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"136527077","text":"def emoji_converter(message):\n words = message.split(' ')\n emojis = {\n \":)\" : \"😄\", # Mac hot key: control + command + blank\n \":(\" : \"😟\"\n }\n output = \"\"\n for word in words:\n output +=emojis.get(word, word) + \" \"\n return output\n\n\nmes = input(\">\")\nprint(emoji_converter(mes))","sub_path":"util/emojiConverter.py","file_name":"emojiConverter.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"595485197","text":"from unittest.mock import patch\nfrom unittest import skip\n\nfrom test_plus import TestCase\n\nfrom core.consul import ConsulServer, get_available_datacenters\n\n\nclass ConsulTest(TestCase):\n def test_get_available_dc_no_config(self):\n available_dc = get_available_datacenters()\n self.assertEqual([], available_dc)\n\n @patch('consul.base.Consul.Catalog.datacenters', return_value=['dc1'])\n def test_get_available_dc_with_result(self, mock_dc):\n available_dc = get_available_datacenters()\n self.assertEqual(['dc1'], available_dc)\n\n @skip('Need to overwrite value from config file')\n def test_connectable_no_consul_agent(self):\n consul_server = ConsulServer()\n self.assertFalse(consul_server.connectable())\n\n @patch('consul.base.Consul.Status.leader', return_value='127.0.0.1:8500')\n def test_connectable_with_consul_agent(self, MockLeader):\n consul_server = ConsulServer()\n self.assertTrue(consul_server.connectable())\n","sub_path":"core/tests/test_consul.py","file_name":"test_consul.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"461735993","text":"import requests\nfrom termgraph import termgraph as tg\nfrom termcolor import colored\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom get_covid_data import get_data\n\njson_ = get_data('https://covid2019-api.herokuapp.com/v2/current')\n\nn_groups = len(json_)\nlocation = [item['location'] for item in json_]\nconfirmed = [item['confirmed'] for item in json_]\ndeaths = [item['deaths'] for item in json_]\nrecovered = [item['recovered'] for item in json_]\nactive = [item['active'] for item in json_]\n\n\ncolors = [94, 91, 96]\n\nmax_width = confirmed[0]/100\n\ndata = []\nnormal_data = []\nfor item in json_:\n data.append([item['confirmed'], 0, 0]) # 0 for the other two values since for some reason using this API the active deaths and recovered numbers dont add up to be the same as the confirmed cases. This should not effect the graph\n normal_data.append([item['active']/max_width + 1, item['deaths']/max_width + 1, item['recovered']/max_width + 1])\n\nargs = {'filename': 'data/ex4.dat', 'title': '', 'width': 100,\n 'format': '{:<5.2f}', 'suffix': '', 'no_labels': False,\n 'color': None, 'vertical': False, 'stacked': False,\n 'different_scale': False, 'calendar': False,\n 'start_dt': None, 'custom_tick': '', 'delim': '',\n 'verbose': False, 'version': False}\n\nprint(\"Covid 19 Cases per location\\n\" + colored('▇', 'blue') + \" Active\\n\" + colored('▇', 'red') + \" Deaths\\n\" + colored('▇', 'green') + \" Recovered\\n\")\ntg.stacked_graph(location, data, normal_data, 3, args, colors)","sub_path":"terminal_graph.py","file_name":"terminal_graph.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"65207889","text":"\"\"\"\ntime : o(n)\nspace : 0(1)\n\"\"\"\n\nclass Solution(object):\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n \n bitmask = 0\n \n for n in nums:\n bitmask ^= n #combination of two numbers\n \n diff = bitmask & (-bitmask) #bitwise & with 2's complement of mask\n bitmask2 = 0\n for n in nums:\n if n & diff != 0: #will give a random number for one of the single numbers\n bitmask2 ^= n\n \n return [bitmask2, bitmask ^ bitmask2] #basically, if we find one of the numbers and xor it with the combination we will get the next number\n \n ","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"313890620","text":"#!/usr/bin/env python\n\nimport panel as pn\nimport pandas as pd\n\nfrom detail_pyrenemofs.dft_info import plot_energy_steps\nfrom detail_pyrenemofs.structure import structure_jsmol\nfrom detail_pyrenemofs.utils import get_mat_id, get_details_title, get_geom_table, get_title\nfrom pipeline_pyrenemofs import get_mat_nodes_dict\n\nfrom pipeline_pyrenemofs import load_profile\nload_profile()\n\npn.extension(css_files=['detail_pyrenemofs/static/style.css'])\n\nclass DetailView():\n\n def __init__(self):\n self.mat_id = get_mat_id()\n self.mat_nodes_dict = get_mat_nodes_dict(self.mat_id)\n print(\">> Display details of MAT_ID:\", self.mat_id, self.mat_nodes_dict['orig_cif'])\n\n @property\n def title_col(self):\n col = pn.Column(width=700)\n col.append(pn.pane.Markdown(get_details_title(self.mat_nodes_dict['orig_cif'])))\n return col\n\n @property\n def structure_col(self):\n nodes = self.mat_nodes_dict\n col = pn.Column(sizing_mode='stretch_width')\n if 'opt_cif_ddec' in nodes:\n col.append(get_title('Cell optimized structure', uuid=nodes['opt_cif_ddec'].uuid))\n col.append(pn.pane.Bokeh(structure_jsmol(nodes['opt_cif_ddec'])))\n col.append(get_title('Geometric properties', uuid=nodes[\"opt_zeopp\"].uuid))\n col.append(pn.pane.Markdown(get_geom_table(nodes[\"opt_zeopp\"])))\n col.append(get_title('Energy profile during cell optimization', uuid=nodes['dftopt'].uuid))\n col.append(pn.pane.Bokeh(plot_energy_steps(dftopt_out=nodes['dftopt'])))\n else:\n col.append(get_title('Cell structure (not DFT optimized)', uuid=nodes['orig_cif'].uuid))\n col.append(pn.pane.Bokeh(structure_jsmol(nodes['orig_cif'])))\n col.append(pn.pane.Markdown(\"\"\"\n ###NOTE: \n This MOF was not optimized because the framework is charged or DFT failed.\n \"\"\"))\n col.append(get_title('Geometric properties (cell not optimized)', uuid=nodes[\"orig_zeopp\"].uuid))\n col.append(pn.pane.Markdown(get_geom_table(nodes[\"orig_zeopp\"])))\n return col\n\ndv = DetailView()\n\npage = dv.title_col\npage.append(dv.structure_col)\npage.servable()\n","sub_path":"detail_pyrenemofs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"39452958","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 8 23:50:00 2018\r\n\r\n@author: user\r\n\"\"\"\r\n\r\n\r\nimport itertools\r\nimport numpy as np\r\nimport random\r\nimport pickle\r\nnp.set_printoptions(threshold=np.inf)\r\n\r\n\r\nNTier = int(4)\r\nNCol = int(6)\r\nNumCon = int(10)\r\n\r\n\r\nwith open('input_layer_4_6_10_3.pickle', 'rb') as file:\r\n Bay = pickle.load(file)\r\n\r\nBay.shape = (len(Bay), NTier, NCol)\r\n\r\ndef bay_test(Initial_Bay,NumC):\r\n Bay_test = Initial_Bay\r\n np.place(Initial_Bay, Initial_Bay == 0, NumC + 1) # strange syntax, transfer 0 to NumCon + 1\r\n return Bay_test\r\n\r\ndef Min_Max(nt,nc,NumC,Bay,Height):\r\n it = 0\r\n Movement = 0\r\n relocation = 0 \r\n while it < NumC: \r\n p_l_c = np.where(Bay == Bay.min(keepdims = True)) #p_l_c = The position of lowest container\r\n '''\r\n print(Bay)\r\n \r\n print(p_l_c)\r\n '''\r\n p_r = int(p_l_c[0]) #the row position of p_l_c\r\n #print('p_r =', p_r)\r\n p_c = int(p_l_c[1]) #the column position of p_l_c\r\n if p_r == nt - int(Height[p_c]): #if target container is on the top of a stack, directly retrieving it\r\n Bay[p_r][p_c] = NumC + 1 \r\n Height[p_c] = Height[p_c] - 1 \r\n '''\r\n print('Height =', Height) \r\n '''\r\n Movement += 1\r\n np.place(Bay, Bay == NumC +1, 0)\r\n '''\r\n print('Round =',Movement,'\\n',Bay)\r\n '''\r\n np.place(Bay, Bay == 0, NumC + 1)\r\n '''\r\n print('\\n')\r\n '''\r\n elif p_r > nt - Height[p_c]:\r\n r = nt - Height[p_c]\r\n '''\r\n print('r = ', r)\r\n '''\r\n while p_r > r: #while loop concept \r\n i = 0\r\n Height_m = [] #create an empty list for column which is up to maximum height\r\n c_s_i = Bay.min(axis = 0) - Bay[r][p_c] #candidate stack including target container\r\n while nc > i:\r\n if Height[i] == nt:\r\n Height_m.append(i) #add column with maximum height to list \r\n i = i + 1\r\n Height_m.append(p_c) # add the target column to list \r\n c_s = np.delete(c_s_i, Height_m, None) #candidate stack after deleting target container and stack up to height limit \r\n \r\n if np.max(c_s) > 0: #find arg number\r\n arg_c = min(i for i in c_s if i > 0)\r\n else:\r\n arg_c = max(c_s)\r\n \r\n l_arg_c = []\r\n for i in range(0,nc): # I ingore if there are many arg numbers, it will do the same things. 
It will influence the value of Height.\r\n if c_s_i[i] == arg_c: # find the location of arg_c\r\n l_arg_c.append(i) # add them to list\r\n r_arg_c = random.choice(l_arg_c) #random choose one of them\r\n Bay[nt-Height[r_arg_c]-1][r_arg_c] = Bay[r][p_c] #relocation\r\n Bay[r][p_c] = NumC +1\r\n Height[p_c] = Height[p_c] - 1\r\n Height[r_arg_c] = Height[r_arg_c] + 1\r\n '''\r\n print('\\n')\r\n print('Height =', Height)\r\n '''\r\n relocation += 1\r\n Movement += 1\r\n np.place(Bay, Bay == NumC +1, 0)\r\n '''\r\n print('Round =',Movement,'\\n',Bay)\r\n '''\r\n np.place(Bay, Bay == 0, NumC + 1)\r\n '''\r\n print('relocation =', relocation)\r\n print('\\n') \r\n '''\r\n r = r + 100\r\n '''\r\n Bay[p_r][p_c] = NumC + 1\r\n Height[p_c] = Height[p_c] - 1\r\n #print('Height =', Height)\r\n Movement += 1\r\n '''\r\n #print('Height =', Height)\r\n np.place(Bay, Bay == NumC +1, 0)\r\n '''\r\n print('Round =',Movement,'\\n',Bay)\r\n '''\r\n np.place(Bay, Bay == 0, NumC + 1)\r\n break\r\n '''\r\n print('\\n')\r\n '''\r\n it = it + 100\r\n np.place(Bay, Bay == NumC +1, 0)\r\n '''\r\n print(Bay)\r\n '''\r\n '''\r\n np.place(Bay, Bay == 0, NumC + 1)\r\n '''\r\n '''\r\n print(\"Total movements =\", Movement)\r\n print(\"Total relocation =\", relocation)\r\n '''\r\n #print('Height =', Height)\r\n #print('Bay =', Bay)\r\n return Height\r\n\r\nBay_height = []\r\nfor i in range(0, len(Bay)):\r\n Height_origin = np.zeros((NCol, ), dtype = int)\r\n for row in range(0, NTier):\r\n for column in range(0, NCol):\r\n if Bay[i][row][column] > 0:\r\n Height_origin[column] += 1 \r\n Bay_height.append(Height_origin)\r\n\r\nBay_one_move = []\r\nfor i in range(0, len(Bay)):\r\n Height = np.zeros((NCol, ), dtype = int)\r\n #print(Height)\r\n #print(Bay[i])\r\n for row in range(0, NTier):\r\n for column in range(0, NCol):\r\n if Bay[i][row][column] > 0 and Bay[i][row][column] < NumCon + 1:\r\n Height[column] += 1\r\n #print(Bay[i])\r\n Bay_test = bay_test(Bay[i], NumCon)\r\n Height = Min_Max(NTier, NCol, NumCon, Bay_test, Height)\r\n Bay_one_move.append(Height)\r\n\r\nBay_height = np.asarray(Bay_height)\r\nBay_one_move = np.asarray(Bay_one_move)\r\n\r\noutput_layer = Bay_one_move - Bay_height\r\n\r\n'''#(6, m)\r\noutput_layer_bi = np.zeros((len(output_layer), NCol), dtype = int)\r\nfor i in range(0, len(output_layer_bi)):\r\n for j in range(0, NCol):\r\n if output_layer[i][j] == 1:\r\n output_layer_bi[i][j] = 1\r\n'''\r\n\r\n''' #create (36,m) output layer, but only a one, others are zero\r\noutput_layer_bi = np.zeros((len(output_layer), NCol ** 2), dtype = int)\r\nlabel_num = np.array([[list(range(0, NCol ** 2))]])\r\nlabel_num.shape = (NCol, NCol)\r\n\r\nfor i in range(0, len(output_layer)):\r\n for j in range(0, NCol):\r\n if output_layer[i][j] == 1:\r\n x = j\r\n \r\n if output_layer[i][j] == -1:\r\n y = j\r\n else:\r\n y = x\r\n label = label_num[x][y]\r\n output_layer_bi[i][label] = 1 \r\noutput_layer_bi = np.transpose(output_layer_bi)\r\nprint(output_layer_bi.shape)\r\n'''\r\n#create (12,m) output layer, but 2 one, others are zero\r\noutput_layer_bi = np.zeros((len(output_layer), NCol * 2), dtype = int)\r\nfor i in range(0, len(output_layer)):\r\n for j in range(0, NCol):\r\n if output_layer[i][j] == 1:\r\n output_layer_bi[i][j + NCol] = 1\r\n \r\n if output_layer[i][j] == -1:\r\n output_layer_bi[i][j] = 1\r\n\r\n#print(output_layer_bi)\r\noutput_layer_bi = np.transpose(output_layer_bi)\r\n#print(output_layer_bi)\r\nprint(output_layer_bi.shape)\r\n\r\n\r\nwith open('output_layer_4_6_10_3.pickle' ,'wb') as file:\r\n 
pickle.dump(output_layer_bi, file)\r\n","sub_path":"Create-Dataset/Output-Data/Min-Max/4_6_10_3_output_layer.py","file_name":"4_6_10_3_output_layer.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
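The script above tracks per-column stack heights by scanning every bay cell in nested loops. Under the same encoding (0 = empty slot, 1..NumCon = containers, NumCon + 1 as the temporary sentinel), that bookkeeping reduces to one vectorized numpy expression; a small sketch:

```python
import numpy as np


def column_heights(bay, num_con):
    """Per-column stack heights for one (NTier, NCol) bay.

    0 marks an empty slot; values in 1..num_con are containers
    (num_con + 1 is the script's sentinel and must not be counted).
    """
    occupied = (bay > 0) & (bay < num_con + 1)
    return occupied.sum(axis=0)


bay = np.array([[0, 0, 0],
                [2, 0, 0],
                [1, 3, 0]])
print(column_heights(bay, num_con=3))  # -> [2 1 0]
```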
+{"seq_id":"328142282","text":"# noinspection PyPackageRequirements\nimport subprocess as sp\nimport json\nfrom matplotlib import rc\n\n# ------------------------------------------------------------------------------\n# global configurations\n# ------------------------------------------------------------------------------\ncore_cap = 2\n\n# var_range = 70\n# CKPTs = [1 + i * 1 for i in range(var_range)]\n\n\"\"\"\nobsolete proto functions\nuse proto_shared_2 insteads\n\"\"\"\n\n\ndef cmd_to_execute(part=20,\n node=10,\n lineage=1000,\n victim=1,\n interruption_mode=\"direct\",\n iteration_based=\"true\",\n part_detachable=\"false\",\n reps=10,\n unit_processing_time=1.0,\n checkpoint_write_time=1.0,\n downtime=2.0,\n restart_time=1.5,\n MTTR=40,\n checkpoint_interval=4.0,\n time_scale=4):\n return './simulator2 ' \\\n '-p {0} ' \\\n '-n {1} ' \\\n '-l {2} ' \\\n '-v {3} ' \\\n '-i {4} ' \\\n '-d {5} ' \\\n '--detachable {6} ' \\\n '-r {7} ' \\\n '-U {8} ' \\\n '-W {9} ' \\\n '-D {10} ' \\\n '-R {11} ' \\\n '-M {12} ' \\\n '-C {13} ' \\\n '-T {14} '.format(part,\n node,\n lineage,\n victim,\n interruption_mode,\n iteration_based,\n part_detachable,\n reps,\n unit_processing_time,\n checkpoint_write_time,\n downtime,\n restart_time,\n MTTR,\n checkpoint_interval,\n time_scale\n )\n\n\ndef json_parser(filename):\n config = json.loads(open(filename).read())\n return config\n\n\ndef get_checkpoint_candidates(config):\n return [config['from'] + i * config['step'] for i in range(config['var_range'])]\n\n\ndef dict_to_execute(config, ckpt_candidates, single_node=False, victim=-1, w_time=-1):\n try:\n del config['from']\n del config['step']\n del config['var_range']\n except KeyError:\n pass\n if victim != -1:\n config.update({'victim': victim})\n if w_time != -1:\n config.update({'checkpoint_write_time', w_time})\n\n return get_line(ckpt_candidates, config, single_node)\n\n\ndef expectation(\n tau,\n part=40,\n node=10,\n lineage=1000,\n ckpt_write_time=1.0,\n # tau=CKPTs,\n downtime=2.0,\n restart_time=1.5,\n MTTR=40,\n unit_time=1,\n detachable=False,\n single_node=True):\n Ts = lineage\n R = downtime\n R += restart_time if detachable else 0\n delta = ckpt_write_time\n\n if single_node:\n return [unit_time * (\n Ts\n + Ts / t * delta\n + (1 / 2 * (t + delta) + R) * Ts / t * (t + delta) / MTTR\n ) for t in tau]\n else:\n return [unit_time * (part / node) / core_cap * (\n Ts\n + Ts / t * delta\n + (1 / 2 * (t + delta) + R) * Ts / t * (t + delta) / MTTR\n ) for t in tau]\n\n\ndef get_point(args):\n cmd = cmd_to_execute(**args)\n median, mean, std_dev = sp.check_output(cmd, shell=True).split()[0:3]\n return float(median.decode()), float(mean.decode()), float(std_dev.decode())\n\n\ndef get_line(ckpt_candidates, args, single_node=False):\n line_median, line_mean, deviations = [], [], []\n for ckpt in ckpt_candidates:\n args.update({'checkpoint_interval': ckpt})\n median, mean, std_dev = get_point(args)\n line_median.append(median)\n line_mean.append(mean)\n deviations.append(std_dev)\n exp = expectation(\n tau=ckpt_candidates,\n part=args['part'],\n node=args['node'],\n lineage=args['lineage'],\n ckpt_write_time=args['checkpoint_write_time'],\n restart_time=args['restart_time'],\n MTTR=args['MTTR'],\n unit_time=args['unit_processing_time'],\n single_node=single_node)\n\n return exp, line_median, line_mean, deviations\n\n\n# ------------------------------------------------------------------------------\n# config for graph\n# 
------------------------------------------------------------------------------\nred, blue, black, green, yellow = '#C3272B', '#005CAF', '#000000', '#7A942E', '#FFA400' # style\nrc('text', usetex=True)\nrc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n\n# figure title, axis, etc.\nxlabel = \"Checkpoint Interval (unit)\"\nylabel = \"Running Time\"\n","sub_path":"thesis/proto_shared.py","file_name":"proto_shared.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
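The `expectation` helper in the record above is a first-order checkpointing model: total time = useful work + checkpoint writes + (rework + recovery) per expected failure. A standalone sketch with the same terms (symbols follow the code: `Ts` lineage work, `delta` checkpoint write time, `R` recovery, and the code's `MTTR` used as the mean time between failures), compared against the classic Young-style optimum `sqrt(2 * delta * MTBF)`:

```python
import math


def expected_runtime(tau, Ts=1000.0, delta=1.0, R=2.0, mtbf=40.0):
    # work + checkpoint writes + (rework + recovery) * expected failures,
    # mirroring the single-node branch of expectation() above
    return (Ts
            + (Ts / tau) * delta
            + (0.5 * (tau + delta) + R) * (Ts / tau) * (tau + delta) / mtbf)


tau_star = math.sqrt(2 * 1.0 * 40.0)  # Young's first-order optimum for delta=1, MTBF=40
for tau in (2.0, tau_star, 20.0):
    print(f"tau={tau:6.2f}  E[T]={expected_runtime(tau):8.1f}")
# tau near tau_star (~8.94) gives the smallest expected runtime of the three
```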
+{"seq_id":"320359689","text":"#!/usr/bin/env python\nimport TTFiles\nimport subprocess\n\ndef get_single_point_energy(filename):\n command = (\"$TINKER/analyze \"+filename+\n \" $TINKER/../params/mm3.prm E\")\n output = subprocess.check_output(command,shell=True)\n output = output.split()\n for index, out in enumerate(output):\n if \"Potential\" in out:\n energy = float(output[index+3])\n return energy\n \ndef get_minimum_energy(filename):\n command = (\"$TINKER/minimize \"+filename+\n \" $TINKER/../params/mm3.prm 0.01\")\n output = subprocess.check_output(command,shell=True)\n output = output.split()\n for index, out in enumerate(output):\n if \"Function\" in out:\n energy = float(output[index+3])\n return energy\n ","sub_path":"TTEnergy.py","file_name":"TTEnergy.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"646479152","text":"from unittest import mock\n\nimport pytest\n\nfrom kolas.application import Application\nfrom kolas.middleware import use\nfrom starlette.testclient import TestClient\n\n\n@pytest.fixture()\ndef app(router, container):\n return Application(\n router=router, container=container\n )\n\n\nclass TestApplication:\n @pytest.mark.asyncio\n async def test_middleware(self, container, router, scope, send, receive):\n spy = mock.MagicMock()\n\n class mw:\n def __init__(self, app):\n pass\n\n async def __call__(self, scope, receive, send):\n spy()\n\n app = Application(\n router=router, container=container, middleware=[use(mw)]\n )\n await app(scope, receive, send)\n spy.assert_called_once()\n\n def test_on_startup_on_shutdown(self, app, send):\n startup_called = False\n shutdown_called = False\n\n def on_startup():\n nonlocal startup_called\n startup_called = True\n\n def on_shutdown():\n nonlocal shutdown_called\n shutdown_called = True\n\n app.add_startup_handler(on_startup)\n app.add_shutdown_handler(on_shutdown)\n\n with TestClient(app):\n assert startup_called\n assert not shutdown_called\n\n def test_calls_with_services(self, router, container):\n has_app = False\n has_debug = False\n has_container = False\n\n class mw:\n def __init__(self, app):\n self.app = app\n\n async def __call__(self, scope, receive, send):\n nonlocal has_app, has_debug, has_container\n has_app = 'app' in scope\n has_debug = 'debug' in scope\n has_container = 'container' in scope\n await self.app(scope, receive, send)\n\n app = Application(router, container, middleware=[use(mw)])\n with TestClient(app):\n assert has_app\n assert has_debug\n assert has_container\n","sub_path":"kolas/tests/test_application.py","file_name":"test_application.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"611718769","text":"# Store info about a person in a dictionary.\npersonal_info = {\n\t'Bob': {\n\t\t'first_name': 'bob',\n\t\t'last_name': 'smith',\n\t\t'age': 56,\n\t\t'city_of_residence': 'new york',\n\t\t},\n\t'Lisa': {'first_name': 'lisa',\n\t\t'last_name': 'Yandi',\n\t\t'age': 43,\n\t\t'city_of_residence': 'singapore',\n\t\t},\n\t'Luke':{\n\t\t'first_name': 'luke',\n\t\t'last_name': 'butter',\n\t\t'age': 21,\n\t\t'city_of_residence': 'moscow',\n\t\t},\n\t}\n\nfor name, info in personal_info.items():\n\tprint(f\"\\nName: {name.title()}\")\n\tfull_name = f\"{info['first_name']} {info['last_name']}\"\n\tage = info['age']\n\tresidence = info['city_of_residence']\n\n\tprint(f\"\\tFull Name: {full_name.title()}\")\n\tprint(f\"\\tAge: {age}\")\n\tprint(f\"\\tResidence: {residence.title()}\")\n\n# Make a dictionary called favorite_places.\nfavorite_places = {'andrew': 'san francisco', 'leia': 'antarctica', \n\t'reggie': 'djbouti', 'peter': 'missouri',\n\t}\nfor name, place in favorite_places.items():\n\tprint(f\"\\n{name.title()}'s favorite place in the whole world \"\n\tf\"is {place.title()}.\")\n\n# Store people's favorite numbers.\nfavorite_numbers = {\n\t'bob': [44, 33, 444],\n\t'lisa': [23,],\n\t'meg': [376, 13],\n\t'mike': [15, 22],\n\t'luke': [9, 45, 999999999999],\n\t}\nfor name, numbers in favorite_numbers.items():\n\tprint(f\"\\n{name.title()}'s favorite numbers are:\")\n\tprint(f\"\\t{numbers}\")\n\n# Cities information.\ncities = {\n\t'new york': {\n\t\t'country': 'united states',\n\t\t'population': 1_000_000,\n\t\t'nickname': 'the big apple',\n\t\t},\n\t'honolulu': {\n\t\t'country': 'united states',\n\t\t'population': 1_000_000,\n\t\t'nickname': 'the big pineapple',\n\t\t},\n\t'venice': {\n\t\t'country': 'italy',\n\t\t'population': 1_000_000,\n\t\t'nickname': 'city of bridges',\n\t\t},\n\t'rio de janerio': {\n\t\t'country': 'brazil',\n\t\t'population': 1_000_000,\n\t\t'nickname': 'marvellous city',\n\t\t},\n\t}\nfor city, facts in cities.items():\n\tprint(f\"\\n{city.title()}:\")\n\tprint(f\"\\tLocation: {facts['country'].title()}\")\n\tprint(f\"\\tPopulation: {facts['population']}\")\n\tprint(f\"\\tNickname: {facts['nickname'].title()}\")\n","sub_path":"people_info_ch6.py","file_name":"people_info_ch6.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"499455973","text":"#!venv/bin/python\n\nimport os\n\nfrom app import app\n\nif __name__ == \"__main__\":\n DATA_MODELS = ['question', 'solution', 'user', 'submission', 'result']\n for model in DATA_MODELS:\n directory_path = os.path.join(app.config['DATA_PATH'], model)\n if not os.path.isdir(directory_path):\n try:\n os.makedirs(directory_path, exist_ok=False)\n except OSError:\n print(\"Directory already exists!\")\n\n app.run(debug=True, threaded=True)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"119822396","text":"import settings\nfrom mysql import db\nimport os\nimport csv\nimport utils\n\n\ndef time_main(start_time, end_time, tablename, columns=None):\n\n timespan = settings.timespan\n output_filename = 'sfhd_' + 'origin_' + utils.getDigitDay(start_time) + '.csv'\n db.connect()\n # 判断输出文件是否存在 :False为不存在\n if os.path.isfile(output_filename) == False:\n pass\n else:\n end_time = start_time\n start_time = start_time -86400\n output_filename = 'sfhd_' + 'origin_' + utils.getDigitDay(start_time) + '.csv'\n\n\n # 隔一个时间段timespan存一次\n with open(output_filename, 'w') as csvfile:\n\n if columns == None:\n columns = db.find_columns(tablename)\n data = list(columns)\n\n writer = csv.writer(csvfile, dialect=(\"excel\"))\n data_1 = sorted(set(data), key=data.index)\n writer.writerow(data_1)\n\n temp_time = start_time + timespan\n current_time = start_time\n\n while temp_time <= end_time+3:\n utils.log_easy('sfhd_origin_data_extract', utils.getTimeDes(temp_time))\n fieldNames, results = db.find(tablename, current_time, temp_time-1, columns)\n # 插入data\n for info in results:\n writer.writerow(info)\n current_time = temp_time\n temp_time = current_time + timespan\n db.disconnect()\n\n","sub_path":"sparktest/task_mysql/sfhd_origin_data_extract.py","file_name":"sfhd_origin_data_extract.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"511634860","text":"import random\n#1\ndef both():\n\tfor i in range(8,-4,-1):\n\t\tprint(i)\n\ta=9\n\twhile a>=-2:\n\t\tprint(a-1)\n\t\ta-=1\nboth()\n\n#2\ndef is_odd(a):\n\tif a%2==1:\n\t\treturn True\n\telse:\n\t\treturn False\n\nfor i in range(1,11):\n\tis_odd(i)\n\tif is_odd(i)==True:\n\t\tprint(\"odd\")\n\telse:\n\t\tprint(\"even\")\n\n#3\ndef dice_roll(a):\n alldice = 0\n count = 0\n while alldice != a:\n count += 1\n dice1 = random.randint(1,6)\n dice2 = random.randint(1,6)\n dice3 = random.randint(1,6)\n alldice = dice1 + dice2 + dice3\n print(count)\ndice_roll(17)\ndice_roll(13)\n\n#4\ndef odd_even_count(a):\n\todd=0\n\teven=0\n\twhile a>0:\n\t\tb=a%10\n\t\tif b%2==0:\n\t\t\teven+=1\n\t\telse:\n\t\t\todd+=1\n\t\ta=int(a/10)\n\tprint(\"odds:\",odd,\"evens:\",even)\nodd_even_count(789319231)\nodd_even_count(987654)\n\n#5\ndef string_analysis(a):\n\tdigits = sum(i.isdigit() for i in a)\n\tletters = sum(i.isalpha() for i in a)\n\tblanks = sum(i.isspace() for i in a)\n\tprint(\"Letters:\",letters,\", Digits:\",digits,\", Blanks:\",blanks)\nstring_analysis(\"I'm not 89 years old\")\n\n","sub_path":"Computational-Thinking/loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"25577693","text":"from flask import Flask, jsonify, make_response, render_template, request\r\nfrom helpers import validate_request, get_num_facts\r\nimport random\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route(\"/\")\r\ndef homepage():\r\n \"\"\"Show homepage.\"\"\"\r\n\r\n return render_template(\"index.html\")\r\n\r\n@app.route(\"/api/get-lucky-num\", methods=['POST'])\r\ndef lucky_num():\r\n \"\"\"POST route to process form data and send json response\"\"\"\r\n\r\n name = request.json[\"name\"]\r\n email = request.json[\"email\"]\r\n year = request.json[\"year\"]\r\n color = request.json[\"color\"]\r\n\r\n req = {\r\n \"name\": name,\r\n \"email\": email,\r\n \"year\": year,\r\n \"color\": color\r\n }\r\n\r\n try:\r\n validation = validate_request(req)\r\n\r\n num = random.randint(1,100)\r\n data = get_num_facts(num, year)\r\n return make_response(data, 200)\r\n except ValidationError as e:\r\n return make_response(e.errors, 422)","sub_path":"Lucky Number/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"213535366","text":"'''\n@Author: Zuxin Liu\n@Email: zuxinl@andrew.cmu.edu\n@Date: 2020-02-26 12:27:02\n@LastEditTime: 2020-03-25 22:40:52\n@Description:\n'''\n\nimport yaml\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport math\nfrom envs import grid_env\nimport time\nfrom envs.rendering import Window\nfrom random import sample\nimport pickle\nimport copy\nfrom envs.astar import A_star\nimport torch\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\nfrom model1 import ActorCritic\n\nidx_to_act = {0:\"N\",1:\"S\",2:\"E\",3:\"W\", 4:\"NW\",5:\"WS\",6:\"SE\",7:\"EN\",8:\".\"}\nact_to_idx = dict(zip(idx_to_act.values(),idx_to_act.keys()))\ndef compute_returns(next_value, rewards, masks, gamma=0.99):\n R = next_value\n returns = []\n for step in reversed(range(len(rewards))):\n R = rewards[step] + gamma * R * masks[step]\n returns.insert(0, R)\n return returns\n\nclass Agent(object):\n def __init__(self, map, ID=0, vis = 7):\n self.map = map\n self.id = ID\n self.vis = vis\n self.row, self.col = 2*self.vis+1, 2*self.vis+1\n self.obs_map = np.ones((3, self.row,self.col))\n self.object_to_idx = {\"obstacle\":1,\"agent\":2, \"dynamic obstacle\":3, \"free\":0}\n self.idx_to_object = dict(zip(self.object_to_idx.values(),self.object_to_idx.keys()))\n self.object_to_color = {\"obstacle\":-1,\"agent\":0.5, \"dynamic obstacle\":0, \"free\":1}\n self.path_color = 0.2\n self.goal_color = 0.5\n \n self.dynamic_obs_pose = {0:[],1:[],2:[]} # last pose, last last pose, last last last pose\n self.dynamic_obs_decay = {0:-0.8,1:-0.7,2:-0.6}\n self.agent_obs_pose = {0:[],1:[],2:[]}\n self.agent_obs_decay = {0:-0.3,1:-0.2,2:-0.1}\n self.pose_normalizer = 20\n\n self.planner = A_star(self.map, self.idx_to_object)\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.ac = ActorCritic().to(self.device)\n self.optimizer = None\n\n # training variables\n self.log_probs = []\n self.values = []\n self.rewards = []\n self.masks = []\n self.current_ent = 0\n self.entropy = 0\n self.collision = 0\n self.steps = 0\n\n self.subgoal_length = 3\n self.max_step = 50\n self.off_route_scale_factor = 0.5\n self.poses = []\n self.oscillation_penalty = -0.3\n\n def reset_memory(self):\n # reset training variables\n self.log_probs = []\n self.values = []\n self.rewards = []\n self.masks = []\n self.entropy = 0\n\n def plan(self, start, goal):\n path = self.planner.plan(start.copy(), goal.copy())\n if len(path):\n self.path = path\n\n def set_max_step(self, state, ratio = 4):\n pose = state['pose'][self.id].copy()\n goal = state['goal'][self.id].copy()\n self.ratio = ratio\n self.plan(pose, goal)\n self.max_step = int(len(self.path)*ratio)\n\n def off_route_reward(self, state):\n if not len(self.path):\n return 0\n pose = np.array(state['pose'][self.id])\n #print(\"pose: \",pose)\n path = np.array(self.path)\n #print(\"path shape: \", path.shape)\n diff = path-pose\n x = diff[:,0]\n y = diff[:,1]\n distance = np.sqrt(x**2+y**2)\n #print(distance)\n #print(-np.min(distance))\n reward = -np.min(distance)*self.off_route_scale_factor\n return reward\n\n def compute_reward(self, state):\n pose = np.array(state['pose'][self.id])\n r1 = self.off_route_reward(state)\n if len(self.poses)<2:\n self.poses.append(pose)\n return r1\n pose_last_last = self.poses[1]\n r2 = 0\n if np.all(pose==pose_last_last):\n r2 = self.oscillation_penalty\n self.poses[1] = self.poses[0]\n self.poses[0] = pose\n return r1+r2\n\n def 
update_map(self, obs, pose):\n #print(\"observation: \")\n #print(obs)\n #print(\"pose: \",pose)\n new_map = self.map.copy()\n offset = np.array([pose[1]-self.vis, pose[0]-self.vis])\n agent_idx = np.argwhere(obs==self.object_to_idx[\"agent\"])\n #print(\"offset\", offset)\n #print(\"agent in obs\", agent_idx)\n if np.size(agent_idx):\n agent_idx = agent_idx + offset\n #print(\"agent in obs\", agent_idx)\n for idx in agent_idx:\n new_map[idx[0],idx[1]] = self.object_to_idx[\"agent\"]\n \n agent_idx = np.argwhere(obs==self.object_to_idx[\"dynamic obstacle\"])\n #print(\"dynamic obs in obs\", agent_idx)\n if np.size(agent_idx):\n agent_idx = agent_idx + offset\n #print(\"dynamic obs in obs\", agent_idx)\n for idx in agent_idx:\n new_map[idx[0],idx[1]] = self.object_to_idx[\"dynamic obstacle\"]\n return new_map\n\n def preprocess(self, state, replan = False, debug = False):\n self.obs_map = np.ones((3, self.row,self.col))\n obs = state['obs'][self.id]\n pose = state['pose'][self.id]\n goal = state['goal'][self.id]\n # first channel is the obs\n obs_color = copy.deepcopy(obs)\n for key in self.object_to_idx.keys():\n obs_color[obs_color==self.object_to_idx[key]] = self.object_to_color[key]\n self.obs_map[0,:,:] = obs_color\n \n # second channel is the trajectory of dynamic obs and agents\n # dynamic_map = self.obs_map[1,:,:]\n # mask = (obs==self.object_to_idx[\"dynamic obstacle\"])\n # dynamic_map[mask] = self.object_to_color[\"dynamic obstacle\"]\n # dynamic_obs_pose_now = np.argwhere(mask)[:,::-1] + pose - np.array([self.vis, self.vis])\n # mask = (obs==self.object_to_idx[\"agent\"])\n # mask[self.vis, self.vis]=False\n # dynamic_map[mask] = self.object_to_color[\"agent\"]\n # agent_obs_pose_now = np.argwhere(mask)[:,::-1] + pose - np.array([self.vis, self.vis])\n \n # for key in self.dynamic_obs_pose.keys():\n # obs_poses = self.dynamic_obs_pose[key]\n # agent_poses = self.agent_obs_pose[key]\n # if len(obs_poses):\n # obs_poses_array = np.array(obs_poses)\n # obs_pose_in_local_coord = obs_poses_array - pose\n # idx = np.logical_and(np.abs(obs_pose_in_local_coord[:,0])<=self.vis, \n # np.abs(obs_pose_in_local_coord[:,1])<=self.vis)\n # obs_pose_in_local_coord = obs_pose_in_local_coord[idx] + np.array([self.vis, self.vis])\n # tmp = np.zeros(dynamic_map.shape)\n # tmp[obs_pose_in_local_coord[:,1],obs_pose_in_local_coord[:,0]] = self.dynamic_obs_decay[key]\n # dynamic_map += tmp\n # if len(agent_poses):\n # obs_poses_array = np.array(agent_poses)\n # obs_pose_in_local_coord = obs_poses_array - pose\n # idx = np.logical_and(np.abs(obs_pose_in_local_coord[:,0])<=self.vis, \n # np.abs(obs_pose_in_local_coord[:,1])<=self.vis)\n # obs_pose_in_local_coord = obs_pose_in_local_coord[idx] + np.array([self.vis, self.vis])\n # tmp = np.zeros(dynamic_map.shape)\n # tmp[obs_pose_in_local_coord[:,1],obs_pose_in_local_coord[:,0]] = self.agent_obs_decay[key]\n # dynamic_map += tmp \n # self.dynamic_obs_pose[2]=self.dynamic_obs_pose[1]\n # self.dynamic_obs_pose[1]=self.dynamic_obs_pose[0]\n # self.dynamic_obs_pose[0]=dynamic_obs_pose_now \n # self.agent_obs_pose[2]=self.agent_obs_pose[1]\n # self.agent_obs_pose[1]=self.agent_obs_pose[0]\n # self.agent_obs_pose[0]=agent_obs_pose_now\n \n # third channel is the reference path\n if replan:\n new_map = self.update_map(obs, pose)\n #print(new_map)\n self.planner.update_map(new_map)\n self.plan(pose, goal)\n path_array = np.array(self.path)\n if len(self.path):\n # transform the path to the agent local coordinate\n path_in_local_coord = path_array - pose\n # 
filter out the path out of view\n idx = np.logical_and(np.abs(path_in_local_coord[:,0])<=self.vis, \n np.abs(path_in_local_coord[:,1])<=self.vis)\n path_in_local_coord = path_in_local_coord[idx] + np.array([self.vis, self.vis])\n # set the path to 1\n\n self.obs_map[2,:,:][path_in_local_coord[:,1],path_in_local_coord[:,0]] = self.path_color\n\n # select the subgoal and draw it on the map\n if len(self.path)>self.subgoal_length:\n subgoal = self.path[self.subgoal_length]\n else:\n subgoal = goal\n\n goal_in_local_coord = subgoal-pose\n if np.abs(goal_in_local_coord[0])<=self.vis and np.abs(goal_in_local_coord[1])<=self.vis:\n goal_in_local_coord=goal_in_local_coord+np.array([self.vis, self.vis])\n self.obs_map[2,:,:][goal_in_local_coord[1],goal_in_local_coord[0]] = self.goal_color\n\n # normalized relative goal\n\n \n relative_goal = (subgoal-pose)/self.pose_normalizer\n input_val = list(relative_goal)\n theta = math.atan2(relative_goal[1],relative_goal[0])\n input_val.append(theta)\n #plt.imshow(self.obs_map)\n if debug:\n return self.obs_map.copy(), input_val, path_array\n else:\n return self.obs_map.copy(), input_val\n\n def act(self, input_img, input_val):\n state_img = torch.tensor([input_img])\n state_val = torch.tensor([input_val])\n state_img = state_img.float().to(self.device)\n state_val = state_val.float().to(self.device)\n probs, value = self.ac.forward(state_img, state_val)\n# print(probs)\n probs = torch.exp(probs)\n m = Categorical(probs)\n _, greedy_action = torch.max(probs.data, 1)\n action = m.sample()\n return greedy_action.item(), action.item(), m.log_prob(action), value, m.entropy().mean()","sub_path":"Mapper_with_BicNet/agent_no_trajectory.py","file_name":"agent_no_trajectory.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
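`compute_returns` at the top of this record is the standard discounted-return recursion with done-masks: it bootstraps from `next_value`, and the mask zeroes the future term across episode boundaries. A tiny worked example of just that recursion:

```python
# Episode ends at step 1 (mask 0), so the bootstrap value of 10.0
# flows back into step 2 only and never crosses the boundary.
rewards = [1.0, 1.0, 1.0]
masks = [1.0, 0.0, 1.0]
next_value, gamma = 10.0, 0.9

R, returns = next_value, []
for step in reversed(range(len(rewards))):
    R = rewards[step] + gamma * R * masks[step]
    returns.insert(0, R)
print(returns)  # [1.9, 1.0, 10.0]
```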
+{"seq_id":"12918907","text":"# Program to swap values of two variable without using third variable\n\ndef Swap_Values(a,b):\n print('Before Swap value of a := %d and b := %d' %(a,b))\n a = a + b\n b = a - b\n a = a - b\n print('After Swaping value of a = %d and b = %d ' %(a,b))\n\n\n# Make a call to above function\n\nSwap_Values(20,20)\n \n \n","sub_path":"Python_Classes/Python_Day1_to_4/Python_Assignment_Exercise _1/1_Swap_Values.py","file_name":"1_Swap_Values.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"350142345","text":"#!/usr/bin/env python\n\nimport sys\nimport os\n\nfrom setuptools import setup, find_packages, __version__\n\n\ntry:\n SETUP_DIRNAME = os.path.dirname(__file__)\nexcept NameError:\n # We're probably being frozen, and __file__ triggered this NameError\n # Work around this\n SETUP_DIRNAME = os.path.dirname(sys.argv[0])\n\nif SETUP_DIRNAME != '':\n os.chdir(SETUP_DIRNAME)\n\nSETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME)\n\nMETADATA = os.path.join(SETUP_DIRNAME, 'sovrin', '__metadata__.py')\n# Load the metadata using exec()\n# so we don't trigger an import of ioflo.__init__\nexec(compile(open(METADATA).read(), METADATA, 'exec'))\n\nsetup(\n name='sovrin-dev',\n version=__version__,\n description='Sovrin node',\n url='https://github.com/sovrin-foundation/sovrin',\n author=__author__,\n author_email='dev@evernym.us',\n license=__license__,\n keywords='Sovrin Genesis Transactions',\n packages=find_packages(exclude=['docs', 'docs*']),\n package_data={\n '': ['*.txt', '*.md', '*.rst', '*.json', '*.conf', '*.html',\n '*.css', '*.ico', '*.png', 'LICENSE', 'LEGAL', '*.indy']},\n include_package_data=True,\n install_requires=['indy-node==1.7.0.dev906'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"249619503","text":"#https://leetcode.com/problems/find-all-duplicates-in-an-array/description/\r\n\r\n\"\"\"\r\nGiven an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements appear twice and others appear once.\r\n\r\nFind all the elements that appear twice in this array.\r\n\r\nCould you do it without extra space and in O(n) runtime?\r\n\r\nExample:\r\nInput:\r\n[4,3,2,7,8,2,3,1]\r\n\r\nOutput:\r\n[2,3]\r\n\"\"\"\r\n\r\nclass Solution(object):\r\n def findDuplicates(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: List[int]\r\n \"\"\"\r\n temp = {}\r\n output = []\r\n \r\n for num in nums:\r\n try:\r\n temp[num] += 1\r\n output.append(num)\r\n except:\r\n temp[num] = 1\r\n\r\n return output\r\n","sub_path":"LeetCode/Medium Difficulty/Find All Duplicates in an Array.py","file_name":"Find All Duplicates in an Array.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"225636788","text":"from selenium import webdriver\nimport pandas as pd\nimport numpy as np\nimport os\nimport util\n\n\"\"\"\nTest social media links\n\"\"\"\n\n\ndef contact_test(driver):\n driver.get(\"http://127.0.0.1:5000/contact/\")\n driver.implicitly_wait(5) # seconds\n # Make sure we're accessing the correct webpage\n assert \"Thalia\" in driver.title\n util.page_wait()\n\n email_field = driver.find_element_by_id(\"email\")\n email_field.send_keys(util.email)\n\n title_field = driver.find_element_by_id(\"title\")\n title_field.send_keys(util.title)\n\n contents_field = driver.find_element_by_id(\"contents\")\n contents_field.send_keys(util.contents)\n\n send_feedback_btn = driver.find_element_by_class_name(\"send-feedback-btn\")\n driver.execute_script(\"arguments[0].click();\", send_feedback_btn)\n\n # Check message was recorded\n util.page_wait()\n project_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n df0 = pd.read_csv(os.path.join(project_dir, \"feedback.csv\"))\n submitted = np.array([util.email, util.title, util.contents])\n assert (df0 == submitted).all(1).any()\n\n\nif __name__ == \"__main__\":\n driver = webdriver.Firefox()\n contact_test(driver)\n driver.close()\n driver = webdriver.Chrome()\n contact_test(driver)\n driver.close()\n","sub_path":"Tests/Selenium/contact_test.py","file_name":"contact_test.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"610167384","text":"import requests\nimport datetime\nfrom flask import Blueprint\nfrom flask import render_template, current_app, url_for, request, redirect\n\nfrom application.models import DynamicModel\nfrom application.utils import json_serialiser, remove_dashes, convert_ordered_dicts_for_dl\nfrom application.forms import formfactory\nfrom application.extensions import db\n\nfrontend = Blueprint('frontend', __name__, template_folder='templates')\n\n\n@frontend.route('/')\ndef index():\n resp = requests.get(current_app.config['SCHEMA_API_URL'])\n resp.raise_for_status()\n schemas = [schema['name'] for schema in resp.json()]\n return render_template('index.html', schemas=schemas)\n\n\n@frontend.route('/', methods=['GET', 'POST'])\ndef dynamic_form(schema):\n schema_url = f\"{current_app.config['SCHEMA_URL']}/{schema}-schema.json\"\n title = schema.replace('-', ' ').capitalize()\n schema_json = requests.get(schema_url).json()\n form_object = formfactory(schema_json)\n if request.method == 'POST':\n form = form_object(obj=request.form)\n if form.validate():\n entry_data = form.data\n del entry_data['csrf_token']\n entry = DynamicModel(schema=schema, json_blob=json_serialiser(entry_data))\n db.session.add(entry)\n db.session.commit()\n obj = db.session.query(DynamicModel).order_by(DynamicModel.id.desc()).first()\n return redirect(url_for('frontend.check', schema=schema, row=obj.id))\n else:\n form = form_object()\n\n return render_template('dynamicform.html', form=form, schema=schema, title=title)\n\n\n@frontend.route('///check')\ndef check(schema, row):\n entry = DynamicModel.query.filter_by(id=row).first()\n print(entry.json_blob)\n title = remove_dashes(schema)\n data_list = convert_ordered_dicts_for_dl(entry.json_blob)\n\n return render_template('check.html', data=data_list, title=title)\n\n@frontend.route('///edit')\ndef edit(schema, row):\n schema = schema\n schema_url = f\"{current_app.config['SCHEMA_URL']}/{schema}-schema.json\"\n schema_json = requests.get(schema_url).json()\n form_object = formfactory(schema_json)\n entry = DynamicModel.query.filter_by(id=row).first()\n data = entry.json_blob\n for k, v in data.items():\n if \"date\" in k and v is not None:\n data[k] = datetime.datetime.strptime(v, '%Y-%m-%d').date()\n title = \"Editing the form\"\n form = form_object(**data)\n\n return render_template('dynamicform.html', form=form, schema=schema, title=title)\n\n\n# set the assetPath variable for use in\n# jinja templates\n@frontend.context_processor\ndef asset_path_context_processor():\n return {'assetPath': '/static/govuk-frontend/assets'}\n","sub_path":"application/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"500886619","text":"#!/usr/bin/env python3\n\n\nimport numpy as np\nfrom typing import List, Dict, Tuple\n\nfrom caffe2.python import workspace, core\nimport caffe2.proto.caffe2_pb2 as caffe2_pb2\n\nfrom ml.rl.caffe_utils import C2\nfrom ml.rl.preprocessing import identify_types\nfrom ml.rl.preprocessing.normalization import NormalizationParameters, \\\n MISSING_VALUE\nfrom ml.rl.preprocessing.identify_types import FEATURE_TYPES\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef sort_features_by_normalization(normalization_parameters):\n \"\"\"\n Helper function to return a sorted list from a normalization map.\n Also returns the starting index for each feature type\"\"\"\n # Sort features by feature type\n sorted_features = []\n feature_starts = []\n for feature_type in FEATURE_TYPES:\n feature_starts.append(len(sorted_features))\n for feature in normalization_parameters.keys():\n norm = normalization_parameters[feature]\n if norm.feature_type == feature_type:\n sorted_features.append(feature)\n return sorted_features, feature_starts\n\n\nclass PreprocessorNet:\n def __init__(self, net: core.Net, clip_anomalies: bool) -> None:\n self.clip_anomalies = clip_anomalies\n\n self._net = net\n self.ONE = self._net.NextBlob('ONE')\n self.ZERO = self._net.NextBlob('ZERO')\n self.MISSING = self._net.NextBlob('MISSING_VALUE')\n self.MISSING_U = self._net.NextBlob('MISSING_VALUE_U')\n self.MISSING_L = self._net.NextBlob('MISSING_VALUE_L')\n workspace.FeedBlob(self.ONE, np.array([1], dtype=np.float32))\n workspace.FeedBlob(self.ZERO, np.array([0], dtype=np.float32))\n workspace.FeedBlob(\n self.MISSING, np.array([MISSING_VALUE], dtype=np.float32)\n )\n workspace.FeedBlob(\n self.MISSING_U, np.array([MISSING_VALUE + 1e-4], dtype=np.float32)\n )\n workspace.FeedBlob(\n self.MISSING_L, np.array([MISSING_VALUE - 1e-4], dtype=np.float32)\n )\n self.MISSING_SCALAR = net.NextBlob('MISSING_SCALAR')\n workspace.FeedBlob(\n self.MISSING_SCALAR, np.array([MISSING_VALUE], dtype=np.float32)\n )\n net.GivenTensorFill(\n [], [self.MISSING_SCALAR], shape=[], values=[MISSING_VALUE]\n )\n self.parameters = [\n self.ZERO,\n self.ONE,\n self.MISSING,\n self.MISSING_L,\n self.MISSING_U,\n self.MISSING_SCALAR,\n ]\n\n def preprocess_blob(self, blob, normalization_parameters):\n \"\"\"\n Takes in a blob and its normalization parameters. 
Outputs a tuple\n whose first element is a blob containing the normalized input blob\n and whose second element contains all the parameter blobs used to\n create it.\n\n Call this from a CPU context and ensure the input blob exists in it.\n \"\"\"\n is_empty_u = self._net.NextBlob(blob + \"__isempty_u\")\n is_empty_l = self._net.NextBlob(blob + \"__isempty_l\")\n is_empty = self._net.NextBlob(blob + \"__isempty\")\n is_not_empty_bool = self._net.NextBlob(blob + \"__isnotemptybool\")\n is_not_empty = self._net.NextBlob(blob + \"__isnotempty\")\n output_blob = self._net.NextBlob(blob + \"_preprocessed\")\n zeros = self._net.NextBlob(blob + \"_zeros\")\n\n self._net.GT([blob, self.MISSING_L], [is_empty_l], broadcast=1)\n self._net.LT([blob, self.MISSING_U], [is_empty_u], broadcast=1)\n self._net.And([is_empty_l, is_empty_u], [is_empty])\n self._net.Not([is_empty], [is_not_empty_bool])\n self._net.Cast(\n [is_not_empty_bool], [is_not_empty],\n to=caffe2_pb2.TensorProto.FLOAT\n )\n for i in range(len(normalization_parameters) - 1):\n if normalization_parameters[\n i\n ].feature_type != normalization_parameters[i + 1].feature_type:\n raise Exception(\n \"Only one feature type is allowed per call to preprocess_blob!\"\n )\n feature_type = normalization_parameters[0].feature_type\n parameters: List[str] = []\n if feature_type == identify_types.BINARY:\n is_gt_zero = self._net.NextBlob(blob + \"__is_gt_zero\")\n is_lt_zero = self._net.NextBlob(blob + \"__is_lt_zero\")\n self._net.GT([blob, self.ZERO], [is_gt_zero], broadcast=1)\n self._net.LT([blob, self.ZERO], [is_lt_zero], broadcast=1)\n bool_blob = self._net.NextBlob(blob + \"__bool\")\n self._net.Or([is_gt_zero, is_lt_zero], [bool_blob])\n self._net.Cast([bool_blob], [blob], to=caffe2_pb2.TensorProto.FLOAT)\n elif feature_type == identify_types.PROBABILITY:\n self._net.Clip([blob], [blob], min=0.01, max=0.99)\n self._net.Logit([blob], [blob])\n elif feature_type == identify_types.ENUM:\n for parameter in normalization_parameters:\n possible_values = parameter.possible_values\n for x in possible_values:\n if x < 0:\n logger.fatal(\n \"Invalid enum possible value for feature: \" +\n str(x) + \" \" + str(parameter.possible_values)\n )\n raise Exception(\n \"Invalid enum possible value for feature \" + blob +\n \": \" + str(x) + \" \" +\n str(parameter.possible_values)\n )\n\n int_blob = self._net.NextBlob('int_blob')\n self._net.Cast(\n [blob],\n [int_blob],\n to=core.DataType.INT32,\n )\n\n output_int_blob = self._net.NextBlob('output_int_blob')\n feature_lengths_blob = self._net.NextBlob('feature_lengths_blob')\n feature_values_blob = self._net.NextBlob('feature_values_blob')\n one_hot_output = self._net.NextBlob('one_hot_output')\n\n # Batch one hot transform with MISSING_VALUE as a possible value\n feature_lengths = [\n len(p.possible_values) + 1 for p in normalization_parameters\n ]\n workspace.FeedBlob(\n feature_lengths_blob,\n np.array(feature_lengths, dtype=np.int32),\n )\n\n feature_values = [\n x\n for p in normalization_parameters\n for x in p.possible_values + [int(MISSING_VALUE)]\n ]\n\n workspace.FeedBlob(\n feature_values_blob,\n np.array(feature_values, dtype=np.int32),\n )\n\n parameters.extend([feature_values_blob, feature_lengths_blob])\n\n self._net.BatchOneHot(\n [int_blob, feature_lengths_blob, feature_values_blob],\n [one_hot_output],\n )\n\n # Remove missing values with a mask\n flattened_one_hot = self._net.NextBlob('flattened_one_hot')\n self._net.FlattenToVec([one_hot_output], [flattened_one_hot])\n cols_to_include = 
[\n [1] * len(p.possible_values) + [0]\n for p in normalization_parameters\n ]\n cols_to_include = [x for col in cols_to_include for x in col]\n mask = self._net.NextBlob('mask')\n workspace.FeedBlob(mask, np.array(cols_to_include, dtype=np.int32))\n parameters.append(mask)\n\n zero_vec = self._net.NextBlob('zero_vec')\n self._net.ConstantFill(\n [one_hot_output], [zero_vec],\n value=0,\n dtype=caffe2_pb2.TensorProto.INT32\n )\n\n repeated_mask_int = self._net.NextBlob('repeated_mask_int')\n repeated_mask_bool = self._net.NextBlob('repeated_mask_bool')\n\n self._net.Add([zero_vec, mask], [repeated_mask_int], broadcast=1)\n self._net.Cast(\n [repeated_mask_int], [repeated_mask_bool],\n to=core.DataType.BOOL\n )\n\n flattened_repeated_mask = self._net.NextBlob(\n 'flattened_repeated_mask'\n )\n self._net.FlattenToVec(\n [repeated_mask_bool], [flattened_repeated_mask]\n )\n\n flattened_one_hot_proc = self._net.NextBlob(\n 'flattened_one_hot_proc'\n )\n self._net.BooleanMask(\n [flattened_one_hot, flattened_repeated_mask],\n [flattened_one_hot_proc, flattened_one_hot_proc + 'indices']\n )\n\n one_hot_shape = self._net.NextBlob('one_hot_shape')\n self._net.Shape([one_hot_output], [one_hot_shape])\n target_shape = self._net.NextBlob('target_shape')\n shape_delta = self._net.NextBlob('shape_delta')\n workspace.FeedBlob(\n shape_delta,\n np.array([0, len(normalization_parameters)], dtype=np.int64)\n )\n parameters.append(shape_delta)\n self._net.Sub(\n [one_hot_shape, shape_delta], [target_shape], broadcast=1\n )\n self._net.Reshape(\n [flattened_one_hot_proc, target_shape],\n [output_int_blob, output_int_blob + '_old_shape'],\n )\n\n self._net.Cast(\n [output_int_blob],\n [output_blob],\n to=core.DataType.FLOAT,\n )\n\n return output_blob, parameters\n elif feature_type == identify_types.QUANTILE:\n # This transformation replaces a set of values with their quantile.\n # The quantile boundaries are provided in the normalization params.\n\n quantile_blob = self._net.NextBlob('quantile_blob')\n num_boundaries_blob = self._net.NextBlob('num_boundaries_blob')\n quantile_sizes = [\n len(norm.quantiles) for norm in normalization_parameters\n ]\n workspace.FeedBlob(\n num_boundaries_blob, np.array(quantile_sizes, dtype=np.int32)\n )\n parameters.append(num_boundaries_blob)\n\n quantiles_blob = self._net.NextBlob('quantiles_blob')\n quantile_values = np.array([], dtype=np.float32)\n quantile_labels = np.array([], dtype=np.float32)\n for norm in normalization_parameters:\n quantile_values = np.append(\n quantile_values, np.array(norm.quantiles, dtype=np.float32)\n )\n # TODO: Fix this: the np.unique is making this part not true.\n quantile_labels = np.append(\n quantile_labels,\n np.arange(len(norm.quantiles), dtype=np.float32) /\n float(len(norm.quantiles))\n )\n quantiles = np.vstack([quantile_values, quantile_labels]).T\n workspace.FeedBlob(quantiles_blob, quantiles)\n parameters.append(quantiles_blob)\n\n self._net.Percentile(\n [blob, quantiles_blob, num_boundaries_blob], [quantile_blob]\n )\n blob = quantile_blob\n elif feature_type == identify_types.CONTINUOUS or \\\n feature_type == identify_types.BOXCOX:\n boxcox_shifts = []\n boxcox_lambdas = []\n means = []\n stddevs = []\n\n for norm in normalization_parameters:\n if feature_type == identify_types.BOXCOX:\n assert norm.boxcox_shift is not None and \\\n norm.boxcox_lambda is not None\n boxcox_shifts.append(norm.boxcox_shift)\n boxcox_lambdas.append(norm.boxcox_lambda)\n means.append(norm.mean)\n stddevs.append(norm.stddev)\n\n if feature_type == 
identify_types.BOXCOX:\n boxcox_shift = self._net.NextBlob(\n '{}__boxcox_shift'.format(blob)\n )\n workspace.FeedBlob(\n boxcox_shift, np.array(boxcox_shifts, dtype=np.float32)\n )\n parameters.append(boxcox_shift)\n boxcox_lambda = self._net.NextBlob(\n '{}__boxcox_lambda'.format(blob)\n )\n workspace.FeedBlob(\n boxcox_lambda, np.array(boxcox_lambdas, dtype=np.float32)\n )\n parameters.append(boxcox_lambda)\n\n self._net.BatchBoxCox(\n [blob, boxcox_lambda, boxcox_shift], [blob]\n )\n\n means_blob = self._net.NextBlob('{}__preprocess_mean'.format(blob))\n workspace.FeedBlob(means_blob, np.array([means], dtype=np.float32))\n parameters.append(means_blob)\n stddevs_blob = self._net.NextBlob(\n '{}__preprocess_stddev'.format(blob)\n )\n workspace.FeedBlob(\n stddevs_blob, np.array([stddevs], dtype=np.float32)\n )\n parameters.append(stddevs_blob)\n self._net.Sub([blob, means_blob], [blob], broadcast=1, axis=0)\n self._net.Div([blob, stddevs_blob], [blob], broadcast=1, axis=0)\n if self.clip_anomalies:\n self._net.Clip([blob], [blob], min=-3.0, max=3.0)\n else:\n raise NotImplementedError(\n \"Invalid feature type: {}\".format(feature_type)\n )\n\n self._net.ConstantFill([blob], [zeros], value=0.)\n self._net.Mul([blob, is_not_empty], [output_blob])\n\n return output_blob, parameters\n\n def normalize_sparse_matrix(\n self,\n lengths_blob: str,\n keys_blob: str,\n values_blob: str,\n normalization_parameters: Dict[str, NormalizationParameters],\n blobname_prefix: str,\n split_expensive_feature_groups: bool = False,\n ) -> Tuple[str, List[str]]:\n sorted_features, _ = sort_features_by_normalization(\n normalization_parameters\n )\n int_features = [int(feature) for feature in sorted_features]\n\n dense_input, _ = C2.SparseToDenseMask(\n keys_blob,\n values_blob,\n self.MISSING_SCALAR,\n lengths_blob,\n mask=int_features\n )\n return self.normalize_dense_matrix(\n dense_input,\n sorted_features,\n normalization_parameters,\n blobname_prefix,\n split_expensive_feature_groups,\n )\n\n def normalize_dense_matrix(\n self,\n input_matrix: str,\n features: List[str],\n normalization_parameters: Dict[str, NormalizationParameters],\n blobname_prefix: str,\n split_expensive_feature_groups: bool = False,\n ) -> Tuple[str, List[str]]:\n \"\"\"\n Normalizes inputs according to parameters. Expects a dense matrix whose ith\n column corresponds to feature i.\n\n Note that the Caffe2 BatchBoxCox operator isn't implemented on CUDA GPU so\n we need to use a CPU context.\n\n :param input_matrix: Input matrix to normalize.\n :param features: Array that maps feature ids to column indices.\n :param normalization_parameters: Mapping from feature names to\n NormalizationParameters.\n :param blobname_prefix: Prefix for input blobs to norm_net.\n :param num_output_features: The number of features in an output processed\n datapoint. 
If set to None, this function will compute it.\n \"\"\"\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):\n feature_starts = self._get_type_boundaries(\n features, normalization_parameters\n )\n\n normalized_input_blobs = []\n parameters: List[str] = []\n for i, feature_type in enumerate(FEATURE_TYPES):\n start_index = feature_starts[i]\n if (i + 1) == len(FEATURE_TYPES):\n end_index = len(normalization_parameters)\n else:\n end_index = feature_starts[i + 1]\n if start_index == end_index:\n continue # No features of this type\n slices = []\n\n split_feature_group, split_intervals = \\\n self._should_split_feature_group(\n split_expensive_feature_groups,\n start_index,\n end_index,\n feature_type,\n )\n\n if split_feature_group:\n for j in range(len(split_intervals) - 1):\n slice_blob = self._get_input_blob_indexed(\n blobname_prefix,\n feature_type,\n j,\n )\n C2.net().Slice(\n [input_matrix],\n [slice_blob],\n starts=[0, split_intervals[j]],\n ends=[-1, split_intervals[j + 1]],\n )\n slices.append(\n (slice_blob, split_intervals[j], split_intervals[j + 1])\n )\n else:\n sliced_input_features = self._get_input_blob(\n blobname_prefix,\n feature_type,\n )\n\n C2.net().Slice(\n [input_matrix],\n [sliced_input_features],\n starts=[0, start_index],\n ends=[-1, end_index],\n )\n\n slices.append((sliced_input_features, start_index, end_index))\n\n for (slice_blob, start, end) in slices:\n normalized_input_blob, blob_parameters = self.preprocess_blob(\n slice_blob,\n [\n normalization_parameters[x]\n for x in features[start:end]\n ],\n )\n logger.info(\"Processed split ({}, {}) for feature type {}\".format(\n start, end, feature_type,\n ))\n parameters.extend(blob_parameters)\n normalized_input_blobs.append(normalized_input_blob)\n for i, inp in enumerate(normalized_input_blobs):\n logger.info(\"input# {}: {}\".format(i, inp))\n concatenated_input_blob, concatenated_input_blob_dim = C2.Concat(\n *normalized_input_blobs, axis=1\n )\n return concatenated_input_blob, parameters\n\n def _get_type_boundaries(\n self,\n features: List[str],\n normalization_parameters: Dict[str, NormalizationParameters],\n ) -> List[int]:\n feature_starts = []\n on_feature_type = -1\n for i, feature in enumerate(features):\n feature_type = normalization_parameters[feature].feature_type\n feature_type_index = FEATURE_TYPES.index(feature_type)\n assert feature_type_index >= on_feature_type, \\\n \"Features are not sorted by feature type!\"\n while feature_type_index > on_feature_type:\n feature_starts.append(i)\n on_feature_type += 1\n while on_feature_type < len(FEATURE_TYPES):\n feature_starts.append(len(features))\n on_feature_type += 1\n return feature_starts\n\n def _get_input_blob(self, prefix: str, feature_type: str) -> str:\n return \"{}_{}\".format(prefix, feature_type)\n\n def _get_input_blob_indexed(\n self,\n prefix: str,\n feature_type: str,\n idx: int,\n ) -> str:\n return \"{}_{}_{}\".format(prefix, feature_type, idx)\n\n def _should_split_feature_group(\n self,\n split_expensive_feature_groups: bool,\n start_index: int,\n end_index: int,\n feature_type: str,\n ) -> Tuple[bool, List[int]]:\n \"\"\"\n Since this net is CPU bound, split into independent groups, so that\n the preprocessing can be parallelized while training.\n \"\"\"\n if (not split_expensive_feature_groups):\n return False, []\n if feature_type in [identify_types.ENUM, identify_types.QUANTILE]:\n if (end_index - start_index) > 32:\n step = (end_index - start_index) // 7\n intervals = list(range(start_index, end_index, step)) + 
[end_index]\n return True, intervals\n return False, []\n","sub_path":"ml/rl/preprocessing/preprocessor_net.py","file_name":"preprocessor_net.py","file_ext":"py","file_size_in_byte":20713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
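The ENUM branch above one-hot encodes each feature over `possible_values + [MISSING_VALUE]` and then masks the MISSING column back out, so missing entries become all-zero rows. A small numpy sketch of that encode-then-drop idea (the sentinel value here is a placeholder; the real constant comes from `ml.rl.preprocessing.normalization`):

```python
import numpy as np

MISSING_VALUE = -1.0  # placeholder sentinel, not the module's actual constant


def one_hot_drop_missing(column, possible_values):
    """One-hot a single enum column; missing entries encode to all zeros."""
    values = list(possible_values) + [MISSING_VALUE]
    out = np.zeros((len(column), len(values)), dtype=np.float32)
    for row, v in enumerate(column):
        out[row, values.index(v)] = 1.0
    return out[:, :-1]  # drop the MISSING column, like the mask in the net


col = [0.0, 2.0, MISSING_VALUE, 1.0]
print(one_hot_drop_missing(col, possible_values=[0.0, 1.0, 2.0]))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 0. 0.]
#  [0. 1. 0.]]
```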
+{"seq_id":"176310090","text":"#\n# Write code to remove duplicates from an unsorted linked list.\n# FOLLOW UP: how would you solve this problem if a temporary buffer was not\n# allowed?\n#\n\n\ndef rmdup(sll):\n \"\"\"Remove duplicates by keeping track of seen elements in a buffer.\"\"\"\n seen = set()\n prev = None\n current = sll.head\n while current:\n if current.payload in seen:\n prev.next_ = current.next_\n current = current.next_\n else:\n seen.add(current.payload)\n prev = current\n current = current.next_\n return sll # for chaining\n\n\ndef rmdup2(sll):\n \"\"\"Remove duplicates without using an additional buffer.\"\"\"\n start = sll.head\n while start:\n node = start\n while node and node.next_:\n if node.next_.payload == start.payload:\n node.next_ = node.next_.next_\n node = node.next_\n start = start.next_\n return sll\n","sub_path":"chapter2/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"583576417","text":"plaintext = input(\"Enter phrase:\\n\")\ntext = plaintext.upper();\ntext = text.replace(\" \", \"\")\n\ntop_string = \"\"\nbottom_string = \"\"\nfor i in range(len(text)):\n if i % 2 == 0:\n top_string += text[i]\n else:\n bottom_string += text[i]\n\nmerged_strings = top_string + bottom_string\n\nchars_per_word = input(\"\\nThere are \" + str(len(merged_strings)) + \" chars in the coded text.\\nHow many characters do you want per word\\nin the encoded text? Note that the last word\\nmay have less characters than this value.\\n\")\nchars_per_word = int(chars_per_word)\nencoded_message = \"\"\ncurr_word = \"\"\nj = 0\n\nwhile j < chars_per_word and len(merged_strings) != 0:\n curr_word += merged_strings[0]\n merged_strings = merged_strings.replace(merged_strings[0], \"\", 1)\n\n if len(merged_strings) == 0:\n encoded_message += curr_word\n break\n\n if j == chars_per_word - 1:\n encoded_message += curr_word + \" \"\n curr_word = \"\"\n j = 0\n else:\n j += 1\n\nprint(\"\\nHere is your encoded message:\\n\")\nprint(encoded_message)","sub_path":"rail_fence_encode.py","file_name":"rail_fence_encode.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"158724432","text":"from routepy import Route, RouteNotFound\n\n\ndef callback(user_id):\n return 'from callback function: %s' % user_id\n\n\ndef before_dashboard(route, args):\n if args[0] != 8:\n return 'Filter fail'\n\n\ndef after_dashboard(route, args):\n pass\n\n\nroutes = {\n 'any': {\n '/any/!': callback\n },\n\n 'get|post': {\n '/user/!': callback\n },\n\n 'get': {\n '/data': 'tests.controller.some_func',\n\n '/users/!': {\n 'name': 'get_users',\n 'callback': 'tests.controller.some_func',\n 'group': 'dashboard'\n }\n },\n\n 'post': {\n '/route/group': {\n 'name': 'group_route',\n 'callback': 'tests.controller.some_func',\n 'group': 'api_group'\n }\n },\n\n 'groups': {\n 'api_group': {\n 'prefix': '/api/v1'\n },\n\n 'dashboard': {\n 'prefix': '/dashboard',\n 'before': before_dashboard,\n 'after': after_dashboard\n }\n }\n}\n\n\ndef test_any_route():\n r = Route(routes)\n\n assert r.run('/any/7', 'get') == 'from callback function: 7'\n assert r.run('/any/10', 'post') == 'from callback function: 10'\n assert r.run('/any/3', 'put') == 'from callback function: 3'\n assert r.run('/any/1', 'patch') == 'from callback function: 1'\n assert r.run('/any/8', 'delete') == 'from callback function: 8'\n assert r.run('/any/8', 'options') == 'from callback function: 8'\n\n\ndef test_get_and_post():\n r = Route(routes)\n\n assert r.run('/user/23', 'get') == 'from callback function: 23'\n assert r.run('/user/23', 'post') == 'from callback function: 23'\n\n try:\n r.run('/user/23', 'delete') == 'from callback function: 23'\n assert False\n except RouteNotFound:\n pass\n\n\ndef test_get():\n assert Route(routes).run('/data', 'get') == 'return from controller function'\n\n\ndef test_post_with_group():\n assert Route(routes).run('/api/v1/route/group', 'post') == 'return from controller function'\n\n\ndef test_get_with_group_filter():\n assert Route(routes).run('/dashboard/users/8', 'get') == 'Filter fail'\n","sub_path":"tests/test_from_dict.py","file_name":"test_from_dict.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"351795431","text":"import requests\nfrom bs4 import BeautifulSoup\nimport urllib\nimport re\nimport html\nfrom django.shortcuts import render, redirect\nfrom .models import News\nimport json\nimport time\n\ndef crawler(request):\n link = 'https://nba.udn.com/nba/index?gr=www'\n news_crawler(link)\n return redirect('newsapi')\n\ndef news_crawler(link):\n url = link #選擇網址\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-CN; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15' #偽裝使用者\n headers = {'User-Agent':user_agent}\n data_res = urllib.request.Request(url=url,headers=headers)\n data = urllib.request.urlopen(data_res, timeout=20)\n sp = BeautifulSoup(data, \"html.parser\")\n #標題\n title=[]\n titles = sp.find(\"div\",{\"id\":\"news\"}).findAll(\"h3\")\n for i in titles:\n title.append(i.text)\n link=[]\n links = sp.find(\"div\",{\"id\":\"news\"}).findAll(\"a\", href = re.compile('/nba/story/'))\n for i in links:\n link.append('https://nba.udn.com/'+i['href'])\n for news_title, news_link in zip(title,link):\n content_crawler(news_title,news_link)\ndef content_crawler(news_title,news_link):\n url = news_link #選擇網址\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-CN; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15' #偽裝使用者\n headers = {'User-Agent':user_agent}\n data_res = urllib.request.Request(url=url,headers=headers)\n data = urllib.request.urlopen(data_res, timeout=20)\n sp = BeautifulSoup(data, \"html.parser\")\n #標題\n content_list= []\n content = sp.find(\"div\",{\"id\":\"story_body_content\"}).findAll(\"span\")\n news_time = content[0].text\n for i in content:\n content_list.append(i)\n content_group = content_list[2].text\n content_group = content_group.lstrip(' NBAfacebooktwitterpinterest')\n content_group = content_group.lstrip(' 美聯社facebooktwitterpinterest')\n content_one,another = content_group.split('.inline-ad { position')\n nothing,content_two = another.split('); });')\n news_content = content_one+content_two\n print(news_content)\n photo = sp.find(\"figure\",{\"class\":\"photo_center photo-story\"}).find('img')['data-src']\n news_img = photo\n sql(news_title,news_link,news_time,news_content,news_img)\n\ndef sql(news_title,news_link,news_time,news_content,news_img):\n\n \n try:\n newsdb = Blog.objects.get(news_title=news_title)\n newsdb.news_link = news_link\n newsdb.news_time = news_time\n newsdb.news_content= news_content\n newsdb.news_img = news_img\n\n\n newsdb.save()\n print('更新資料')\n except:\n newsdb = News.objects.create(news_title=news_title,news_link=news_link, news_time=news_time, news_content=news_content, news_img=news_img)\n newsdb.save()\n print('成功存入一筆資料')\n","sub_path":"newsenv/news/newsapp/newscrawler.py","file_name":"newscrawler.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"155614002","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, unicode_literals, absolute_import, generators\nfrom .compat import *\n\n# Support PyQt4/PySide with either Python 2/3\ntry:\n from PyQt4 import QtCore, QtGui\nexcept ImportError:\n from PySide import QtCore, QtGui\n\n\ndef as_str(obj):\n return '' if obj is None else str(obj)\n\n\nclass Data4ExtModel(QtCore.QAbstractTableModel):\n def __init__(self, model):\n super(Data4ExtModel, self).__init__()\n self.model = model\n\n def rowCount(self, index=None):\n return max(1, self.model.shape()[0])\n\n def columnCount(self, index=None):\n return max(1, self.model.shape()[1])\n\n def data(self, index, role):\n if role != QtCore.Qt.DisplayRole or not index.isValid():\n return None\n return as_str(self.model.data(index.row(), index.column()))\n\n\nclass Header4ExtModel(QtCore.QAbstractTableModel):\n def __init__(self, model, axis, palette):\n super(Header4ExtModel, self).__init__()\n self.model = model\n self.axis = axis\n self._palette = palette\n if self.axis == 0:\n self._shape = (self.model.header_shape()[0], self.model.shape()[1])\n else:\n self._shape = (self.model.shape()[0], self.model.header_shape()[1])\n\n def rowCount(self, index=None):\n return max(1, self._shape[0])\n\n def columnCount(self, index=None):\n return max(1, self._shape[1])\n\n def headerData(self, section, orientation, role):\n if role == QtCore.Qt.TextAlignmentRole:\n if orientation == QtCore.Qt.Horizontal:\n return QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom\n else:\n return QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter\n if role != QtCore.Qt.DisplayRole:\n return None\n return section if self.axis == (orientation - 1) else \\\n self.model.name(self.axis, section)\n\n def data(self, index, role):\n if not index.isValid() or not self._shape[self.axis]:\n return None\n row, col = (index.row(), index.column()) if self.axis == 0 \\\n else (index.column(), index.row())\n if role == QtCore.Qt.BackgroundRole:\n prev = self.model.header(self.axis, col - 1, row) if col else None\n cur = self.model.header(self.axis, col, row)\n return self._palette.midlight() if prev != cur else None\n if role != QtCore.Qt.DisplayRole: return None\n return as_str(self.model.header(self.axis, col, row))\n\n\nclass Level4ExtModel(QtCore.QAbstractTableModel):\n def __init__(self, model, palette, font):\n super(Level4ExtModel, self).__init__()\n self.model = model\n self._palette = palette\n font.setBold(True)\n self._font = font\n\n def rowCount(self, index=None):\n return max(1, self.model.header_shape()[0])\n\n def columnCount(self, index=None):\n return max(1, self.model.header_shape()[1])\n\n def headerData(self, section, orientation, role):\n if role == QtCore.Qt.TextAlignmentRole:\n if orientation == QtCore.Qt.Horizontal:\n return QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom\n else:\n return QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter\n if role != QtCore.Qt.DisplayRole: return None\n return 'L' + str(section)\n\n def data(self, index, role):\n if not index.isValid():\n return None\n if role == QtCore.Qt.FontRole:\n return self._font\n if index.row() == self.model.header_shape()[0] - 1:\n if role == QtCore.Qt.DisplayRole:\n return str(self.model.name(1, index.column()))\n elif role == QtCore.Qt.BackgroundRole:\n return self._palette.shadow()\n elif index.column() == self.model.header_shape()[1] - 1:\n if role == QtCore.Qt.DisplayRole:\n return str(self.model.name(0, index.row()))\n elif role == QtCore.Qt.BackgroundRole:\n return 
self._palette.shadow()\n elif role == QtCore.Qt.BackgroundRole:\n return self._palette.background()\n return None\n\n\nclass ExtTableView(QtGui.QWidget):\n def __init__(self):\n super(ExtTableView, self).__init__()\n self._selection_rec = False\n self._model = None\n\n # We manually set the inactive highlight color to differentiate the\n # selection between the data/index/header. To actually make use of the\n # palette though, we also have to manually assign a new stock delegate\n # to each table view\n palette = self.palette()\n palette.setBrush(QtGui.QPalette.Inactive,\n QtGui.QPalette.Highlight,\n self.palette().windowText())\n self.setPalette(palette)\n\n layout = QtGui.QGridLayout()\n layout.setSpacing(0)\n layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(layout)\n self.hscroll = QtGui.QScrollBar(QtCore.Qt.Horizontal)\n self.vscroll = QtGui.QScrollBar(QtCore.Qt.Vertical)\n\n self.table_level = QtGui.QTableView()\n self.table_level.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)\n self.table_level.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_level.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_level.setFrameStyle(QtGui.QFrame.Plain)\n self.table_level.horizontalHeader().sectionResized.connect(self._index_resized)\n self.table_level.verticalHeader().sectionResized.connect(self._header_resized)\n self.table_level.setItemDelegate(QtGui.QItemDelegate())\n layout.addWidget(self.table_level, 0, 0)\n\n self.table_header = QtGui.QTableView()\n self.table_header.verticalHeader().hide()\n self.table_header.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)\n self.table_header.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_header.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_header.setHorizontalScrollMode(QtGui.QTableView.ScrollPerPixel)\n self.table_header.setHorizontalScrollBar(self.hscroll)\n self.table_header.setFrameStyle(QtGui.QFrame.Plain)\n self.table_header.horizontalHeader().sectionResized.connect(self._column_resized)\n self.table_header.setItemDelegate(QtGui.QItemDelegate())\n layout.addWidget(self.table_header, 0, 1)\n\n self.table_index = QtGui.QTableView()\n self.table_index.horizontalHeader().hide()\n self.table_index.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)\n self.table_index.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_index.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_index.setVerticalScrollMode(QtGui.QTableView.ScrollPerPixel)\n self.table_index.setVerticalScrollBar(self.vscroll)\n self.table_index.setFrameStyle(QtGui.QFrame.Plain)\n self.table_index.verticalHeader().sectionResized.connect(self._row_resized)\n self.table_index.setItemDelegate(QtGui.QItemDelegate())\n layout.addWidget(self.table_index, 1, 0)\n\n self.table_data = QtGui.QTableView()\n self.table_data.verticalHeader().hide()\n self.table_data.horizontalHeader().hide()\n self.table_data.setEditTriggers(QtGui.QTableWidget.NoEditTriggers)\n self.table_data.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_data.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.table_data.setHorizontalScrollMode(QtGui.QTableView.ScrollPerPixel)\n self.table_data.setVerticalScrollMode(QtGui.QTableView.ScrollPerPixel)\n self.table_data.setHorizontalScrollBar(self.hscroll)\n self.table_data.setVerticalScrollBar(self.vscroll)\n self.table_data.setFrameStyle(QtGui.QFrame.Plain)\n self.table_data.setItemDelegate(QtGui.QItemDelegate())\n 
layout.addWidget(self.table_data, 1, 1)\n self.setFocusProxy(self.table_data)\n\n layout.addWidget(self.hscroll, 2, 0, 2, 2)\n layout.addWidget(self.vscroll, 0, 2, 2, 2)\n\n\n def _select_columns(self, source, dest, deselect):\n if self._selection_rec: return\n self._selection_rec = True\n dsm = dest.selectionModel()\n ssm = source.selectionModel()\n dsm.clear()\n for col in (index.column() for index in ssm.selectedIndexes()):\n dsm.select(dest.model().index(0, col),\n QtGui.QItemSelectionModel.Select | QtGui.QItemSelectionModel.Columns)\n deselect.selectionModel().clear()\n self._selection_rec = False\n\n\n def _select_rows(self, source, dest, deselect):\n if self._selection_rec: return\n self._selection_rec = True\n dsm = dest.selectionModel()\n ssm = source.selectionModel()\n dsm.clear()\n for row in (index.row() for index in ssm.selectedIndexes()):\n dsm.select(dest.model().index(row, 0),\n QtGui.QItemSelectionModel.Select | QtGui.QItemSelectionModel.Rows)\n deselect.selectionModel().clear()\n self._selection_rec = False\n\n\n def model(self):\n return self._model\n\n def _column_resized(self, col, old_width, new_width):\n self.table_data.setColumnWidth(col, new_width)\n self._update_layout()\n\n def _row_resized(self, row, old_height, new_height):\n self.table_data.setRowHeight(row, new_height)\n self._update_layout()\n\n def _index_resized(self, col, old_width, new_width):\n self.table_index.setColumnWidth(col, new_width)\n self._update_layout()\n\n def _header_resized(self, row, old_height, new_height):\n self.table_header.setRowHeight(row, new_height)\n self._update_layout()\n\n def _update_layout(self):\n h_width = max(self.table_level.verticalHeader().sizeHint().width(),\n self.table_index.verticalHeader().sizeHint().width())\n self.table_level.verticalHeader().setFixedWidth(h_width)\n self.table_index.verticalHeader().setFixedWidth(h_width)\n\n last_row = self._model.header_shape()[0] - 1\n if last_row < 0:\n hdr_height = self.table_level.horizontalHeader().height()\n else:\n hdr_height = self.table_level.rowViewportPosition(last_row) + \\\n self.table_level.rowHeight(last_row) + \\\n self.table_level.horizontalHeader().height()\n self.table_header.setFixedHeight(hdr_height)\n self.table_level.setFixedHeight(hdr_height)\n\n last_col = self._model.header_shape()[1] - 1\n if last_col < 0:\n idx_width = self.table_level.verticalHeader().width()\n else:\n idx_width = self.table_level.columnViewportPosition(last_col) + \\\n self.table_level.columnWidth(last_col) + \\\n self.table_level.verticalHeader().width()\n self.table_index.setFixedWidth(idx_width)\n self.table_level.setFixedWidth(idx_width)\n\n\n def _reset_model(self, table, model):\n old_sel_model = table.selectionModel()\n table.setModel(model)\n if old_sel_model:\n del old_sel_model\n\n\n def setModel(self, model):\n self._model = model\n self._reset_model(self.table_data, Data4ExtModel(model))\n sel_model = self.table_data.selectionModel()\n sel_model.selectionChanged.connect(\n lambda *_: self._select_columns(self.table_data, self.table_header, self.table_level))\n sel_model.selectionChanged.connect(\n lambda *_: self._select_rows(self.table_data, self.table_index, self.table_level))\n\n self._reset_model(self.table_level, Level4ExtModel(model, self.palette(), self.font()))\n sel_model = self.table_level.selectionModel()\n sel_model.selectionChanged.connect(\n lambda *_: self._select_columns(self.table_level, self.table_index, self.table_data))\n sel_model.selectionChanged.connect(\n lambda *_: 
self._select_rows(self.table_level, self.table_header, self.table_data))\n\n self._reset_model(self.table_header, Header4ExtModel(model, 0, self.palette()))\n sel_model = self.table_header.selectionModel()\n sel_model.selectionChanged.connect(\n lambda *_: self._select_columns(self.table_header, self.table_data, self.table_index))\n sel_model.selectionChanged.connect(\n lambda *_: self._select_rows(self.table_header, self.table_level, self.table_index))\n\n self._reset_model(self.table_index, Header4ExtModel(model, 1, self.palette()))\n sel_model = self.table_index.selectionModel()\n sel_model.selectionChanged.connect(\n lambda *_: self._select_rows(self.table_index, self.table_data, self.table_header))\n sel_model.selectionChanged.connect(\n lambda *_: self._select_columns(self.table_index, self.table_level, self.table_header))\n\n # needs to be called after setting all table models\n self._update_layout()\n\n\n def setCurrentIndex(self, y, x):\n self.table_data.selectionModel().setCurrentIndex(\n self.table_data.model().index(y, x),\n QtGui.QItemSelectionModel.ClearAndSelect)\n\n def resizeIndexToContents(self):\n for col in range(self._model.header_shape()[1]):\n hdr_width = self.table_level.sizeHintForColumn(col)\n idx_width = self.table_index.sizeHintForColumn(col)\n if idx_width > hdr_width or hdr_width > idx_width * 2:\n width = idx_width\n else:\n width = hdr_width\n self.table_level.setColumnWidth(col, width)\n self._update_layout()\n\n def resizeColumnsToContents(self):\n for col in range(self._model.shape()[1]):\n hdr_width = self.table_header.sizeHintForColumn(col)\n data_width = self.table_data.sizeHintForColumn(col)\n if data_width > hdr_width or hdr_width > data_width * 2:\n width = data_width\n else:\n width = hdr_width\n self.table_header.setColumnWidth(col, width)\n self.resizeIndexToContents()\n\n\nclass Viewer(QtGui.QMainWindow):\n def __init__(self, *args, **kwargs):\n super(Viewer, self).__init__()\n self.table = ExtTableView()\n self.setCentralWidget(self.table)\n self.closed = False\n if args or kwargs:\n self.view(*args, **kwargs)\n\n def closeEvent(self, event):\n self.closed = True\n\n def view(self, model, hdr_rows=None, idx_cols=None,\n start_pos=None, metavar=None, title=None):\n self.table.setModel(model)\n shape = model.shape()\n\n if title is not None:\n self.setWindowTitle(title)\n else:\n title = \"{} rows, {} columns\".format(shape[0], shape[1])\n if metavar:\n title = \"{}: {}\".format(metavar, title)\n self.setWindowTitle(title)\n\n if shape[0] * shape[1] < 1e5:\n # resizing materializes the contents and might actually take longer\n # than loading all the data itself, so do it for small tables only\n self.table.resizeColumnsToContents()\n elif model.header_shape()[1] * shape[0] < 1e5:\n # similarly for the index\n self.table.resizeIndexToContents()\n\n self.table.setFocus()\n if start_pos:\n y = shape[0] - abs(start_pos[0]) if start_pos[0] < 0 else start_pos[0]\n x = shape[1] - abs(start_pos[1]) if start_pos[1] < 0 else start_pos[1]\n self.table.setCurrentIndex(y, x)\n\n self.showNormal()\n self.setWindowState(QtCore.Qt.WindowActive)\n self.closed = False\n","sub_path":"gtabview/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":15824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"165852970","text":"import os\nimport torch as T\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport numpy as np \nfrom PIL import Image\nimport matplotlib.pyplot as plt \nfrom skimage import img_as_float, img_as_ubyte, exposure\nfrom skimage.filters import threshold_otsu, threshold_local, gaussian, median, unsharp_mask\nfrom skimage.io import imread, imsave\nfrom skimage.color import rgb2gray\nfrom datetime import datetime\nimport cv2\nimport os\n\nfrom . import cnn \n\n\ncurrent_plate_predictions = []\n\n# function to return key for any value \ndef get_key(dictionary, val): \n for key, value in dictionary.items(): \n if str(val) == str(value): \n return key \n\nimsize = 28\nloader = transforms.Compose([transforms.Scale(imsize), transforms.ToTensor()])\n\n\ndef image_loader(image_name):\n\n image = loader(image_name).float()\n image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0)\n\n return image\n\n\ndef predictCharacters(characters, column_list):\n \n model = cnn.CNNNetwork(lr=0.001, batch_size=124, epochs=50, n_classes=36, load=0)\n model.load_state_dict(T.load('{base_path}/prediction_model/my_model.pth'.format(base_path=os.path.abspath(os.path.dirname(__file__))), map_location=T.device('cpu')))\n\n results = []\n \n for character in characters:\n \n character = rgb2gray(character)\n # character = exposure.adjust_gamma(character, .3)\n # character = median(character)\n character = gaussian(character, sigma=2)\n character = unsharp_mask(character, radius=1, amount=5)\n\n try: \n thresh_val = threshold_otsu(character)\n thresh1 = thresh_val > character\n\n except: \n # an unexpected error happened when trying to process the image so we break out of the function\n return \n\n # _, ax = plt.subplots(1)\n # ax.imshow(thresh1, cmap=\"gray\")\n # plt.show()\n\n image = img_as_ubyte(thresh1)\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # ret, thresh = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)\n thresh = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n\n character = Image.fromarray(thresh)\n image = image_loader(character)\n\n model.eval()\n prediction = model(image)\n prediction = T.softmax(prediction, dim=1)\n classes = T.argmax(prediction, dim=1)\n\n item = classes[0].item()\n results.append(str(item))\n\n key_dict = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19, 'K': 20, 'L': 21, 'M': 22, 'N': 23, 'O': 24, 'P': 25, 'Q': 26, 'R': 27, 'S': 28, 'T': 29, 'U': 30, 'V': 31, 'W': 32, 'X': 33, 'Y': 34, 'Z': 35} \n\n results_list = []\n for prediction in results:\n\n char = get_key(key_dict, prediction)\n results_list.append(char) \n\n columns = column_list[:]\n column_list.sort()\n \n sorted_labels = []\n\n for segmentation in column_list:\n sorted_labels.append(results_list[columns.index(segmentation)])\n\n final_plate = ''\n for pred_label in sorted_labels:\n final_plate += pred_label\n\n\n return final_plate.lower()","sub_path":"SecureVision_LPR/lpr_backend/lpr/python_lpr/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"415591719","text":"\"\"\" Target handlers \"\"\"\nfrom ..fixtures import * # considered harmful, yadda yadda\nfrom .base import BaseTarget\nfrom django.shortcuts import reverse\n\n\nclass NoopTarget(BaseTarget):\n \"\"\" Print to stdout \"\"\"\n def __init__(self):\n \"\"\" init \"\"\"\n super(NoopTarget, self).__init__('Noop')\n self.callback_full = reverse('target_browse')\n self.singleton = True\n\n def _get_message(self, note: models.Note) -> str:\n \"\"\" Format string for output\n\n >>> note = create_test_user_cfg_note()[3]\n >>> ntgt = NoopTarget()\n >>> ntgt._get_message(note)\n \"NOOP: dummy says 'dummy note' (id: 1)\"\n \"\"\"\n text = self._truncate(note.text, 140, '...')\n\n return '{}: {} says {!r} (id: {})'.format(\n self.name.upper(),\n note.user.user.username,\n text,\n note.id\n )\n\n def authorize_config(self) -> str:\n \"\"\" Short-circuit the typical OAuth process \"\"\"\n username = self.user.user.username\n self.config.identifier = username\n self.config.user_desc = username\n self.config.remote_id = ''\n self.reset_auth_uuid()\n self.config.save()\n\n return self.get_callback_uri()\n\n def post_note(self, note: models.Note) -> models.Note:\n \"\"\" Post Note to Target\n\n >>> note = create_test_user_cfg_note()[3]\n >>> note.status == models.NOTE_OPEN\n True\n >>> ntgt = NoopTarget()\n >>> test = ntgt.post_note(note)\n NOOP: dummy says 'dummy note' (id: 1)\n \"\"\"\n print(self._get_message(note))\n return note\n","sub_path":"wenn/targets/noop.py","file_name":"noop.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"550322839","text":"#!/usr/bin/env python3\n\ndef cargo_toml(pkg: dict) -> str :\n return ('''[package]\nname = \"{name}\"\nversion = \"{version}\"\nauthors = [{authors}]\nedition = \"2018\"\ndescription = \"{description}\"\nlicense = \"MIT\"\ndocumentation = \"https://docs.rs/{name}\"\nrepository = \"{repository}\"\n\n[lib]\nname = \"{name}\"\npath = \"src/lib.rs\"\n\n[[bin]]\nname = \"{name}\"\npath = \"src/main.rs\"\nrequired-features = [\"cli\"]\n\n[dependencies]\n{dependencies_lib}\n\n# Only for the CLI\n{dependencies_cli}\n\n[features]\n{dependencies_feat}'''\n .format(name=pkg.name)\n .format(version=pkg.version)\n .format(authors=pkg.authors)\n .format(description=pkg.description)\n .format(repository=pkg.repository)\n .format(dependencies_lib=pkg.dependencies_lib)\n .format(dependencies_cli=pkg.dependencies_cli)\n .format(dependencies_feat=pkg.dependencies_feat))\n\n","sub_path":"scripts/build_release.py","file_name":"build_release.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"298904156","text":"from hwt.hdlObjects.value import Value, areValues\nfrom hwt.hdlObjects.types.defs import BOOL\nfrom hwt.hdlObjects.operator import Operator\nfrom hwt.hdlObjects.operatorDefs import AllOps\n\nBoolVal = BOOL.getValueCls()\n\nclass EnumVal(Value):\n @classmethod\n def fromPy(cls, val, typeObj):\n \"\"\"\n :param val: value of python type bool or None\n :param typeObj: instance of HdlType\n \"\"\"\n if val is None:\n valid = False\n val = typeObj._allValues[0]\n else:\n assert isinstance(val, str)\n valid = True\n \n return cls(val, typeObj, valid)\n \n def _eq__val(self, other):\n eq = self.val == other.val \\\n and self.vldMask == other.vldMask == 1\n \n vldMask = int(self.vldMask == other.vldMask == 1)\n updateTime = max(self.updateTime, other.updateTime)\n return BoolVal(eq, BOOL, vldMask, updateTime)\n def _eq(self, other):\n assert self._dtype is other._dtype\n \n if areValues(self, other):\n return self._eq__val(other)\n else:\n return Operator.withRes(AllOps.EQ, [self, other], BOOL)\n \n \n def _ne__val(self, other):\n neq = self.val != other.val \\\n and self.vldMask == other.vldMask == 1\n \n vldMask = int(self.vldMask == other.vldMask == 1)\n updateTime = max(self.updateTime, other.updateTime)\n return BoolVal(neq, BOOL, vldMask, updateTime) \n \n def __ne__(self, other):\n assert self._dtype is other._dtype\n \n if areValues(self, other):\n return self._ne__val(other)\n else:\n return Operator.withRes(AllOps.NEQ, [self, other], BOOL)\n \n","sub_path":"hwt/hdlObjects/types/enumVal.py","file_name":"enumVal.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"60332457","text":"class Employee:\n company= \"Google\" #class-attribute\n salary= 100 #class-attribute\n\nharry = Employee()\nrajni = Employee()\n\n# creating instance-attribute salary for both the objects\n# harry.salary = 300 # instance-attribute\n# rajni.salary = 400 # instance-attribute\nharry.salary = 45 # creating a new instance-attribute/variable\nprint(harry.salary)\nprint(rajni.salary)\n\n# below line throws an error as address is not present in instance/class\n# print(rajni.address)","sub_path":"ch10_oops.py/ch10_3_instance_class_attribute.py","file_name":"ch10_3_instance_class_attribute.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"566805492","text":"import logging.config\nimport os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Text\n\nfrom django.conf import settings\nfrom django.utils.timezone import now\nfrom filehash import FileHash\nfrom zeep import Client\nfrom ..exceptions import SessionInfoException\n\nMEDIA_ROOT = settings.MEDIA_ROOT\n\n#\nlogger = logging.getLogger(__name__)\n\nCLIENT = Client('apps/sevovvintegration/wsdl/DIR.wsdl')\nIDENTITY = CLIENT.get_type('ns2:Identity')\nMESSAGE_INFO = CLIENT.get_type('ns2:MessageInfo')\nSESSION_INFO_RESULT = CLIENT.get_type('ns2:SessionInfo')\nARRAY_OF_MESSAGE_INFO = CLIENT.get_type('ns2:ArrayOfMessageInfo')\nDOWNLOAD_CHUNK_RESPONSE = CLIENT.get_type('ns2:DownloadChunkResponse')\nMESSAGE_VALIDATION_INFO = CLIENT.get_type('ns2:MessageValidationInfo')\n\nSHA256HASHER = FileHash('sha256')\n\nSED = 'Sed'\nPLAIN = 'Plain'\nDOCUMENT = 'Document'\n\nINPUT_MESSAGE_COUNT = 1000\nMAX_CHUNK_SIZE = 2500000\n\n\n\n\n\n\ndef get_incoming_xml_path(consumer):\n _now = now()\n\n path = os.path.join(MEDIA_ROOT,\n f'sevovv_integration/org_{consumer.id}/incoming/{_now.year}/{_now.month}/{_now.day}')\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n\nclass CompanyInfo():\n def __init__(self, id, edrpou, system_id, password=None):\n self.id = id\n self.edrpou = edrpou\n self.system_id = system_id\n self.password = password\n\n @property\n def identity(self):\n if not self.password:\n raise Exception(f'password not exist \"{self.password}\"', )\n __identity = IDENTITY(SystemId=self.system_id, Password=self.password)\n return __identity\n\n @property\n def OrgId(self):\n return self.edrpou\n\n\nclass Message():\n def __call__(self, producer: CompanyInfo, consumer: CompanyInfo, document_xml_path: Path,\n message_id: str) -> MESSAGE_INFO:\n return self.get_message_info(producer, consumer, document_xml_path, message_id)\n\n def get_document_xml_size(self, document_xml_path: Path) -> int:\n return os.path.getsize(document_xml_path)\n\n def get_message_info(self, producer: CompanyInfo, consumer: CompanyInfo, document_xml_path: Path,\n message_id: str) -> MESSAGE_INFO:\n date = datetime.today()\n message_info = MESSAGE_INFO(CreationDate=date,\n Creator=SED,\n Format=PLAIN,\n FromOrgId=producer.OrgId,\n FromSysId=producer.system_id,\n MessageId=message_id,\n SessionId=0,\n Size=self.get_document_xml_size(document_xml_path),\n ToOrgId=consumer.OrgId,\n ToSysId=consumer.system_id,\n Type=DOCUMENT)\n print('MESSAGE_INFO: ',message_info)\n return message_info\n\n\nclass SEVUploadClient():\n def __init__(self):\n self.client = CLIENT\n\n def send_document(self, document_path: Path, producer: CompanyInfo, consumer: CompanyInfo, message_id: str):\n document_xml_hash = self.get_document_hash(document_path)\n message = Message()(producer=producer, consumer=consumer,\n document_xml_path=document_path, message_id=message_id)\n session_id, max_chunk_size = self.open_uploading_session(producer, message, document_xml_hash)\n\n self.upload_document(producer, session_id, document_path, max_chunk_size)\n result = self.check_session_info(producer, session_id)\n logger.debug(result)\n dict_result = {\"Error\":result.Error,\n 'MaxPartSize': result.MaxPartSize,\n 'MessageId': result.MaxPartSize,\n 'MessageSize': result.MessageSize,\n 'SessionId': result.SessionId,\n 'Status': result.Status,\n 'TransferredBytesCount': result.TransferredBytesCount,\n 'Type': result.Type\n }\n return dict_result\n\n # def generate_message_id(self) -> Text:\n # return 
uuid.uuid4().__str__().upper()\n\n def check_session_info(self, producer: CompanyInfo, session_id: int) -> SESSION_INFO_RESULT:\n print('producer:',producer.identity, 'session_id: ',session_id)\n print('CHECKING SESSION INFO')\n session_info: SESSION_INFO_RESULT = self.client.service.GetSessionInfo(identity=producer.identity,\n sessionId=session_id)\n print('SESSION INFO:',session_info)\n if session_info.Error:\n raise SessionInfoException(session_info.Error)\n\n if not session_info.MessageSize == session_info.TransferredBytesCount:\n raise SessionInfoException('MessageSize and TransferredBytesCount is no equal')\n\n return session_info\n\n def get_document_hash(self, document_xml_path: Path) -> Text:\n return SHA256HASHER.hash_file(document_xml_path).upper()\n\n def open_uploading_session(self, producer: CompanyInfo, message: MESSAGE_INFO, document_xml_hash: str) -> (\n int, int):\n print('OPENING UPLOADING SESSION')\n print('producer identity: ',producer.identity)\n pack = self.client.service.OpenUploadingSession(identity=producer.identity, messageInfo=message,\n hash=document_xml_hash)\n print('SESSION INFO:',pack)\n return pack.SessionId, pack.MaxPartSize\n\n def upload_chunk(self, producer_identity, session_id, chunk):\n print('UPLOADING CHUNK')\n print('producer identity: ',producer_identity)\n print('session_id: ',session_id)\n response = self.client.service.UploadMessageChunk(identity=producer_identity, sessionId=session_id, messageChunk=chunk)\n print(response)\n\n def get_generator(self, document_xml_path: Path, max_chunk_size: int) -> bytes:\n print('GET GENERATOR:', document_xml_path)\n print('max_chunk_size:', max_chunk_size)\n\n with open(document_xml_path, 'rb') as entry:\n for chunk in iter(lambda: entry.read(max_chunk_size), b''):\n print('CHUNK SIZE',len(chunk))\n yield chunk\n\n def upload_document(self, producer, session_id, document_xml_path, max_chunk_size):\n print('UPLOADING DOCUMENT')\n document_xml_streaming = self.get_generator(document_xml_path, max_chunk_size)\n\n for chunk in document_xml_streaming:\n self.upload_chunk(producer.identity, session_id, chunk)\n\n\nclass SEVDownloadClient():\n def __init__(self):\n self.client = CLIENT\n\n def download_messages(self, consumer: CompanyInfo):\n logger.debug('START downloading messages')\n path = get_incoming_xml_path(consumer)\n messages = self.get_input_messages(consumer.identity)\n if messages:\n return self.process_messages(consumer.identity, messages, path)\n else:\n return []\n\n def process_messages(self, consumer, messages: ARRAY_OF_MESSAGE_INFO, path):\n documents = []\n for message in messages:\n session_id = self.open_downloading_session(consumer, message.MessageId)\n document = self.download_document(message.Size, session_id, consumer, message.MessageId, path)\n documents.append(document)\n return documents\n\n def check_document_hash(self, consumer, session_id, xml_path):\n message_validation_info = self.client.service.GetMessageValidationInfo(consumer, session_id)\n file_hash = SHA256HASHER.hash_file(xml_path).upper()\n if not file_hash == message_validation_info.Hash:\n raise Exception('xml is not valid, Hash is not equal')\n return message_validation_info.Session\n\n def download_document(self, message_size, session_id, consumer, message_id, path):\n logger.debug('DOWNLOADING DOCUMENT:', message_id)\n downloaded_bites = 0\n if message_size < MAX_CHUNK_SIZE:\n chunk_size = message_size\n\n else:\n chunk_size = MAX_CHUNK_SIZE\n\n file_name = message_id + '.xml'\n file_full_path = os.path.join(path, 
file_name)\n\n with open(file_full_path, 'wb') as file:\n while downloaded_bites < message_size:\n if message_size - downloaded_bites < chunk_size:\n chunk_size = message_size - downloaded_bites\n data = self.download_chunk(consumer, session_id, downloaded_bites, chunk_size)\n file.write(data)\n downloaded_bites += chunk_size\n\n session_info = self.check_document_hash(consumer, session_id, file_full_path)\n self.end_document_downloading(consumer, session_info, message_size)\n return file_full_path\n\n def end_document_downloading(self, consumer, session_info, downloaded_bites):\n session_info.TransferredBytesCount = downloaded_bites\n session_info.MaxPartSize = MAX_CHUNK_SIZE\n session_info.Status = 'Delivered' # Щоб закрити сесію, і виключити повторне скачування документа\n s_info = self.client.service.EndProcessingDownloadedMessage(consumer, session_info)\n logger.info(s_info)\n\n def download_chunk(self, consumer, session_id, from_position, count):\n logger.info('DOWNLOADING CHUNK')\n res = self.client.service.DownloadMessageChunk(consumer, session_id, from_position, count)\n return res.MessageChunk\n\n def get_input_messages(self, consumer) -> ARRAY_OF_MESSAGE_INFO:\n logger.debug('GETTING input messages')\n result: ARRAY_OF_MESSAGE_INFO = self.client.service.GetInputMessages(identity=consumer,\n сount=INPUT_MESSAGE_COUNT)\n logger.debug('MESSAGES', result)\n return result\n\n def open_downloading_session(self, consumer, message_id: str):\n logger.debug('OPENING DOWNLOADING SESSION')\n session_info: SESSION_INFO_RESULT = self.client.service.OpenDownloadingSession(identity=consumer,\n messageId=message_id)\n logger.debug('SESSION ID:', session_info.SessionId)\n return session_info.SessionId\n","sub_path":"apps/sevovvintegration/services/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":10394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"135302548","text":"#!python3.5\nimport os\nimport sqlite3\nimport json\nimport time\n\nclass Tables():\n\tdef __init__(self):\n\t\t#DATABASE PATH\n\t\tself.cur_dir = os.path.dirname(os.path.abspath(__file__))\n\t\tself.db_path = os.path.join(self.cur_dir, \"db\\\\plexcan.db\")\n\t\tself.con = sqlite3.connect(self.db_path) \n\t\tself.cursor = self.con.cursor()\n\t\tself.servers = [server[0].split(\":\")[0] for server in self.cursor.execute(\"SELECT servers FROM plexservers\").fetchall()]\n\t\tself.results = {}\n\n\tdef ping(self):\n\t\tfor server in self.servers:\n\t\t\treply = os.system(\"ping {} -n 1 -w 350\".format(server))\n\t\t\ttry:\n\t\t\t\tself.results[\"Pingservers\"] += [(server, reply)]\n\t\t\texcept KeyError:\n\t\t\t\tself.results[\"Pingservers\"] = [(server, reply)]\n\nif __name__ == \"__main__\":\n\t#Main\n\tt = Tables()\n\tt.ping()\n\tjson_values = json.dumps(t.results)\n\n\tprint ('Content-type: text/html; charset=utf-8\\n\\n')\n\tprint(json_values)","sub_path":"py/ping_servers.py","file_name":"ping_servers.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"559815148","text":"suma= 0\ncontador= -1 #-1 debido a que no se cuenta el numero 0\nwhile True:\n numero = int(input(\"Num?\"))\n contador = contador + 1 \n suma= suma + numero\n if numero==0:\n break\nprint(contador)\nprint(suma)\n\n\n","sub_path":"P06_P3.py","file_name":"P06_P3.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"534656952","text":"import sys\nimport cv2\nimport os\nimport time\n\nfrom PIL import Image as Img\nfrom PIL import ImageTk\nfrom tkinter import filedialog\nfrom ocr import perform_ocr\nimport gtts as gTTS\nfrom tkinter import *\n\nimport tkinter.messagebox\n# from tkinter import Tk, Label, Button, BOTTOM, LEFT, X\nimport webbrowser\n\ndef openHelp():\n\twebbrowser.open('https://github.com/NJACKWinterOfCode/Printed-Text-recognition-and-conversion/blob/master/README.md')\n\n\nclass Window:\n\n\tglobal imageWindow\n\tglobal path\n\tdef __init__(self, master):\n\t\n\t\tmaster.title(\"Optical Character Recognition\")\n\t\tmaster.configure(background='black')\n\t\tmaster.geometry(\"800x800+500+300\")\n\t\t# image window\n\t\tself.imageWindow = None\n\t\t# input image path \n\t\tself.path = ''\n\n\t\t# Load image button\n\t\tself.load_button = Button(master, text=\"Load image\", command=self.select_image, bg=\"orange\", relief=RAISED)\n\t\tself.load_button.pack(side=TOP, anchor=E)\n\n\t\t# Extract Text button\n\t\tself.extract_button = Button(master, text=\"Extract text\", command=self.extract_text, bg=\"orange\", relief=RAISED)\n\t\tself.extract_button.pack(side=TOP, anchor=E, expand=\"yes\")\n\n\t\t# Play Text button\n\t\tself.playText_button = Button(master, text=\"Play text\", bg=\"orange\", relief=RAISED)\n\t\tself.playText_button.pack(side=TOP, anchor=E)\n\n\t\t# Quit Button\n\t\tself.quit_button = Button(master, text=\"Quit\", command=master.quit, bg=\"orange\", relief=RAISED)\n\t\tself.quit_button.pack(side=TOP, anchor=E, expand=\"yes\")\n\n\t# function to extract text from input image\n\tdef extract_text(self):\n\t\tperform_ocr(self.path)\n\n\t\t# pop-up to show extraction is completed and text file is saved\n\t\ttkinter.messagebox.showinfo('Success', 'Text file saved')\n\n\t# function to select and load image \n\tdef select_image(self):\n\n\t\t# get image path\n\t\tself.path = filedialog.askopenfilename()\n\t\t\n\t\tif len(self.path) > 0:\n\t\t\timage = cv2.imread(self.path) #read image\n\t\t\timage = cv2.resize(image, (500, 600)) #resize image\n\t\t\tcv2.imwrite(\"original_image.jpg\", image) #save original image\n\t\t\t# swap channels\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\t\t\t# convert image to PIL format\n\t\t\timage = Img.fromarray(image)\n\n\t\t\t# convert image to ImageTk format\n\t\t\timage = ImageTk.PhotoImage(image)\n\n\t\t# if the image window is None, initialize it\n\n\tdef __init__(self, master):\n\t\t# frame = Frame(master, width=600, height=600)\n\t\t# frame.pack()\n\t\t\n\t\t# imageWindow = Label()\n\t\t# imageWindow.pack(side=TOP, padx=10, pady=10, anchor=S)\n\t\tmaster.title(\"Optical Character Recognition\")\n\t\tmaster.configure(background='black')\n\t\tmaster.geometry(\"800x800+500+300\")\n\t\tself.imageWindow = None\n\n\t\tself.load_button = Button(master, text=\"Load image\", command=self.select_image, bg=\"orange\", relief=RAISED)\n\t\tself.load_button.pack(side=TOP, anchor=E)\n\n\t\tself.extract_button = Button(master, text=\"Extract text\", bg=\"orange\", relief=RAISED)\n\t\tself.extract_button.pack(side=TOP, anchor=E, expand=\"yes\")\n\n\t\tself.playText_button = Button(master, text=\"Play text\", bg=\"orange\", relief=RAISED)\n\t\tself.playText_button.pack(side=TOP, anchor=E)\n\n\t\tself.quit_button = Button(master, text=\"Quit\", command=master.quit, bg=\"orange\", relief=RAISED)\n\t\tself.quit_button.pack(side=TOP, anchor=E, expand=\"yes\")\n\n\tdef select_image(self):\n\n\t\tpath = filedialog.askopenfilename()\n\t\t# 
imageWindow\n\t\tif len(path) > 0:\n\t\t\timage = cv2.imread(path)\n\t\t\timage = cv2.resize(image, (500, 600))\n\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\t\t\timage = Img.fromarray(image)\n\n\t\t\timage = ImageTk.PhotoImage(image)\n\n\n\t\tif self.imageWindow is None:\n\t\t\tself.imageWindow = Label(image=image)\n\t\t\tself.imageWindow.image = image\n\t\t\tself.imageWindow.pack(side=LEFT)\n\n\n\t\telse:\n\t\t\tself.imageWindow.configure(image=image)\n\t\t\tself.imageWindow.image = image\n\n\nroot = Tk()\n\nmenu = Menu(root)\nroot.config(menu=menu)\nmenu.add_command(label=\"Help\", command=openHelp)\n\nb = Window(root)\nroot.mainloop()\n","sub_path":"tkinter_gui.py","file_name":"tkinter_gui.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"532702074","text":"import cv2\nimport os\nimport time\nimport pytesseract\npytesseract.pytesseract.tesseract_cmd=r\"C:\\\\Users\\\\techno\\\\Desktop\\\\tessarest-ocr\\\\tesseract.exe\"\ncam = cv2.VideoCapture(0)\ntry:\n if not os.path.exists('data'):\n os.makedirs('data')\nexcept OSError:\n print('Error: Creating directory of data')\ncurrentframe = 0\nwhile(True):\n ret,frame = cam.read()\n if ret:\n name = './data/frame' + str(currentframe) + '.png'\n #print ('Creating...' + name)\n cv2.imwrite(name, frame)\n img=cv2.imread(r\"C:\\\\Users\\\\techno\\\\data\\\\frame{}.png\".format(currentframe))\n text=pytesseract.image_to_string(img)\n with open('techno.txt','a') as kl:\n kl.write(text)\n kl.write(\"\\n\")\n print(text)\n currentframe += 1\n time.sleep(.01)\n else:\n break\ncam.release()\ncv2.destroyAllWindows() \n\n\n\n \n\n\n\n","sub_path":"python code/main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"515754328","text":"from __future__ import division, print_function, absolute_import\n\nfrom SSINS import util\nfrom SSINS.data import DATA_PATH\nimport nose.tools as nt\nimport os\nimport numpy as np\nimport scipy.stats\n\n\ndef test_obslist():\n obsfile = os.path.join(DATA_PATH, 'obs_list.txt')\n obslist_test = ['1061313008', '1061313128', '1061318864', '1061318984']\n obslist = util.make_obslist(obsfile)\n nt.eq_(obslist_test, obslist)\n\n\ndef test_match_fraction():\n # Make up a simple event list belonging to some fictitious data with 5 times and 100 frequencies\n events = np.array([(1, 0, slice(0, 10)), (2, 0, slice(0, 10)), (3, 0, slice(10, 20))])\n Ntimes = 5\n Nfreqs = 100\n # Make the event_fraction dictionary\n event_frac = util.event_fraction(events, Nfreqs, Ntimes)\n nt.ok_(event_frac == {(0, 10): 2 / 5, (10, 20): 1 / 5})\n\n\ndef test_chisq():\n # Use bins that are typical in match_filter case\n bins = np.arange(-4, 5)\n # Make up some counts\n counts = np.array([1, 2, 5, 10, 10, 5, 2, 1])\n # Check default settings\n stat, p = util.chisq(counts, bins)\n # These happen to be the answers\n nt.ok_(np.allclose((stat, p), (3.476106234440926, 0.06226107945215504)))\n # Check expected counts weighting\n stat, p = util.chisq(counts, bins, weight='exp', thresh=5)\n # These happen to be the answers\n nt.ok_(np.allclose((stat, p), (2.6882672697527807, 0.1010896885610924)))\n","sub_path":"SSINS/tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"439845547","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n while head != None and head.val == val:\n head = head.next\n cp = head\n if cp == None or cp.next == None:\n return head\n while cp.next:\n if cp.next.val == val:\n cp.next = cp.next.next\n else:\n cp = cp.next\n return head","sub_path":"203.py","file_name":"203.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"65151422","text":"print(\"Elija una opcion: \\n1. __servidor1 \\n2. 3servidor\")\r\nopcion = input()\r\npalabra = ''\r\npos = 0\r\na = '_'\r\ndef opcion_palabra():\r\n if int(opcion) == 1:\r\n return '__servidor1'\r\n elif int(opcion) == 2:\r\n return '3servidor'\r\n else:\r\n print(\"Escoge una opcion valida\")\r\n opcion_palabra()\r\n\r\ndef estado0(pal,pos):\r\n if pal[pos] == '_':\r\n pos += 1\r\n estado1(pal,pos)\r\n else:\r\n print(\"Eror de sintaxis\")\r\n\r\ndef estado1(pal,pos):\r\n try:\r\n n = int(pal[pos])\r\n estado3(n,pos)\r\n except:\r\n if pal[pos] == '_':\r\n pos += 1\r\n estado1(pal,pos)\r\n elif type(pal[pos]) == str:\r\n pos += 1\r\n estado1(pal,pos)\r\n\r\ndef estado3(pal,pos):\r\n if type(pal) == int:\r\n print(\"La cadena es correcta\")\r\n else:\r\n print(\"Incorrecto\")\r\n\r\n\r\npalabra = opcion_palabra()\r\nx = estado0(palabra,pos)\r\n","sub_path":"Tarea5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"625104129","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time, os\n\n#updated on 2018-08-04 #2\n\nwebsites = {\n \"AML\" : \"http://generic-url.com\",\n \"KYC\" : \"http://generic-url.com\",\n \"CFI\" : \"http://generic-url.com\"\n}\n\ndriver = webdriver.Chrome(desired_capabilities = {'chromeOptions' : {'useAutomationExtension' : False}})\ndriver.set_page_load_timeout(30)\nwaitdriver = WebDriverWait(driver, 30)\nsend_alert = []\nfor key, value in websites.items():\n try:\n driver.get(value)\n waitdriver.until(EC.presence_of_element_located((By.ID, \"text\"))).send_keys(os.getenv(\"username\"))\n waitdriver.until(EC.presence_of_element_located((By.ID, \"password\"))).send_keys(os.getenv(\"passwordas\"))\n waitdriver.until(EC.presence_of_element_located((By.ID, \"Logon\"))).click()\n except:\n print(\"Couldn't login to {}\".format(key))\n send_alert.append(\"{} - {}\".format(key, value))\n continue\n try:\n waitdriver.until(EC.presence_of_element_located((By.XPATH, \"/html/body/div[2]/table[1]/tbody/tr/td[2]/a[1]\"))).click()\n except:\n print(\"Logoff not found. Checking for internal server errors...\")\n try:\n waitdriver.until(EC.presence_of_element_located((By.ID, \"Help\")))\n except:\n print(\"Banner not found. Most likely an HTTP error\")\n send_alert.append(\"{} - {}\".format(key, value))\n time.sleep(3)\ndriver.quit()\nif len(send_alert) > 0:\n print(\"Reporting issues...\")\n from servicenow import define_options, execute\n execute(define_options(description=\"Couldn't successfully perform AML daily health check for following links:\\n{}\\n\\\n Follow escalation path as advised in:\\n https://confluence.com\".format(\"\\n\".join(send_alert))))\n exit(1)\n","sub_path":"aml-check/aml.py","file_name":"aml.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"226184447","text":"# coding = utf-8\n\n\"\"\" Hybrid Res-UNet architecture with regularization of number of predicted boundary pixels\n the contract path is 3D while the expansion path is 2D.\n For input, slices before and after current slice are concatenated as a volume.\n For output, annotation of current slice is compared with the prediction (single slice)\n\"\"\"\n\nimport torch\nfrom torch import nn\nfrom .utils import _initialize_weights_2d, _initialize_weights_3d\nimport torch.nn.functional as F\n\n# 3D convolution\ndef conv_333(in_channels, out_channels, stride=1, padding=1):\n # here only the X and Y directions are padded and no padding along Z direction\n # in this way, we can make sure the central slice of the input volume will remain central\n return nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=stride,\n padding=padding, bias=True)\n\nclass ResBlock3D(nn.Module):\n \"\"\" residual block \"\"\"\n def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):\n super().__init__()\n self.downsample = downsample\n self.bn1 = nn.BatchNorm3d(in_channels)\n padding = 1 if stride == 1 else (0, 1, 1)\n self.conv1 = conv_333(in_channels, out_channels, stride=stride, padding=padding)\n self.bn2 = nn.BatchNorm3d(out_channels)\n self.conv2 = conv_333(out_channels, out_channels, stride=1, padding=1)\n self.relu = nn.ReLU(inplace=True)\n self.dp = nn.Dropout3d(p=p)\n\n if stride != 1 or in_channels != out_channels:\n self.downsample = nn.Sequential(\n nn.Conv3d(in_channels, out_channels,\n kernel_size=3, stride=stride, bias=False, padding=padding),\n nn.BatchNorm3d(out_channels)\n )\n\n def forward(self, x):\n residual = x\n # print(\"input residual size: {}\".format(residual.size()))\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.dp(out)\n if self.downsample is not None:\n residual = self.downsample(residual)\n # print(\"output residual size: {}\".format(residual.size()))\n # print(\"output size: {}\".format(out.size()))\n out += residual\n\n return out\n\n# 2D convolution\ndef conv_33(in_channels, out_channels, stride=1):\n # since BN is used, bias is not necessary\n return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\nclass ResBlock2D(nn.Module):\n \"\"\" 2D residual block \"\"\"\n def __init__(self, in_channels, out_channels, stride=1, p=0.5, downsample=None):\n super().__init__()\n self.downsample = downsample\n self.bn1 = nn.BatchNorm2d(in_channels)\n self.conv1 = conv_33(in_channels, out_channels, stride=stride)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.conv2 = conv_33(out_channels, out_channels, stride=1)\n self.relu = nn.ReLU(inplace=True)\n self.dp = nn.Dropout2d(p=p)\n if stride != 1 or in_channels != out_channels:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_channels, out_channels,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n\n def forward(self, x):\n residual = x\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n out = self.dp(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.dp(out)\n if self.downsample is not None:\n residual = self.downsample(residual)\n out += residual\n\n return out\n\nclass UpConv(nn.Module):\n \"\"\" up convolution \"\"\"\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.transconv = nn.ConvTranspose2d(in_channels, out_channels, 
kernel_size=2,\n stride=2, padding=0)\n\n def forward(self, skip, x):\n \"\"\" skip is 3D volume and x is 2D slice, central slice of skip is concatenated with x \"\"\"\n central_inx = skip.size(2) // 2\n skip_slice = skip[:, :, central_inx]\n\n out = self.transconv(x)\n out = torch.cat([skip_slice, out], 1)\n\n return out\n\nclass ResUNet(nn.Module):\n \"\"\" Res UNet class \"\"\"\n def __init__(self, in_channels=1, out_channels=5, n_slices=31, input_size=96, down_blocks=[32, 64, 128, 256],\n up_blocks = [256, 128, 64, 32], bottleneck = 512, p=0.5):\n super().__init__()\n self.down_blocks = down_blocks\n self.up_blocks = up_blocks\n self.n_slices = n_slices\n self.input_size = input_size\n\n self.conv1 = nn.Conv3d(in_channels, self.down_blocks[0], 3, padding=1)\n\n # contract path\n self.BlocksDown = nn.ModuleList([])\n for b_inx, down_block in enumerate(self.down_blocks):\n output_channel = self.down_blocks[b_inx]\n if b_inx == 0:\n input_channel = self.down_blocks[0]\n self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=1, p=p))\n else:\n input_channel = self.down_blocks[b_inx-1]\n self.BlocksDown.append(ResBlock3D(input_channel, output_channel, stride=2, p=p))\n\n # bottleneck block\n # make sure there is only single one slice in current layer\n self.bottleneck = ResBlock3D(self.down_blocks[-1], bottleneck, stride=2, p=p)\n scale = 2 ** len(down_blocks)\n self.conv_n11 = nn.Conv3d(bottleneck, bottleneck, kernel_size=(n_slices//scale, 1, 1))\n\n # expansive path\n self.BlocksUp = nn.ModuleList([])\n self.TransUpBlocks = nn.ModuleList([])\n for b_inx, up_block in enumerate(self.up_blocks):\n input_channel = bottleneck if b_inx == 0 else self.up_blocks[b_inx-1]\n output_channel = self.up_blocks[b_inx]\n self.TransUpBlocks.append(UpConv(input_channel, output_channel))\n self.BlocksUp.append(ResBlock2D(input_channel, output_channel, stride=1, p=p))\n\n # final convolution layer\n self.fl = nn.Conv2d(self.up_blocks[-1], out_channels, kernel_size=1)\n\n # initialize weights\n _initialize_weights_3d(self)\n _initialize_weights_2d(self)\n\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n out = self.conv1(x)\n # print(out.size())\n skip_connections = []\n for down_block in self.BlocksDown:\n out = down_block(out)\n skip_connections.append(out)\n # print(out.size())\n\n out = self.bottleneck(out)\n # if out.size(2) > 1:\n out = self.conv_n11(out) # fuse several slices in the bottleneck layer\n\n for b_inx in range(len(self.up_blocks)):\n skip = skip_connections.pop()\n if b_inx == 0:\n out = self.TransUpBlocks[b_inx](skip, out[:, :, 0])\n else:\n out = self.TransUpBlocks[b_inx](skip, out)\n\n out = self.BlocksUp[b_inx](out)\n\n output = self.fl(out)\n return output\n\ndef ResUNet28(in_channels, out_channels, n_slices=63, input_size=96, p=0.0):\n return ResUNet(in_channels=in_channels, out_channels=out_channels, n_slices=n_slices, input_size=input_size,\n down_blocks=[32, 64, 128, 256, 512], up_blocks = [512, 256, 128, 64, 32], bottleneck = 1024, p=p)\n\ndef ResUNet23(in_channels, out_channels, n_slices=31, input_size=96, p=0.0):\n return ResUNet(in_channels=in_channels, out_channels=out_channels, n_slices=n_slices, input_size=input_size,\n down_blocks=[32, 64, 128, 256], up_blocks = [256, 128, 64, 32], bottleneck = 512, p=p)\n\ndef ResUNet18(in_channels, out_channels, n_slices=15, input_size=96, p=0.0):\n return ResUNet(in_channels=in_channels, out_channels=out_channels, n_slices=n_slices, input_size=input_size,\n down_blocks=[32, 64, 128], up_blocks = [128, 64, 
32], bottleneck = 256, p=p)\n\nif __name__ == \"__main__\":\n in_channels = 1\n out_channels = 3\n n_slices = 15\n input_size = 96\n unet = ResUNet18(in_channels, out_channels, n_slices=n_slices, input_size=input_size)\n print(unet)\n x = torch.FloatTensor(6, in_channels, n_slices, input_size, input_size) # the smallest patch size is 12 * 12\n y = unet(x)","sub_path":"hybrid/models/hybrid_res_unet.py","file_name":"hybrid_res_unet.py","file_ext":"py","file_size_in_byte":8457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"110917391","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/adminlteui/migrations/0002_options_valid.py\n# Compiled at: 2020-01-21 04:26:45\n# Size of source mod 2**32: 411 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('django_admin_settings', '0001_initial')]\n operations = [\n migrations.AddField(model_name='options',\n name='valid',\n field=models.BooleanField(default=True, verbose_name='Valid'))]","sub_path":"pycfiles/django_adminlte_ui-1.5.0-py3.6/0002_options_valid.cpython-36.py","file_name":"0002_options_valid.cpython-36.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"73448043","text":"def indexOf(search, haystack):\n indx = -1\n for i, c in enumerate(haystack):\n if c == search:\n indx = i\n \n return indx\n\nclass Solution(object):\n def isValid(self, s):\n # Must be even length\n if len(s) % 2 != 0:\n return False\n\n starters = [\"(\", \"[\", \"{\"]\n enders = [\")\", \"]\", \"}\"]\n\n stack_arr = []\n \n for i, c in enumerate(s):\n if c in starters:\n stack_arr.append(c)\n elif c in enders:\n indexOfClosing = indexOf(c, enders)\n if len(stack_arr) == 0:\n return False\n poped = stack_arr.pop()\n if indexOf(poped, starters) != indexOfClosing:\n return False\n else:\n # replace the closing & openning brackets\n s = s[:indexOfClosing] + \"\" + s[indexOfClosing + 1:]\n s = s[:indexOf(poped, starters)] + \"\" + s[indexOf(poped, starters) + 1:]\n \n return len(stack_arr) == 0\n ","sub_path":"validParentheses/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"157105279","text":"class Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if not strs:\n return ''\n strs = sorted(strs)\n first_str, last_str = strs[0], strs[-1]\n index = 0\n commons_chars = []\n while index < len(first_str) and index < len(last_str):\n if first_str[index] == last_str[index]:\n commons_chars.append(first_str[index])\n index += 1\n else:\n break\n return ''.join(commons_chars)\n","sub_path":"PyLang/014LongestCommonPrefix.py","file_name":"014LongestCommonPrefix.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"482781946","text":"#!/usr/bin/env python\n\nimport argparse\nfrom __init__ import md5_dir\n\nparser = argparse.ArgumentParser(description=\"\"\"\n Compute md5 hashes of all the files in a directory and its subdirectories\n and compare them to the values in md5sums. The md5sums file itself is not\n checked.\"\"\")\n\nparser.add_argument('dir', metavar='DIR', nargs='?',\n help='a directory containing files to be checked')\nparser.add_argument('--ignored_paths', nargs='?',\n help='a comma-separated list of files to be ignored')\n\nargs = parser.parse_args()\nif args.dir == None:\n parser.print_help()\n exit(0)\n\nignored_paths = []\nif args.ignored_paths != None:\n ignored_paths = args.ignored_paths.split(',')\nmd5_dir(args.dir, ignored_paths=ignored_paths)\n","sub_path":"md5_dir.py","file_name":"md5_dir.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"454977355","text":"# encoding:utf8\nimport logging\n\nlogging.basicConfig(level=logging.NOTSET)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogfile = '/home/wsj/log/daili.log'\nfh = logging.FileHandler(logfile, mode='a')\nfh.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n","sub_path":"lib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"27282079","text":"import logging\nfrom peewee import *\nfrom customer_model import Customer, DB\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger(__name__)\n\n\ndef add_customer(name, lastname, home_address, phone_number,\n email_address, status, poverty_score):\n '''atomic() is easier than transaction()'''\n try:\n with DB.atomic():\n Customer.create(\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=poverty_score\n )\n except IntegrityError:\n LOGGER.warning(\"Name %s is already taken.\", name)\n\n\ndef search_customer(find_name):\n '''locate customer by name'''\n try:\n result = Customer.select().where(Customer.name == find_name).dicts().get()\n except DoesNotExist:\n result = f'{find_name} does not exist'\n return result\n\n# http://docs.peewee-orm.com/en/latest/peewee/query_examples.html?highlight=delete#delete-a-member-from-the-cd-members-table\ndef delete_customer(delete_name):\n '''delete a customer by id.'''\n try:\n with DB.atomic():\n Customer.delete().where(Customer.name == delete_name).execute()\n result = f'Succesfully deleted {delete_name}'\n except IntegrityError:\n result = f'{delete_name} does not exist'\n return result\n\n\n# http://docs.peewee-orm.com/en/latest/peewee/query_builder.html?highlight=update#update-queries\ndef update_customer_credit(name, credit_limit):\n try:\n Customer.update(poverty_score=credit_limit).where(Customer.name == name).execute()\n result = f'{name} now has a credit score of {credit_limit}'\n except DoesNotExist:\n result = f'{name} does not exist'\n return result\n\n\n# http://docs.peewee-orm.com/en/latest/peewee/query_examples.html#count-the-number-of-facilities\ndef list_active_customers():\n '''return an integer with the number of customers status is active.'''\n Customer.select().where(Customer.status == 1).count()\n","sub_path":"students/ScotchWSplenda/lesson03/assignment/Archive/basic_ops.py","file_name":"basic_ops.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"505322811","text":"#!/usr/bin/env python\n# coding=utf-8\n\n# Bibliotheken importieren\nfrom lib_oled96 import ssd1306\nfrom smbus import SMBus\nimport time\nfrom pyA20.gpio import gpio\nfrom pyA20.gpio import port\n\n\nimport dht\nimport time\nimport datetime\n\nimport sys\nimport numpy as np\n\na = 17.271\nb = 237.7 # degC\n\n#dstmp = open(\"/root/ramdisk/dstmp.txt\",\"w\")\n#dhttmp = open(\"/root/ramdisk/dhttmp.txt\",\"w\")\n\n# initialize GPIO\nPIN2 = port.PG7 #bylo zmeneno na PG7 kvuli chybe na schematu data cidla DHT\ngpio.init()\n\n# read data using pin\ninstance = dht.DHT(pin=PIN2)\n\n\nfrom PIL import ImageFont, ImageDraw, Image\n\n\n\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets\nip_address = s.getsockname()[0]\n\n\nhostname = socket.gethostname()\n\ntemp_sensor = '/sys/devices/w1_bus_master1/28-000006dc20e4/w1_slave' #---------------sekce cteni DS18D20--------------------------\ndef temp_raw():\n f = open(temp_sensor,'r')\n lines = f.readlines()\n f.close()\n return lines\n\ndef read_temp():\n lines = temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = temp_raw()\n temp_output = lines[1].find('t=')\n if temp_output != -1:\n temp_string = lines[1][temp_output+2:]\n temp_c = float(temp_string)/100.0 #puvodne bylo /1000\n temp_c = int(temp_c)\n temp_c=temp_c/10; #doplneny radek\n return temp_c\n\n\ndef dewpoint_approximation(T,RH):\n Td = (b * gamma(T,RH)) / (a - gamma(T,RH))\n return Td\n \n \ndef gamma(T,RH):\n g = (a * T / (b + T)) + np.log(RH/100.0)\n return g\n\n\n #---------------konec sekce cteni DS18D20--------------------------\nfont = ImageFont.load_default()\n# Display einrichten\ni2cbus = SMBus(0) # 0 = Raspberry Pi 1, 1 = Raspberry Pi > 1\noled = ssd1306(i2cbus)\n\nfont = ImageFont.load_default()\nfont = ImageFont.truetype('/root/lib_oled96/FreeSerif.ttf', 14)\n# Ein paar Abkürzungen, um den Code zu entschlacken\ndraw = oled.canvas\n\n# Display zum Start löschen\n\n\nwhile True:\n result = instance.read()\n if result.is_valid():\n oled.cls()\n oled.display()\n\n #-----------------------ctenu hodnot humidity DHT-------------------- \n dhthum = open(\"/root/ramdisk/dhthum.txt\",\"a\")\n\n temp = (format(result.temperature))\n #puf = int(result.humidity)/10\n #hum = (format(puf))\n\n hum = (format(result.humidity))\n #-----------------------konec ctenu hodnot humidity DHT---------------\n\n\n #-----------------------zapis hodnot na displej-----------------------\n #teplota = read_temp();\n teplota = str(read_temp());\n draw.text((0, 0), \"IP:\", font=font, fill=1) #IP adresas\n draw.text((20, 0), ip_address, font=font, fill=1)\n draw.text((0, 16), \"DHT temp:\", font=font, fill=1)\n draw.text((70, 16), temp, font=font, fill=1)\n draw.text((0, 32), \"DHT hum:\", font=font, fill=1)\n draw.text((70, 32), hum, font=font, fill=1)\n draw.text((0, 48), \"DS temp:\", font=font, fill=1)\n draw.text((70, 48), teplota, font=font, fill=1)\n oled.display()\n time.sleep(2)\n\n\n\n\n #-------------------------z8pis hodnt do RAM disku----------------------\n if not teplota:\n print('Neni teplota na DS')\n else:\n dstmp = open(\"/root/ramdisk/dstmp.txt\",\"w\")\n dstmp.write(teplota)\n dstmp.close()\n \n\n if not temp:\n print('Neni teplota na DHT')\n else:\n dhttmp = open(\"/root/ramdisk/dhttmp.txt\",\"w\")\n dhttmp.write(temp)\n dhttmp.close()\n T=float(temp)\n\n if not dhthum:\n print('Neni vlhkost na DHT')\n else:\n dhthum = 
open(\"/root/ramdisk/dhthum.txt\",\"w\")\n dhthum.write(hum)\n dhthum.close()\n RH = float(hum)\n \n Td = dewpoint_approximation(T,RH)\n print ('T, RH',T,RH)\n print ('Td=',Td)\n time.sleep(2)\n","sub_path":"DATA_v1/oled.py","file_name":"oled.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"532114836","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /home/oshara/GSoC/DBPedia/airML/src/airML.py\n# Compiled at: 2020-05-08 12:36:03\n# Size of source mod 2**32: 373 bytes\nimport sys, os\n\ndef kbox_execute():\n JAR_EXECUTE = 'java -jar kbox.jar'\n if len(sys.argv) == 1:\n returned_output = os.system(JAR_EXECUTE)\n else:\n arg = ' '.join(sys.argv[1:])\n execute = JAR_EXECUTE + ' ' + arg\n returned_output = os.system(execute)\n\n\nif __name__ == '__main__':\n kbox_execute()","sub_path":"pycfiles/airobot-0.1.0-py3-none-any/airML.cpython-37.py","file_name":"airML.cpython-37.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"428784078","text":"from classifying_models.db_data_per_country import getRawDataFromDB\nfrom classifying_models.classifying_and_saving_radius_models import classifyingAndSavingRadiusModels\n\n#irelandConnectionString = 'mongodb+srv://epcuser2:pw12epc559@epcfull-2jvr7.mongodb.net/test?retryWrites=true&w=majority'\nirelandConnectionString = 'mongodb+srv://ire_1:t9YjjOigsWGmPTJJ@cluster0.fxx98.mongodb.net/EPC?retryWrites=true&w=majority'\n\nqueryThermalDataFields = 'ratedDwelling.thermalData.finalEnergyDemand.value'\nirelandCountryString = \"Ireland\"\n\n#irelandQueryLimit = 80000\nirelandQueryLimit = 90000\n\ndbData = getRawDataFromDB(\n irelandCountryString, irelandConnectionString, queryThermalDataFields, irelandQueryLimit)\n\nclassifyingAndSavingRadiusModels(\n irelandCountryString.lower(), queryThermalDataFields, dbData)\n","sub_path":"src/at/uibk/epc/classifying/knn/classifying_and_saving_radius_models_ireland.py","file_name":"classifying_and_saving_radius_models_ireland.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"183348646","text":"from kafka import KafkaConsumer\nfrom pymongo import MongoClient\nfrom json import loads\nfrom pprint import pprint\nfrom sys import stdout\nfrom time import sleep\nimport math\nimport pymysql\n\nSQL_PWD = \"pwd\"\n\nCEND = '\\033[0m'\nCRED = '\\033[91m'\nCGREEN = '\\33[32m'\nCYELLOW = '\\33[33m'\nCBLUE = '\\33[34m'\nCVIOLET = '\\33[35m'\n\n\ndef registerWhale(addr):\n conn = pymysql.connect(\"127.0.0.1\", \"root\", SQL_PWD, \"ETH\")\n cur = conn.cursor()\n _str = f\"INSERT INTO iswhale (`address`, `whale`) VALUES ('{addr}', True);\"\n cur.execute(_str)\n conn.commit()\n cur.close()\n conn.close()\n print(f\"Whale {addr} registered!\")\n\n\ndef isWhales(addr):\n conn = pymysql.connect(\"127.0.0.1\", \"root\", SQL_PWD, \"ETH\")\n cur = conn.cursor()\n _str = f\"SELECT id FROM iswhale WHERE address = '{addr}';\"\n cur.execute(_str)\n rep = [c for c in cur]\n cur.close()\n conn.close()\n if len(rep) == 0:\n print(\"..Unrecorded fish...Taking note of that one..\")\n registerWhale(addr)\n return \"New Unknown Whale\"\n elif rep[0][0] is not None:\n return rep[0][0]\n else:\n return \"Unknown Whale\"\n\n\ndef logs(tx):\n if 100 < int(tx['value'], 16) / 1e+18 < 1000:\n COLOR = CGREEN\n elif 1000 < int(tx['value'], 16) / 1e+18 < 10000:\n COLOR = CBLUE\n elif 10000 < int(tx['value'], 16) / 1e+18:\n COLOR = CRED\n else:\n COLOR = CVIOLET\n\n print(f'{COLOR} transaction occured at block {int(tx[\"block\"], 16)}, {tx[\"from\"]} send {int(tx[\"value\"], 16) / math.pow(10, 18)} '\n f'ETH priced at {tx[\"price\"]} to {tx[\"to\"]} {CEND}')\n\n\ndef dumpInSql(tx):\n conn = pymysql.connect(\"127.0.0.1\", \"root\", SQL_PWD, \"ETH\")\n cur = conn.cursor()\n _str = f\"INSERT INTO suspect_tx (`from`, `value`, `to`, `blockNo`, `price`) VALUES \" \\\n f\"({tx['from']}, {int(tx['value'], 16) / math.pow(10, 18)}, {tx['to']}, {int(tx['block'], 16)}, {tx['price']});\"\n cur.execute(_str)\n conn.commit()\n cur.close()\n conn.close()\n print('...Tx stored into sql...')\n\n\nif __name__ == '__main__':\n consumerTx = KafkaConsumer('Tx', bootstrap_servers=['localhost:9092'], auto_offset_reset='earliest',\n enable_auto_commit=True,\n group_id='my-group', value_deserializer=lambda x: loads(x.decode('utf-8')))\n\n print(\"waiting for messages... \")\n\n for message in consumerTx:\n tx = message.value\n\n if int(tx['value'], 16) / 1e+18 > 10000:\n '''Record the transaction if value > 10000'''\n #dumpInSql(tx)\n\n '''Check if `from` or `to` are known whale, if so, name them; if not take record of it'''\n tx['from'] = isWhales(tx['from'])\n tx['to'] = isWhales(tx['to'])\n\n '''display logs'''\n logs(tx)\n\n else:\n print('\\nnot interresting tx... Not recorded... Next...')\n logs(tx)\n\n\n\n","sub_path":"non-repertorie/script/v1/consumer2.py","file_name":"consumer2.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"336893955","text":"'''\nHelper.py\nSteven Laan 6036031\nAuke Wiggers 6036163\n\nAuxiliary file, containing tokens for unknown words, numerics, functions that \nare used in multiple classes.\n'''\nimport re\n\nDIGIT = '\\d+(?:\\,\\d*)*(?:\\.\\d*)?'\nNUMERIC = 'xxxnumericxxx'\nUNKNOWN = 'xxxunknownxxx'\nSPLIT_TOKEN = '@'\nUNARY_TOKEN = '%%%%%'\nTOP_SYMBOL = 'TOP'\nLEX_SPLIT = '##'\n\ndef replaceDigits(sentence):\n '''\n Replaces digits in a given string by a token, and returns the replaced \n parts as a list and the resulting word.\n '''\n # Match numerals\n matchstring = '{0}(?:/{1})?'.format(DIGIT, DIGIT)\n # Match all written numerals and their capitalcased variant\n matchstring += '|(?:zero|one|two|three|four|five|six|seven|eight|nine)'\n matchstring += '|(?:Zero|One|Two|Three|Four|Five|Six|Seven|Eight|Nine)'\n \n return re.sub(matchstring, NUMERIC, sentence)\n \ndef determineFilename(prefix, \n lowercase, \n replace_numeric, \n unknown_threshold, \n head_lex,\n suffix):\n '''\n Determine the filename of savefiles based on set parameters. \n '''\n name = prefix + '_'\n name = name + 'lc_' if lowercase else name\n name = name + 'rn_' if replace_numeric else name\n name = name + 'ut{0}_'.format(unknown_threshold) if unknown_threshold else name\n name = name + 'hl{0}_'.format(head_lex) if head_lex > 1 else name\n name += suffix + '.txt'\n return name\n \ndef writeDictToTxt(filename, dictionary):\n '''\n Write a single dictionary to a file, space-separated (not by pickle). \n '''\n w = open(filename, 'w')\n for line in unpack(dictionary): \n w.write( line + '\\n' )\n w.close()\n \ndef unpack(input_value):\n '''\n Returns a list of space-separated strings: 'k1 k2 .. kn value' for a \n dictionary of n levels deep. \n '''\n if type(input_value) is dict:\n string_list = list()\n for k,v in input_value.iteritems():\n if type(k) is tuple:\n k = ','.join( k )\n for processed in unpack(v):\n string_list.append(k + ' ' + ''.join(processed))\n return string_list\n elif type(input_value) is set:\n return [','.join( input_value )]\n else:\n return [str(input_value)]","sub_path":"source/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"479436849","text":"import os\nimport hashlib\nfrom innovate import utils\n\nfrom django.test import TestCase\n\n\nclass UploadUtilsTest(TestCase):\n \"\"\"Various tests for utilities we use when uploading files.\"\"\"\n\n def assertEmpty(self, l):\n \"\"\"Custom assertion. Test that any iterable is empty.\"\"\"\n return hasattr(l, '__iter__') and len(l) == 0\n\n def test_directory_parititioning(self):\n \"\"\"Test that files are partitioned into upload directories.\"\"\"\n test = lambda r, exp, s: [exp == utils.get_partition_id(i, s)\n for i in range(*r)]\n all_true = lambda l: filter(lambda x: not x, l)\n self.assertEmpty(all_true(test((1, 10), 1, 10)))\n self.assertEmpty(all_true(test((11, 20), 2, 10)))\n self.assertEmpty(all_true(test((1001, 2000), 2, 1000)))\n\n def test_filenames(self):\n \"\"\"Test that filenames are properly encoded on upload.\"\"\"\n def run_battery(filename):\n safe_name = utils.safe_filename(filename)\n name, ext = os.path.splitext(safe_name)\n assert safe_name != filename\n assert len(safe_name) == 32 + len(ext)\n assert isinstance(safe_name, str)\n run_battery('index.php')\n run_battery('02134')\n run_battery(u'\\x123')\n run_battery(hashlib.sha1('myimage.jpg').hexdigest())\n run_battery(hashlib.md5('myimage.jpg').hexdigest())\n\n def test_filenames_malicious_extension(self):\n \"\"\"Ensure malicious users can't trick file encoding.\"\"\"\n safe_name = utils.safe_filename('fdasfdsa.index.php')\n name, ext = os.path.splitext(safe_name)\n assert ext == '.php'\n","sub_path":"apps/innovate/tests/test_uploads.py","file_name":"test_uploads.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"379232280","text":"\"\"\"\r\nScrieți un program care să înlocuiască într-o propoziție toate aparițiile unui cuvânt 𝑠𝑠 cu un cuvânt\r\n𝑡𝑡. Atenție, NU se poate utiliza metoda replace! De ce?\r\n\"\"\"\r\n\r\n# nu putem utiliza functia replace in mod corespunzator in aceasta problema\r\n# un motiv este faptul ca t poate fi un substring al lui s\r\n# Exemplu:\r\n# sir = \"Ionel si Ion sunt colegi\"\r\n# s = \"Ion\"\r\n# t = \"Gigel\"\r\n# daca utilizam functia replace, output-ul va arata astfel : \"Gigelel si Gigel sunt colegi\", ceea ce nu ne dorim\r\n# pentru aceasta situatia, vom utiliza metoda .split()\r\n\r\nsir = input(\"Introduceti sirul: \")\r\ns = input(\"Introduceti sirul de inlocuit: \") #citim\r\nt = input(\"Introduceti sirul cu care va fi inlocuit s: \")\r\n\r\nsirFinal = '' #in acest sir o sa punem sirul rezultat final in urma modificarilor\r\n\r\nfor cuv in sir.split(): # pentru fiecare cuvant din lista de cuvinte (explicatii .split() mai jos)\r\n if cuv == s: # daca cuvantul nostru este cel de inlocuit\r\n sirFinal += t # ii punem inlocuitorul in sirul final\r\n else:\r\n sirFinal += cuv #altfel adaugam cuvantul\r\n sirFinal += ' ' #adaugam spatii ptr a separa cuvintele intre ele\r\n\r\nprint(sirFinal) #afisare\r\n\r\n# Explicatii functia .split()\r\n# sir.split(sep) returneaza o lista ce contine cuvintele sirului sir, delimitate de separatorul sep\r\n# daca sep nu este mentionat, atunci by-default se considera spatiul ca separator","sub_path":"Laborator PA/2/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"564303463","text":"from keras.models import load_model,Sequential\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras import models\n\nmodel = load_model(\"cats_dogs_model_2.h5\")\nmodel.summary()\n\nimg = image.load_img(\"C:/All/Data/dogs-vs-cats-mini/test/cat/cat.11008.jpg\",target_size=(150,150))\nimg_tensor = image.img_to_array(img)\nimg_tensor = np.expand_dims(img_tensor,axis=0)\nimg_tensor /= 255. # scale to 0~1\nprint(img_tensor.shape)\n\nimport matplotlib.pyplot as plt\nplt.imshow(img_tensor[0])\n\nlayer_outputs = [layer.output for layer in model.layers[:8]]\nactivation_model = models.Model(inputs=model.input,outputs=layer_outputs)\nactivations = activation_model.predict(img_tensor)\nlayer1_ac = activations[0]\nprint(layer1_ac.shape)\nfor i in range(32):\n plt.matshow(layer1_ac[0,:,:,i],cmap='viridis')\n\nplt.show()\n","sub_path":"flowers/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"619501335","text":"import os\nimport logging\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\n\nfrom seq2seq import models, utils\nfrom seq2seq.data.dictionary import Dictionary\nfrom seq2seq.data.dataset import Seq2SeqDataset, BatchSampler\nfrom seq2seq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY\n\n\ndef get_args():\n \"\"\" Defines training-specific hyper-parameters. \"\"\"\n parser = argparse.ArgumentParser('Sequence to Sequence Model')\n \n # Add data arguments\n parser.add_argument('--data', default = 'europarl_prepared',\n help = 'path to data directory')\n parser.add_argument('--source-lang', default = 'de', help = 'source language')\n parser.add_argument('--target-lang', default = 'en', help = 'target language')\n parser.add_argument('--max-tokens', default = None, type = int,\n help = 'maximum number of tokens in a batch')\n parser.add_argument('--batch-size', default = 10, type = int,\n help = 'maximum number of sentences in a batch')\n parser.add_argument('--train-on-tiny', action = 'store_true',\n help = 'train model on a tiny dataset')\n \n # Add model arguments\n parser.add_argument('--arch', default = 'lstm',\n choices = ARCH_MODEL_REGISTRY.keys(), help = 'model architecture')\n parser.add_argument('--cuda', default = False, help = 'Use a GPU')\n \n # Add optimization arguments\n parser.add_argument('--max-epoch', default = 100, type = int,\n help = 'force stop training at specified epoch')\n parser.add_argument('--clip-norm', default = 4.0,\n type = float, help = 'clip threshold of gradients')\n parser.add_argument('--lr', default = 0.0003,\n type = float, help = 'learning rate')\n parser.add_argument('--patience', default = 10, type = int,\n help = 'number of epochs without improvement on validation set before early stopping')\n \n # Add checkpoint arguments\n parser.add_argument('--log-file', default = None, help = 'path to save logs')\n parser.add_argument('--save-dir', default = 'checkpoints',\n help = 'path to save checkpoints')\n parser.add_argument('--restore-file', default = 'checkpoint_last.pt',\n help = 'filename to load checkpoint')\n parser.add_argument('--save-interval', type = int, default = 1,\n help = 'save a checkpoint every N epochs')\n parser.add_argument('--no-save', action = 'store_true',\n help = 'don\\'t save models or checkpoints')\n parser.add_argument('--epoch-checkpoints',\n action = 'store_true', help = 'store all epoch checkpoints')\n \n # Parse twice as model arguments are not known the first time\n args, _ = parser.parse_known_args()\n model_parser = parser.add_argument_group(\n argument_default = argparse.SUPPRESS)\n ARCH_MODEL_REGISTRY[args.arch].add_args(model_parser)\n args = parser.parse_args()\n ARCH_CONFIG_REGISTRY[args.arch](args)\n return args\n\n\ndef main(args):\n \"\"\"\n Calc loss and perplexity on training and validation set\n \"\"\"\n logging.info('Commencing Validation!')\n torch.manual_seed(42)\n np.random.seed(42)\n \n utils.init_logging(args)\n \n # Load dictionaries [for each language]\n src_dict = Dictionary.load(os.path.join(\n args.data, 'dict.{:s}'.format(args.source_lang)))\n logging.info('Loaded a source dictionary ({:s}) with {:d} words'.format(\n args.source_lang, len(src_dict)))\n tgt_dict = Dictionary.load(os.path.join(\n args.data, 'dict.{:s}'.format(args.target_lang)))\n logging.info('Loaded a target dictionary ({:s}) with {:d} words'.format(\n args.target_lang, len(tgt_dict)))\n \n # Load datasets\n 
def load_data(split):\n return Seq2SeqDataset(\n src_file = os.path.join(\n args.data, '{:s}.{:s}'.format(split, args.source_lang)),\n tgt_file = os.path.join(\n args.data, '{:s}.{:s}'.format(split, args.target_lang)),\n src_dict = src_dict, tgt_dict = tgt_dict)\n \n train_dataset = load_data(\n split = 'train') if not args.train_on_tiny else load_data(split = 'tiny_train')\n valid_dataset = load_data(split = 'valid')\n \n # Build model and optimization criterion\n model = models.build_model(args, src_dict, tgt_dict)\n logging.info('Built a model with {:d} parameters'.format(\n sum(p.numel() for p in model.parameters())))\n criterion = nn.CrossEntropyLoss(\n ignore_index = src_dict.pad_idx, reduction = 'sum')\n \n if torch.cuda.is_available() and args.cuda:\n model = model.cuda()\n # Instantiate optimizer and learning rate scheduler\n optimizer = torch.optim.Adam(model.parameters(), args.lr)\n \n # Load last checkpoint if one exists\n state_dict = utils.load_checkpoint(args, model, optimizer) # lr_scheduler\n \n \n train_loader = \\\n torch.utils.data.DataLoader(train_dataset, num_workers = 1, collate_fn = train_dataset.collater,\n batch_sampler = BatchSampler(train_dataset, args.max_tokens, args.batch_size, 1,\n 0, shuffle = True, seed = 42))\n \n # Calculate validation loss\n train_perplexity = validate(\n args, model, criterion, train_dataset, 0)\n \n valid_perplexity = validate(\n args, model, criterion, valid_dataset, 0)\n \n\ndef validate(args, model, criterion, valid_dataset, epoch):\n \"\"\" Validates model performance on a held-out development set. \"\"\"\n valid_loader = \\\n torch.utils.data.DataLoader(valid_dataset, num_workers = 1, collate_fn = valid_dataset.collater,\n batch_sampler = BatchSampler(valid_dataset, args.max_tokens, args.batch_size, 1, 0,\n shuffle = False, seed = 42))\n model.eval()\n stats = OrderedDict()\n stats['valid_loss'] = 0\n stats['num_tokens'] = 0\n stats['batch_size'] = 0\n if torch.cuda.is_available() and args.cuda:\n model = model.cuda()\n # Iterate over the validation set\n for i, sample in enumerate(valid_loader):\n if len(sample) == 0:\n continue\n \n if torch.cuda.is_available() and args.cuda:\n for k in sample:\n if type(sample[k]) == torch.Tensor:\n sample[k] = sample[k].cuda()\n with torch.no_grad():\n # Compute loss\n output, attn_scores = model(\n sample['src_tokens'], sample['src_lengths'], sample['tgt_inputs'])\n loss = criterion(output.view(-1, output.size(-1)),\n sample['tgt_tokens'].view(-1))\n # Update tracked statistics\n stats['valid_loss'] += loss.item()\n stats['num_tokens'] += sample['num_tokens']\n stats['batch_size'] += len(sample['src_tokens'])\n \n # Calculate validation perplexity\n stats['valid_loss'] = stats['valid_loss'] / stats['num_tokens']\n perplexity = np.exp(stats['valid_loss'])\n stats['num_tokens'] = stats['num_tokens'] / stats['batch_size']\n \n logging.info(\n 'Epoch {:03d}: {}'.format(epoch, ' | '.join(key + ' {:.3g}'.format(value) for key, value in stats.items())) +\n ' | valid_perplexity {:.3g}'.format(perplexity))\n \n return perplexity\n\n\nif __name__ == '__main__':\n args = get_args()\n args.device_id = 0\n \n # Set up logging to file\n logging.basicConfig(filename = args.log_file, filemode = 'a', level = logging.INFO,\n format = '%(levelname)s: %(message)s')\n if args.log_file is not None:\n # Logging to console\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger('').addHandler(console)\n \n 
main(args)\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"291689878","text":"import data_ver as dv\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport vgg\nimport cv2\nimport region\n\nFLAGS=tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('test-dataset',None,\n 'directory where the test dataset is placed')\ntf.app.flags.DEFINE_string('model','./ckpt',\n 'directory where the model is stored ')\ntf.app.flags.DEFINE_string('prediction-file',None,\n 'the name and path of the prediction file')\n\nfinal_header=['id','x1','y1','x2','y2','x3','y3','havestar']\ndef main(_):\n if not getattr(FLAGS,'test-dataset'):\n raise ValueError('you must supply the test dataset')\n if not getattr(FLAGS,'prediction-file'):\n raise ValueError('you must supply the prediction file')\n ver_dir=getattr(FLAGS,'test-dataset')\n ckpt_dir=getattr(FLAGS,'model')\n prediction_file=getattr(FLAGS,'prediction-file')\n if ver_dir[-1]=='/':\n ver_dir=ver_dir[0:-1]\n if ckpt_dir[-1]=='/':\n ckpt_dir=ckpt_dir[0:-1]\n if prediction_file[-1]=='/':\n prediction_file=prediction_file[0:-1]\n print('test dataset is ',ver_dir)\n print('model is in ',ckpt_dir)\n print('prediction file is ',prediction_file)\n\n list_id=dv.get_list(ver_dir)\n input_img=tf.placeholder(tf.float32,shape=[None,24,24,3],name=\"img-input\")\n model=vgg.vgg(input_img,0.7,1)\n model_out=model.fc_out\n target=tf.placeholder(tf.float32,[None,1],name=\"y-target\")\n sig_out=tf.nn.sigmoid(model_out)\n xentropy_loss=-tf.multiply(target,tf.log(tf.clip_by_value(sig_out,1e-7,1.0)))-tf.multiply((1.-target),tf.log(tf.clip_by_value(1.-sig_out,1e-7,1.0)))\n loss=tf.reduce_mean(xentropy_loss)\n\n with tf.name_scope(\"evaluate\"):\n round_out=tf.cast(tf.round(sig_out),\"int32\")\n correct_pre=tf.equal(round_out,tf.cast(target,\"int32\"))\n acc=tf.reduce_mean(tf.cast(correct_pre,\"float\"))\n\n init_learning_rate=0.001\n learning_rate=tf.Variable(init_learning_rate,trainable=False,dtype=tf.float32)\n learning_rate_decay_op=learning_rate.assign(0.8*learning_rate)\n optimizer=tf.train.AdamOptimizer(learning_rate).minimize(loss)\n \n sess=tf.Session()\n saver=tf.train.Saver()\n ckpt=tf.train.get_checkpoint_state(ckpt_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess,ckpt.model_checkpoint_path)\n else:\n raise Exception(\"wrong restore path\")\n final_answer=[]\n\n for i,j in enumerate(list_id):\n img=region.read_img(ver_dir,j[0])\n region_array,new_rows,new_cols,pos_array=region.get_img_region(img)\n score=sess.run(model_out,feed_dict={input_img:region_array})\n max_three=dv.get_max_three(score,img.shape,(new_rows,new_cols))\n d_x=new_cols-img.shape[1]\n d_y=new_rows-img.shape[0]\n final_answer.append([[pos_array[k][0]-d_x,pos_array[k][1]-d_y] for k in max_three])\n\n final_list=[]\n final_list.append(final_header)\n for i,j in enumerate(list_id):\n temp=[]\n temp.append(j[0])\n for k in range(3):\n temp.append(final_answer[i][k][0])\n temp.append(final_answer[i][k][1])\n if final_answer[i][0][0]>=0 and final_answer[i][0][1]>=0:\n temp.append(1)\n else:\n temp.append(0)\n final_list.append(temp)\n f=open(prediction_file,\"w+\",newline='')\n writer=csv.writer(f)\n writer.writerows(final_list)\n f.close()\nif __name__=='__main__':\n tf.app.run()\n","sub_path":"code/ver_main.py","file_name":"ver_main.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"554551662","text":"from flask import Flask, render_template, request\nimport requests\nfrom bs4 import BeautifulSoup\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n firstname = request.form.get('firstname')\n firstname1 = firstname.replace(\" \", \"+\")\n scraped_url = f\"https://www.google.com/search?q=%22{firstname1}%22&start=1&num=100\"\n print(scraped_url)\n h = {\n \"accept-language\":\"en-US;q=0.8,en;q=0.7\",\n \"user-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36\"\n }\n response = requests.get(scraped_url, headers=h)\n #print(response)\n #response = requests.get(scraped_url)\n html = BeautifulSoup(response.text)\n div = html.find_all(\"span\", class_=\"aCOpRe\")\n #print(div)\n r = []\n for i in div:\n i = str(i)\n i = i.replace(\"\"\"\"\"\", \"\").replace(\"\"\"\"\"\", \"\").replace(\"\", \"\").replace(\"\", \"\").replace(\"—\", \"\").replace(\"\", \"\").replace(\"\", \"\")\n #r.append(i)\n #print(firstname)\n firstname = str(firstname)\n if firstname in i:\n print(i)\n r.append(i)\n internet_presence = r\n return render_template('index.html', internet_presence=internet_presence)\n else:\n return render_template('index.html')\n \nif __name__ == \"__main__\":\n\tapp.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"187835047","text":"# load tensorflow for deep learning\nimport tensorflow as tf\nimport numpy as np # for matrix multiplication\nfrom pickle_handler import PickleHandler # handle pickle data\nfrom sys import argv\nfrom tqdm import tqdm\n\nfilename, modelfile, outputfile = argv\n\n\n# load gotdata\ngotData = PickleHandler(\"./got.pkl\")\n\n\ndef loss(labels, logits):\n return tf.keras.losses.sparse_categorical_crossentropy(\n labels, logits, from_logits=True\n )\n\n\n# define a new model\nmodel = tf.keras.Sequential(\n [\n # a embedding layer\n tf.keras.layers.Embedding(\n gotData.vocab_size(), 256, batch_input_shape=[1, None]\n ),\n # lstm layer\n tf.keras.layers.LSTM(\n 512,\n stateful=True,\n return_sequences=True,\n recurrent_initializer=\"glorot_uniform\",\n ),\n # dropout layer\n tf.keras.layers.Dropout(0.4),\n tf.keras.layers.LSTM(\n 256,\n stateful=True,\n return_sequences=True,\n recurrent_initializer=\"glorot_uniform\",\n ),\n # dropout layer\n tf.keras.layers.Dropout(0.4),\n # lstm layer\n tf.keras.layers.LSTM(\n 128,\n stateful=True,\n return_sequences=True,\n recurrent_initializer=\"glorot_uniform\",\n ),\n # dropout layer\n tf.keras.layers.Dropout(0.4),\n tf.keras.layers.LSTM(\n 64,\n stateful=True,\n return_sequences=True,\n recurrent_initializer=\"glorot_uniform\",\n ),\n # dropout layer\n tf.keras.layers.Dropout(0.4),\n # dense layer\n tf.keras.layers.Dense(gotData.vocab_size()),\n ]\n)\n\n# load model weights\nmodel.load_weights(modelfile)\n\nprint(\"priniting model summary....\")\nprint(model.summary())\n\n# generator function\ndef generator_function(model, string_input):\n\n # num of chars to generate\n num_generate = 1000\n\n input_val = [gotData.char2idx[s] for s in string_input]\n input_val = tf.expand_dims(input_val, 0)\n\n # set a empty generator list\n text_generated = []\n\n # temperature for our prediction\n temperature = 1.0\n\n # reset all the states of model\n model.reset_states()\n\n # iterate into negerate\n for i in tqdm(range(num_generate), ncols=100):\n # get the predictions\n predictions = model(input_val)\n\n # remove the batch dimsd\n predictions = tf.squeeze(predictions, 0)\n\n # using categorial data for the predictions\n predictions = predictions / temperature\n prediction_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()\n\n # pass the hidden current output to model as an input along with the hidden state\n input_val = tf.expand_dims([prediction_id], 0)\n\n # append into text generated\n text_generated.append(gotData.idx2char[prediction_id])\n\n # if i % 1000 == 0:\n # print(\"Generated {}% of string\".format(i / 100))\n return string_input + \"\".join(text_generated)\n\n\nwith open(outputfile, 'w', encoding='utf-8') as fp:\n text = generator_function(model, u\"JON: \")\n fp.write(text)","sub_path":"lstm_4_predict.py","file_name":"lstm_4_predict.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"394190466","text":"\"\"\"Simple convolutional neural network classififer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom common import metrics\nfrom common import ops\nfrom common import resnet\n\n\ndef get_params():\n return {\n \"weight_decay\": 0.0002,\n }\n\n\ndef model(features, labels, mode, params):\n \"\"\"CNN classifier model.\"\"\"\n images = features[\"image\"]\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = labels[\"label\"]\n\n training = mode == tf.estimator.ModeKeys.TRAIN\n tf.keras.backend.set_learning_phase(training)\n\n image_shape = (512, 512, 3)\n if training:\n image_shape = (140, 140, 3)\n images = tf.random_crop(images, [params.batch_size, 140, 140, 3])\n images = tf.keras.applications.inception_v3.preprocess_input(images)\n inception = tf.keras.applications.inception_v3.InceptionV3(\n input_shape=image_shape, include_top=False,\n weights='imagenet' if training else None,\n input_tensor=images,\n pooling='avg')\n for layer in inception.layers:\n layer.trainable = False\n\n logits = tf.layers.dense(inception(images), params.num_classes,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_decay))\n\n predictions = tf.argmax(logits, axis=-1)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return {\"predictions\": predictions}, None, None\n\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n eval_metrics = {\n \"accuracy\": tf.metrics.accuracy(labels, predictions),\n \"top_1_error\": tf.metrics.mean(metrics.top_k_error(labels, logits, 1)),\n }\n\n return {\"predictions\": predictions}, loss, eval_metrics\n","sub_path":"model/keras_inception_v3.py","file_name":"keras_inception_v3.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"380236939","text":"# MIT License\n#\n# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport pytest\nfrom dateutil.parser import parse\n\nfrom eosetl.service.eos_block_range_service import EosBlockRangeService\nfrom blockchainetl_common.graph.graph_operations import OutOfBoundsError\nfrom tests.eosetl.job.helpers import get_eos_rpc\nfrom tests.helpers import skip_if_slow_tests_disabled\n\n\n@pytest.mark.parametrize(\"date,expected_start_block,expected_end_block\", [\n skip_if_slow_tests_disabled(['2018-06-08', 1, 1]),\n skip_if_slow_tests_disabled(['2018-06-09', 2, 13338]),\n skip_if_slow_tests_disabled(['2018-11-01', 24569043, 24741696]),\n skip_if_slow_tests_disabled(['2018-11-02', 24741697, 24914090]),\n skip_if_slow_tests_disabled(['2019-06-01', 61118944, 61291688]),\n])\ndef test_get_block_range_for_date(date, expected_start_block, expected_end_block):\n eos_block_range_service = get_new_eos_block_range_service()\n parsed_date = parse(date)\n blocks = eos_block_range_service.get_block_range_for_date(parsed_date)\n assert (expected_start_block, expected_end_block) == blocks\n\n\n@pytest.mark.parametrize(\"date\", [\n skip_if_slow_tests_disabled(['2030-01-01'])\n])\ndef test_get_block_range_for_date_fail(date):\n eos_service = get_new_eos_block_range_service()\n parsed_date = parse(date)\n with pytest.raises(OutOfBoundsError):\n eos_service.get_block_range_for_date(parsed_date)\n\n\ndef get_new_eos_block_range_service():\n rpc = get_eos_rpc(\"online\")\n return EosBlockRangeService(rpc)\n","sub_path":"tests/eosetl/service/test_eos_block_range_service.py","file_name":"test_eos_block_range_service.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"276988466","text":"# coding=utf-8\n\n# ----------------------------------------\nfrom __future__ import unicode_literals, print_function\n\nimport os\nimport time\n\nfrom zhihu_oauth import ZhihuClient\n\n\nTOKEN_FILE = 'token.pkl'\n\n\nclient = ZhihuClient()\n\nif os.path.isfile(TOKEN_FILE):\n client.load_token(TOKEN_FILE)\nelse:\n client.login_in_terminal()\n client.save_token(TOKEN_FILE)\n\n\n# gender用户的性别属性,0为女,1为男\n#is_follower 是否关注了我?\n\n\n\n\n\nwhile True:\n me = client.me()\n c1={}\n d1={}\n\n #赞同总数\n v1=me.voteup_count\n print('最初赞同数量是:'+str(v1))\n\n\n for i in me.answers:\n\n try:\n #将回答名称和对应的最后点赞同户名加入字典\n c1.setdefault(i.question.title,i.voters)\n #d1.setdefault(i.question.title,i.voteup_count)\n\n except:\n print('该用户名好像有问题')\n #print (c1)\n\n\n\n \n\n #重新登录,刷新状态\n print('即将重新载入中户数据')\n time.sleep(10)\n client2 = ZhihuClient()\n client2.load_token(TOKEN_FILE)\n me2 = client2.me()\n #更新后的赞同总数\n v2=me2.voteup_count\n print('重新载入赞同数量是:'+str(v2))\n\n if v2>v1:\n #最后一个点赞的人\n print ('赞同变化了')\n for n in me2.answers:\n try:\n # and n.voteup_count>d1[n.question.title]\n if n.voters[0] not in c1[n.question.title]:\n print ('开始发送私信')\n \n me2.message(n.voters[0],'谢谢你的赞')\n if n.voters[0].is_follower is True:\n pass\n else:\n print ('对方没有关注我.立马私信')\n time.sleep(2)\n me2.message(n.voters[0],'heihei')\n #time.sleep(5)\n break \n #i.voters[0]\n except:\n # 点赞的人的名字\n print ('最新点赞的人是'+str(n.voters[0].name))\n #time.sleep(10)\n else:\n \n print('-------------------------------')\n print('赞同变化,但是找不到点赞的人')\n time.sleep(10)\n else:\n print('赞同没有变化啊啊 啊啊')\n \n\n\n","sub_path":"赞同后自动发私信.py","file_name":"赞同后自动发私信.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"138494151","text":"k,n = input().split() # wprowadzenie punktu startowego k, oraz liczby ruchów n\nn=int(n)# punkt startowy\n\nwynik = 0 #zainiciowanie zmiennej która bedzie przetrzymywać wynik jakim jest liczba unikalnych ciagow znaków\n\n\n#plansza przedstawiona jako dwuwymiarowa tablica gdzie :\n# 0 - granice planszy\n# 1 - zajęte pole\nplansza = [[0,0,0,\"A\",0,0,0],\n [0,0,\"B\",\"C\",\"D\",0,0],\n [0,\"E\",0,\"F\",\"G\",\"H\",0],\n [\"I\",\"J\",\"K\",\"L\",\"M\",0,\"N\"],\n [0,\"O\",\"P\",0,\"Q\",\"R\",0],\n [0,0,\"S\",\"T\",\"U\",0,0],\n [0,0,0,\"V\",0,0,0]]\n\n\n\n\n#znalezienie pozycji startowej\nr,c = 0,0 #r - rows czyli wiersze tablicy c - columns czyli kolumny tablicy\nfor i in plansza:\n r = r+1\n c = 0\n for j in i:\n c = c + 1\n if j == k :\n break\n if j == k:\n break\n############cofniecie koordynatow pozycji startowej o 1\nc = c-1#####poniewaz numeracja pozycji listy zaczyna sie od 0\nr = r-1#####\n############\n\n#funkcja ruchu\ndef ruch(r,c,n,plansza,wynik):\n #listy zawierajace modyfikatory koordynatów nastepnego miejsca na planszy po wykonaniu kroku 1\n #r oznacza czy jest to krok po wierszach natomiast c czy krok w kolumnach\n krok1rPi = [r+2,r-2]\n krok1cPo = [c+1,c-1]\n krok1rPo = [r + 1, r - 1]\n krok1cPi = [c + 2, c - 2]\n #pętla wykonujaca wszystkie mozliwosci kroku 1 dla przesuniec w pionie\n for i in krok1rPi:\n for j in krok1cPo:\n if i >= 0 and i < len(plansza) and j >= 0 and j < len(plansza): #warunek sprawdzajacy czy można pozostać na danym polu, czy nie jest to konie planszy albo zablokowane miejsce\n if plansza[i][j] !=0:\n r=i\n c=j\n if n - 1 > 0: # warunek sprawdzajacy czy po wykonaniu kroku 1 zostały do wykonania jeszcze jakies ruchy\n krok2r = [r + 1, r - 1]#jeśli tak ustawia listy modyfikatorów koordynatów dla kroku 2 ustawiajac pole po zakonczeniu kroku 1 jako koordynaty startowe\n krok2c = [c + 1, c - 1]\n n=n-1\n for x in krok2r:\n for z in krok2c:\n if x >= 0 and x < len(plansza) and z >= 0 and z < len(plansza):\n if plansza[x][z] != 0:\n if n -1 > 0: #warunek sprawdzajacy czy zostały jeszcze punkty ruchy\n n=n-1\n wynik = ruch(x,z,n,plansza,wynik) #jeśli tak rekurencyjnie zostaje wywołana funkcja ruchu dzieki czemu powracamy do kroku 1\n n=n+1\n else:\n wynik=wynik+1 # jesli nie ma juz punktów ruchu dodaje 1 do liczby unikalnych ciagów znaków\n n=n+1\n else:\n wynik = wynik + 1\n # pętla wykonujaca wszystkie mozliwosci kroku 1 dla przesuniec w poziomie\n for i in krok1rPo:\n for j in krok1cPi:\n if i >= 0 and i < len(plansza) and j >= 0 and j < len(plansza) :\n if plansza[i][j] !=0:\n r=i\n c=j\n if n - 1 > 0:\n krok2r = [r + 1, r - 1]\n krok2c = [c + 1, c - 1]\n n=n-1\n for x in krok2r:\n for z in krok2c:\n if x >= 0 and x < len(plansza) and z >= 0 and z < len(plansza):\n if plansza[x][z] != 0:\n if n -1 > 0:\n n=n-1\n wynik = ruch(x,z,n,plansza,wynik)\n n=n+1\n else:\n wynik = wynik + 1\n n=n+1\n else:\n wynik = wynik+1\n return wynik\n\n\n\nprint(ruch(r,c,n,plansza,wynik)) #wywolanie ilosci wszystkich możliwych unikalnych ciagów znaków\n","sub_path":"zadanie.py","file_name":"zadanie.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"10715611","text":"import os\n\n\ndef a_very_big_sum(array):\n sum_ = 0\n for i in range(0, len(array)):\n sum_ += array[i]\n return sum_\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n ar_count = int(input())\n\n ar = list(map(int, input().rstrip().split()))\n\n result = a_very_big_sum(ar)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"Warmup/a_very_big_sum.py","file_name":"a_very_big_sum.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"211093119","text":"# Custom imports below\nimport re\nimport email\nfrom bs4 import UnicodeDammit\n\n\ndef body(b, log):\n if b.is_multipart():\n for part in b.walk():\n ctype = part.get_content_type()\n cdispo = str(part.get('Content-Disposition'))\n\n # skip any text/plain (txt) attachments\n if ctype == 'text/plain' and 'attachment' not in cdispo:\n try:\n body = part.get_payload(decode=True).decode('utf-8') # decode\n except Exception as ex:\n log.debug(ex)\n log.debug(\"Failed to parse message as UTF-8, attempting to detwingle first before retrying parse\")\n body = UnicodeDammit.detwingle(part.get_payload(decode=True)).decode('utf-8', errors='ignore')\n elif ctype == 'text/html' and 'attachment' not in cdispo:\n try:\n body = part.get_payload(decode=True).decode('utf-8').replace('\\n','') # decode\n except Exception as ex:\n log.debug(ex)\n log.debug(\"Failed to parse message as UTF-8, attempting to detwingle first before retrying parse\")\n\n body = UnicodeDammit.detwingle(part.get_payload(decode=True)).decode('utf-8', errors='ignore').replace('\\n','')\n\n\n return body\n else:\n try:\n return b.get_payload(decode=True).decode('utf-8')\n except:\n log.debug(u\"\\uE05A\".encode('unicode-escape'))\n return UnicodeDammit.detwingle(b.get_payload(decode=True)).decode('utf-8', errors='ignore').replace('\\n','')\n\ndef attachments(mail, log):\n attachments = []\n\n filename_pattern = re.compile('name=\".*\"')\n\n count = 0\n for part in mail.walk():\n count += 1\n if part.get_content_maintype() == 'multipart':\n continue\n if part.get('Content-Disposition') is None:\n continue\n\n filename = part.get_filename()\n\n if filename is None:\n # Attempt to get filename from Content-Type header\n content_line = filename_pattern.findall(part.get('Content-Type'))\n # Test if array has contents\n if content_line:\n # Attempt parsing filename, it *might* be here\n filename = content_line[0].lstrip('name=').strip('\"')\n\n log.debug('Content-Type filename: %s', filename)\n # Fall back to a dynamic file name chosen by us\n if not filename:\n filename = 'Attachment-{}'.format(count)\n log.debug('Dynamic filename: %s', filename)\n\n\n content = part.get_payload(decode=False)\n if type(content) != type(''):\n #attached email\n content = part.as_string()\n log.debug('Content not string')\n content = content.replace(\"\\r\\n\",\"\")\n attachments.append({\n 'filename': filename,\n 'content': content,\n 'content_type': part.get_content_type(),\n })\n\n if count == 0:\n log.debug(\"No attachment\")\n attachments.append({\n 'filename': '',\n 'content': '',\n 'content_type': '',\n })\n return attachments\n","sub_path":"eml/komand_eml/util/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"295156046","text":"from neuroevolution.nodes_genome import NodesGenome\nfrom neuroevolution.links_genome import LinksGenome\nfrom neuroevolution.node_gene import NodeGene\nfrom neuroevolution.link_gene import LinkGene\nfrom copy import copy\nfrom typing import List, Union, Tuple\nimport random\nimport pickle\n\n\nclass Genome:\n def __init__(self, nodes_genome: NodesGenome, links_genome: LinksGenome):\n self.nodes_genome: NodesGenome = nodes_genome\n self.links_genome: LinksGenome = links_genome\n self.fitness: Union[None, float] = None\n self.species = None\n\n def get_possible_new_links(self) -> List[Tuple[NodeGene, NodeGene]]:\n possible_links: List[Tuple[NodeGene, NodeGene]] = []\n\n for outgoing_node in self.nodes_genome.input_genes + self.nodes_genome.hidden_genes:\n for ingoing_node in self.nodes_genome.hidden_genes + self.nodes_genome.output_genes:\n # if it would be a duplicate - skip it\n if self.links_genome.link_genes.get((outgoing_node, ingoing_node)) is not None:\n continue\n if self.nodes_genome.is_link_possible(outgoing_node, ingoing_node):\n possible_links.append((outgoing_node, ingoing_node))\n\n return possible_links\n\n def get_random_similar(self, neuroevolution_instance):\n nodes_genome_copy = copy(self.nodes_genome)\n links_genome_copy = self.links_genome.get_copy()\n for link_gene in links_genome_copy.link_genes.values():\n link_gene.weight = random.uniform(neuroevolution_instance.config.weight_interval[0], neuroevolution_instance.config.weight_interval[1])\n return Genome(nodes_genome_copy, links_genome_copy)\n\n @classmethod\n def get_random_genome(cls, neuroevolution_instance, input_genes: List[NodeGene], output_genes: List[NodeGene]):\n # create nodes genome\n nodes_genome: NodesGenome = NodesGenome()\n nodes_genome.input_genes = input_genes\n nodes_genome.output_genes = output_genes\n\n # create links genome\n links_genome: LinksGenome = LinksGenome()\n for input_gene in nodes_genome.input_genes:\n for output_gene in nodes_genome.output_genes:\n if random.random() < neuroevolution_instance.config.init_connection_prob:\n neuroevolution_instance.add_or_check_link(input_gene, output_gene)\n weight_interval = neuroevolution_instance.config.weight_interval\n weight = random.uniform(weight_interval[0], weight_interval[1])\n link_gene = LinkGene(input_gene, output_gene, weight, False)\n links_genome.add_gene(link_gene)\n\n # prevent genome with 0 links (empty network with no single link)\n if len(links_genome.link_genes) == 0:\n input_gene = random.choice(nodes_genome.input_genes)\n output_gene = random.choice(nodes_genome.output_genes)\n neuroevolution_instance.add_or_check_link(input_gene, output_gene)\n weight_interval = neuroevolution_instance.config.weight_interval\n weight = random.uniform(weight_interval[0], weight_interval[1])\n link_gene = LinkGene(input_gene, output_gene, weight, False)\n links_genome.add_gene(link_gene)\n\n return cls(nodes_genome, links_genome)\n\n def save(self, file_name: Union[str, None] = None):\n if file_name is None:\n file_name = \"genome\" + str(id(self)) + \".pkl\"\n with open(file_name, 'wb') as otp:\n pickle.dump(self, otp, pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def load(file_name: str):\n with open(file_name, 'rb') as inp:\n return pickle.load(inp)\n\n def __str__(self):\n string = \"Individual [ \" + str(id(self)) + \" ]\\n\"\n if self.fitness is None:\n string += \"Fitness: Not tested\\n\"\n else:\n string += \"Fitness: <\" + str(self.fitness) + \">\\n\"\n string += \"Species identifier: { \" + 
str(self.species.identifier) + \" }\\n\"\n string += self.nodes_genome.__str__() + \"\\n\"\n string += self.links_genome.__str__()\n return string\n","sub_path":"neuroevolution/genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"628834745","text":"import sys\n\nfrom flask_cors import cross_origin\n\nsys.path.insert(0, '../code2vec')\n\nimport os\nimport pathlib\n\nimport numpy as np\nimport torch\nfrom flask import Flask, jsonify, request\n\nfrom code2vec import load_model_dynamically\nfrom config import Config\nfrom interactive_predict import InteractivePredictor\nfrom model import PredictionHead\n\n\napp = Flask(__name__)\n\nmodel = PredictionHead(384, 128, 128, 4)\nmodel.load_state_dict(torch.load('./state_dict.uu', map_location=torch.device('cpu')))\nmodel.eval()\n\nconfig = Config(set_defaults=True, load_from_args=True, verify=True)\ncode2vec_model = load_model_dynamically(config)\npredictor = InteractivePredictor(config, code2vec_model)\nconfig.log('Done creating code2vec model')\n\n\nlabel2name = {\n 0: 'parallel_inheritance_hierarchies',\n 1: 'god_classes',\n 2: 'data_class',\n 3: 'feature_envy',\n}\n\n\ndef get_embedding(filename):\n return torch.FloatTensor(\n np.array([float(x) for x in filename.open().read().split()]).reshape(384, -1)\n ).unsqueeze(0)\n\n\n@app.route('/predict', methods=['POST', 'OPTIONS'])\n@cross_origin()\ndef predict():\n if request.method == 'POST':\n file = request.files['file']\n\n file.save(f'../code2vec/input/{file.filename}')\n predictor.predict('../code2vec/input', '../code2vec/output')\n embedding = get_embedding(\n pathlib.Path(f'../code2vec/output/{file.filename.split(\".\")[0]}' + '.txt')\n )\n probabilities = [\n torch.sigmoid(value).cpu().data.numpy().tolist() for value in model.forward(embedding)\n ][0]\n\n os.path.exists(f'../code2vec/input/{file.filename}') and os.remove(\n f'../code2vec/input/{file.filename}'\n )\n os.path.exists(f'../code2vec/output/{file.filename.split(\".\")[0]}.txt') and os.remove(\n f'../code2vec/output/{file.filename.split(\".\")[0]}.txt'\n )\n\n return jsonify({label2name[index]: value for index, value in enumerate(probabilities)})\n\n\n@app.route('/test', methods=['GET', 'OPTIONS'])\n@cross_origin()\ndef test():\n return jsonify('Hi, there!')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"web_service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"1870262","text":"import cv2\nimport sys\nimport time\nfrom imutils.video import FPS\nfrom imutils.video import VideoStream\nimport imutils\n\n#grabbing the cascade file to be used\ncascPath = sys.argv[1]\ntargetCascade = cv2.CascadeClassifier(cascPath)\n\n#setting up the PiCamera and it's variables\nvideo_stream = VideoStream(usePiCamera=True, resolution=(480,320), framerate=32).start()\ntime.sleep(2)\n\n\n# capture frames from the camera using the piCamera library\nwhile True:\n\n # Capture frame-by-frame\n image = video_stream.read()\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n target = targetCascade.detectMultiScale(\n gray,\n scaleFactor=1.5,\n minNeighbors=1,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE)\n\n print(\"Printing target:\", target)\n # Draw a rectangle around the target and print out the x,y,w,h values\n for (x, y, w, h) in target:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n print(\"X: {}, Y: {}, W: {}, H: {}\".format(x, y, w, h))\n\n # Display the resulting frame\n # show the frame\n cv2.imshow(\"Frame\", image)\n key = cv2.waitKey(1) & 0xFF\n \n #break code\n if key == cv2.waitKey(1) & 0xFF == ord('q'):\n break\n","sub_path":"Motor/Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"137647640","text":"# coding: utf-8\n\nfrom flask.ext.script import Command, Option\nfrom .models import Comment\n\n\nclass ListComment(Command):\n \"prints a list of medias\"\n\n command_name = 'list_medias'\n\n option_list = (\n Option('--title', '-t', dest='title'),\n )\n\n def run(self, title=None):\n\n comments = Comment.objects\n if title:\n comments = comments(title=title)\n\n for comment in comments:\n print(comment)\n","sub_path":"quokka/modules/comments/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"169925857","text":"#################################################\n### IMPORT NECESSARY LIBRARIES ###\n#################################################\nimport os\nimport pandas as pd\nfrom flask import (\n Flask,\n render_template,\n jsonify,\n request,\n redirect)\nfrom flask_sqlalchemy import SQLAlchemy\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nimport urllib\n\nfrom datetime import datetime\n\nfrom config import msql_serverName, msql_dbName\n# from config import dbuser, dbpassword, dbhost, dbport, dbname\n\n#################################################\n#### FLASK SETUP ####\n#################################################\napp = Flask(__name__)\n\n#################################################\n### # DATABASE SETUP ####\n#################################################\n\n################### POSTGRES CONNECTION STRINGS #########################\n# connection_string1 = f'{pg_user}:{password}@localhost:5432/{db_name}'\n# connection_string2 = f'{dbuser}:{dbpassword}@database-1.cvmfiiilpm7y.us-east-1.rds.amazonaws.com:{dbport}/{dbname}'\n# Heroku connection_string = postgres://merbhejlcbbizc:c47f98a8b46d8c32180a3d6c3420fc4b9d3711c5ba4afc316d94c551504fdcb2@ec2-23-22-191-232.compute-1.amazonaws.com:5432/d84r12ktb080ua\n\n##################### MSSQL CONNECTION STRINGS ##########################\nconn_str = (\n r'Driver=ODBC Driver 17 for SQL Server;'\n rf'Server={msql_serverName};'\n rf'Database={msql_dbName};'\n r'Trusted_Connection=yes;'\n)\nquoted_conn_str = urllib.parse.quote_plus(conn_str)\n\n\n######## ERROR HANDLING FOR CONNECTION TO A CLOUD DB FOR HOSTING ########\ntry:\n db_uri = os.environ['DATABASE_URL']\nexcept KeyError:\n db_uri = f\"mssql+pyodbc:///?odbc_connect={quoted_conn_str}\"\n\nprint(db_uri)\napp.config['SQLALCHEMY_DATABASE_URI'] = db_uri\n\ndb = SQLAlchemy(app)\n\n######################## CONNECT TO DATABASE ############################\n##### Postgres #####\n# engine = create_engine(f'postgresql://{connection_string2}')\n\n##### Jsonify the data #####\nengine = create_engine(f'mssql+pyodbc:///?odbc_connect={quoted_conn_str}')\n\n######################### CONNECT TO SESSION ############################\nsession = Session(engine)\nconnection = engine.connect()\n\n# youtubeVids = pd.read_sql(f\"SELECT * FROM youtube_table_v1\", connection)\nyoutubeVids = pd.read_sql(f\"SELECT * FROM youtube_table_v2\", connection)\n\n############# DATA CLEAN - FIX ISSUES AND RENAME COLUMNS ################\n# FIX THE 29 NON PROFITS ISSUE\nyoutubeVids['categoryId'] = youtubeVids['categoryId'].replace(\n [\"29\"], \"Nonprofits & Activism\")\n\n# RENAME COLUMNS\nyoutubeVids = youtubeVids.rename(columns={'country': 'country', 'video_id': 'video_id', 'title': 'title', 'publishedAt': 'publishedAt', 'channelTitle': 'channelTitle', 'categoryId': 'categoryId',\n 'trending_date': 'trending_date', 'view_count': 'views', 'likes': 'likes', 'dislikes': 'dislikes', 'comment_count': 'comments', 'thumbnail_link': 'thumbnail_link'})\nconnection.close()\nsession.close()\n\n#################################################\n#### HOME ROUTE ####\n#################################################\n@app.route(\"/\")\ndef home():\n # print(index.html)\n return render_template(\"index.html\")\n\n#################################################\n#### ROUTES FOR DROP DOWN MENU ####\n#################################################\n\n##################### ROUTE FOR DROP DOWN MENU 1 
########################\n@app.route(\"/dropdown1\")\ndef dropdown1():\n country_df = youtubeVids['country'].value_counts()\n countryList = country_df.index.tolist()\n print(countryList)\n return jsonify(countryList)\n\n##################### ROUTE FOR DROP DOWN MENU 2 ########################\n@app.route(\"/dropdown2\")\ndef dropdown2():\n youtubeVids['categoryId'] = youtubeVids['categoryId'].replace(\n [\"29\"], \"Nonprofits & Activism\")\n category_df = youtubeVids['categoryId'].value_counts()\n categoryList = category_df.index.tolist()\n print(categoryList)\n return jsonify(categoryList)\n\n##################### ROUTE FOR DROP DOWN MENU 3 ########################\n@app.route(\"/dropdown3\")\ndef dropdown3():\n metricList = ['views', 'likes', 'dislikes', 'comments']\n print(metricList)\n return jsonify(metricList)\n\n#################################################\n#### ROUTES FOR DATA ####\n#################################################\n\n####################### ROUTE FOR BAR GRAPH 1 ##########################\n@app.route(\"/dataset1/<country>/<metric>\")\ndef dataset1(country, metric):\n # Fix the 29 vs Non profits issue\n youtubeVids['categoryId'] = youtubeVids['categoryId'].replace(\n [\"29\"], \"Nonprofits & Activism\")\n\n # Filter dataframe by country, then average each category's metric\n barGraph1Data = youtubeVids[youtubeVids[\"country\"] == country]\n barGraph1Data = barGraph1Data.groupby('categoryId').mean()\n barGraph1Data = barGraph1Data[metric]\n\n ##### Convert data to a dictionary #####\n barGraph1Data = barGraph1Data.to_dict()\n ##### Jsonify the data #####\n return jsonify(barGraph1Data)\n\n####################### ROUTE FOR BAR GRAPH 2 ##########################\n@app.route(\"/dataset2/<country>/<category>/<metric>\")\ndef dataset2(country=None, category=None, metric=None):\n # Fix the 29 vs Non profits issue\n youtubeVids['categoryId'] = youtubeVids['categoryId'].replace(\n [\"29\"], \"Nonprofits & Activism\")\n\n # Filter dataframe by country & category & select columns to keep\n barGraph2Data = youtubeVids[youtubeVids[\"country\"] == country]\n barGraph2Data = barGraph2Data[barGraph2Data[\"categoryId\"] == category]\n barGraph2Data = barGraph2Data.loc[:, [\n \"views\", \"comments\", \"likes\", \"dislikes\", \"country\", \"categoryId\"]]\n\n # Create a table (df) of metric values vs the max of each value for the bar graph\n metric_values = [\"views\", \"comments\", \"likes\", \"dislikes\"]\n metricMaxValues = []\n for metric_name in metric_values: # renamed so the loop does not shadow the route's metric argument\n step1_df = barGraph2Data.sort_values(by=metric_name, ascending=False)\n step2_df = step1_df[metric_name]\n step3 = step2_df.values.tolist()\n metricMaxValues.append(step3[0])\n\n # Create the actual table (df)\n barGraph2_df = pd.DataFrame(\n {'Metric_Values': metric_values,\n 'Max_Value': metricMaxValues\n })\n ##### Convert data to a dictionary #####\n barGraph2_df = barGraph2_df.to_dict()\n ##### Jsonify the data #####\n return jsonify(barGraph2_df)\n\n####################### ROUTE FOR LINE GRAPH ##########################\n@app.route(\"/dataset3/<country>/<metric>\")\ndef dataset3(country, metric):\n lineData = youtubeVids[youtubeVids[\"country\"] == country]\n\n # add a timestamp column to dataframe\n timestamps = []\n for index, row in lineData.iterrows():\n t = row[\"publishedAt\"]\n td = datetime(t.year, t.month, t.day)\n timestamps.append(datetime.timestamp(td))\n lineData[\"timestamp\"] = timestamps\n\n # get top three categories\n topThree = list(lineData.groupby([\"categoryId\"]).sum()[\n \"likes\"].sort_values(ascending=False).index[0:3])\n\n # Select one category and group by timestamp\n first = lineData[lineData[\"categoryId\"] == topThree[0]]\n first = first.groupby(\"timestamp\").sum()\n\n ##### Convert data to a dictionary #####\n first = first[metric].to_dict()\n ##### Jsonify the data #####\n return jsonify(first)\n\n####################### ROUTE FOR TOP 10 TABLE ##########################\n@app.route(\"/dataset4/<country>/<category>/<metric>\")\ndef dataset4(country=None, category=None, metric=None):\n # Fix the 29 vs Non profits issue\n youtubeVids['categoryId'] = youtubeVids['categoryId'].replace(\n [\"29\"], \"Nonprofits & Activism\")\n\n # Filter dataframe by country & category\n table_df = youtubeVids[youtubeVids[\"country\"] == country]\n table_df = table_df[table_df[\"categoryId\"] == category]\n\n # Sort dataframe (largest to smallest) by metric selected\n sorted_table_df = table_df.sort_values(by=metric, ascending=False)\n\n # Remove duplicate videos from dataframe\n sorted_table_df = sorted_table_df.drop_duplicates(\n subset='title', keep=\"first\")\n\n # Select top 10 (based on metric selected) from dataframe\n top10TableData_df = sorted_table_df.nlargest(10, metric)\n\n # Select columns to keep for table\n top10TableData_df = top10TableData_df[['categoryId', 'country', 'title', 'channelTitle',\n 'views', 'comments', 'trending_date', 'likes', 'dislikes', 'video_id', 'thumbnail_link']]\n\n ##### Convert data to a dictionary #####\n top10TableData = top10TableData_df.to_dict(orient=\"records\")\n ##### Jsonify the data #####\n return jsonify(top10TableData)\n# return render_template('test_out.html', data=top10TableData)\n\n######################### ROUTE FOR ALL DATA ############################\n@app.route(\"/allData\")\ndef allData():\n allData = youtubeVids\n\n allData = allData.to_dict()\n\n return jsonify(allData)\n\n#################################################\n#### APP ENTRY POINT ####\n#################################################\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n#################################################\n#### END OF FLASK APP ####\n#################################################\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
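The repaired routes in the record above rely on Flask's angle-bracket path converters, which bind URL segments to view-function arguments. A minimal self-contained sketch of the same pattern, using hypothetical toy data rather than the app's youtubeVids dataframe:

from flask import Flask, jsonify

app = Flask(__name__)
views = {("US", "views"): {"Music": 1200.5, "Gaming": 800.0}}

@app.route("/dataset1/<country>/<metric>")
def dataset1(country, metric):
    # each <segment> in the rule is passed to the view as a keyword argument
    return jsonify(views.get((country, metric), {}))

if __name__ == "__main__":
    app.run(debug=True)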
+{"seq_id":"38639447","text":"from DateTime import DateTime\nfrom bs4 import BeautifulSoup\n\n#from Products.docstypesAC.interfaces.docmanual import Idocmanual\n#from Products.docstypesAC.interfaces.docchapter import Idocchapter\n#from Products.docstypesAC.interfaces.docsection import Idocsection\nfrom Products.docstypesAC.interfaces.docdocument import Idocdocument\n#from Products.docstypesAC.interfaces.dochandout import Idochandout\n#from Products.docstypesAC.interfaces.docdowntime import Idocdowntime\n\nfrom Products.Handout.interfaces.handout import IHandout\nfrom Products.DowntimeProcedure.interfaces.downtimeprocedure import Idowntimeprocedure\n\n(HIDDEN_FIELD, SECTION_FIELD, INFO_FIELD, INFO_URL_FIELD, META_FIELD, ) = range(1, 6)\nINFO_FIELD_TITLE = \"Document Information\"\n\nDOWNTIMEPROCEDURE_FIELDS = (\n (\"description\", \"Description\", SECTION_FIELD),\n (\"downtimeproceduretechnology\", \"Affected Technology\", INFO_FIELD),\n (\"downtimeprocedureprocedure\", \"Procedure\", SECTION_FIELD),\n (\"downtimeprocedureguideline\", \"Guideline\", SECTION_FIELD),\n (\"downtimeprocedurelastupdated\", \"Last Updated Date\", META_FIELD),\n (\"downtimeprocedureowner\", \"Owner\", META_FIELD),\n (\"downtimeprocedurereviewer\", \"Reviewer\", META_FIELD),\n (\"downtimeprocedurenotification\", \"Notification/Communication\", SECTION_FIELD),\n (\"title\", \"Title\", HIDDEN_FIELD),\n)\n\nHANDOUT_FIELDS = (\n (\"description\", \"Description\", SECTION_FIELD), \n (\"perdocumentnumber\", \"Document Number\", INFO_FIELD), \n (\"perdocument\", \"Document\", INFO_URL_FIELD), \n (\"getPerdocument\", \"Document\", INFO_URL_FIELD), \n (\"perkeywords\", \"Keywords\", META_FIELD), \n (\"perwebsiteaddress\", \"Website Address\", INFO_FIELD), \n (\"pereducationcontent\", \"Education Content\", SECTION_FIELD), \n (\"peraccessinformation\", \"Access Information\", SECTION_FIELD), \n (\"perauthorname\", \"Author Name\", META_FIELD), \n (\"perauthoremail\", \"Author Email\", META_FIELD), \n (\"perauthorphonenumber\", \"Author Phone Number\", META_FIELD), \n (\"perICD10codes\", \"ICD-10 Codes\", META_FIELD), \n (\"perICD9codes\", \"ICD-9 Codes\", META_FIELD), \n (\"perLOINCcode\", \"LOINC Code\", META_FIELD), \n (\"perGPIcode\", \"GPI Code\", META_FIELD), \n (\"perCPTcode\", \"CPT Code\", META_FIELD), \n (\"peradditionalinformation\", \"Additional Information\", META_FIELD),\n (\"perliteracylevel\", \"Literacy Level\", META_FIELD), \n (\"permediatype\", \"Media Type\", META_FIELD), \n (\"perlanguage\", \"Language\", META_FIELD), \n (\"persource\", \"Source\", META_FIELD), \n (\"perage\", \"Age\", META_FIELD), \n (\"pergender\", \"Gender\", META_FIELD), \n (\"pertopic\", \"Topic\", META_FIELD),\n (\"perlastupdated\", \"Last Updated\", META_FIELD), \n (\"title\", \"Title\", HIDDEN_FIELD),\n)\n \nDOCUMENT_FIELDS = (\n (\"description\", \"Description\", SECTION_FIELD), \n (\"chapter\", \"Chapter\", INFO_FIELD), \n (\"section\", \"Section\", INFO_FIELD), \n (\"docnumber\", \"Number\", INFO_FIELD), \n (\"previousnumber\", \"Previous Number(if applicable)\", INFO_FIELD), \n (\"docowner\", \"Owner\", META_FIELD), \n (\"lastupdated\", \"Date Last Updated\", META_FIELD), \n (\"activepolicy\", \"Is this policy active?\", META_FIELD), \n (\"documenttype\", \"Document Type\", META_FIELD), \n (\"documentkeywords\", \"Keywords\", META_FIELD), \n (\"policysubject\", \"Subject\", INFO_FIELD), \n (\"policyeffectivedate\", \"Effective Date\", META_FIELD), \n (\"policy\", \"Policy\", SECTION_FIELD), \n (\"definitions\", 
\"Definitions\", SECTION_FIELD), \n (\"procedure\", \"Procedure\", SECTION_FIELD), \n (\"signature\", \"Signature\", META_FIELD), \n (\"date\", \"Date\", META_FIELD), \n (\"versioninformation\", \"Version Information\", META_FIELD), \n (\"attachment\", \"File Attachment\", INFO_URL_FIELD), \n (\"attachments\", \"Additional Attachments\", SECTION_FIELD), \n (\"title\", \"Title\", HIDDEN_FIELD),\n)\n \nPER_FIELDS = (\n (\"description\", \"Description\", SECTION_FIELD), \n (\"perdocumentnumber\", \"Document Number\", INFO_FIELD), \n (\"perdocument\", \"Document\", INFO_URL_FIELD), \n (\"getPerdocument\", \"Document\", INFO_URL_FIELD), \n (\"perwebsiteaddress\", \"Website Address\", INFO_FIELD), \n (\"pereducationcontent\", \"Education Content\", SECTION_FIELD), \n (\"peraccessinformation\", \"Access Information\", META_FIELD), \n (\"perauthorname\", \"Author Name\", META_FIELD), \n (\"perauthoremail\", \"Author Email\", META_FIELD), \n (\"perauthorphonenumber\", \"Author Phone Number\", META_FIELD), \n (\"perkeywords\", \"Keywords\", META_FIELD), \n (\"perICD10codes\", \"ICD-10 Codes\", META_FIELD), \n (\"perICD9codes\", \"ICD-9 Codes\", META_FIELD), \n (\"peradditionalinformation\", \"Additional Information\", META_FIELD),\n (\"perliteracylevel\", \"Literacy Level\", META_FIELD), \n (\"permediatype\", \"Media Type\", META_FIELD), \n (\"perlanguage\", \"Language\", META_FIELD), \n (\"persource\", \"Source\", META_FIELD), \n (\"perLOINCcode\", \"LOINC Code\", META_FIELD), \n (\"perGPIcode\", \"GPI Code\", META_FIELD), \n (\"perCPTcode\", \"CPT Code\", META_FIELD), \n (\"perage\", \"Age\", META_FIELD), \n (\"pergender\", \"Gender\", META_FIELD), \n (\"pertopic\", \"Topic\", META_FIELD),\n (\"modification_date\", \"Late Updated\", META_FIELD), \n (\"title\", \"Title\", HIDDEN_FIELD),\n)\n\n\ndef isDocument(obj):\n for interface in INTERFACES:\n if interface.providedBy(obj):\n return True\n return False\n\ndef getType(obj):\n for interface in INTERFACES:\n if interface.providedBy(obj):\n return interface.__name__\n return None\n \ndef getDocumentGenerator(obj):\n for interface in INTERFACES:\n if interface.providedBy(obj):\n return FIELDS_INFO[interface][0]\n return None\n\n \nclass HtmlSectionBuilder:\n def __init__(self, ignoreEmptyValue=True):\n self._ignoreEmptyValue = ignoreEmptyValue\n self._html = None\n \n def start(self):\n self._html = \"\"\n \n def addField(self, title, value):\n if self._ignoreEmptyValue and not value:\n return\n self._html += \"%s:%s
\" %(title, value)\n\n def addUrlField(self, title, url, name):\n self._html += \"\"\"%s:%s
\"\"\" %(title, url, name)\n \n def end(self):\n return self._html\n \n \nclass DocumentGenerator:\n def __init__(self, fieldsInfo):\n self.__fieldsInfo = fieldsInfo\n \n def __call__(self, obj):\n # 1. Generate document info section\n htmlBuilder = HtmlSectionBuilder()\n htmlBuilder.start()\n for info in self.__fieldsInfo:\n (fieldname, fieldtitle, fieldtype) = info\n if hasattr(obj, fieldname):\n if fieldtype == INFO_FIELD:\n htmlBuilder.addField(fieldtitle, str(getattr(obj, fieldname)))\n elif fieldtype == INFO_URL_FIELD :\n docobj = obj.getPerdocument() #hack: why is perdocument not an attribute?\n if docobj:\n htmlBuilder.addUrlField(fieldtitle, \"http://attachment/\", getattr(docobj, \"filename\", \"unknown\"))\n documentInfo = htmlBuilder.end()\n documentInfoAdded = False\n # 2. Generate document\n data = []\n for fieldname, fieldtitle, fieldtype in self.__fieldsInfo:\n if hasattr(obj, fieldname):\n if fieldtype == SECTION_FIELD:\n fieldvalue = getattr(obj, fieldname, None)\n if fieldvalue:\n data.append({\"title\": fieldtitle, \"content\": str(fieldvalue)})\n elif fieldtype in (INFO_FIELD, INFO_URL_FIELD) and not documentInfoAdded and documentInfo:\n data.append({\"title\": INFO_FIELD_TITLE, \"content\": documentInfo})\n documentInfoAdded = True\n return {\"title\": obj.title, \"sections\":data}\n \n\ndef getMetadataGenerator(obj):\n for interface in INTERFACES:\n if interface.providedBy(obj):\n return FIELDS_INFO[interface][1]\n return None\n\n\nclass MetadataGenerator:\n def __init__(self, fieldsInfo):\n self.__fieldsInfo = fieldsInfo\n \n def __call__(self, obj):\n data = []\n for fieldname, fieldtitle, fieldtype in self.__fieldsInfo:\n if hasattr(obj, fieldname):\n if fieldtype == META_FIELD:\n fieldvalue = getattr(obj, fieldname, None)\n fieldvalue = self._processValue(fieldvalue)\n if fieldvalue:\n data.append({\"title\": fieldtitle, \"content\": fieldvalue})\n return data\n \n def _processValue(self, value):\n if isinstance(value, DateTime):\n return value.fCommon() #ISO8601()\n elif isinstance(value, (list, tuple)):\n return \",\".join(value)\n return self._html2text(str(value))\n \n def _html2text(self, html):\n return BeautifulSoup(html).get_text()\n\n\ndef getFieldsInfo(obj):\n for interface in INTERFACES:\n if interface.providedBy(obj):\n return FIELDS_INFO[interface][2]\n return None\n \n\nclass FieldsInfo:\n def __init__(self, fieldsInfo):\n d = {}\n for fieldname, fieldtitle, fieldtype in fieldsInfo:\n d[fieldname] = (fieldtype, fieldtitle, )\n self.__fieldsInfo = d\n \n def getFieldInfo(self, fieldname):\n return self.__fieldsInfo.get(fieldname, None)\n \n def isMetadata(self, fieldname):\n fieldinfo = self.__fieldsInfo.get(fieldname, None)\n return not fieldinfo or fieldinfo[0] == META_FIELD\n \n def getFieldTitle(self, fieldname):\n fieldinfo = self.__fieldsInfo.get(fieldname, None)\n if not fieldinfo:\n return fieldname\n return fieldinfo[1]\n\n#\n# Note, that ISections, IChapters are not a document type - it's a folder for IManuals (from old code/product reference)\nFIELDS_INFO = { \n Idocdocument : (DocumentGenerator(DOCUMENT_FIELDS), MetadataGenerator(DOCUMENT_FIELDS), FieldsInfo(DOCUMENT_FIELDS)),\n# Idochandout : (DocumentGenerator(HANDOUT_FIELDS), MetadataGenerator(HANDOUT_FIELDS), FieldsInfo(HANDOUT_FIELDS)),\n# Idocdowntime : (DocumentGenerator(DOWNTIMEPROCEDURE_FIELDS), MetadataGenerator(DOWNTIMEPROCEDURE_FIELDS), FieldsInfo(DOWNTIMEPROCEDURE_FIELDS)),\n\n# IManuals : (DocumentGenerator(MANUALS_FIELDS), MetadataGenerator(MANUALS_FIELDS), 
FieldsInfo(MANUALS_FIELDS)),\n IHandout : (DocumentGenerator(HANDOUT_FIELDS), MetadataGenerator(HANDOUT_FIELDS), FieldsInfo(HANDOUT_FIELDS)),\n Idowntimeprocedure : (DocumentGenerator(DOWNTIMEPROCEDURE_FIELDS), MetadataGenerator(DOWNTIMEPROCEDURE_FIELDS), FieldsInfo(DOWNTIMEPROCEDURE_FIELDS)),\n\n}\n\nINTERFACES = FIELDS_INFO.keys()\n\n","sub_path":"Products/UVA/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
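The FIELDS_INFO mapping in the record above dispatches on zope interfaces: the first interface an object provides selects its generator triple. A minimal sketch of that lookup pattern with zope.interface, using a toy marker interface rather than the Products.* ones:

from zope.interface import Interface, implementer

class IReport(Interface):
    """Marker interface for report-like objects."""

@implementer(IReport)
class Report(object):
    title = "example"

REGISTRY = {IReport: "report-handler"}

def get_handler(obj):
    # mirrors getDocumentGenerator(): the first provided interface wins
    for iface, handler in REGISTRY.items():
        if iface.providedBy(obj):
            return handler
    return None

print(get_handler(Report()))  # 'report-handler'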
+{"seq_id":"434088134","text":"class BinarySearch(list):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n \"\"\"poulate the list b with valid content with it's length determined\n by a.\n \"\"\"\n for number in range(self.a):\n list.append(self, self.b)\n self.b += b\n\n self.length = self.a\n\n def search(self, value):\n \"\"\" Get the index of the item with an expected number of loops in\\\n array\n :params value:\n :return: a dictionary containing {count: value, index: value}:\n \"\"\"\n item_in_list = False\n upper_limit = (self.length - 1)\n lower_limit = 0\n count = 0\n try:\n index = self.index(value)\n item_in_list = True\n except ValueError:\n index = -1\n item_in_list\n while lower_limit <= upper_limit and value != self[upper_limit] and item_in_list:\n middle_item = (lower_limit + upper_limit) // 2\n middle_value = self[middle_item]\n if value > middle_value:\n lower_limit = middle_item + 1\n count += 1\n elif value < middle_value:\n upper_limit = middle_item - 1\n count += 1\n else:\n count += 1\n break\n return {'count': count, 'index': index}\n","sub_path":"boot-camp-18-day-4/binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"300969296","text":"# -*- coding: utf-8 -*-\n\"\"\" \n@Time : 2020/11/30 17:41\n@Author : liufubin\n@FileName: test_index_calculation_public.py\n@description: 指标计算测试用例(汇总指标计算)\n\"\"\"\nimport unittest\nfrom test_case.combination_master.fund_research.fund_page.performance import test_indicator_api\nfrom public_method.indicator_calculation_method.alpha import Alpha\nfrom public_method.indicator_calculation_method.beta import Beta\nfrom public_method.indicator_calculation_method.calmar import Calmar\nfrom public_method.indicator_calculation_method.biggest_monthly_down import BiggesrMonthlyDown\nfrom public_method.indicator_calculation_method.distance_highest_rate import DistanceHighestRate\nfrom public_method.indicator_calculation_method.down_capture import DownCapture\nfrom public_method.indicator_calculation_method.downside_risk import DownsideRisk\nfrom public_method.indicator_calculation_method.downward_capture import DownwardCapture\nfrom public_method.indicator_calculation_method.information_ratio import InformationRatio\nfrom public_method.indicator_calculation_method.jensen import Jensen\nfrom public_method.indicator_calculation_method.kappa import Kappa\nfrom public_method.indicator_calculation_method.kurtosis import Kurtosis\nfrom public_method.indicator_calculation_method.omega_ratio import OmegaRtio\nfrom public_method.indicator_calculation_method.range_return_rate import RangeReturnRate\nfrom public_method.indicator_calculation_method.sharpe import Sharepe\nfrom public_method.indicator_calculation_method.skewness import Skewness\nfrom public_method.indicator_calculation_method.sotino_ratio import SotinoRatio\nfrom public_method.indicator_calculation_method.sotino_ratio_MAR import SotioRatioMar\nfrom public_method.indicator_calculation_method.standard_deviation import StandardDeviation\nfrom public_method.indicator_calculation_method.success_rate import SuccessRate\nfrom public_method.indicator_calculation_method.track_error import TrackError\nfrom public_method.indicator_calculation_method.treynor import Treynor\nfrom public_method.indicator_calculation_method.uplink_capture import UplinkCapture\nfrom public_method.indicator_calculation_method.upward_capture import UpwardCapture\nfrom request_date.combination_master.index_calculation_data import InvestmentCertificateBiomedical\n\n\nclass TestIndexCalculationPublic(unittest.TestCase):\n isskip = 1\n monthly_fund = InvestmentCertificateBiomedical.month_fund # 获取的计算指标的月度收益率\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys # 获取的计算指标的基准收益率\n years = InvestmentCertificateBiomedical.years # 获取的需要年化的年数\n risk_frees = 0.015 # 无风险收益率\n date_start_fund = InvestmentCertificateBiomedical.date_start_fund # 获取需要的日频净值数据(carmar指标计算需要)\n date_end_fund = InvestmentCertificateBiomedical.date_end_fund # 或许需要的日频净值数据(carmar指标计算需要)\n start_fund = InvestmentCertificateBiomedical.start_fund # 计算指标的开始净值\n end_fund = InvestmentCertificateBiomedical.end_fund # 计算指标的结束净值\n # url = 'https://master-test.simuwang.com/dataservice/v1/secuirty/MF00003TND/indicator?' 
\\\n # 'startDate=2019-06-01&endDate=2020-05-31&dataSource=Daily&frequency=Monthly&benchmarkId=IN00000008' \\\n # '&sampleId=MF00003TND&riskOfFreeId=IN0000000M&userId=864859&indexs=MF00003TND&t=1607076223729'\n # indicator_api_result = requests.get(url=url)\n\n def setUp(self) -> None:\n pass\n\n @unittest.skipIf(isskip == 0, '计算阿尔法用例跳过')\n def test_calculate_alpha(self, isannual=True):\n \"\"\"计算阿尔法测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = TestIndexCalculationPublic.benchmark_monthlys\n alpha_result = Alpha.alpha(monthly_fund_field=monthly_fund,\n benchmark_monthly=benchmark_monthlys, isannual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_alpha_result = float(indicator_api_result['data']['dataset'][0]['map']['Alpha'])\n self.assertTrue(round(alpha_result * 100, 2) == round(api_alpha_result * 100, 2), '计算的阿尔法与接口返回的结果不一致')\n print('阿尔法计算结果:', alpha_result)\n\n @unittest.skipIf(isskip == 0, '计算贝塔用例跳过')\n def test_calculate_beta(self):\n \"\"\"计算贝塔测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n beta_result = Beta.beta(monthly_fund_field=monthly_fund, benchmark_monthly=benchmark_monthlys)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_beta_result = float(indicator_api_result['data']['dataset'][0]['map']['Beta'])\n self.assertTrue(round(api_beta_result, 2) == round(beta_result, 2), '计算的贝塔与接口返回的结果不一致')\n print('贝塔计算结果:', beta_result)\n\n @unittest.skipIf(isskip == 0, '计算当月最大下跌用例跳过')\n def test_biggest_monthly_down(self):\n \"\"\"计算当月最大下跌,monthly_fund为基金月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n down_min_result = BiggesrMonthlyDown.biggest_monthly_down(month_fund_yield=monthly_fund)\n print('当月最大下跌计算结果:', down_min_result)\n\n @unittest.skipIf(isskip == 0, '计算下跌月份比用例跳过')\n def test_down_month_ratio(self):\n \"\"\"计算下跌月份比,monthly_fund为基金月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n down_rate_result = BiggesrMonthlyDown.down_month_ratio(month_fund_yield=monthly_fund)\n print('下跌月份比计算结果为:', down_rate_result)\n\n @unittest.skipIf(isskip == 0, '计算跑赢指数用例跳过')\n def test_batting_average(self):\n \"\"\"计算跑赢指数(胜率),monthly_fund为基金月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n win_rate_result = BiggesrMonthlyDown.batting_average(monthly_fund_field=monthly_fund,\n benchmark_monthly=benchmark_monthlys)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_winrate_result = float(indicator_api_result['data']['dataset'][0]['map']['WinRate'])\n self.assertTrue(round(api_winrate_result * 100, 2) == round(win_rate_result * 100, 2), '计算的胜率与接口返回的结果不一致')\n print('胜率计算结果为:', win_rate_result)\n\n @unittest.skipIf(isskip == 0, '计算盈亏比用例跳过')\n def tset_profit_loss_ratio(self):\n \"\"\"计算盈亏比,monthly_fund为基金月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n prolit_loss_result = BiggesrMonthlyDown.profit_loss_ratio(monthly_fund_field=monthly_fund)\n print('盈亏比计算结果:', prolit_loss_result)\n\n @unittest.skipIf(isskip == 0, '计算卡玛用例跳过')\n def test_calculate_calmar(self):\n \"\"\"计算卡玛测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n date_start_value = 
TestIndexCalculationPublic.date_start_fund\n date_end_value = TestIndexCalculationPublic.date_end_fund\n fund_net_value = TestIndexCalculationPublic.monthly_fund\n max_fund = 1.069973 # 计算最大回撤的最高点\n min_fund = 0.940556 # 计算最大回撤的最低点\n calmar_result = Calmar.calmar(startvalue=date_start_value, endvalue=date_end_value, max_fund=max_fund,\n min_fund=min_fund, valuedates=362, fund_net_value=fund_net_value)\n print(calmar_result)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_calmar_result = float(indicator_api_result['data']['dataset'][0]['map']['CalmarRatio'])\n print(api_calmar_result)\n self.assertTrue(round(calmar_result, 2) == round(api_calmar_result, 2), '计算的calmar与接口返回的结果不一致')\n print('卡玛计算结果:', calmar_result)\n\n @unittest.skipIf(isskip == 0, '距最高净值比用例跳过')\n def test_distance_highest_rate(self):\n \"\"\"计算距最高净值比,daily_fund为基金日频净值列表\"\"\"\n daily_fund = TestIndexCalculationPublic.monthly_fund\n distance_highest_result = DistanceHighestRate.distance_highest_rate(fund_daily_rate=daily_fund)\n print('距最高净值比计算结果为:', distance_highest_result)\n\n @unittest.skipIf(isskip == 0, '下行捕获率用例跳过')\n def test_down_capture(self):\n \"\"\"计算下行捕获率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n down_capture_result = DownCapture.down_capture(monthly_fund_field=monthly_fund,\n benchmark_monthly=benchmark_monthlys)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_down_capture_result = float(indicator_api_result['data']['dataset'][0]['map']['DownCaptureRatio'])\n self.assertTrue(round(down_capture_result, 2) == round(api_down_capture_result, 2), '计算的下行捕获率与接口返回的结果不一致')\n print('下行捕获率计算结果为:', down_capture_result)\n\n @unittest.skipIf(isskip == 0, '下行风险用例跳过')\n def test_downside_risk(self, isannual=True):\n \"\"\"计算下行风险测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n risk_frees = TestIndexCalculationPublic.risk_frees\n downside_risk_result = DownsideRisk.downside_risk(monthly_fund_field=monthly_fund,\n risk_free=risk_frees, isannual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_downside_risk_result = float(indicator_api_result['data']['dataset'][0]['map']['DownsideStdDev'])\n self.assertTrue(round(api_downside_risk_result * 100, 2) == round(downside_risk_result * 100, 2),\n '计算的下行风险与接口返回的结果不一致')\n print('下行风险计算结果为:', downside_risk_result)\n\n @unittest.skipIf(isskip == 0, '下行捕获收益率用例跳过')\n def test_downward_capture(self):\n \"\"\"计算下行下行捕获收益测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n downward_capture = DownwardCapture.downward_capture(monthly_fund_field=monthly_fund,\n benchmark_monthly=benchmark_monthlys)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_down_capture_result = float(indicator_api_result['data']['dataset'][0]['map']['DownCaptureReturn'])\n self.assertTrue(round(api_down_capture_result * 100, 2) == round(downward_capture * 100, 2),\n '计算的下行捕获收益率与接口返回的结果不一致')\n print('下行捕获收益率计算结果:', downward_capture)\n\n @unittest.skipIf(isskip == 0, '信息比率用例跳过')\n def test_information_ratio(self):\n 
\"\"\"计算信息比率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n information_ratio_result = InformationRatio.information_ratio(monthly_fund_field=monthly_fund,\n benchmark_monthly=benchmark_monthlys,\n isannual=True)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_information_result = float(indicator_api_result['data']['dataset'][0]['map']['InformationRatio'])\n self.assertTrue(round(api_information_result, 2) == round(information_ratio_result, 2),\n '计算的信息比率与接口返回的结果不一致')\n print('信息比率计算结果为:', information_ratio_result)\n\n @unittest.skipIf(isskip == 0, '詹森指数用例跳过')\n def test_jensen(self):\n \"\"\"计算詹森比率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n risk_frees = TestIndexCalculationPublic.risk_frees\n jensen_result = Jensen.jensern(monthly_fund_field=monthly_fund, benchmark_monthly=benchmark_monthlys,\n risk_free=risk_frees, isannual=True)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_jensen_result = float(indicator_api_result['data']['dataset'][0]['map']['Jensen'])\n self.assertTrue(round(jensen_result * 100, 2) == round(api_jensen_result * 100, 2),\n '计算的詹森指数与接口返回的结果不一致')\n print('詹森指数计算结果为:', jensen_result)\n\n @unittest.skipIf(isskip == 0, '卡帕指数用例跳过')\n def test_kappa(self, isannual=True):\n \"\"\"计算卡帕指数测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n startvalue = TestIndexCalculationPublic.start_fund\n endvalue = TestIndexCalculationPublic.end_fund\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n risk_frees = TestIndexCalculationPublic.risk_frees\n kappa_result = Kappa.kappa(startvalue=startvalue, endvalue=endvalue, yesrs=1,\n monthly_fund_field=monthly_fund, risk_free=risk_frees, isannual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_kappa_result = float(indicator_api_result['data']['dataset'][0]['map']['Kappa'])\n self.assertTrue(round(kappa_result, 2) == round(api_kappa_result, 2),\n '计算的卡帕与接口返回的结果不一致')\n print('卡帕计算结果为:', kappa_result)\n\n @unittest.skipIf(isskip == 0, '峰度计算用例跳过')\n def test_kurtosis(self):\n \"\"\"计算峰度测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n kurtosis_result = Kurtosis.kurtosis(month_fund_yield=monthly_fund)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_kurtosis_result = float(indicator_api_result['data']['dataset'][0]['map']['Kurtosis'])\n self.assertTrue(round(kurtosis_result, 2) == round(api_kurtosis_result, 2),\n '计算的卡帕与接口返回的结果不一致')\n print('峰度计算结果为:', kurtosis_result)\n\n @unittest.skipIf(isskip == 0, '欧米伽用例跳过')\n def test_omega_ratio(self):\n \"\"\"计算下欧米伽测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n startvalue = TestIndexCalculationPublic.start_fund\n endvalue = TestIndexCalculationPublic.end_fund\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n risk_frees = TestIndexCalculationPublic.risk_frees\n omega_result = OmegaRtio.omega_ratio(startvalue=startvalue, endvalue=endvalue,\n valuedates=365, risk_free_year=risk_frees, monthly_fund_field=monthly_fund)\n indicator_api_result = 
test_indicator_api.TestIndicator.combination_test_environment().json()\n api_omega_result = float(indicator_api_result['data']['dataset'][0]['map']['Omega'])\n self.assertTrue(round(omega_result, 2) == round(api_omega_result, 2),\n '计算的欧米伽与接口返回的结果不一致')\n print('欧米伽计算结果为:', omega_result)\n\n @unittest.skipIf(isskip == 0, '区间收益率用例跳过')\n def test_interval_return_rate(self, isannual=False):\n \"\"\"计算区间收益测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n startvalue = TestIndexCalculationPublic.start_fund\n endvalue = TestIndexCalculationPublic.end_fund\n interval_return_result = RangeReturnRate.annual_earnning_dates(startvalue=startvalue, endvalue=endvalue,\n isannual=isannual, valuedates=365)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_interval_return_result = float(indicator_api_result['data']['dataset'][0]['map']['IntervalReturn'])\n self.assertTrue(round(interval_return_result * 100, 2) == round(api_interval_return_result * 100, 2),\n '计算的区间收益与接口返回的结果不一致')\n print('区间收益率计算结果为', interval_return_result)\n\n @unittest.skipIf(isskip == 0, '区间收益率年化用例跳过')\n def test_interval_return_rate_annual(self, isannual=True):\n \"\"\"计算下区间年化收益测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n startvalue = TestIndexCalculationPublic.start_fund\n endvalue = TestIndexCalculationPublic.end_fund\n interval_return_result = RangeReturnRate.annual_earnning_dates(startvalue=startvalue, endvalue=endvalue,\n isannual=isannual, valuedates=365)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_interval_return_result = float(indicator_api_result['data']['dataset'][0]['map']['AnnualReturn'])\n self.assertTrue(round(interval_return_result * 100, 2) == round(api_interval_return_result * 100, 2),\n '计算的年化收益与接口返回的结果不一致')\n print('年化收益率计算结果为', interval_return_result)\n\n @unittest.skipIf(isskip == 0, '夏普用例跳过')\n def test_sharpe(self, isannual=True):\n \"\"\"计算夏普比率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n risk_frees = TestIndexCalculationPublic.risk_frees\n sharp_result = Sharepe.sharpe(monthly_fund_field=monthly_fund, risk_free=risk_frees, isannual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_sharpe_result = float(indicator_api_result['data']['dataset'][0]['map']['SharpeRatio'])\n self.assertTrue(round(sharp_result, 2) == round(api_sharpe_result, 2),\n '计算的夏普比率与接口返回的结果不一致')\n print('夏普计算结果为:', sharp_result)\n\n @unittest.skipIf(isskip == 0, '偏度用例跳过')\n def test_skewness(self):\n \"\"\"计算偏度测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n skewness_result = Skewness.skewness(month_fund_yield=monthly_fund)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_skewness_result = float(indicator_api_result['data']['dataset'][0]['map']['Skewness'])\n self.assertTrue(round(skewness_result, 2) == round(api_skewness_result, 2),\n '计算的偏度比率与接口返回的结果不一致')\n print('偏度计算结果为:', skewness_result)\n\n @unittest.skipIf(isskip == 0, '索提诺用例跳过')\n def test_sotino_ratio(self, isannual=True):\n \"\"\"计算索提诺比率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n risk_frees = TestIndexCalculationPublic.risk_frees\n sotino_result = SotinoRatio.sotio_ratio(monthly_fund_field=monthly_fund, risk_free=risk_frees,\n 
isannual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_sotino_result = float(indicator_api_result['data']['dataset'][0]['map']['SortinoRatio'])\n self.assertTrue(round(sotino_result, 2) == round(api_sotino_result, 2),\n '计算的索提诺比率与接口返回的结果不一致')\n print('索提诺计算结果为:', sotino_result)\n\n @unittest.skipIf(isskip == 0, '索提诺MAR用例跳过')\n def test_sotino_mar(self, isannual=True):\n \"\"\"计算索提诺MAR测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n risk_frees = TestIndexCalculationPublic.risk_frees\n sotio_mar_result = SotioRatioMar.sotio_ratio_mar(monthly_fund_field=monthly_fund, risk_free=risk_frees,\n isannual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_sotino_mar_result = float(indicator_api_result['data']['dataset'][0]['map']['SortinoRatioMAR'])\n self.assertTrue(round(sotio_mar_result, 2) == round(api_sotino_mar_result, 2),\n '计算的索提诺比率(MAR)与接口返回的结果不一致')\n print('索提诺mar计算结果为:', sotio_mar_result)\n\n @unittest.skipIf(isskip == 0, '标准差用例跳过')\n def test_standard_deviation(self, isannual=True):\n \"\"\"计算标准差测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n standard_annual_result = StandardDeviation.standard_deviation(month_earning_list=monthly_fund,\n is_annual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_standard_annual_result = float(indicator_api_result['data']['dataset'][0]['map']['AnnualStdDev'])\n self.assertTrue(round(standard_annual_result * 100, 2) == round(api_standard_annual_result * 100, 2),\n '计算的年化标准差与接口返回的结果不一致')\n print('标准差计算结果为:', standard_annual_result)\n\n @unittest.skipIf(isskip == 0, '胜率用例跳过')\n def test_success_rate(self):\n \"\"\"计算胜率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n success_result = SuccessRate.success_rate(monthly_fund_field=monthly_fund, benchmark_monthly=benchmark_monthlys)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_winrate_result = float(indicator_api_result['data']['dataset'][0]['map']['WinRate'])\n self.assertTrue(round(api_winrate_result * 100, 2) == round(success_result * 100, 2), '计算的胜率与接口返回的结果不一致')\n print('胜率计算结果为:', success_result)\n\n @unittest.skipIf(isskip == 0, '跟踪误差用例跳过')\n def test_track_error(self, isannual=True):\n \"\"\"计算跟踪误差测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n monthly_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = InvestmentCertificateBiomedical.benchmark_monthlys\n track_error_result = TrackError.track_error(monthly_fund_field=monthly_fund,\n benchmark_monthly=benchmark_monthlys, isannual=isannual)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_track_error_result = float(indicator_api_result['data']['dataset'][0]['map']['TrackingError'])\n self.assertTrue(round(track_error_result * 100, 2) == round(api_track_error_result * 100, 2),\n '计算的跟踪误差与接口返回的结果不一致')\n print('跟踪误差计算结果为:', track_error_result)\n\n @unittest.skipIf(isskip == 0, '特雷诺用例跳过')\n def test_treynor(self):\n \"\"\"计算特雷诺测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n risk_frees = TestIndexCalculationPublic.risk_frees\n month_fund = 
TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = TestIndexCalculationPublic.benchmark_monthlys\n start_fund = TestIndexCalculationPublic.start_fund\n end_fund = TestIndexCalculationPublic.end_fund\n treynor_result = Treynor.treynor(risk_free_year=risk_frees, monthly_fund_field=month_fund,\n benchmark_monthly=benchmark_monthlys, startvalue=start_fund,\n endvalue=end_fund)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_treynor_result = float(indicator_api_result['data']['dataset'][0]['map']['TreynorRatio'])\n self.assertTrue(round(treynor_result, 2) == round(api_treynor_result, 2),\n '计算的特雷诺比率与接口返回的结果不一致')\n print('特雷诺计算结果为:', treynor_result)\n\n @unittest.skipIf(isskip == 0, '上行捕获率用例跳过')\n def test_uplink_capture(self):\n \"\"\"计算上行捕获率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n month_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = TestIndexCalculationPublic.benchmark_monthlys\n uplink_capture_result = UplinkCapture.uplink_capture(monthly_fund_field=month_fund,\n benchmark_monthly=benchmark_monthlys)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_upcapture_result = float(indicator_api_result['data']['dataset'][0]['map']['UpCaptureRatio'])\n self.assertTrue(round(uplink_capture_result, 2) == round(api_upcapture_result, 2),\n '计算的上行捕获率与接口返回的结果不一致')\n print('上行捕获率计算结果为:', uplink_capture_result)\n\n @unittest.skipIf(isskip == 0, '上行捕获收益率用例跳过')\n def test_upward_capture(self):\n \"\"\"计算上行捕获收益率测试用例,monthly_fund为基金月度收益率,benchmark_monthlys为基准月度收益率\"\"\"\n month_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = TestIndexCalculationPublic.benchmark_monthlys\n upward_capture_result = UpwardCapture.upward_capture(monthly_fund_field=month_fund,\n benchmark_monthly=benchmark_monthlys)\n indicator_api_result = test_indicator_api.TestIndicator.combination_test_environment().json()\n api_upcapture_result = float(indicator_api_result['data']['dataset'][0]['map']['UpCaptureReturn'])\n self.assertTrue(round(upward_capture_result, 2) == round(api_upcapture_result, 2),\n '计算的上行捕获收益率与接口返回的结果不一致')\n print('上行捕获收益率计算结果为:', upward_capture_result)\n\n @staticmethod\n def owner_calculation_result_dict():\n \"\"\"计算指标汇总成字典展示\"\"\"\n # risk_frees = TestIndexCalculationPublic.risk_frees\n month_fund = TestIndexCalculationPublic.monthly_fund\n benchmark_monthlys = TestIndexCalculationPublic.benchmark_monthlys\n start_fund = TestIndexCalculationPublic.start_fund\n end_fund = TestIndexCalculationPublic.end_fund\n owner_calculation_dict = dict()\n owner_calculation_dict['Alpha'] = Alpha.alpha(monthly_fund_field=month_fund,\n benchmark_monthly=benchmark_monthlys, isannual=True)\n owner_calculation_dict['IntervalReturn'] = RangeReturnRate.annual_earnning_dates(startvalue=start_fund,\n endvalue=end_fund,\n isannual=False,\n valuedates=365)\n\n\nif __name__ == '__main__':\n dir_name = dir(TestIndexCalculationPublic)\n case_name = []\n suite = unittest.TestSuite()\n for value in dir_name:\n if value.startswith('test'):\n case_name.append(value)\n suite.addTest(TestIndexCalculationPublic(value))\n print(case_name)\n runner = unittest.TextTestRunner(verbosity=2)\n 
runner.run(suite)\n","sub_path":"test_case/combination_master/fund_research/fund_page/performance/test_index_calculation_public.py","file_name":"test_index_calculation_public.py","file_ext":"py","file_size_in_byte":30721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
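The suite above delegates the maths to project-internal helpers (Alpha.alpha, Beta.beta, and so on). For reference, the conventional regression-based formulas those names usually denote can be sketched with numpy; this is the textbook definition, not necessarily what the project's helpers implement:

import numpy as np

def beta(fund, benchmark):
    # slope of fund returns against benchmark returns
    fund, benchmark = np.asarray(fund), np.asarray(benchmark)
    return np.cov(fund, benchmark, ddof=1)[0, 1] / np.var(benchmark, ddof=1)

def jensen_alpha(fund, benchmark, risk_free_monthly):
    # mean excess return not explained by market exposure (monthly, not annualised)
    b = beta(fund, benchmark)
    fund, benchmark = np.asarray(fund), np.asarray(benchmark)
    return np.mean(fund - risk_free_monthly - b * (benchmark - risk_free_monthly))

fund = [0.02, -0.01, 0.03, 0.015]
bench = [0.015, -0.005, 0.02, 0.01]
print(beta(fund, bench), jensen_alpha(fund, bench, 0.015 / 12))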
+{"seq_id":"1546684","text":"def pronic(n):\n for i in range(1,(n//2)+1):\n if i*(i+1)==n:\n return True\n return False\nx=input()\nara=[x[i:j+1]for i in range(len(x)) for j in range(i,len(x))]\nfinal=set()\nfor i in ara:\n if pronic(int(i)):\n final.add(int(i))\nprint(sorted(final))\n","sub_path":"pronicnumber.py","file_name":"pronicnumber.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"381757445","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n AdnReport\n A QGIS plugin\n Prégénérer les fichiers et dossier pour la génération de rapport pour ADN\n -------------------\n begin : 2018-01-08\n git sha : $Format:%H$\n copyright : (C) 2018 by gbruel/metis\n email : g.bruel@metis-reseaux.fr\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\nfrom PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication\nfrom PyQt4.QtGui import QAction, QIcon\nfrom PyQt4 import QtGui, QtCore\nimport sys\n\n\n# Initialize Qt resources from file resources.py\nimport resources\n# Import the code for the dialog\nfrom Adn_Report_dialog import AdnReportDialog\nfrom os.path import expanduser\nimport os.path, csv, time, shutil # specific \n\n\nclass AdnReport:\n \"\"\"QGIS Plugin Implementation.\"\"\"\n export_result = []\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'AdnReport_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u'&Rapport ADN')\n # TODO: We are going to let the user set this up in a future iteration\n self.toolbar = self.iface.addToolBar(u'AdnReport')\n self.toolbar.setObjectName(u'AdnReport')\n\n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('AdnReport', message)\n\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. 
Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n # Create the dialog (after translation) and keep reference\n self.dlg = AdnReportDialog()\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n icon_path = ':/plugins/AdnReport/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Rapports ADN'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Rapport ADN'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar\n\n\n def isInList(self, val, li):\n \"\"\"Return index of value find in list or -1 if value is not exist in list\"\"\"\n res = False \n if val and li:\n try :\n res = li.index(val) \n except ValueError:\n res = False\n return res \n \n def rmDblToCombo(self,array,cb):\n cb.clear()\n cb.addItem(\"Select all opportunity\")\n \"\"\"Remove dupplicate value from given array and import unic values to given combo\"\"\"\n cb.setEnabled(True); \n t = list(set(array))\n clean = []\n for elem in t:\n typeVar = type(elem).__name__\n if typeVar == \"unicode\" or typeVar == \"str\": \n if cb.findText(elem) < 0:\n clean.append(elem)\n cb.addItem(elem) \n return clean\n \n \n def searchFile(self): \n \"\"\"Open window to search template file\"\"\"\n \"\"\"Update text box with path value\"\"\"\n def test(string, expression):\n test = False\n if string in expression:\n test = True\n return test \n validFormat = \"xls\"\n file = QtGui.QFileDialog.getOpenFileName(None, 'Open file')\n \"\"\"Valid file format\"\"\"\n isValid = test(validFormat, file)\n if not isValid or isValid == \"\" :\n file = \"Please, select valid file !\"\n \"\"\"Update text box with path value\"\"\"\n return self.dlg.pathTpl.setText(file)\n\n def searchFolder(self):\n \"\"\"Method to get path in order to export file to path\"\"\" \n folder = QtGui.QFileDialog.getExistingDirectory(None, 'Open folder', expanduser('~')) \n \"\"\"Update text box with path value\"\"\"\n self.dlg.pathFolder.setText(folder)\n\n def getLayerFromCb(self, cbString):\n res = False\n layers = self.iface.legendInterface().layers();\n for x in layers:\n if x.name() == cbString:\n res = x\n break\n return res\n \n def 
layersToCombo(self, combo):\n \"\"\"Create array to use map layers\"\"\"\n layer = \"\"\n layer_list= []\n layers = self.iface.legendInterface().layers();\n for layer in layers:\n if layer.name() and layer.type() == 0:\n layer_list.append(layer.name())\n combo.addItems(layer_list)\n\n def getLayerFields(self,layer):\n fieldsName = []\n \"\"\"parse layer to get opportunity values\"\"\" \n fields = layer.dataProvider().fields()\n for field in fields:\n fieldsName.append(field.name())\n return fieldsName\n\n def fieldValues(self, layer, val):\n # retourne les valeurs pour un champ donné dans une couche donnée\n \"\"\"if user select layer in combo, return attributes as list \"\"\" \n res = False\n if val != \"\": \n cbList = [] \n fields = self.getLayerFields(layer) # list of fields \n idx = self.isInList(val, fields) # control if field exist in layer \n # Correction apply : if index is first, index = int(0). So, python indentify index as False.\n if idx != False or idx > -1: \n features = layer.getFeatures() # array that contain all attributes values without fields name\n for el in features:\n cbList.append(el.attributes()[idx]) \n res = cbList # return list of opportunity states values \n return res\n\n def oppFiltering(self, idFromGc, idFromSy, gcLayer, syLayer, cbOfState, cbO): \n \"\"\"return opportunity according to state value or not\"\"\"\n finalAttr = [] \n def getOppFromLayer (layer, cbId, cbSt, cbOp):\n oppResult = []\n layerRead = self.getLayerFromCb(layer.currentText())\n idLayer = cbId.currentText()\n state = cbSt.currentText()\n defaultValue = cbSt.itemText(0)\n if layerRead != False:\n cbOp.clear()\n self.export_result = {} \n filterVal = [] \n cbOp.addItem(\"Select all opportunity\") \n # return list of id for gc layer \n layerOpp = self.fieldValues(layerRead, idLayer) \n # return all features \n layerFeatures = layerRead.getFeatures()\n # return all fields \n layerFields = self.getLayerFields(layerRead)\n # return position of given field in layer fields \n posId = self.isInList(idLayer, layerFields) # to get id attributes # bug \n posState = self.isInList(\"statut\",layerFields) # si on a bien le champ statut donne alors la position du champ, sinon renvoi false \n \n if posState != False or posState > -1: \n filterVal = self.fieldValues(layerRead,\"statut\") \n \n for feature in layerFeatures: # on regarde toutes les features de la couche\n idAttr = feature.attributes()[posId] # on prend la valeur de l'id pour la feature \n if state == defaultValue :\n oppResult.append(idAttr)\n else:\n stateAttr = feature.attributes()[posState] # on prend le statut pour cette même feature \n isFilter = self.isInList(state,filterVal) # on test si la valeur sélectionnée est dans la liste des statuts \n if isFilter != False or isFilter > -1: # si c'est le cas, alors on filtre \n if stateAttr == state: # on filtre donc sur le statut souhaité pour ne prendre que les features qui ont un statut identique au statut sélectionné \n oppResult.append(idAttr) # on ajoutera la feature dans une liste \n return oppResult\n # return sum of opportunity for each combo whithout duplicate value\n listGc = getOppFromLayer(gcLayer, idFromGc, cbOfState, cbO) \n listSy = getOppFromLayer(syLayer, idFromSy, cbOfState, cbO)\n finalAttr = listGc + listSy\n \n return self.rmDblToCombo(finalAttr,cbO)\n \n def cbStateEl(self, combo):\n # get count of cb items and returns the text for the given index in the combobox\n cbData = []\n for i in range(combo.count()):\n cbData.append(combo.itemText(i))\n return cbData \n \n def 
cbUpdate(self,cb,val):\n \"\"\"Function to parse state combo list and remove state not listed in selected ids\"\"\" \n attributes = []\n cb.clear()\n cb.addItem(\"Select all \" + val)# display default message \n layerGC = self.getLayerFromCb(self.dlg.comboGC.currentText()) \n layerSynthese = self.getLayerFromCb(self.dlg.comboSynthese.currentText())\n if layerGC != False :\n listValuesGc = self.fieldValues(layerGC,val) \n if listValuesGc != False :\n attributes = attributes + listValuesGc\n if layerSynthese != False:\n listValuesSynthese = self.fieldValues(layerSynthese,val)\n if listValuesSynthese != False:\n attributes = attributes + listValuesSynthese # list all opportunity from layers \n if len(attributes)>0:\n cb.setEnabled(True); \n self.rmDblToCombo(attributes,cb)\n else : \n cb.setEnabled(False)\n\n def createFile(self):\n \"\"\"create folder to contain report by opportunity\"\"\" \n listOpp = self.cbStateEl(self.dlg.cbOpp) \n layers = [\n self.getLayerFromCb(self.dlg.comboGC.currentText()),\n self.getLayerFromCb(self.dlg.comboSynthese.currentText()) \n ] \n selectOpp = self.dlg.cbOpp.currentText() #get selected value in combo\n defaultValue = self.dlg.cbOpp.itemText(0)\n if(selectOpp) != defaultValue:\n listOpp = [selectOpp] \n # use this code if user select all\n if len(listOpp)>1:\n del(listOpp[0])\n for opp in listOpp: \n '''create folder'''\n folder = self.dlg.pathFolder.text() + \"/\"+opp\n if not os.path.exists(folder):\n os.makedirs(folder)\n '''copy template'''\n template = self.dlg.pathTpl.text()\n shutil.copy(template,folder) # copie du template\n '''export to csv'''\n for layer in layers: # traitement par couche\n if layer != False:\n docName = False\n # create csv file\n if \"gc\" in layer.name() or \"GC\" in layer.name() or \"Gc\" in layer.name():\n docName = folder+\"/gc.csv\" \n elif \"synthese\" in layer.name() or \"Synthese\" in layer.name() or \"Synthèse\" in layer.name() or \"synthèse\" in layer.name(): \n docName = folder+\"/synthese.csv\"\n # control docname is not wrong\n if docName != False:\n output_file = open(docName,\"w\")\n # get and add fields to csv\n fields = layer.pendingFields()\n fieldname = [field.name() for field in fields]\n lineField = line = \",\".join(fieldname) + \"\\n\"\n unicode_fields = lineField.encode(\"utf-8\") \n output_file.write(unicode_fields)\n # filter features to add to csv\n features = layer.getFeatures() \n for f in features: \n # get attribute \n attr = [el for el in f.attributes()]\n # parse all feature's values\n for val in range(len(attr)):\n item = attr[val] \n if item == opp: \n find = self.isInList(val, listOpp) \n # if feature is search write in csv\n if find != False or find > -1:\n line = \",\".join(unicode(f[x]) for x in fieldname) + \"\\n\"\n unicode_line = line.encode(\"utf-8\") \n output_file.write(unicode_line) \n output_file.close() \n def updateCbId(self,val,combo,st): \n \"\"\"We begin by activate state combo and load this combo by states values\"\"\"\n self.cbUpdate(st, \"statut\")\n \"\"\"Search Id in given layer's fields name and load fields name in this combo\"\"\"\n selectLayer = \"\"\n fieldsName = []\n idFind = \"\"\n layers = self.iface.legendInterface().layers()\n idx = 0\n \"\"\"Get layer's name selected in combobox and return real layer object from Qgis canvas\"\"\"\n selectLayer = self.getLayerFromCb(val)\n \"\"\"From layer parse fields and return field name that contain \"id\" value \"\"\"\n if combo and val and (selectLayer != False) :\n # update id combo\n combo.clear()\n 
combo.setEnabled(True)\n fieldsName = self.getLayerFields(selectLayer) # get fields name\n combo.addItems(fieldsName) # load values in combo id\n \"\"\"Search first occurency that contain \"id\" value and define as default index\"\"\"\n for name in fieldsName:\n if (\"id\" in name) or (\"Id\" in name) or (\"ID\" in name) or (\"iD\" in name): # if field name contain \"id\" str we set this name index by default combo value\n idx = fieldsName.index(name)\n break\n combo.setCurrentIndex(idx)\n else:\n \"\"\"Restore default combo state\"\"\"\n combo.clear() \n combo.addItem(\"Select id\")\n combo.setEnabled(False)\n \n\n \"\"\"Init combo elements\"\"\"\n def initCb (self, cb, cbId, cbSt):\n #load layer list to combobox \n self.layersToCombo(cb)\n # event on clic \n cb.currentIndexChanged.connect(lambda: self.updateCbId(cb.currentText(), cbId, cbSt)) \n \n def run(self):\n \"\"\"Run method that performs all the real work\"\"\"\n # show the dialog\n self.dlg.show()\n \"\"\"\"To connect event to gui elements\"\"\"\n cbGC = self.dlg.comboGC\n cbSynthese = self.dlg.comboSynthese\n cbGcId = self.dlg.idGC\n cbSyntheseId = self.dlg.idSynthese\n cbState = self.dlg.cbState\n cbOpp = self.dlg.cbOpp\n # init combo\n self.initCb(cbGC, cbGcId,cbState)\n self.initCb(cbSynthese, cbSyntheseId,cbState)\n # buttons\n self.dlg.buttonFile.clicked.connect(self.searchFile) \n self.dlg.buttonFolder.clicked.connect(self.searchFolder) \n\n '''here we need to load opportunity list wehen user select id field to get opp values''' \n for el in [cbGcId, cbSyntheseId, cbState] :\n el.currentIndexChanged.connect(lambda: self.oppFiltering(cbGcId, cbSyntheseId, cbGC, cbSynthese, cbState, cbOpp))\n self.state = [] \n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n self.createFile()\n # substitute with your code.\n pass\n","sub_path":"AdnReport/Adn_Report.py","file_name":"Adn_Report.py","file_ext":"py","file_size_in_byte":20910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
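createFile() in the record above assembles CSV rows by joining attribute values with commas, which breaks as soon as a value itself contains a comma or a newline. A sketch of the same export step using the csv module's quoting instead (the field names here are illustrative, not the plugin's actual layers):

import csv

rows = [
    {"id": "OPP-1", "statut": "open", "comment": "dig here, then backfill"},
    {"id": "OPP-2", "statut": "closed", "comment": "done"},
]

with open("gc.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["id", "statut", "comment"])
    writer.writeheader()
    # the csv module handles quoting, so embedded commas survive the round trip
    writer.writerows(rows)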
+{"seq_id":"402286355","text":"import requests\nimport csv\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime\n\nbase_url = \"https://www.finam.ru/quotes/stocks/russia/\"\n\n\ndef parse(url):\n session = requests.Session()\n shares = []\n i = 0\n while True:\n i += 1\n request = session.get(\"https://www.finam.ru/quotes/stocks/russia/?pageNumber=\" + str(i))\n if request.status_code == 500:\n break\n print(request.status_code)\n print(i)\n soup = bs(request.content, 'html.parser')\n rows = soup.find_all('tr', attrs={'class': 'pages2-QuoteOnline-components-QuoteTable-___QuoteTable__tableRow___1AApT pages2-QuoteOnline-components-QuoteTable-___QuoteTable__withHover___1vTk9'})\n for row in rows:\n title = row.find('a', attrs={\"class\": \"pages2-QuoteOnline-components-QuoteTable-components-InstrumentLink-___InstrumentLink__instrument___1POp_\"}).text\n price = row.find_all('td')[2].text\n shares.append({\n 'title': title,\n 'price': price\n })\n with open('index.csv', 'a') as csv_file:\n writer = csv.writer(csv_file)\n print('\\n')\n writer.writerow(['title', 'price', datetime.now()])\n for share in shares:\n writer.writerow([share['title'], share['price']])\n\n\nparse(base_url)\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"559830296","text":"'''\nFrom> Make your own neural network\n'''\nimport numpy\nimport scipy.special\nimport matplotlib.pyplot\n\n\nclass neuralNetwork():\n\n def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n # Set number of nodes in each input, hidden, output layer\n self.inodes = inputnodes\n self.hnodes = hiddennodes\n self.onodes = outputnodes\n\n # Link weight matrices, input to hidden, and hidden to out\n self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))\n self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))\n\n # Learning rage\n self.lr = learningrate\n\n # Sigmoid activation\n self.activation_function = lambda x:scipy.special.expit(x)\n\n pass\n\n def train(self, input_list, target_list):\n # TODO > input_list comes in 2 functionsand we repeat code, do a decorator\n # Convert input list to 2d array\n inputs = numpy.array(input_list, ndmin=2).T\n targets = numpy.array(target_list, ndmin=2).T\n\n # TODO > Also we repeat more code..\n # Calculate signals into hidden layer\n hidden_inputs = numpy.dot(self.wih, inputs)\n # Calculate signals emerging from hidden layer\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # Calculate signals into final output layer\n final_inputs = numpy.dot(self.who, hidden_outputs)\n # Calculate signals emerging from final output layer\n final_outputs = self.activation_function(final_inputs)\n\n # Error is the (target - actual)\n output_errors = targets - final_outputs\n # Gidden layer error is the output_errors, split by weights, recombined at hidden nodes\n hidden_errors = numpy.dot(self.who.T, output_errors)\n\n # Update the weights for the links between the hidden and output layers\n self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), \\\n numpy.transpose(hidden_outputs))\n\n # Update the weights for the links between the input and output hidden\n self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), \\\n numpy.transpose(inputs))\n\n\n pass\n\n # Make a question to the nn\n def query(self, input_list):\n # Convert input list to 2d array\n inputs = numpy.array(input_list, ndmin=2).T\n\n # Calculate signals into hidden layer\n hidden_inputs = numpy.dot(self.wih, inputs)\n # Calculate signals emerging from hidden layer\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # Calculate signals into final output layer\n final_inputs = numpy.dot(self.who, hidden_outputs)\n # Calculate signals emerging from final output layer\n final_outputs = self.activation_function(final_inputs)\n\n return final_outputs\n\n\n\nif __name__ == '__main__':\n print('Init')\n # Creating a neural network\n # input_nodes = 3\n # hidden_nodes = 3\n # output_nodes = 3\n # learning_rate = 0.3\n\n # n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n\n # print(n.query([1.0, 0.5, -1.5]))\n\n input_nodes = 784\n hidden_nodes = 100\n output_nodes = 10\n\n learning_rate = 0.1\n\n n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n\n # Mnist train\n training_data_file = open('mnist_dataset/mnist_train_100.csv', 'r')\n training_data_list = training_data_file.readlines()\n training_data_file.close()\n\n # Train the network\n epochs = 5\n\n print('Training')\n for e in range(epochs):\n for record in training_data_list:\n all_values = record.split(',')\n # Scale and shift inputs\n inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n 
targets = numpy.zeros(output_nodes) + 0.01\n targets[int(all_values[0])] = 0.99\n n.train(inputs, targets)\n\n # Mnist test\n test_data_file = open('mnist_dataset/mnist_test_10.csv', 'r')\n test_data_list = test_data_file.readlines()\n test_data_file.close()\n\n # Test the network\n scorecard = []\n\n print('Testing')\n for record in test_data_list:\n # Todo , dupped code, make a better function\n all_values = record.split(',')\n correct_label = int(all_values[0])\n inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n outputs = n.query(inputs)\n label = numpy.argmax(outputs)\n if label == correct_label:\n scorecard.append(1)\n else:\n scorecard.append(0)\n\n # Calculate performance rate\n scorecard_array = numpy.asarray(scorecard)\n print('performance = ' + str(scorecard_array.sum() / scorecard_array.size))\n\n\n\n\n\n","sub_path":"myonn.py","file_name":"myonn.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"92696618","text":"from PIL import Image, ImageDraw\nfrom random import random, uniform\nimport colorsys\nimport copy\n\n\nclass Gradient(object):\n def __init__(self, image_size):\n self.image_size = image_size\n\n def generate_color(self):\n hue, lightness, satuation = random(), \\\n uniform(0.45, 0.65), uniform(0.6, 1)\n\n return {\"hue\": hue, \"lightness\": lightness,\n \"satuation\": satuation}\n\n def generate_second_color(self, color):\n second_color = copy.copy(color)\n hue = second_color[\"hue\"] + (1 / 360 * 90)\n if hue > 1:\n hue = hue - 1\n\n second_color[\"hue\"] = hue\n return second_color\n\n def convert_hls_to_rgb(self, hls_color):\n rgb_color = colorsys.hls_to_rgb(hls_color[\"hue\"],\n hls_color[\"lightness\"],\n hls_color[\"satuation\"])\n rgb_color = list(map(lambda x: int(x * 255), rgb_color))\n\n return rgb_color\n\n def random_gradient(self):\n img = Image.new(\"RGB\", (self.image_size, self.image_size), \"#FFFFFF\")\n draw = ImageDraw.Draw(img)\n\n first_color = self.generate_color()\n second_color = self.generate_second_color(first_color)\n\n first_color = self.convert_hls_to_rgb(first_color)\n second_color = self.convert_hls_to_rgb(second_color)\n\n r, g, b = first_color[0], first_color[1], first_color[2]\n delta_r = (second_color[0] - r) / float(self.image_size)\n delta_g = (second_color[1] - g) / float(self.image_size)\n delta_b = (second_color[2] - b) / float(self.image_size)\n for i in range(self.image_size):\n r, g, b = r + delta_r, g + delta_g, b + delta_b\n draw.line((i, 0, i, self.image_size),\n fill=(int(r), int(g), int(b)))\n\n img = img.rotate(45)\n scaled_size = self.image_size + self.image_size * 0.45\n img = img.resize((int(scaled_size), int(scaled_size)))\n x = (self.image_size * 0.45) / 2\n x2 = self.image_size + ((self.image_size * 0.45) / 2)\n img = img.crop((int(x), int(x), int(x2), int(x2)))\n\n return img\n","sub_path":"reddit_to_imgurImage/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"548601963","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for\n)\nfrom werkzeug.exceptions import abort\n\nfrom flaskr.auth import login_required\nfrom flaskr.db import get_db\nfrom datetime import datetime\n\nbp = Blueprint('dataAnalysis', __name__)\n\n\n@bp.route('/')\ndef index():\n db = get_db()\n posts = db.execute(\n 'SELECT p.id, ticker, description, bidprice, askPrice, volume, valueTimestamp, author_id, username'\n ' FROM scripvalue p JOIN user u ON p.author_id = u.id'\n ' ORDER BY valueTimestamp DESC'\n ' LIMIT 5'\n ).fetchall()\n return render_template('dataAnalysis/index.html', posts=posts)\n\n\n@bp.route('/manual', methods=('GET', 'POST'))\n@login_required\ndef create():\n if request.method == 'POST':\n ticker = request.form['ticker']\n askPrice = request.form['askPrice']\n bidPrice = request.form['bidPrice']\n volume = request.form['volume']\n valueTimestampStr = request.form['timestamp']\n description = request.form['description']\n error = None\n\n if not ticker:\n error = 'Ticker is required.'\n\n if not askPrice:\n error = 'Ask price is required.'\n\n if not bidPrice:\n error = 'Bid price is required.'\n\n if not volume:\n error = 'Volume is required.'\n\n if not valueTimestampStr:\n error = 'Timestamp is required'\n\n value_time_stamp_obj = None\n try:\n value_time_stamp_obj = datetime.strptime(valueTimestampStr, '%d/%m/%Y %H:%M')\n except ValueError:\n error = 'Timestamp should be in dd/mm/YYYY HH:MM format'\n\n if error is not None:\n flash(error)\n else:\n\n db = get_db()\n db.execute(\n 'INSERT INTO scripvalue (ticker, askPrice, bidPrice, volume, valueTimestamp, author_id)'\n ' VALUES (?, ?, ?, ?, ?, ?)',\n (ticker, askPrice, bidPrice,volume,value_time_stamp_obj, g.user['id'])\n )\n db.commit()\n return redirect(url_for('dataAnalysis.index'))\n\n return render_template('dataAnalysis/manual.html')","sub_path":"flaskr/dataAnalysis.py","file_name":"dataAnalysis.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"31993690","text":"import cv2\r\nimport os\r\nfrom glob2 import glob\r\nimport os, os.path, shutil\r\nimport numpy as np\r\n# def make_file():\r\n #批量复制创建文件夹,\r\nos.getcwd()\r\nos.chdir('E:/123456/Images')\r\nfile = glob('*')\r\nprint(file)\r\nos.chdir('E:/123456/Images3')\r\nfor JPG in file:\r\n isExists = os.path.exists(JPG)\r\n if not isExists:\r\n os.makedirs(JPG)\r\n#批量平移图片存入已创建文件夹\r\nos.getcwd()\r\nos.chdir('E:/123456/Images')\r\nfiles = glob('*/*.jpg')\r\n\r\nroot_path = \"E:/123456/Images3/\"\r\n\r\nfor jpg in files: #确认文件格式\r\n img = cv2.imdecode(np.fromfile(jpg, dtype=np.uint8), -1)\r\n imgInfo = img.shape\r\n cols = imgInfo[0]\r\n rows = imgInfo[1]\r\n # 平移矩阵M:[[1,0,x],[0,1,y]]\r\n M = np.float32([[1, 0, 15], [0, 1, 0]])\r\n dst = cv2.warpAffine(img, M, (rows , cols))\r\n # splitName = jpg.split(\".\")\r\n # newName = splitName[0]\r\n # cv2.imwrite(root_path+newName + '_flip.jpg', horizontal_img)\r\n cv2.imencode('.jpg', dst)[1].tofile(root_path+jpg) # 保存图片\r\n\r\n\r\n","sub_path":"PythonScript/PythonScript/pictureleft.py","file_name":"pictureleft.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"262954025","text":"import os, EdgeClass, NodeClass\n\nfrom myMethods import *\n\ndef main():\n edgecounter = 0\n edgeread = []\n edgelist = []\n vertexlist = []\n#Scan the file once to build the nodes\n filename = \"Graph.txt\"\n file = open(filename, \"r\")\n numberOfNodes = file.readline().strip()\n numberOfNodes = int(numberOfNodes)\n for x in range(numberOfNodes):\n nodeNumber = file.readline().strip()\n nodeNumber = int(nodeNumber)\n nodeName = file.readline().strip()\n vertexlist.append(NodeClass.Node(nodeName,nodeNumber))\n numberOfEdges = file.readline().strip()\n numberOfEdges = int(numberOfEdges)\n for i in range(numberOfEdges):\n edgeread.append(file.readline().strip())\n edgeread.clear()\n file.close()\n edgeread.clear()\n#Scan the file again to bu\n filename2 = \"Graph.txt\"\n files = open(filename2, \"r\")\n numberOfNodes = files.readline().strip()\n numberOfNodes = int(numberOfNodes)\n for x in range(numberOfNodes):\n nodeNumber = files.readline().strip()\n nodeNumber = int(nodeNumber)\n nodeName = files.readline().strip()\n numberOfEdges = files.readline().strip()\n numberOfEdges = int(numberOfEdges)\n for i in range(numberOfEdges):\n edgeread.append(files.readline().strip())\n edgeread = edgeread[0].split()\n next = edgeread[0]\n next = int(next)\n distance = edgeread[1]\n distance = float(distance)\n long = len(vertexlist)\n edgelist.append(EdgeClass.Edge(distance, vertexlist[x], vertexlist[next-1], \"edge{}.png\".format(edgecounter)))\n end = len(edgelist)\n vertexlist[x].adjacenciesList.append(edgelist[end-1])\n edgeread.clear()\n edgecounter = edgecounter + 1\n\n files.close()\n edgeread.clear()\n start = None\n while start is None:\n os.system(\"cls\")\n begin_value = input(\"Please enter the node where you wish start from (1-15): \")\n try:\n start = int(begin_value)\n if not (1 <= start <= 15):\n print(\"{input} is not between 1-15, please re-enter.\".format(input=start))\n start = None\n os.system(\"PAUSE\")\n except:\n print(\"{input} is not an integer, please re-enter your selection.\".format(input=begin_value))\n os.system(\"PAUSE\")\n finish = None\n while finish is None:\n end_value = input(\"Please enter the where node you wish end on (1-15): \")\n try:\n finish = int(end_value)\n if not (1 <= finish <= 15):\n print(\"{input} is not between 1-15, please re-enter.\".format(input=finish))\n finish = None\n os.system(\"PAUSE\")\n except:\n print(\"{input} is not an integer, please re-enter your selection.\".format(input=end_value))\n os.system(\"PAUSE\")\n\n calculateshortestpath(vertexlist,vertexlist[finish - 1])\n getshortestpath(vertexlist[start - 1])\n edgecounter = 0\n repeat = None\n while repeat is None:\n input_value = input(\"Enter 1 to repeat the calculation or 2 to exit: \")\n try:\n repeat = int(input_value) # try and convert the string input to a number\n if (repeat == 1):\n os.system(\"cls\")\n main()\n elif (repeat == 2):\n quit()\n else:\n repeat = None\n except ValueError:\n print(\"{input} is not a number, please enter a number only\".format(input=input_value)) # Prompt to renter\n\n\nmain()\n","sub_path":"HW3_2/HW3_2.py","file_name":"HW3_2.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"376306501","text":"# 学校:四川轻化工大学\r\n# 学院:自信学院\r\n# 学生:胡万平\r\n# 开发时间:2021/9/18 10:10\r\n\r\n#单分支结构\r\nmoney = 1000 #余额\r\ns = int(input('请输入取款金额')) #取款金额\r\n#判断余额是否充足\r\nif money >= s:\r\n money = money - s\r\n print('取款成功,余额为',money)","sub_path":"chapter4/demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"180338294","text":"import csv\nimport logging\n# make deterministic\nfrom mingpt.utils import set_seed\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport math\nfrom torch.utils.data import Dataset\nfrom mingpt.baseline import GPT, GPTConfig\nfrom mingpt.trainer_acc import Trainer, TrainerConfig\nfrom mingpt.model_simulator import GPT as GPT_simu\nfrom mingpt.model_simulator import GPTConfig as GPTConfig_simu\nfrom mingpt.trainer_simulator import Trainer as Trainer_simu\nfrom mingpt.trainer_simulator import TrainerConfig as TrainerConfig_simu\nfrom mingpt.utils import sample\nfrom collections import deque\nimport random\nimport torch\nimport pickle\nimport blosc\nimport argparse\nfrom create_dataset import create_dataset\nimport pandas as pd\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--seed', type=int, default=123)\nparser.add_argument('--context_length', type=int, default=30)\nparser.add_argument('--epochs', type=int, default=5)\nparser.add_argument('--model_type', type=str, default='reward_conditioned')\nparser.add_argument('--num_steps', type=int, default=500000)\nparser.add_argument('--num_buffers', type=int, default=50)\nparser.add_argument('--game', type=str, default='Breakout')\nparser.add_argument('--batch_size', type=int, default=128)\n# \nparser.add_argument('--trajectories_per_buffer', type=int, default=10, help='Number of trajectories to sample from each of the buffers.')\nparser.add_argument('--data_dir_prefix', type=str, default='./dqn_replay/')\nargs = parser.parse_args()\n\nset_seed(args.seed)\n\nclass StateActionReturnDataset(Dataset):\n\n def __init__(self, data, block_size, actions,actions_neg, actions_len, return_step, done_idxs, rtgs, timesteps): \n self.block_size = block_size\n self.vocab_size = 5010\n # self.vocab_size = actions.shape[0] \n self.data = data\n self.actions = actions\n self.actions_neg = actions_neg\n self.actions_len = actions_len\n self.return_step = return_step\n self.done_idxs = done_idxs\n self.rtgs = rtgs\n self.timesteps = timesteps\n \n def __len__(self):\n return len(self.data) - self.block_size\n\n def __getitem__(self, idx):\n block_size = self.block_size // 3\n done_idx = idx + block_size\n for i in self.done_idxs:\n if i > idx and i>block_size: # first done_idx greater than idx\n done_idx = min(int(i), done_idx)\n break\n idx = done_idx - block_size\n # states = torch.tensor(np.array(self.data[idx:done_idx]), dtype=torch.float32).reshape(block_size, -1) # (block_size, 4*84*84)\n # states = states / 255.\n # states = torch.tensor(self.data[idx:done_idx], dtype=torch.long).unsqueeze(1)\n # actions = torch.tensor(self.actions[idx:done_idx], dtype=torch.long).unsqueeze(1) # (block_size, 1)\n states = torch.tensor(self.data[idx:done_idx], dtype=torch.long)\n actions = torch.tensor(self.actions[idx:done_idx], dtype=torch.long)\n actions_neg = torch.tensor(self.actions_neg[idx:done_idx], dtype=torch.long)\n actions_len = torch.tensor(self.actions_len[idx:done_idx], dtype=torch.long)\n return_step = torch.tensor(self.return_step[idx:done_idx], dtype=torch.float32)\n \n rtgs = torch.tensor(self.rtgs[idx:done_idx], dtype=torch.float32).unsqueeze(1)\n timesteps = torch.tensor(self.timesteps[idx:idx+1], dtype=torch.int64).unsqueeze(1)\n return states, actions,actions_neg, actions_len, return_step, rtgs, timesteps\n\n\n\n# 4Rec accuracy\n\n# data_load_num\n# 小于4893\nidx_num=3000\n\n\n#划分数据集\nidx_num_train = int(0.8 * idx_num)\nidx_num_test = 
idx_num-idx_num_train\n\nuser_retain=pd.read_csv('./WSDM/user_retain_seq.csv')\n# obss=user_retain['obss'].values\nrtgs=user_retain['rtg'].values\n# actions=user_retain['actions'].values\nactions_len=user_retain['actions_len'].values\nreturn_step=user_retain['return_step'].values\ntimesteps=user_retain['timesteps'].values\ndone_idx_file=pd.read_csv('./WSDM/done_idx_seq.csv')\ndone_idxs=done_idx_file['done_idx'].values\n\naction_seq=pd.read_csv('./WSDM/action_seq.csv')\nactions=action_seq['actions'].values.reshape(-1,20)\nstate_seq=pd.read_csv('./WSDM/state_seq.csv')\nobss=state_seq['obss'].values.reshape(-1,30)\n\naction_seq_neg=pd.read_csv('./WSDM/action_seq_small.csv')\nactions_neg=action_seq_neg['actions'].values.reshape(-1,20)\nran_pad = actions.shape[0]-actions_neg.shape[0]\nactions_neg = np.concatenate((actions_neg,actions_neg[:ran_pad]),0)\n\naction_seq_large=pd.read_csv('./WSDM/action_seq_large.csv')\nactions_large=action_seq_large['actions'].values.reshape(-1,20)\nran_pad_large = actions.shape[0]-actions_large.shape[0]\nactions_large = np.concatenate((actions_large,actions_large[:ran_pad_large]),0)\nfor i in range(actions_neg.shape[0]):\n if return_step[i]<5:\n actions_neg[i]=actions_large[i]\nprint('start training!')\n\ndef re_index(actions,obss):\n vocab_size=5010\n import random\n idx_list=list(range(vocab_size))\n random.shuffle(idx_list)\n action_dic={}\n action_flag=0\n action_new=[]\n obss_new=[]\n for i in range(actions.shape[0]):\n action_day=[]\n for j in range(len(actions[i])):\n if str(actions[i][j]) in action_dic.keys():\n action_day.append(action_dic[str(actions[i][j])])\n else:\n action_day.append(idx_list[action_flag])\n action_dic[str(actions[i][j])]=idx_list[action_flag]\n action_flag+=1\n action_new.append(action_day) \n for i in range(obss.shape[0]):\n obss_day=[]\n for j in range(len(obss[i])):\n if str(obss[i][j]) in action_dic.keys():\n obss_day.append(action_dic[str(obss[i][j])])\n else:\n obss_day.append(idx_list[action_flag])\n action_dic[str(obss[i][j])]=idx_list[action_flag]\n action_flag+=1\n obss_new.append(obss_day)\n return action_new, obss_new, vocab_size\n\ndef timestep_paddle(timesteps_train):\n time_flag_train=0\n timesteps_list_train=list(timesteps_train)\n for i in range(len(timesteps_list_train)):\n if timesteps_list_train[i]==0:\n time_flag_train+=1\n if time_flag_train==2:\n timesteps_list_train.insert(i,timesteps_list_train[i-1]+1)\n break\n timesteps_train=np.array(timesteps_list_train)\n return timesteps_train\n \nsample_num=done_idxs[idx_num]\nactions=actions[:sample_num+1]\nactions_neg=actions_neg[:sample_num+1]\nactions_len=actions_len[:sample_num+1]\nreturn_step=return_step[:sample_num+1]\nobss=obss[:sample_num+1]\nvocab_size=5013\n\n#train_dataset\nsample_num_train=done_idxs[idx_num_train]\nobss_train=obss[:sample_num_train]\nrtgs_train=rtgs[:sample_num_train]\nactions_train=actions[:sample_num_train]\nactions_neg_train=actions_neg[:sample_num_train]\n\nactions_len_train=actions_len[:sample_num_train]\nreturn_step_train=return_step[:sample_num_train]\ntimesteps_train=timesteps[:sample_num_train]\ndone_idxs_train=done_idxs[:idx_num_train+1]\ntimesteps_train=timestep_paddle(timesteps_train)\n\ntrain_dataset = StateActionReturnDataset(obss_train, args.context_length*3, actions_train,actions_neg_train, actions_len_train, return_step_train, done_idxs_train, rtgs_train, timesteps_train)\n\n#test_dataset\nsample_num_test=done_idxs[idx_num]\nprint('interaction number 
is:',sample_num_test)\nobss_test=obss[sample_num_train:sample_num_test]\nrtgs_test=rtgs[sample_num_train:sample_num_test]\nactions_test=actions[sample_num_train:sample_num_test]\nactions_neg_test=actions_neg[sample_num_train:sample_num_test]\nactions_len_test=actions_len[sample_num_train:sample_num_test]\nreturn_step_test=return_step[sample_num_train:sample_num_test]\ntimesteps_test=timesteps[sample_num_train:sample_num_test]\ndone_idxs_test=done_idxs[idx_num_train+1:idx_num+1]-sample_num_train\ntimesteps_test=timestep_paddle(timesteps_test)\n\ntest_dataset = StateActionReturnDataset(obss_test, args.context_length*3, actions_test,actions_neg_test, actions_len_test, return_step_test, done_idxs_test, rtgs_test, timesteps_test)\n\nprint('item number is:',vocab_size)\n\nmconf = GPTConfig(vocab_size, train_dataset.block_size,\n n_layer=2, n_head=8, n_embd=128, model_type=args.model_type, max_timestep=89)\nmodel = GPT(mconf)\n\nmconf_simu = GPTConfig_simu(vocab_size, train_dataset.block_size,\n n_layer=2, n_head=8, n_embd=128, model_type=args.model_type, max_timestep=89)\nmodel_simu = GPT_simu(mconf_simu)\n\n# initialize a trainer instance and kick off training\nepochs = args.epochs\n\n# model_simu=trainer_simu.train()\nPATH='./simulator/my_model.pth'\nmodel_simu.load_state_dict(torch.load(PATH))\n\n\ntconf = TrainerConfig(max_epochs=epochs, batch_size=args.batch_size, learning_rate=0.01,\n lr_decay=False, warmup_tokens=512*20, final_tokens=2*len(train_dataset)*args.context_length*3,\n num_workers=4, seed=args.seed, model_type=args.model_type, game=args.game, max_timestep=89)\ntrainer = Trainer(model, model_simu, train_dataset, test_dataset, tconf)\n\ntrainer.train()\n\n","sub_path":"run_baseline.py","file_name":"run_baseline.py","file_ext":"py","file_size_in_byte":9123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"196226561","text":"import logging\nimport os\n\nimport numpy as np\n\nfrom doom_py import DoomGame, Mode, Button, GameVariable, ScreenFormat, ScreenResolution, Loader\nfrom gym import error, spaces\nfrom gym.envs.doom import doom_env\n\nlogger = logging.getLogger(__name__)\n\nclass DoomCorridorEnv(doom_env.DoomEnv):\n \"\"\"\n ------------ Training Mission 2 - Corridor ------------\n This map is designed to improve your navigation. There is a vest\n at the end of the corridor, with 6 enemies (3 groups of 2). Your goal\n is to get to the vest as soon as possible, without being killed.\n\n Allowed actions:\n [0] - ATTACK - Shoot weapon - Values 0 or 1\n [9] - MOVE_RIGHT - Move to the right - Values 0 or 1\n [10] - MOVE_LEFT - Move to the left - Values 0 or 1\n [12] - MOVE_FORWARD - Move forward - Values 0 or 1\n [13] - TURN_RIGHT - Turn right - Values 0 or 1\n [14] - TURN_LEFT - Turn left - Values 0 or 1\n Note: see controls.md for details\n\n Rewards:\n + dX - For getting closer to the vest\n - dX - For getting further from the vest\n -100 - Penalty for being killed\n\n Goal: 1,270 points\n Reach the vest (try also killing guards, rather than just running)\n\n Ends when:\n - Player touches vest\n - Player is dead\n - Timeout (1 minutes - 2,100 frames)\n -----------------------------------------------------\n \"\"\"\n def __init__(self):\n super(DoomCorridorEnv, self).__init__()\n package_directory = os.path.dirname(os.path.abspath(__file__))\n self.loader = Loader()\n self.game = DoomGame()\n self.game.load_config(os.path.join(package_directory, 'assets/deadly_corridor.cfg'))\n self.game.set_vizdoom_path(self.loader.get_vizdoom_path())\n self.game.set_doom_game_path(self.loader.get_freedoom_path())\n self.game.set_doom_scenario_path(self.loader.get_scenario_path('deadly_corridor.wad'))\n self.screen_height = 480 # Must match .cfg file\n self.screen_width = 640 # Must match .cfg file\n # action indexes are [0, 9, 10, 12, 13, 14]\n self.action_space = spaces.HighLow(np.matrix([[0, 1, 0]] * 6))\n self.observation_space = spaces.Box(low=0, high=255, shape=(self.screen_height, self.screen_width, 3))\n self.game.set_window_visible(False)\n self.viewer = None\n self.game.init()\n self.game.new_episode()\n","sub_path":"gym/envs/doom/doom_corridor.py","file_name":"doom_corridor.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"538042971","text":"'''\n508. Most Frequent Subtree Sum\nDescription Submission Solutions Add to List\nTotal Accepted: 2731\nTotal Submissions: 5251\nDifficulty: Medium\nContributors: Cyber233\nGiven the root of a tree, you are asked to find the most frequent subtree sum. The subtree sum of a node is defined as the sum of all the node values formed by the subtree rooted at that node (including the node itself). So what is the most frequent subtree sum value? If there is a tie, return all the values with the highest frequency in any order.\n\nExamples 1\nInput:\n\n 5\n / \\\n2 -3\nreturn [2, -3, 4], since all the values happen only once, return all of them in any order.\nExamples 2\nInput:\n\n 5\n / \\\n2 -5\nreturn [2], since 2 happens twice, however -5 only occur once.\nNote: You may assume the sum of values in any subtree is in the range of 32-bit signed integer.\n\nSubscribe to see which companies asked this question.\n'''\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n sumdict = {}\n def findFrequentTreeSum(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n sumdict = {'max':0}\n self.nodesum(root, sumdict)\n sumlist = []\n for key in sumdict.keys():\n if key != 'max' and sumdict[key] == sumdict['max']:\n sumlist.append(key)\n return sumlist\n def nodesum(self, node, sumdict):\n if not node.left and not node.right:\n self.dictadd(sumdict, node.val)\n return node.val\n elif not node.left:\n sbtsum = node.val + self.nodesum(node.right, sumdict)\n self.dictadd(sumdict, sbtsum)\n return sbtsum\n elif not node.right:\n sbtsum = node.val + self.nodesum(node.left, sumdict)\n self.dictadd(sumdict, sbtsum)\n return sbtsum\n else:\n sbtsum = node.val + self.nodesum(node.left, sumdict) + self.nodesum(node.right, sumdict)\n self.dictadd(sumdict, sbtsum)\n return sbtsum\n def dictadd(self, dict1, a):\n if dict1.has_key(a):\n dict1[a] += 1\n else:\n dict1[a] = 1\n dict1['max'] = max(dict1[a], dict1['max'])\n","sub_path":"en/most-frequent-subtree-sum.py","file_name":"most-frequent-subtree-sum.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"134546049","text":"import math\nfrom PIL import Image\nimport copy\n\n\n\ndef convertImageToIntensityMatrix(im): #\n width,height = im.size\n pixels = im.getdata()\n return [[pixels[y*width+x] for x in range(height)] for y in range(height)]\n\ndef convertIntensityMatrixToImage(matrix): #Grayscale image\n im = Image.new('RGB',(len(matrix[0]),len(matrix)))\n im.putdata([pixel for row in matrix for pixel in row ])\n return im\n\n\ndef getNewPixel(matrix,h,x,y,index):\n result = 0\n offset = math.floor(len(h)/2)\n for j in range(y-offset,y+offset+1):\n for i in range(x-offset,x+offset+1):\n result += math.floor(matrix[j][i][index]*h[j-y+offset][i-x+offset])\n return result\n\n\ndef getNewPixel_2(matrix,h,x,y,index):\n offset = math.floor(len(h)/2)\n result = 0\n for j in range(y-offset,y+offset+1):\n for i in range(x-offset,x+offset+1):\n result += math.floor(matrix[j][i][index] * h[y-j][x-i])\n return result\n\ndef performFilter(matrix, filter):\n width, height = len(matrix[0]), len(matrix)\n offset = math.floor(len(filter)/2)\n new_matrix = copy.deepcopy(matrix)\n for x in range(offset,width-offset):\n for y in range(offset,height-offset):\n new_matrix[y][x] = (getNewPixel(matrix,filter,x,y,0),getNewPixel(matrix,filter,x,y,1),getNewPixel(matrix,filter,x,y,2))\n return new_matrix\n\nim = Image.open( \"images/4.1.07-jelly-beans.tiff\")\nim.show()\nh_a = [[1/9 for x in range(3)] for x in range(3)]\nh_g = [[1/256, 4/256, 6/256, 4/256, 1/256],[4/256, 16/256, 24/256, 16/256, 4/256], [6/256, 24/256, 36/256, 24/256, 6/256],[4/256, 16/256, 24/256, 16/256, 4/256],[1/256, 4/256, 6/256, 4/256, 1/256]]\n\nim = convertIntensityMatrixToImage(performFilter(convertImageToIntensityMatrix(im),h_g))\nim.show()\n","sub_path":"Python/task_3b.py","file_name":"task_3b.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"519702440","text":"'''\nMonte Carlo simulator support classes\nAuthor: Jordan Eriksen\nDate: 2021-09-17\n'''\nimport sys, os\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import norm\nimport datetime\n\nclass dimension:\n def __init__(self,nominal,tolerance=0,description='Undefined',n=1000000):\n self.mean = nominal\n self.std = tolerance/3\n self.description = description\n self.n = n\n self._calcSigma()\n self._simulate()\n \n def _calcSigma(self):\n self.sigma = {\n 1:(self.mean - 1*self.std,self.mean + 1*self.std),\n 2:(self.mean - 2*self.std,self.mean + 2*self.std),\n 3:(self.mean - 3*self.std,self.mean + 3*self.std),\n 6:(self.mean - 6*self.std,self.mean + 6*self.std)\n }\n \n def _simulate(self):\n '''\n Return distribution of results assuming symmetric tolerances\n '''\n self.distribution = np.random.normal(self.mean,self.std,self.n)\n\n def sigTable(self):\n '''\n Output table of means and sigma1,2,3 for the final stack and the subcomponents.\n '''\n self.output_table = {}\n self.output_table[self.description] = {\n '-6 sig':self.sigma[6][0],\n '-3 sig':self.sigma[3][0],\n '-2 sig':self.sigma[2][0],\n '-1 sig':self.sigma[1][0],\n 'mean':self.mean,\n '+1 sig':self.sigma[1][1],\n '+2 sig':self.sigma[2][1],\n '+3 sig':self.sigma[3][1],\n '+6 sig':self.sigma[6][1]}\n \n self.output_DF = pd.DataFrame(self.output_table).transpose().round(3)\n self.output_DF = self.output_DF[['-6 sig','-3 sig','-2 sig','-1 sig','mean','+1 sig','+2 sig','+3 sig','+6 sig']]\n\n return self.output_DF\n \n def save(self,name):\n '''\n Save output DataFrame as pickle file\n '''\n\n if not self.output_DF:\n self.simulate()\n\n self.output_DF.to_pickle(f'./Output/{datetime.datetime.now().date()} {name}.pkl')\n\nclass stack:\n '''\n Sort combine and manage the stack\n '''\n def __init__(self):\n self.dimensions = {}\n self.descList = []\n self.sigTable = pd.DataFrame(columns=['-6 sig','-3 sig','-2 sig','-1 sig','mean','+1 sig','+2 sig','+3 sig','+6 sig'])\n self.stackTable = pd.DataFrame(columns=['Description','Sign','Mean','Tolerance'])\n\n def _new(self,dim,sign):\n '''\n Updates stack data after add or sub\n '''\n self.dimensions[dim.description] = dim\n self.descList.append((dim.description,sign))\n\n if sign == '+':\n self.distribution = self.stack + dim.distribution\n elif sign == '-':\n self.distribution = self.stack - dim.distribution\n\n self.sigTable = self.sigTable.append(dim.sigTable)\n self.stackTable = self.stackTable.append({\n 'Description':dim.description,\n 'Sign':sign,\n 'Nominal':dim.mean,\n 'Tolerance':dim.std*3\n })\n\n def add(self, dim):\n '''\n Add dimension to the stack\n '''\n self._new(dim,'+')\n\n def sub(self,dim):\n '''\n Subtract dimension from the stack\n '''\n self._new(dim,'-')\n\n def rm(self):\n '''\n Request user input to determine which entry in the stack to remove\n '''\n print('Select dimensions to remove:')\n num = 0\n for i in self.dimensions:\n num += 1\n print(f' [{num}] -> {i} ')\n print('')\n\n rmed = input(f'Variable(s) to remove? [list | int]: ')\n itemsRemove = [int(i) for i in list(rmed) if i not in [',','[',']',' ']]\n\n for i in itemsRemove:\n self.dimensions.pop(self.descList[i])\n\n def combine(self):\n '''\n Combine constituent dimensions to build tolerance stack\n '''\n","sub_path":"monteClasses.py","file_name":"monteClasses.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"592689000","text":"\"\"\"\nID: dotekin1\nLANG: PYTHON3\nTASK: milk2\n\"\"\"\nfrom itertools import accumulate\nfrom collections import defaultdict, OrderedDict\n\nwith open(\"milk2.in\") as fin:\n N = fin.readline()\n farmers = [line.strip().split(' ') for line in fin.readlines()]\n\ncows = defaultdict(int)\n\nfor start, end in farmers:\n cows[int(start)] += 1\n cows[int(end)] -= 1\n \nprint(cows)\n\n# cur = 0\nstart_milk = None\nmax_milk = max_none = 0\n\nsorted_cows = sorted(cows.items())\nstart_milk, cur = sorted_cows[0]\nprint(start_milk, cur)\nfor t, change in sorted_cows[1:]: \n cur += change\n print(t, cur)\n \n if cur == 0 and start_milk is not None:\n milk_time = t - start_milk\n max_milk = max(milk_time, max_milk)\n start_milk = None\n start_none = t\n elif cur != 0 and start_milk is None:\n none_time = t - start_none\n max_none = max(none_time, max_none)\n start_milk = t\n\nwith open(\"milk2.out\", \"w\") as fout:\n fout.write(f\"{max_milk} {max_none}\\n\")\n","sub_path":"Training/1/1.3/milk2/milk2.py","file_name":"milk2.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"230125369","text":"import functools\nimport json\n\nimport paho.mqtt.client as mqtt\nimport time\nimport logging\n\n\ndef test_connection(mqtt_config):\n logging.info(mqtt_config)\n\n def on_message(client, userdata, message):\n logging.info(\"received message: \", str(message.payload.decode(\"utf-8\")))\n\n mqtt_broker = mqtt_config['broker_hostname']\n\n client = mqtt.Client(mqtt_config['client_name'])\n client.connect(mqtt_broker)\n\n # client.publish(\"TEMPERATURE\", randNumber)\n\n client.loop_start()\n\n client.subscribe(\"test1/topic1\")\n client.on_message = on_message\n\n time.sleep(60)\n client.loop_stop()\n\n\ndef _on_mqtt_message(mqtt_client, paho_client, userdata, message):\n logging.info('received message {0}: {1}'.format(message.topic, str(message.payload.decode(\"utf-8\"))))\n topic_func = mqtt_client.get_topic_func(message.topic)\n if topic_func:\n topic_func(message.topic, json.loads(message.payload.decode(\"utf-8\")))\n\n\nclass MqttClient:\n def __init__(self, mqtt_config, topic_config):\n self.mqtt_config = mqtt_config\n self.topic_config = topic_config\n self.client = mqtt.Client(mqtt_config['client_name'])\n self.topic_list = []\n\n def connect_subscribe(self, topic_list):\n try:\n self.client.connect(self.mqtt_config['broker_hostname'])\n self.client.on_message = functools.partial(_on_mqtt_message, self)\n self.client.loop_start()\n logging.info('MQTT connected to {0}'.format(self.mqtt_config['broker_hostname']))\n\n self.topic_list = topic_list\n\n for topic_elem in topic_list:\n self.client.subscribe(topic_elem[0])\n except Exception as e:\n logging.warning('Error on MQTT connection (retry later): {0}'.format(e))\n\n def disconnect(self):\n self.client.loop_stop()\n\n def publish(self, topic, content_dict):\n self.client.publish(topic=topic, payload=json.dumps(content_dict), retain=True)\n\n def publish_volatile(self, topic, content_dict):\n self.client.publish(topic=topic, payload=json.dumps(content_dict), retain=False)\n\n def get_topic_func(self, topic_name):\n for topic_elem in self.topic_list:\n if topic_elem[0] == topic_name:\n return topic_elem[1]\n return None\n\n def is_connected(self):\n return self.client.is_connected()\n","sub_path":"safechicken/mqttclient.py","file_name":"mqttclient.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"175341851","text":"# coding: utf-8\n\nimport imp\nfrom sklearn.cluster import KMeans\nimport numpy as np\nimport csv\nimport math\nfrom matplotlib import pyplot as plt\n## The params have been defined\nC_Lambda = 0\nlearningRate = 0.1\nM = 9\nPHI = []\n## The data has been processed usig the following functions\n\ndef GetTargetVector(filePath):\n t = []\n iterator = 0\n with open(filePath, 'rU') as f:\n reader = csv.reader(f)\n for row in reader:\n iterator += 1\n if (iterator > 1):\n t.append(float(row[-1]))\n return np.array(t)\n\n## Here we calculate the data columns with 0 variance and delete them as they have 0 significance.\ndef GenerateRawData(filePath, singular_data):\n dataMatrix = []\n iterator = 0\n with open(filePath, 'rU') as fi:\n reader = csv.reader(fi)\n for row in reader:\n iterator += 1\n if (iterator > 1):\n dataRow = []\n for column in row:\n dataRow.append(column)\n dataMatrix.append(dataRow)\n\n dataMatrix = np.delete(dataMatrix, [0,1], axis=1)\n dataMatrix = np.delete(dataMatrix, singular_data, axis=1)\n dataMatrix = np.delete(dataMatrix, np.s_[-1:], axis=1)\n dataMatrix = [[int(float(j)) for j in i] for i in dataMatrix]\n dataMatrix = np.transpose(dataMatrix)\n return dataMatrix\n\n## Big SIgma is nothing but the covariance matrix\ndef GenerateBigSigma(Data):\n BigSigma = np.zeros((len(Data),len(Data)))\n # print(len(Data))\n DataT = np.transpose(Data)\n TrainingLen = len(DataT)\n # print(len(DataT[0]))\n varVect = []\n for i in range(0,len(DataT[0])):\n vct = []\n for j in range(0,int(TrainingLen)):\n vct.append(Data[i][j])\n varVect.append(np.var(vct))\n noise = np.random.normal(0, 1, len(Data))\n for j in range(len(Data)):\n BigSigma[j][j] = varVect[j]+ noise[j]\n BigSigma = 2000*BigSigma\n #BigSigma = np.dot(1,BigSigma)\n ##print (\"BigSigma Generated..\")\n return BigSigma\n\n##\ndef GenerateBigSigmaSingular(Data):\n BigSigma = np.zeros((len(Data),len(Data)))\n # print(len(Data))\n DataT = np.transpose(Data)\n TrainingLen = len(DataT)\n # print(len(DataT[0]))\n varVect = []\n for i in range(0,len(DataT[0])):\n vct = []\n for j in range(0,int(TrainingLen)):\n vct.append(Data[i][j])\n varVect.append(np.var(vct))\n for j in range(len(Data)):\n BigSigma[j][j] = varVect[j]\n #BigSigma = np.dot(1,BigSigma)\n ##print (\"BigSigma Generated..\")\n return BigSigma\n\n## The weights have been calculated using closed form\ndef GetWeightsClosedForm(PHI, T, Lambda):\n Lambda_I = np.identity(len(PHI[0]))\n for i in range(0,len(PHI[0])):\n Lambda_I[i][i] = Lambda\n PHI_T = np.transpose(PHI)\n PHI_SQR = np.dot(PHI_T,PHI)\n PHI_SQR_LI = np.add(Lambda_I,PHI_SQR)\n PHI_SQR_INV = np.linalg.inv(PHI_SQR_LI)\n INTER = np.dot(PHI_SQR_INV, PHI_T)\n W = np.dot(INTER, T)\n return W\n\n## The design matrix for all the datasets have been created\ndef GetPhiMatrix(Data, MuMatrix, BigSigma, TrainingPercent = 100):\n DataT = np.transpose(Data)\n TrainingLen = math.ceil(len(DataT)*(TrainingPercent*0.01))\n PHI = np.zeros((int(TrainingLen),len(MuMatrix)))\n BigSigInv = np.linalg.inv(BigSigma)\n for C in range(0,len(MuMatrix)):\n for R in range(0,int(TrainingLen)):\n PHI[R][C] = GetRadialBasisOut(DataT[R], MuMatrix[C], BigSigInv)\n bias = np.ones((int(len(PHI)), 1))\n PHI = np.append(PHI, bias, 1)\n return PHI\n\n## RBF is nothing but a gaussian function which is calculated\ndef GetRadialBasisOut(DataRow,MuRow, BigSigInv):\n phi_x = math.exp(-0.5*GetScalar(DataRow,MuRow,BigSigInv))\n return phi_x\n\ndef GetScalar(DataRow,MuRow, BigSigInv):\n R = np.subtract(DataRow,MuRow)\n T = 
np.dot(BigSigInv,np.transpose(R))\n    L = np.dot(R,T)\n    return L\n\n# ## This calculates the output using the PHI matrix and the weights obtained from Moore Penrose Inversion.\ndef GetValTest(VAL_PHI,W):\n    Y = np.dot(W,np.transpose(VAL_PHI))\n    ##print (\"Test Out Generated..\")\n    return Y\n\n## For linear regression we have calculated the value of ERMS\ndef GetErms(VAL_TEST_OUT,ValDataAct):\n    sum = 0.0\n    accuracy = 0.0\n    counter = 0\n    val = 0.0\n    for i in range (0,len(VAL_TEST_OUT)):\n        sum = sum + math.pow((ValDataAct[i] - VAL_TEST_OUT[i]),2)\n        if(int(np.around(VAL_TEST_OUT[i], 0)) == ValDataAct[i]):\n            counter+=1\n\n    accuracy = (float((counter*100))/float(len(VAL_TEST_OUT)))\n    return [str(accuracy),str(math.sqrt(sum/len(VAL_TEST_OUT)))]\n\n## Used to delete the columns containing variance of 0.\ndef singular_features(filePath):\n    dataMatrix = []\n    singular_feature = []\n    iterator = 0\n    with open(filePath, 'rU') as fi:\n        reader = csv.reader(fi)\n        for row in reader:\n            iterator += 1\n            if (iterator > 1):\n                dataRow = []\n                for column in row:\n                    dataRow.append(column)\n                dataMatrix.append(dataRow)\n\n    dataMatrix = np.delete(dataMatrix, [0,1], axis=1)\n    dataMatrix = np.delete(dataMatrix, np.s_[-1:], axis=1)\n    dataMatrix = [[int(float(j)) for j in i] for i in dataMatrix]\n    dataMatrix = np.transpose(dataMatrix)\n    BigSigma = GenerateBigSigmaSingular(dataMatrix)\n    for i in range(len(BigSigma)):\n        if BigSigma[i][i] == 0:\n            singular_feature.append(i)\n    # print(len(BigSigma))\n    #print (\"Data Matrix Generated..\")\n    return singular_feature\n\n## The features with 0 variance have been extracted so that they can be deleted as they throw the\n## singular matrix error. Also these features are of no use as they are the same for all the datapoints\n## so they cause no change in the output.\n\nsingular_data = singular_features('training.csv')\n# singular_data = [450, 452, 456, 457]\nprint(\"Singular feature columns are:\" + str(singular_data))\nprint(\"---------PLEASE WAIT----------------\")\nprint(\"Generating training target Vector\")\ntrainingTarget = GetTargetVector('training.csv')\nprint(\"---------PLEASE WAIT----------------\")\nprint(\"Generating training Rawdata Vector\")\ntrainingData = GenerateRawData('training.csv',singular_data)\nprint(\"---------PLEASE WAIT----------------\")\nprint(\"Generating testing target Vector\")\ntestingTarget = GetTargetVector('testing.csv')\nprint(\"---------PLEASE WAIT----------------\")\nprint(\"Generating testing Rawdata Vector\")\ntestingData = GenerateRawData('testing.csv',singular_data)\nprint(\"---------PLEASE WAIT---------------\")\nprint(\"Generating validation target Vector\")\nvalidationTarget = GetTargetVector('validation.csv')\nprint(\"---------PLEASE WAIT----------------\")\nprint(\"Generating validation Rawdata Vector\")\nvalidationData = GenerateRawData('validation.csv',singular_data)\nprint(\"All data matrices have been created.\")\nprint ('----------------------------------------------------')\nprint(\"Shape of Training feature data\" + str(trainingData.shape))\nprint(\"Shape of Training Target data\" + str(trainingTarget.shape))\nprint(\"Shape of Testing feature data\" + str(testingData.shape))\nprint(\"Shape of Testing Target data\" + str(testingTarget.shape))\nprint(\"Shape of Validation feature data\" + str(validationData.shape))\nprint(\"Shape of Validation Target data\" + str(validationTarget.shape))\nprint ('----------------------------------------------------')\nprint(\"Finding out the k means cluster and Phi matrix for all the datasets\")\nprint(\"Also 
finding out the closed form solution.\")\nprint(\"This might take some time. Please wait\")\nErmsArr = []\nAccuracyArr = []\n\nkmeans = KMeans(n_clusters=M, random_state=0).fit(np.transpose(trainingData))\nMu = kmeans.cluster_centers_\n\nBigSigma = GenerateBigSigma(trainingData)\nTRAINING_PHI = GetPhiMatrix(trainingData, Mu, BigSigma, 100)\nTEST_PHI = GetPhiMatrix(testingData, Mu, BigSigma, 100)\nVAL_PHI = GetPhiMatrix(validationData, Mu, BigSigma, 100)\n\nprint(\"Shape of feature center matrix\" + str(Mu.shape))\nprint(\"Shape of Variance matrix\" + str(BigSigma.shape))\nprint(\"Training design matrix\" + str(TRAINING_PHI.shape))\nprint(\"Validation design matrix\" + str(VAL_PHI.shape))\nprint(\"Testing design matrix\" +str(TEST_PHI.shape))\nprint ('UBITname = ysaraf')\nprint ('Person Number = 50290453')\nprint ('----------------------------------------------------')\nprint (\"-----------Handwriting features dataset-------------\")\nprint ('----------------------------------------------------')\nprint (\"-------Closed Form with Radial Basis Function-------\")\nprint ('----------------------------------------------------')\n# ## Gradient Descent solution for Linear Regression\nprint ('----------------------------------------------------')\nprint ('--------------Please Wait for 2 mins!----------------')\nprint ('----------------------------------------------------')\n\n## Random initialization of weights is done. The extra 1 term has been added for the bias.\nW_Now = np.random.rand(M+1,)\nLa = C_Lambda\nL_Erms_Val = []\nL_Accuracy_Val = []\nL_Erms_TR = []\nL_Accuracy_TR = []\nL_Erms_Test = []\nL_Accuracy_Test = []\nW_Mat = []\nprint(\"These iterations might take some significant time depending upon the number of basis function taken\")\nfor i in range(1):\n for i in range(len(TRAINING_PHI)):\n\n print ('---------Iteration: ' + str(i) + '--------------')\n prediction = np.dot(np.transpose(W_Now),TRAINING_PHI[i])\n Delta_E_D = -np.dot((trainingTarget[i] - prediction),TRAINING_PHI[i])\n La_Delta_E_W = np.dot(La,W_Now)\n Delta_E = np.add(Delta_E_D,La_Delta_E_W)\n Delta_W = -np.dot(learningRate,Delta_E)\n W_T_Next = W_Now + Delta_W\n W_Now = W_T_Next\n print(\"Training Target: \" + str(trainingTarget[i]) + \" ,Prediction:\" + str(prediction))\n TR_TEST_OUT = GetValTest(TRAINING_PHI,W_T_Next)\n Erms_TR = GetErms(TR_TEST_OUT,trainingTarget)\n L_Erms_TR.append(float(Erms_TR[1]))\n L_Accuracy_TR.append(float(Erms_TR[0]))\n print(\"Train Accuracy :\" + str(Erms_TR[0]) + \" ,Train ERMS:\" + str(Erms_TR[1]))\n\n # -----------------ValidationData Accuracy---------------------#\n VAL_TEST_OUT = GetValTest(VAL_PHI,W_T_Next)\n Erms_Val = GetErms(VAL_TEST_OUT,validationTarget)\n L_Erms_Val.append(float(Erms_Val[1]))\n L_Accuracy_Val.append(float(Erms_Val[0]))\n print(\"Val Accuracy :\" + str(Erms_Val[0]) + \" ,Val ERMS:\" + str(Erms_Val[1]))\n\nTEST_OUT_GD = GetValTest(TEST_PHI,W_Now)\nTestAccuracyGD = GetErms(TEST_OUT_GD,testingTarget)\nprint (\"E_rms Testing = \" + str(TestAccuracyGD[1]))\nprint (\"Testing Accuracy = \" + str(TestAccuracyGD[0]))\n\nprint ('----------Gradient Descent Solution--------------------')\nprint (\"Number of Basis Function: \" + str(M))\nprint (\"Learning Rate used: \" + str(learningRate))\nprint (\"Regularization constant: \" + str(La))\nprint (\"E_rms Training = \" + str(np.around(min(L_Erms_TR),5)))\nprint (\"E_rms Validation = \" + str(np.around(min(L_Erms_Val),5)))\n\n## Plots for the training and validation sets are made for ERMS and accuracy.\nplt.subplot(2, 2, 
1)\nplt.plot(L_Erms_TR)\nplt.title('Training ERMS')\nplt.xlabel('Number of datapoints fed:')\n\nplt.subplot(2, 2, 2)\nplt.plot(L_Erms_Val)\nplt.title(\"Validation erms\")\nplt.xlabel('Number of datapoints fed:')\n\nplt.subplot(2, 2, 3)\nplt.plot(L_Accuracy_TR)\nplt.title('Training Accuracy')\nplt.xlabel('Number of datapoints fed:')\n\nplt.subplot(2, 2, 4)\nplt.plot(L_Accuracy_Val)\nplt.title(\"Validation Accuracy\")\nplt.xlabel('Number of datapoints fed:')\n\nplt.show()\n","sub_path":"Project-2/code/GSCDataset/LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":11649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"250848836","text":"\n# coding: utf-8\n\n# In[32]:\n\n\nimport os\nimport numpy as np\nfrom shutil import copy\n\n\n# In[24]:\n\n\nemote_dict = {0:\"neutral\",1:\"anger\",2:\"contempt\",3:\"disgust\",4:\"fear\",5:\"happy\",6:\"sadness\",7:\"surprise\"}\n#0=neutral, 1=anger, 2=contempt, 3=disgust, 4=fear, 5=happy, 6=sadness, 7=surprise\n\n\n# Copies over the peak emotions and neutral emotions to another folder.\n\n# In[82]:\n\n\npath = \"Project\\\\Data\\\\Emotion_labels\\\\Emotion\\\\\"\nimgdirpath = \"Project\\\\Data\\\\extended-cohn-kanade-images\\\\cohn-kanade-images\\\\\"\nnewdir = \"Project\\\\Parsed\\\\\"\n\nfor p_id in os.listdir(path):\n p_hasNeutral = False\n \n for emote in os.listdir(path+p_id):\n \n if os.listdir(path+p_id+\"\\\\\"+emote) != None:\n \n #if folder for that person doesn't exist\n if os.path.isdir(newdir+p_id) == False:\n os.mkdir(newdir+p_id)\n \n #if emote_f for person p_id satisfied the FAC\n for emote_f in os.listdir(path+p_id+\"\\\\\"+emote):\n \n #read the type of emotion\n openf = open(path+p_id+\"\\\\\"+emote+\"\\\\\"+emote_f,\"r\")\n \n s = openf.read()\n s = int(s.split()[0][0:1])\n print(s)\n \n #get emotion from code\n emotion = emote_dict[s]\n print(path+p_id+\"\\\\\"+emote+\"\\\\\"+emote_f)\n print(emotion)\n \n #copy image over and rename with emotion suffix\n imgpath = imgdirpath+p_id+\"\\\\\"+emote+\"\\\\\"+os.listdir(imgdirpath+p_id+\"\\\\\"+emote)[-1]\n newpath = newdir+p_id+\"\\\\\"\n copy(imgpath,newpath)\n os.rename(newpath+os.listdir(imgdirpath+p_id+\"\\\\\"+emote)[-1],newpath+emotion+\".png\")\n \n #copy neutral over, if hasnt been copied yet\n if p_hasNeutral == False:\n imgpath = imgdirpath+p_id+\"\\\\\"+emote+\"\\\\\"+os.listdir(imgdirpath+p_id+\"\\\\\"+emote)[0]\n copy(imgpath,newpath)\n os.rename(newpath+os.listdir(imgdirpath+p_id+\"\\\\\"+emote)[0],newpath+\"neutral.png\")\n p_hasNeutral = True\n \n\n","sub_path":"Project Image Parsing.py","file_name":"Project Image Parsing.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"42512046","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nfrom ..yobj.bool import FT\nfrom tlib import TestCase\nfrom itertools import repeat\n\n\nclass T(TestCase):\n r\"\"\"\n test if `FT` behave like `False`, `True` against `==`, `!=`.\n \"\"\"\n def test(self):\n compare_with = 0, 1, r'', r'a'\n n = compare_with.__len__()\n f, t = FT\n\n def gen(b):\n g = (\n (\n (l == r), (l != r),\n (r == l), (r != l),\n )\n for l, r in zip(repeat(b, n), compare_with)\n )\n return tuple(g)\n\n _0, _1, _2, _3 = map(gen, (False, True, f, t))\n expected = _0, _1\n actual = _2, _3\n self.assertEqual(expected, actual)\n","sub_path":"pypeline/test/bool.py","file_name":"bool.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"478290986","text":"# -*- coding: utf-8 -*-\nfrom . import topology\nfrom ... import toolz\nfrom ...errors import MarketGroupError\nfrom ..utils import get_single_reference_product\nfrom .markets import allocate_suppliers, annotate_exchange\nimport copy\nimport itertools\nimport logging\nimport numpy as np\nfrom functools import reduce\n\nlogger = logging.getLogger('ocelot')\n\n\ndef link_market_group_suppliers(data):\n \"\"\"Link suppliers to market groups, and adjust production volumes.\"\"\"\n filter_func = lambda x: x['type'] == \"market group\"\n market_groups = dict(toolz.groupby(\n 'reference product',\n list(filter(filter_func, data))\n ))\n\n # Check to make sure names are consistent\n for group in list(market_groups.values()):\n if not len({ds['name'] for ds in group}) == 1:\n raise MarketGroupError(\"Inconsistent activity names in market group\")\n\n for ref_product, groups in list(market_groups.items()):\n suppliers = [ds for ds in data\n if ds['type'] == 'market activity'\n and ds['reference product'] == ref_product]\n\n # Put groups second so that if there are duplicates, the group will be retrieved\n location_lookup = {x['location']: x for x in suppliers}\n supplier_lookup = copy.deepcopy(location_lookup)\n location_lookup.update({x['location']: x for x in groups})\n\n tree = topology.tree(itertools.chain(suppliers, groups))\n\n if [1 for x in groups if x['location'] == 'RoW']:\n # Handling RoW is a little tricky. The RoW market group can contain\n # markets which are not covered by other market groups. So we have\n # to resolve what RoW means in each context.\n row_faces = topology('__all__').difference(\n set.union(*[topology(x['location']) for x in groups])\n )\n # This will include RoW, if present, but not GLO\n row_activities = [x for x in suppliers\n if not topology(x['location']).difference(row_faces)\n and x['location'] != 'GLO']\n\n # RoW suppliers need to be removed from GLO suppliers\n if 'GLO' in tree:\n for obj in row_activities:\n if (obj['location'] != 'RoW'\n and obj['location'] in tree['GLO']):\n del tree['GLO'][obj['location']]\n else:\n row_activities = []\n\n # Turn `tree` from nested dictionaries to flat list of key, values.\n # Breadth first search\n def unroll(lst, dct):\n for key, value in list(dct.items()):\n lst.append((key, value))\n for value in list(dct.values()):\n if value:\n lst = unroll(lst, value)\n return lst\n\n flat = unroll([], tree)\n\n # Shouldn't exist - means that markets overlap\n for loc, children in flat:\n if children and not location_lookup[loc]['type'] == 'market group':\n raise MarketGroupError\n\n def translate(obj):\n return annotate_exchange(get_single_reference_product(obj), obj)\n\n for parent, children in flat[::-1]:\n # Special case RoW\n if parent == 'RoW':\n obj = location_lookup[parent]\n obj['suppliers'] = [translate(act) for act in row_activities]\n else:\n obj = location_lookup[parent]\n obj['suppliers'] = [translate(location_lookup[child])\n for child in children]\n\n # Also add supplier if market and market group have same location\n if (parent in supplier_lookup\n and location_lookup[parent]['type'] == 'market group'\n and parent != 'RoW'):\n obj['suppliers'].append(translate(supplier_lookup[parent]))\n\n # For consistency in testing\n obj['suppliers'].sort(key=lambda x: x['code'])\n\n for exc in obj['suppliers']:\n logger.info({\n 'type': 'table element',\n 'data': (obj['name'], obj['location'], exc['location'])\n })\n\n if not obj['suppliers']:\n del obj['suppliers']\n continue\n\n 
allocate_suppliers(obj)\n\n    return data\n\nlink_market_group_suppliers.__table__ = {\n    'title': \"Link and allocate suppliers for market groups. Suppliers can be market activities or other market groups.\",\n    'columns': [\"Name\", \"Location\", \"Supplier Location\"]\n}\n\n\ndef check_markets_only_supply_one_market_group(data):\n    \"\"\"Validation function to make sure that a market only supplies one market group.\n\n    Some markets have supplied multiple market groups in the past, probably due to a GIS implementation which considered one market group at a time.\n\n    Raises a ``MarketGroupError`` if duplicate supply is found.\"\"\"\n    filter_func = lambda x: x['type'] == \"market group\"\n    market_groups = dict(toolz.groupby(\n        'name',\n        list(filter(filter_func, data))\n    ))\n\n    code_dict = {x['code']: x for x in data}\n\n    message = \"Activity {} ({}) supplies multiple market groups: {} {} and {}.\"\n\n    for name, groups in list(market_groups.items()):\n        for group in groups:\n            input_codes = {exc['code'] for exc in group['exchanges']\n                           if exc['type'] == 'from technosphere'}\n            for other in (obj for obj in groups if obj is not group):\n                for exc in (exc for exc in other['exchanges']\n                            if exc['type'] == 'from technosphere'\n                            and exc['code'] in input_codes):\n                    # Duplicates are only prohibited if one market group is\n                    # completely within another market group.\n                    one = topology(group['location'])\n                    two = topology(other['location'])\n                    if one.difference(two) and two.difference(one):\n                        continue\n\n                    act = code_dict[exc['code']]\n                    raise MarketGroupError(message.format(\n                        act['name'], act['location'],\n                        name, group['location'], other['location'],\n                    ))\n    return data\n\n\ndef create_flow_array_from_dataset_and_dict(flow, ds, dct):\n    \"\"\"Unfinished stub.\"\"\"\n    # TODO: the original line ``ds_dict = {exc}`` referenced an undefined name; intent unclear\n    return np.array([])\n\n\ndef link_market_group_consumers(data):\n    filter_func = lambda x: x['type'] != \"market group\"\n    # ``filter``, not ``reduce``: filter_func is a one-argument predicate\n    for ds in filter(filter_func, data):\n        pass\n","sub_path":"ocelot/transformations/locations/market_groups.py","file_name":"market_groups.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"219448682","text":"class NumArray:\n def __init__(self):\n self.__values = []\n \n def __len__(self):\n print('__len__', len(self.__values))\n return len(self.__values)\n \n def append(self, value):\n self.__check_value(value)\n self.__values.append(value)\n \n def __getitem__(self, key):\n print('__getitem__', key)\n self.__check_key(key)\n return self.__values[key]\n \n def __setitem__(self, key, value):\n print('__setitem__', key, value)\n self.__check_key(key)\n self.__check_value(value)\n self.__values[key] = value\n \n def __check_key(self, key):\n if not isinstance(key, int): raise TypeError('keyはint型のみ可。:{0}'.format(type(key)))\n if len(self.__values) <= key: raise IndexError('keyが正数のときは0〜{0}の値のみ可。'.format(len(self.__values)-1))\n if key < len(self.__values) * -1: raise IndexError('keyが負数のときは-1〜{0}の値のみ可。'.format(len(self.__values) * -1))\n \n def __check_value(self, value):\n if not isinstance(value, int): raise TypeError('valueはint型のみ可。:{0}'.format(type(value)))\n \n\nn = NumArray()\n#n[0] = 0\n#n[1] = 100\n#n[2] = -100\nn.append(0)\nn.append(100)\nn.append(-100)\nprint(n)\nprint(n[0])\nprint(n[1])\nprint(n[2])\nprint(n[-1])\nprint(n[-2])\nprint(n[-3])\n","sub_path":"22/00/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"141531528","text":"from bs4 import BeautifulSoup\nfrom clarifai import rest\nfrom clarifai.rest import ClarifaiApp\nimport requests\nimport json\n\napp = ClarifaiApp(api_key='f7c8abc1ad6047f6a3a8e55749dce3dd');\nmodel = app.models.get(\"general-v1.3\")\n\ndef fillTweets(soup):\n for tweet in soup.findAll('div', {'class': 'tweet'}):\n tweetsIds.append(tweet['data-tweet-id'])\n tweets.append(tweet)\n\n for img in tweet.findAll('div', {'class': 'AdaptiveMediaOuterContainer'}):\n for i in img.findAll('img'):\n tweetsImgs.append(i['src'])\n\nname = \"emmanuelmacron\"\nurl = \"twitter.com/i/profiles/show/\" + name + \"/timeline/tweets/\"\n\nbaseUrl = \"\"\nr = requests.get(\"https://\" + url)\n\njsonValue = json.loads(str(r.text))\n\nsoup = BeautifulSoup(jsonValue['items_html'], \"html.parser\")\ntweetsIds = []\ntweets = []\ntweetsImgs = []\n\nfillTweets(soup)\n\nx = 0\n\nwhile x <= 5:\n x += 1\n ur2 = \"twitter.com/i/profiles/show/realDonaldTrump/timeline/tweets?include_available_features=1&include_entities=1&max_position=\" + tweetsIds[-1] + \"&reset_error_state=false\"\n r = requests.get(\"https://\" + ur2)\n\n jsonValue = json.loads(str(r.text))\n soup = BeautifulSoup(jsonValue['items_html'], \"html.parser\")\n fillTweets(soup)\n\n\n#print(tweetsImgs)\n\nfor img in tweetsImgs:\n print(model.predict_by_url(url=img))","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"309876817","text":"# Run all baseline experiments\n# run with bigram, bag_of_words, and gru\n\nfrom options import options, print_options, make_model_save_name_from_options\nfrom training_baseline import baseline_trainer\n\n#set exeriment specific options\n# baseline options\noptions['baseline_model'] = True\noptions['penalty_cost'] = 'penalty_l2'\n\n\noptions['Char_level'] = False\noptions['hinge_decay_percent'] = 1.0\noptions['baseline_print_costs_before_max'] = False\n\n\n\n\noptions['target_sentences'] = ['self']\n\noptions['encoder'] = 'zoneout_gru' #'gru' # 'bigram' # 'bag_of_words' # 'zoneout_gru'\noptions['is_test_time'] = False\noptions['z_prob_states'] = 0.05\n\noptions['exp_name'] = 'expA_Baselines_' + options['encoder']\n\noptions['n_words'] = 40000\noptions['max_sent_len'] = 35\n\nfor dim in [128, 300]:\n for l2_neg_sample in [True, False]:\n\n if (options['encoder'] == 'bag_of_words' or options['encoder'] == 'bag_of_words'\n or options['encoder'] == 'gru') and l2_neg_sample == True:\n continue # because these were done before my bug fix\n\n options['dim_word'] = dim\n options['hinge'] = options['dim_word'] # 128 in the paper\n options['penalty_cost_alpha'] = 1.0 #/(options['dim']*1.0) # note, needs to be positive\n options['dim'] = options['dim_word']\n\n options['baseline_num_l1_negative_samples'] = 20\n if l2_neg_sample:\n options['baseline_num_l2_negative_samples'] = 20\n else:\n options['baseline_num_l2_negative_samples'] = 0\n\n print(make_model_save_name_from_options(options))\n print_options(options)\n baseline_trainer(options)","sub_path":"Experiments/expA_baselines.py","file_name":"expA_baselines.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"518373959","text":"\"\"\"Exercício Python 061: Refaça o DESAFIO 051, lendo o primeiro termo e a razão\nde uma PA, mostrando os 10 primeiros termos da progressão usando a estrutura while.\"\"\"\n\nfrom time import sleep\n\nc = int(0)\n\nprint(' \\033[1;34mProgressão Aritmética')\n\ntermo = int(input('Digite o primeiro termo da PA: '))\nrazao = int(input('Digite a razão da PA: '))\n\nprint('\\nOs dez primeiros termos desta PA são:')\n\nwhile c < 10:\n print(f'{termo:3}', end=' ')\n termo += razao\n c += 1\n\n sleep(0.8)\n","sub_path":"ExerciceList/ex061.py","file_name":"ex061.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"530600628","text":"\"\"\"\nTests for the base command implementation\n\"\"\"\n\nimport unittest\nimport unittest.mock\n\nimport asynctest\n\nimport botman.bot\nimport botman.commands.base\nimport botman.errors\n\nimport tests.mixins\n\n@asynctest.fail_on(unused_loop=False)\nclass TestCommand(tests.mixins.DiscordMockMixin, asynctest.TestCase):\n \"\"\"\n Tests for the base command implementation\n \"\"\"\n\n def test_description_default(self):\n \"\"\"\n Tests that the description defaults to pydocs\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(\n __name__='test',\n __doc__='This is a description',\n )\n\n command = botman.commands.base.Command('test', mock_handler)\n\n self.assertEqual(\n 'This is a description',\n command.description,\n 'Description defaulted to pydocs',\n )\n\n def test_matches_default(self):\n \"\"\"\n Tests that matches defaults to True with no validators\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n\n mock_bot = self.get_mock_bot()\n mock_message = self.get_mock_message('test')\n\n self.assertTrue(\n command.matches(mock_bot, mock_message, ''),\n 'Mathces defaults to true',\n )\n\n def test_matches_calls_validators(self):\n \"\"\"\n Tests that matches calls the validators\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n\n mock_validator = unittest.mock.Mock()\n mock_validator.return_value = False\n\n command.validators.append(mock_validator)\n\n mock_bot = self.get_mock_bot()\n mock_message = self.get_mock_message('test')\n\n self.assertFalse(\n command.matches(mock_bot, mock_message, ''),\n 'Matches returned the correct value',\n )\n\n mock_validator.assert_called_with(mock_bot, mock_message, '')\n\n def test_parse_arguments(self):\n \"\"\"\n Tests that we can parse command line arguments\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n command.parameters = {\n 'req_val': botman.commands.base.StringArg(required=True),\n 'opt_val': botman.commands.base.StringArg(default='test'),\n }\n\n self.assertDictEqual(\n {\n 'req_val': 'my_val',\n 'opt_val': 'test',\n },\n command.parse_arguments('my_val'),\n 'Arguments were correctly parsed',\n )\n\n def test_parse_arguments_whitespace(self):\n \"\"\"\n Tests that whitespace is ignored when parsing\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n command.parameters = {\n 'val_1': botman.commands.base.StringArg(required=True),\n 'val_2': botman.commands.base.StringArg(required=True),\n }\n\n self.assertDictEqual(\n {\n 'val_1': 'one',\n 'val_2': 'two',\n },\n command.parse_arguments('one two '),\n 'Parser ignored whitespace',\n )\n\n def test_parse_arguments_quotes(self):\n \"\"\"\n Tests that quotes are respected when parsing\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n command.parameters = {\n 'val_1': botman.commands.base.StringArg(required=True),\n 'val_2': botman.commands.base.StringArg(required=True),\n }\n\n self.assertDictEqual(\n {\n 'val_1': 'one two',\n 'val_2': 'three',\n },\n command.parse_arguments('\"one two\" three'),\n 'Parser respected quotes',\n )\n\n def test_parse_arguments_rest(self):\n \"\"\"\n Tests that the extra arguments are put in the last argument\n 
\"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n command.parameters = {\n 'val_1': botman.commands.base.StringArg(required=True),\n 'val_2': botman.commands.base.StringArg(required=True),\n }\n\n self.assertDictEqual(\n {\n 'val_1': 'one',\n 'val_2': 'two three',\n },\n command.parse_arguments('one two three'),\n 'Parser respected quotes',\n )\n\n def test_parse_arguments_rest_str(self):\n \"\"\"\n Tests that the extra arguments are ignored if the last arg isnt a string\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n command.parameters = {\n 'val_1': botman.commands.base.StringArg(required=True),\n 'val_2': botman.commands.base.IntArg(required=True),\n }\n\n self.assertDictEqual(\n {\n 'val_1': 'one',\n 'val_2': 2,\n },\n command.parse_arguments('one 2 three'),\n 'Parser respected quotes',\n )\n\n async def test_call_not_matches(self):\n \"\"\"\n Tests that the handler is not called when the message doesn't match\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n\n mock_validator = unittest.mock.Mock()\n mock_validator.return_value = False\n\n command.validators.append(mock_validator)\n\n mock_bot = self.get_mock_bot()\n mock_message = self.get_mock_message('test')\n\n self.assertFalse(\n await command(mock_bot, mock_message, ''),\n 'Command returned false since it was not handled',\n )\n\n mock_handler.assert_not_called()\n\n async def test_call_matches(self):\n \"\"\"\n Tests that the handler is called when the message matches\n \"\"\"\n\n mock_handler = asynctest.CoroutineMock(__name__='test')\n command = botman.commands.base.Command('test', mock_handler)\n\n message = self.get_mock_message('testification')\n\n mock_bot = self.get_mock_bot()\n\n self.assertTrue(\n await command(mock_bot, message, ''),\n 'Command returned true since it was handled',\n )\n\n mock_handler.assert_called_with(mock_bot, message)\n\n","sub_path":"tests/commands/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"125807563","text":"\"\"\"This module provides simple Docker command-line functionality.\"\"\"\n\n\nfrom subprocess import CompletedProcess, run\nfrom typing import List, Optional\n\nfrom .return_code import ReturnCodeEnum\n\n\nDEFAULT_DOCKER_MODE_ON = False\n\n\nclass DockerRunReturnCode(ReturnCodeEnum):\n \"\"\"Invocation of the `docker run` command can result in one of the\n following special return codes. Any other return code is the result of\n invoking the indicated command in the container.\n\n Reference:\n https://docs.docker.com/engine/reference/run/#exit-status\n \"\"\"\n\n DockerDaemonError = 125\n ContainedCommandCannotBeInvokedError = 126\n ContainedCommandCannotBeFoundError = 127\n\n\nclass DockerRunError(Exception):\n \"\"\"An error raised when something fails while invoking Docker.\"\"\"\n\n def __init__(self, returncode: DockerRunReturnCode, stderr: str):\n super().__init__(f\"Error running Docker: {returncode.name}\\n stderr output captured below:\\n\\n{stderr}\")\n\n\ndef docker_run(container: str,\n args: Optional[List[str]] = None,\n input_bytes: Optional[bytes] = None) -> CompletedProcess:\n \"\"\"Runs a Docker container, with the optional arguments and input if\n provided.\n\n If the execution produces an error, a DockerRunError will be raised.\n \"\"\"\n if args is None:\n args = []\n command = ['docker', 'run', *args, container]\n # NOTE: flake8 doesn't seem to handle the calls to `run` correctly, but\n # mypy reports everything is fine here so we `noqa` to prevent flake8\n # complaining about what it doesn't understand.\n result = run(command, capture_output=True, input=input_bytes) # noqa\n if DockerRunReturnCode.has_value(result.returncode):\n code = DockerRunReturnCode(result.returncode)\n raise DockerRunError(code, result.stderr.decode())\n return result\n","sub_path":"sweetpea/core/generate/tools/docker_utility.py","file_name":"docker_utility.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"219847444","text":"\"\"\"\nGiven two arrays, write a function to compute their intersection.\n\nExample:\nGiven nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2].\n\nNote:\nEach element in the result must be unique.\nThe result can be in any order.\n\"\"\"\n\n\nclass Solution1(object):\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n numsIn1, intersect = set(), set()\n for num in nums1:\n numsIn1.add(num)\n\n for num in nums2:\n if num in numsIn1:\n intersect.add(num)\n\n return list(intersect)\n\n\nclass Solution(object):\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n res = []\n nums1.sort()\n nums2.sort()\n\n len1, len2 = len(nums1), len(nums2)\n i, j = 0, 0\n while i < len1 and j < len2:\n n1, n2 = nums1[i], nums2[j]\n if n1 == n2:\n if not res or n1 != res[-1]:\n res.append(n1)\n i += 1\n j += 1\n elif n1 < n2:\n i += 1\n elif n1 > n2:\n j += 1\n return res\n","sub_path":"easy/IntersectionOfTwoArrays.py","file_name":"IntersectionOfTwoArrays.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"13847903","text":"# -*- coding: utf-8 -*-\r\n# https://www.cnblogs.com/zhaof/p/8490045.html\r\n# 阻塞和await\r\n\r\nimport time\r\nimport asyncio\r\nimport sys\r\n\r\nnow = lambda: time.time()\r\n\r\nasync def do_some_work(x):\r\n print(\"waiting:\",x)\r\n await asyncio.sleep(x)\r\n return \"Done after {}s\".format(x)\r\n\r\n\r\nstart = now()\r\nprint(\"do_some_work: %s\" % do_some_work)\r\n#sys.exit(0)\r\n#这里是一个协程对象,这个时候 do_some_work 并没有执行、\r\ncoroutine = do_some_work(2)\r\nprint(coroutine)\r\n\r\n# 创建一个事件 loop\r\nloop = asyncio.get_event_loop()\r\n\r\n#task = asyncio.ensure_future(coroutine)\r\ntask = loop.create_task(coroutine)\r\nprint(\"task before run: %s \" % task)\r\n# 将协程加入到事件循环loop\r\nloop.run_until_complete(task)\r\nprint(\"task after run: %s\" % task)\r\nprint(\"Task ret:\", task.result())\r\nprint(\"Time:\", now()-start)\r\n\r\n","sub_path":"beyond/liaoxuefeng/asyncio/python_asyncio_model/asyncio6_await.py","file_name":"asyncio6_await.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"82318594","text":"\"\"\"\n练习2: 使用进程池完成\n拷贝一个文件夹, (文件夹下全都是普通文件没有子文件夹)\n\n* os.mkdir(\"xxx\") 创建一个新文件夹\n* 将目标文件夹下的文件都复制到新文件夹中\n 把复制每个文件看做进程池要执行的一件事\n\"\"\"\nfrom multiprocessing import Pool,Queue\nimport os\n\nq = Queue() # 消息队列\nold_folder = \"/home/tarena/FTP/\"\nnew_folder = \"./ftp/\"\n\n# 拷贝一个文件\ndef copy(filename):\n fr = open(old_folder+filename,'rb')\n fw = open(new_folder+filename,'wb')\n while True:\n data = fr.read(1024)\n if not data:\n break\n n = fw.write(data) # n表示已经拷贝了多少\n q.put(n) # 放入消息队列\n fr.close()\n fw.close()\n\n# 获取要拷贝的文件的大小\ndef get_size():\n total_size = 0\n # 累加每个文件大小\n for file in os.listdir(old_folder):\n total_size += os.path.getsize(old_folder+file)\n return total_size # 总大小\n\n\ndef main():\n os.mkdir(new_folder) # 创建新文件夹\n total_size = get_size() # 获取总大小\n\n # 创建进程池\n pool = Pool()\n # 复制一个文件就用一次copy函数\n for file in os.listdir(old_folder):\n pool.apply_async(func=copy,\n args=(file,))\n\n # 获取已经拷贝的大小\n copy_size = 0\n while copy_size < total_size:\n copy_size += q.get() # 获取已经拷贝字节\n # round(float,n) 保留小数点后 n 位\n print(round(copy_size/total_size*100,2),'%')\n\n pool.close()\n pool.join()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n","sub_path":"day13/exercise_3.py","file_name":"exercise_3.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"454617537","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'jbo'\n\nfrom django.core.cache import cache\nimport datetime as dt\n\ndef clear_banners_cache():\n from .models import Banner, Zone\n\n cache.delete(\"banners_zones\")\n cache.delete(\"banners_zones_eng\")\n\n for zone in Zone.objects.all():\n cache.delete(\"banner_placement:{0}\".format(zone.id))\n\n for banner in Banner.objects.all():\n cache.delete(\"hbanner.{0}\".format(banner.id))\n\n\ndef get_expires():\n # Сколько секунд в 1 дне\n sec_in_day = 86400\n # dt.datetime.today().weekday()\n d = dt.datetime.today()\n t = sec_in_day - (60 * 60 * d.hour) - (60 * d.minute)\n if t > 10:\n return t\n else:\n return sec_in_day","sub_path":"banners/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"328097478","text":"import torch\nimport torch.nn as nn\n\ninput_dim = 3\n\nclass My_Net_V2(nn.Module):\n \"\"\"Encoder is part of both TrajectoryGenerator and\n TrajectoryDiscriminator\"\"\"\n def __init__(\n self, seq_len=8, embedding_dim=30, first_h_dim=30, second_h_dim=60, mlp_dim=1024, num_layers=1,\n dropout=0.0, use_cuda=0\n ):\n super(My_Net_V2, self).__init__()\n\n self.mlp_dim = mlp_dim\n self.embedding_dim = embedding_dim\n self.first_h_dim = first_h_dim\n self.second_h_dim = second_h_dim\n self.num_layers = num_layers\n self.use_cuda = use_cuda\n self.seq_len = seq_len\n\n self.encoder = nn.LSTM(\n embedding_dim, first_h_dim, num_layers, dropout=dropout\n )\n\n self.state_embedding = nn.Linear(first_h_dim, second_h_dim)\n\n self.encoder2 = nn.LSTM(\n embedding_dim, second_h_dim, num_layers, dropout=dropout\n )\n\n self.hidden2pos = nn.Linear(second_h_dim, input_dim)\n\n self.relu = nn.ReLU()\n\n self.spatial_embedding = nn.Linear(input_dim, embedding_dim)\n\n def init_hidden(self, batch):\n state0 = torch.zeros(self.num_layers, batch, self.first_h_dim)\n state1 = torch.zeros(self.num_layers, batch, self.first_h_dim)\n\n if self.use_cuda == 1:\n state0 = state0.cuda()\n state1 = state1.cuda()\n\n return (state0, state1)\n\n def forward(self, obs_traj):\n\n batch = obs_traj.size(1)\n obs_traj_embedding = self.spatial_embedding(obs_traj.contiguous().view(-1, input_dim))\n obs_traj_embedding = obs_traj_embedding.view(-1, batch, self.embedding_dim)\n encoder_state_tuple = self.init_hidden(batch)\n output, state = self.encoder(obs_traj_embedding, encoder_state_tuple)\n\n state_0 = self.state_embedding(state[0])\n state_1 = self.state_embedding(state[1])\n\n output, state = self.encoder2(output, (state_0, state_1))\n\n cur_pos = self.hidden2pos(output.view(-1, self.second_h_dim))\n\n cur_pos = cur_pos.view(-1, batch, input_dim)\n\n return cur_pos\n","sub_path":"trajectory-prediction-master/lstmv2/models_lstm_v2.py","file_name":"models_lstm_v2.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"471470300","text":"import json\nimport pytest\nimport socket\nimport time\n\nfrom fixtures import (\n revaultd_stakeholder, revaultd_manager, bitcoind, directory, test_base_dir,\n test_name, revaultd_factory\n)\nfrom utils import TIMEOUT, wait_for, RpcError\n\ndef test_revaultd_stakeholder_starts(revaultd_stakeholder):\n revaultd_stakeholder.rpc.call(\"stop\")\n revaultd_stakeholder.wait_for_log(\"Stopping revaultd.\")\n revaultd_stakeholder.wait_for_log(\"Bitcoind received shutdown.\")\n revaultd_stakeholder.proc.wait(TIMEOUT)\n\n\ndef test_revaultd_manager_starts(revaultd_manager):\n revaultd_manager.rpc.call(\"stop\")\n revaultd_manager.wait_for_log(\"Stopping revaultd.\")\n revaultd_manager.wait_for_log(\"Bitcoind received shutdown.\")\n revaultd_manager.proc.wait(TIMEOUT)\n\n\ndef test_getinfo(revaultd_manager, bitcoind):\n res = revaultd_manager.rpc.call(\"getinfo\")\n assert res[\"network\"] == \"regtest\"\n assert res[\"sync\"] == 1.0\n assert res[\"version\"] == \"0.0.2\"\n\n wait_for(lambda: revaultd_manager.rpc.call(\"getinfo\")[\"blockheight\"] > 0)\n height = revaultd_manager.rpc.call(\"getinfo\")[\"blockheight\"]\n bitcoind.generate_block(1)\n wait_for(lambda: revaultd_manager.rpc.call(\"getinfo\")[\"blockheight\"]\n == height + 1)\n\n\ndef test_listvaults(revaultd_manager, bitcoind):\n res = revaultd_manager.rpc.call(\"listvaults\")\n assert res[\"vaults\"] == []\n\n # Send to a deposit address, we detect one unconfirmed vault\n amount_sent = 0.75\n addr = revaultd_manager.rpc.call(\"getdepositaddress\")[\"address\"]\n txid = bitcoind.rpc.sendtoaddress(addr, amount_sent)\n revaultd_manager.wait_for_log(\"Got a new unconfirmed deposit\")\n vault_list = revaultd_manager.rpc.call(\"listvaults\")[\"vaults\"]\n assert len(vault_list) == 1\n assert vault_list[0][\"status\"] == \"unconfirmed\"\n assert vault_list[0][\"txid\"] == txid\n assert vault_list[0][\"amount\"] == amount_sent * 10**8\n\n # Generate 5 blocks, it is still unconfirmed\n bitcoind.generate_block(5)\n assert revaultd_manager.rpc.call(\"listvaults\")[\"vaults\"][0][\"status\"] == \\\n \"unconfirmed\"\n\n # 1 more block will get it confirmed\n bitcoind.generate_block(1)\n revaultd_manager.wait_for_log(f\"Vault at .*{txid}.* is now confirmed\")\n assert revaultd_manager.rpc.call(\"listvaults\")[\"vaults\"][0][\"status\"] == \\\n \"funded\"\n\n # Of course, it persists across restarts.\n revaultd_manager.rpc.call(\"stop\")\n revaultd_manager.proc.wait(TIMEOUT)\n revaultd_manager.start()\n vault_list = revaultd_manager.rpc.call(\"listvaults\")[\"vaults\"]\n assert len(vault_list) == 1\n assert vault_list[0][\"status\"] == \"funded\"\n assert vault_list[0][\"txid\"] == txid\n assert vault_list[0][\"amount\"] == amount_sent * 10**8\n\n # And we can filter the result by status\n vault_list = revaultd_manager.rpc.call(\"listvaults\",\n [[\"unconfirmed\"]])[\"vaults\"]\n assert len(vault_list) == 0\n vault_list = revaultd_manager.rpc.call(\"listvaults\",\n [[\"funded\"]])[\"vaults\"]\n assert len(vault_list) == 1\n assert vault_list[0][\"status\"] == \"funded\"\n assert vault_list[0][\"txid\"] == txid\n assert vault_list[0][\"amount\"] == amount_sent * 10**8\n\n # And we can filter the result by outpoints\n outpoint = f\"{txid}:{vault_list[0]['vout']}\"\n vault_list = revaultd_manager.rpc.call(\"listvaults\",\n [[], [outpoint]])[\"vaults\"]\n assert len(vault_list) == 1\n assert vault_list[0][\"status\"] == \"funded\"\n assert vault_list[0][\"txid\"] == txid\n assert vault_list[0][\"amount\"] == amount_sent * 
10**8\n\n outpoint = f\"{txid}:{100}\"\n vault_list = revaultd_manager.rpc.call(\"listvaults\",\n [[], [outpoint]])[\"vaults\"]\n assert len(vault_list) == 0\n\n\ndef test_getdepositaddress(revaultd_factory, bitcoind):\n (stks, mans) = revaultd_factory.deploy(4, 2)\n addr = stks[0].rpc.call(\"getdepositaddress\")[\"address\"]\n\n # If we don't use it, we'll get the same. From us and everyone else\n for n in stks + mans:\n assert addr == n.rpc.call(\"getdepositaddress\")[\"address\"]\n\n # But if we do, we'll get the next one (but the same from everyone)!\n bitcoind.rpc.sendtoaddress(addr, 0.22222)\n stks[0].wait_for_logs([\"Got a new unconfirmed deposit\",\n \"Incremented deposit derivation index\"])\n addr2 = stks[0].rpc.call(\"getdepositaddress\")[\"address\"]\n assert addr2 != addr\n for n in stks[1:] + mans:\n n.wait_for_logs([\"Got a new unconfirmed deposit\",\n \"Incremented deposit derivation index\"])\n assert addr2 == n.rpc.call(\"getdepositaddress\")[\"address\"]\n\n\ndef test_getrevocationtxs(revaultd_factory, bitcoind):\n (stks, mans) = revaultd_factory.deploy(4, 2)\n addr = stks[0].rpc.call(\"getdepositaddress\")[\"address\"]\n\n # If the vault isn't known, it'll fail (note: it's racy for others but\n # behaviour is the same is the vault isn't known)\n txid = bitcoind.rpc.sendtoaddress(addr, 0.22222)\n stks[0].wait_for_logs([\"Got a new unconfirmed deposit\",\n \"Incremented deposit derivation index\"])\n vault = stks[0].rpc.listvaults()[\"vaults\"][0]\n for n in stks + mans:\n with pytest.raises(RpcError, match=\".* does not refer to a known and \"\n \"confirmed vault\"):\n n.rpc.getrevocationtxs(f\"{vault['txid']}:{vault['vout']}\")\n\n # Now, get it confirmed. They all derived the same transactions\n bitcoind.generate_block(6, txid)\n wait_for(lambda: stks[0].rpc.listvaults()[\"vaults\"][0][\"status\"] == \"funded\")\n txs = stks[0].rpc.getrevocationtxs(f\"{vault['txid']}:{vault['vout']}\")\n for n in stks[1:] + mans:\n wait_for(lambda: n.rpc.listvaults()[\"vaults\"][0][\"status\"] == \"funded\")\n assert txs == n.rpc.getrevocationtxs(f\"{vault['txid']}:{vault['vout']}\")\n\n\ndef test_listtransactions(revaultd_factory, bitcoind):\n (stks, mans) = revaultd_factory.deploy(4, 2)\n\n addr = stks[0].rpc.call(\"getdepositaddress\")[\"address\"]\n txid = bitcoind.rpc.sendtoaddress(addr, 0.22222)\n wait_for(lambda: len(stks[0].rpc.call(\"listvaults\")[\"vaults\"]) > 0)\n vault = stks[0].rpc.call(\"listvaults\")[\"vaults\"][0]\n deposit = f\"{vault['txid']}:{vault['vout']}\"\n\n res = stks[0].rpc.listtransactions([deposit])[\"transactions\"][0]\n # Sanity check the API\n assert (\"deposit\" in res and \"unvault\" in res and \"cancel\" in res\n and \"emergency\" in res and \"unvault_emergency\" in res)\n assert (stks[0].rpc.listtransactions([deposit]) ==\n stks[0].rpc.listtransactions())\n # The deposit is always fully signed..\n assert \"hex\" in res[\"deposit\"]\n # .. And broadcast\n assert \"received_at\" in res[\"deposit\"]\n # .. 
But right now it's not confirmed\n assert \"blockheight\" not in res[\"deposit\"]\n\n # Get it confirmed\n bitcoind.generate_block(6, txid)\n wait_for(lambda: stks[0].rpc.listvaults()[\"vaults\"][0][\"status\"] == \"funded\")\n res = stks[0].rpc.listtransactions([deposit])[\"transactions\"][0]\n assert \"blockheight\" in res[\"deposit\"]\n\n # Sanity check they all output the same transactions..\n sorted_res = sorted(res.items())\n for n in stks[1:] + mans:\n res = n.rpc.listtransactions([deposit])[\"transactions\"][0]\n assert sorted(res.items()) == sorted_res\n","sub_path":"tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":7413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"497906413","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport cv2\nimport time\n\n#InfoGAN_mnist_38noise_DC-2000:不使用latent code的损失,也不使用分类损失,就是一般的DCGAN\n#InfoGAN_mnist_38noise_1-2000:latent_loss的损失权重为1\n#InfoGAN_mnist_38noise_01-2000:latent_loss的损失权重为0.1\n#InfoGAN_mnist_100noise_1-2000:latent_loss的损失权重为1,且初始噪声长度为100,以上三者都是38\n\n# 可对比InfoGAN_mnist_38noise_DC和InfoGAN_mnist_38noise_1可以看到,在相同噪声长度的情况下,\n# InfoGAN随着latent code变化更明显且有意义,而DCGAN变化不明显(此时前两维的latent code和噪声\n# 中其他维度的值没什么区别,只是一般的噪声,不带有潜在编码的意义)\n\nmodel_path = 'models_fashion/pb/InfoGAN_fashion_38noise_-2000.pb'\n\nimage_height = 28\nimage_width = 28\nprior_size=38\ntest_sample_num = 30\nlatent_code_size = 2\none_hot=np.eye(10)\n\ndef eval():\n sess = tf.Session()\n with tf.gfile.FastGFile(model_path, \"rb\") as fr:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(fr.read())\n sess.graph.as_default()\n tf.import_graph_def(graph_def, name=\"\")\n\n sess.run(tf.global_variables_initializer())\n\n prior_input = sess.graph.get_tensor_by_name('z_prior:0')\n latent_code_input = sess.graph.get_tensor_by_name('latent_code:0')\n generated_output = sess.graph.get_tensor_by_name('generated_output:0')\n label_place = sess.graph.get_tensor_by_name('label:0')\n while True:\n #指定标签\n # label_org = np.array([[3]])\n #随机标签\n label_org = np.random.randint(0, 10, size=[1, ])\n z_prior = np.random.uniform(-1, 1, size=(1, prior_size))\n\n latent_codes = np.ones([latent_code_size, test_sample_num])\n latent_codes[1, :] = np.linspace(-1.0, 1.0, test_sample_num)\n latent_codes[0, :] = 1.0\n latent_codes = latent_codes.T\n\n #noise和label不变,改变latent code,查看对应的变化\n for latent_code in latent_codes:\n latent_code = latent_code[np.newaxis,:]\n image_output = sess.run(generated_output,feed_dict={\n prior_input:z_prior,label_place:label_org,\n latent_code_input:latent_code})\n # print(image_output)\n image_reshape_org = image_output[0].reshape((image_height,image_width))\n\n\n image_reshape = ((image_reshape_org+1)/2)*255.0\n image_show = image_reshape.astype(np.uint8)\n\n print(\"label:\",label_org)\n image_show=cv2.resize(image_show,(image_height*2,image_width*2))\n cv2.imshow(\"image_fine\", image_show)\n cv2.waitKey(0)\n\n\n\nif __name__ == '__main__':\n eval()","sub_path":"Mnist_Based/InfoGAN/eval_InfoGAN_latent.py","file_name":"eval_InfoGAN_latent.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"346994392","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n#import timeit\r\nfrom sys import stdin, stderr, stdout\r\nimport cProfile, pstats\r\nimport re\r\n\r\npr = cProfile.Profile()\r\npr.disable()\r\n\r\nclass Node():\r\n def __init__(self):\r\n self.children = {}\r\n self.pos = []\r\n\r\n def addWord(self, word, pos):\r\n try:\r\n self.children[word[0]].addWord(word[1:], pos)\r\n except KeyError as e:\r\n self.children[word[0]] = Node()\r\n self.children[word[0]].addWord(word[1:], pos)\r\n except IndexError as e:\r\n self.pos.append(pos)\r\n\r\n\r\ndef bygg(ordliste):\r\n # SKRIV DIN KODE HER\r\n head = Node()\r\n for (word, pos) in ordliste:\r\n head.addWord(word, pos)\r\n\r\n return head\r\n\r\ndef posisjoner_spm(word, node, index=0):\r\n #pr.enable()\r\n posList = []\r\n\r\n #return [posisjoner(word,child,index+1) for child in node.children.values()]\r\n\r\n for child in node.children.values():\r\n posList[0:0] = posisjoner(word, child, index + 1)\r\n\r\n #pr.disable()\r\n return posList\r\n\r\n\r\ndef posisjoner(word, node, index=0):\r\n # --- base case ---\r\n if index == len(word):\r\n return node.pos\r\n\r\n # --- recursive search ---\r\n try:\r\n #if word[index] in node.children.keys():\r\n return posisjoner(word, node.children[word[index]], index + 1)\r\n #except KeyError:\r\n except:\r\n if word[index] == '?':\r\n return posisjoner_spm(word, node, index)\r\n\r\n return []\r\n\r\ndef main():\r\n\r\n #ord = stdin.readline().split()\r\n sentence = stdin.readline()\r\n print(ord)\r\n pat = re.compile(r'\\w+\\?*\\w*', re.I|re.M)\r\n\r\n #print(re.findall(pat, ord))\r\n\r\n fi = re.finditer(pat, sentence)\r\n print(\"Match:\", fi)\r\n\r\n for word in stdin:\r\n \r\n print(word, end='')\r\n\r\n #fi = re.findall(pat, sentence)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #pr.enable()\r\n\r\n main()\r\n #pr.disable()\r\n\r\n #sortby = 'cumulative'\r\n #ps = pstats.Stats(pr, stream=stdout).sort_stats(sortby)\r\n #ps.print_stats()","sub_path":"tdt4120/ordbok.py","file_name":"ordbok.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"95886595","text":"\"\"\"\nSegmentation U-net for 3D mri data\nhttps://arxiv.org/abs/1701.03056\n\nAdjusted with Monte Carlo dropout\n\"\"\"\nfrom tensorflow.keras.layers import Conv3D, Conv3DTranspose, PReLU, Add, Concatenate, Input, Reshape, Dropout, Activation, Layer, Activation, UpSampling3D, AveragePooling3D\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.initializers import RandomNormal, Zeros, Ones\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport math\nfrom tfk_instance_norm import InstanceNormalization\nfrom tensorflow.keras.layers import Lambda\n\n############ Helpful functions ###########\n\ndef getShape(x):\n inputShape = []\n for i in range(1,5):\n inputShape.append(int(x.get_shape()[i]))\n return tuple(inputShape)\n\ndef crop(i):\n # Crops (or slices) a Tensor on a given dimension from start to end\n # example : to crop tensor x[:, :, 5:10]\n # call slice(2, 5, 10) as you want to crop on the second dimension\n\n def func(x):\n return x[:,:,i]\n return Lambda(func)\n\n############ Contracting block ###########\n\ndef contrac_block(inp,features):\n conv = Conv3D(features, (1,1,1), strides=(2, 2,2), padding='valid',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(inp)\n batch = InstanceNormalization(axis=4)(conv)\n Pre = PReLU()(batch)\n conv2 = Conv3D(features, (3,3,3), strides=(1, 1,1), padding='same',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(Pre)\n add = Add()([conv,conv2])\n batch2 = InstanceNormalization(axis=4)(add)\n Pre2 = PReLU()(batch2)\n drop2 = Dropout(0.2)(Pre2, training = True)\n return drop2\n\n############ Expanding block ###########\n\ndef expand_block(inp,inp2,features):\n conv = Conv3D(features, (1,1,1), strides=(1,1,1), padding='valid',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(inp)\n batch = InstanceNormalization(axis=4)(conv)\n Pre = PReLU()(batch)\n \n # Deconvolution\n outShap = list(getShape(conv))\n for i in range(3):\n outShap[i]=2*outShap[i]\n outShap.insert(0,None)\n outShap = tuple(outShap)\n deconv = Conv3DTranspose(features, (1, 1, 1),strides=(2,2,2), padding='valid',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(Pre)\n\n batch2 = InstanceNormalization(axis=4)(deconv)\n Pre2 = PReLU()(batch2)\n merg = Concatenate(axis=-1)([Pre2,inp2])\n\n #Convolution\n conv2 = Conv3D(features, (3,3,3), strides=(1,1,1), padding='same',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(merg)\n batch3 = InstanceNormalization(axis=4)(conv2)\n Pre3 = PReLU()(batch3)\n drop2 = Dropout(0.2)(Pre3, training = True)\n\n \n return drop2\n\n############ MCD U-net architecture ###########\n\ndef MCD_UNet(input_shape=(144, 144, 144, 1),outputChannel=3):\n inp = Input(shape=input_shape)\n conv = Conv3D(8, (3,3,3), strides=(1,1,1), padding='same',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(inp)\n batch = InstanceNormalization(axis=4)(conv)\n Pre = PReLU()(batch)\n\n #Contracting Blocks\n con1 = contrac_block(batch,16)\n con2 = contrac_block(con1,32)\n con3 = contrac_block(con2,64)\n\n #Expanding Blocks\n exp1 = expand_block(con3,con2,32)\n exp2 = expand_block(exp1,con1,16)\n exp3 = expand_block(exp2,Pre,8)\n\n conv1 = Conv3D(outputChannel, (1,1,1), strides=(1,1,1), padding='valid',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(exp3)\n\n conv2 = Conv3D(outputChannel, (1,1,1), strides=(1,1,1), padding='valid',kernel_initializer=RandomNormal(mean=0.0, 
stddev=0.01, seed=None))(exp2)\n\n conv3 = Conv3D(outputChannel, (1,1,1), strides=(1,1,1), padding='valid',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(exp1)\n up = UpSampling3D(size=(2, 2, 2))(conv3)\n\n merg = Add()([up,conv2])\n up2 = UpSampling3D(size=(2, 2, 2))(merg)\n\n merg2 = Add()([up2,conv1])\n\n predConv = Conv3D(outputChannel, (1,1,1), strides=(1,1,1), padding='valid',kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None))(merg2)\n \n\n out = Activation(\"softmax\")(predConv)\n\n return Model(inputs=inp, outputs=out)\n\n\n","sub_path":"MC_Unet/MCD_unet.py","file_name":"MCD_unet.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"115704892","text":"import numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose, assert_array_equal\nfrom scipy.interpolate import BSpline\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import KBinsDiscretizer, SplineTransformer\nfrom sklearn.utils.fixes import linspace, sp_version\n\nfrom pkg_resources import parse_version\n\n\n# TODO: add PolynomialFeatures if it moves to _polynomial.py\n@pytest.mark.parametrize(\"est\", (SplineTransformer,))\ndef test_polynomial_and_spline_array_order(est):\n \"\"\"Test that output array has the given order.\"\"\"\n X = np.arange(10).reshape(5, 2)\n\n def is_c_contiguous(a):\n return np.isfortran(a.T)\n\n assert is_c_contiguous(est().fit_transform(X))\n assert is_c_contiguous(est(order=\"C\").fit_transform(X))\n assert np.isfortran(est(order=\"F\").fit_transform(X))\n\n\n@pytest.mark.parametrize(\n \"params, err_msg\",\n [\n ({\"degree\": -1}, \"degree must be a non-negative integer.\"),\n ({\"degree\": 2.5}, \"degree must be a non-negative integer.\"),\n ({\"degree\": \"string\"}, \"degree must be a non-negative integer.\"),\n ({\"n_knots\": 1}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": 1}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": 2.5}, \"n_knots must be a positive integer >= 2.\"),\n ({\"n_knots\": \"string\"}, \"n_knots must be a positive integer >= 2.\"),\n ({\"knots\": 1}, \"Expected 2D array, got scalar array instead:\"),\n ({\"knots\": [1, 2]}, \"Expected 2D array, got 1D array instead:\"),\n (\n {\"knots\": [[1]]},\n r\"Number of knots, knots.shape\\[0\\], must be >= 2.\",\n ),\n (\n {\"knots\": [[1, 5], [2, 6]]},\n r\"knots.shape\\[1\\] == n_features is violated.\",\n ),\n (\n {\"knots\": [[1], [1], [2]]},\n \"knots must be sorted without duplicates.\",\n ),\n ({\"knots\": [[2], [1]]}, \"knots must be sorted without duplicates.\"),\n (\n {\"extrapolation\": None},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n (\n {\"extrapolation\": 1},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n (\n {\"extrapolation\": \"string\"},\n \"extrapolation must be one of 'error', 'constant', 'linear', \"\n \"'continue' or 'periodic'.\",\n ),\n ({\"include_bias\": None}, \"include_bias must be bool.\"),\n ({\"include_bias\": 1}, \"include_bias must be bool.\"),\n ({\"include_bias\": \"string\"}, \"include_bias must be bool.\"),\n (\n {\"extrapolation\": \"periodic\", \"n_knots\": 3, \"degree\": 3},\n \"Periodic splines require degree < n_knots. Got n_knots=\"\n \"3 and degree=3.\"\n ),\n (\n {\"extrapolation\": \"periodic\", \"knots\": [[0], [1]], \"degree\": 2},\n \"Periodic splines require degree < n_knots. 
Got n_knots=2 and \"\n \"degree=2.\"\n )\n ],\n)\ndef test_spline_transformer_input_validation(params, err_msg):\n \"\"\"Test that we raise errors for invalid input in SplineTransformer.\"\"\"\n X = [[1], [2]]\n\n with pytest.raises(ValueError, match=err_msg):\n SplineTransformer(**params).fit(X)\n\n\ndef test_spline_transformer_manual_knot_input():\n \"\"\"\n Test that array-like knot positions in SplineTransformer are accepted.\n \"\"\"\n X = np.arange(20).reshape(10, 2)\n knots = [[0.5, 1], [1.5, 2], [5, 10]]\n st1 = SplineTransformer(degree=3, knots=knots).fit(X)\n knots = np.asarray(knots)\n st2 = SplineTransformer(degree=3, knots=knots).fit(X)\n for i in range(X.shape[1]):\n assert_allclose(st1.bsplines_[i].t, st2.bsplines_[i].t)\n\n\n@pytest.mark.parametrize(\"extrapolation\", [\"continue\", \"periodic\"])\ndef test_spline_transformer_integer_knots(extrapolation):\n \"\"\"Test that SplineTransformer accepts integer value knot positions.\"\"\"\n X = np.arange(20).reshape(10, 2)\n knots = [[0, 1], [1, 2], [5, 5], [11, 10], [12, 11]]\n _ = SplineTransformer(\n degree=3,\n knots=knots,\n extrapolation=extrapolation\n ).fit_transform(X)\n\n\ndef test_spline_transformer_feature_names():\n \"\"\"Test that SplineTransformer generates correct features name.\"\"\"\n X = np.arange(20).reshape(10, 2)\n splt = SplineTransformer(n_knots=3, degree=3, include_bias=True).fit(X)\n feature_names = splt.get_feature_names()\n assert_array_equal(\n feature_names,\n [\n \"x0_sp_0\",\n \"x0_sp_1\",\n \"x0_sp_2\",\n \"x0_sp_3\",\n \"x0_sp_4\",\n \"x1_sp_0\",\n \"x1_sp_1\",\n \"x1_sp_2\",\n \"x1_sp_3\",\n \"x1_sp_4\",\n ],\n )\n\n splt = SplineTransformer(n_knots=3, degree=3, include_bias=False).fit(X)\n feature_names = splt.get_feature_names([\"a\", \"b\"])\n assert_array_equal(\n feature_names,\n [\n \"a_sp_0\",\n \"a_sp_1\",\n \"a_sp_2\",\n \"a_sp_3\",\n \"b_sp_0\",\n \"b_sp_1\",\n \"b_sp_2\",\n \"b_sp_3\",\n ],\n )\n\n\n@pytest.mark.parametrize(\"degree\", range(1, 5))\n@pytest.mark.parametrize(\"n_knots\", range(3, 5))\n@pytest.mark.parametrize(\"knots\", [\"uniform\", \"quantile\"])\n@pytest.mark.parametrize(\"extrapolation\", [\"constant\", \"periodic\"])\ndef test_spline_transformer_unity_decomposition(\n degree,\n n_knots,\n knots,\n extrapolation\n):\n \"\"\"Test that B-splines are indeed a decomposition of unity.\n\n Splines basis functions must sum up to 1 per row, if we stay in between\n boundaries.\n \"\"\"\n X = np.linspace(0, 1, 100)[:, None]\n # make the boundaries 0 and 1 part of X_train, for sure.\n X_train = np.r_[[[0]], X[::2, :], [[1]]]\n X_test = X[1::2, :]\n\n if extrapolation == \"periodic\":\n n_knots = n_knots + degree # periodic splines require degree < n_knots\n\n splt = SplineTransformer(\n n_knots=n_knots,\n degree=degree,\n knots=knots,\n include_bias=True,\n extrapolation=extrapolation\n )\n splt.fit(X_train)\n for X in [X_train, X_test]:\n assert_allclose(np.sum(splt.transform(X), axis=1), 1)\n\n\n@pytest.mark.parametrize([\"bias\", \"intercept\"], [(True, False), (False, True)])\ndef test_spline_transformer_linear_regression(bias, intercept):\n \"\"\"Test that B-splines fit a sinusodial curve pretty well.\"\"\"\n X = np.linspace(0, 10, 100)[:, None]\n y = np.sin(X[:, 0]) + 2 # +2 to avoid the value 0 in assert_allclose\n pipe = Pipeline(\n steps=[\n (\n \"spline\",\n SplineTransformer(\n n_knots=15,\n degree=3,\n include_bias=bias,\n extrapolation=\"constant\",\n ),\n ),\n (\"ols\", LinearRegression(fit_intercept=intercept)),\n ]\n )\n pipe.fit(X, y)\n 
assert_allclose(pipe.predict(X), y, rtol=1e-3)\n\n\n@pytest.mark.parametrize(\"knots, n_knots, degree\", [\n (\"uniform\", 5, 3),\n (\"uniform\", 12, 8),\n (\n [[-1.0, 0.0], [0, 1.0], [0.1, 2.0], [0.2, 3.0], [0.3, 4.0], [1, 5.0]],\n 100, # this gets ignored.\n 3\n )\n])\ndef test_spline_transformer_periodicity_of_extrapolation(\n knots, n_knots, degree\n):\n \"\"\"Test that the SplineTransformer is periodic for multiple features.\"\"\"\n X_1 = linspace((-1, 0), (1, 5), 10)\n X_2 = linspace((1, 5), (3, 10), 10)\n\n splt = SplineTransformer(\n knots=knots,\n n_knots=n_knots,\n degree=degree,\n extrapolation=\"periodic\"\n )\n splt.fit(X_1)\n\n assert_allclose(splt.transform(X_1), splt.transform(X_2))\n\n\n@pytest.mark.parametrize([\"bias\", \"intercept\"], [(True, False), (False, True)])\ndef test_spline_transformer_periodic_linear_regression(bias, intercept):\n \"\"\"Test that B-splines fit a periodic curve pretty well.\"\"\"\n # \"+ 3\" to avoid the value 0 in assert_allclose\n def f(x):\n return np.sin(2 * np.pi * x) - np.sin(8 * np.pi * x) + 3\n\n X = np.linspace(0, 1, 101)[:, None]\n pipe = Pipeline(\n steps=[\n (\n \"spline\",\n SplineTransformer(\n n_knots=20,\n degree=3,\n include_bias=bias,\n extrapolation=\"periodic\",\n ),\n ),\n (\"ols\", LinearRegression(fit_intercept=intercept)),\n ]\n )\n pipe.fit(X, f(X[:, 0]))\n\n # Generate larger array to check periodic extrapolation\n X_ = np.linspace(-1, 2, 301)[:, None]\n predictions = pipe.predict(X_)\n assert_allclose(predictions, f(X_[:, 0]), atol=0.01, rtol=0.01)\n assert_allclose(predictions[0:100], predictions[100:200], rtol=1e-3)\n\n\n@pytest.mark.skipif(\n sp_version < parse_version(\"1.0.0\"),\n reason=\"Periodic extrapolation not yet implemented for BSpline.\",\n)\ndef test_spline_transformer_periodic_spline_backport():\n \"\"\"Test that the backport of extrapolate=\"periodic\" works correctly\"\"\"\n X = np.linspace(-2, 3.5, 10)[:, None]\n degree = 2\n\n # Use periodic extrapolation backport in SplineTransformer\n transformer = SplineTransformer(\n degree=degree,\n extrapolation=\"periodic\",\n knots=[[-1.0], [0.0], [1.0]]\n )\n Xt = transformer.fit_transform(X)\n\n # Use periodic extrapolation in BSpline\n coef = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n spl = BSpline(np.arange(-3, 4), coef, degree, \"periodic\")\n Xspl = spl(X[:, 0])\n assert_allclose(Xt, Xspl)\n\n\ndef test_spline_transformer_periodic_splines_periodicity():\n \"\"\"\n Test if shifted knots result in the same transformation up to permutation.\n \"\"\"\n X = np.linspace(0, 10, 101)[:, None]\n\n transformer_1 = SplineTransformer(\n degree=3,\n extrapolation=\"periodic\",\n knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]]\n )\n\n transformer_2 = SplineTransformer(\n degree=3,\n extrapolation=\"periodic\",\n knots=[[1.0], [3.0], [4.0], [5.0], [8.0], [9.0]]\n )\n\n Xt_1 = transformer_1.fit_transform(X)\n Xt_2 = transformer_2.fit_transform(X)\n\n assert_allclose(Xt_1, Xt_2[:, [4, 0, 1, 2, 3]])\n\n\n@pytest.mark.parametrize(\"degree\", [3, 5])\ndef test_spline_transformer_periodic_splines_smoothness(degree):\n \"\"\"Test that spline transformation is smooth at first / last knot.\"\"\"\n X = np.linspace(-2, 10, 10_000)[:, None]\n\n transformer = SplineTransformer(\n degree=degree,\n extrapolation=\"periodic\",\n knots=[[0.0], [1.0], [3.0], [4.0], [5.0], [8.0]]\n )\n Xt = transformer.fit_transform(X)\n\n delta = (X.max() - X.min()) / len(X)\n tol = 10 * delta\n\n dXt = Xt\n # We expect splines of degree `degree` to be (`degree`-1) times\n # 
continuously differentiable. I.e. for d = 0, ..., `degree` - 1 the d-th\n # derivative should be continous. This is the case if the (d+1)-th\n # numerical derivative is reasonably small (smaller than `tol` in absolute\n # value). We thus compute d-th numeric derivatives for d = 1, ..., `degree`\n # and compare them to `tol`.\n #\n # Note that the 0-th derivative is the function itself, such that we are\n # also checking its continuity.\n for d in range(1, degree + 1):\n # Check continuity of the (d-1)-th derivative\n diff = np.diff(dXt, axis=0)\n assert np.abs(diff).max() < tol\n # Compute d-th numeric derivative\n dXt = diff / delta\n\n # As degree `degree` splines are not `degree` times continously\n # differentiable at the knots, the `degree + 1`-th numeric derivative\n # should have spikes at the knots.\n diff = np.diff(dXt, axis=0)\n assert np.abs(diff).max() > 1\n\n\n@pytest.mark.parametrize([\"bias\", \"intercept\"], [(True, False), (False, True)])\n@pytest.mark.parametrize(\"degree\", [1, 2, 3, 4, 5])\ndef test_spline_transformer_extrapolation(bias, intercept, degree):\n \"\"\"Test that B-spline extrapolation works correctly.\"\"\"\n # we use a straight line for that\n X = np.linspace(-1, 1, 100)[:, None]\n y = X.squeeze()\n\n # 'constant'\n pipe = Pipeline(\n [\n [\n \"spline\",\n SplineTransformer(\n n_knots=4,\n degree=degree,\n include_bias=bias,\n extrapolation=\"constant\",\n ),\n ],\n [\"ols\", LinearRegression(fit_intercept=intercept)],\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict([[-10], [5]]), [-1, 1])\n\n # 'linear'\n pipe = Pipeline(\n [\n [\n \"spline\",\n SplineTransformer(\n n_knots=4,\n degree=degree,\n include_bias=bias,\n extrapolation=\"linear\",\n ),\n ],\n [\"ols\", LinearRegression(fit_intercept=intercept)],\n ]\n )\n pipe.fit(X, y)\n assert_allclose(pipe.predict([[-10], [5]]), [-10, 5])\n\n # 'error'\n splt = SplineTransformer(\n n_knots=4, degree=degree, include_bias=bias, extrapolation=\"error\"\n )\n splt.fit(X)\n with pytest.raises(ValueError):\n splt.transform([[-10]])\n with pytest.raises(ValueError):\n splt.transform([[5]])\n\n\ndef test_spline_transformer_kbindiscretizer():\n \"\"\"Test that a B-spline of degree=0 is equivalent to KBinsDiscretizer.\"\"\"\n rng = np.random.RandomState(97531)\n X = rng.randn(200).reshape(200, 1)\n n_bins = 5\n n_knots = n_bins + 1\n\n splt = SplineTransformer(\n n_knots=n_knots, degree=0, knots=\"quantile\", include_bias=True\n )\n splines = splt.fit_transform(X)\n\n kbd = KBinsDiscretizer(\n n_bins=n_bins, encode=\"onehot-dense\", strategy=\"quantile\"\n )\n kbins = kbd.fit_transform(X)\n\n # Though they should be exactly equal, we test approximately with high\n # accuracy.\n assert_allclose(splines, kbins, rtol=1e-13)\n\n\n@pytest.mark.parametrize(\"n_knots\", [5, 10])\n@pytest.mark.parametrize(\"include_bias\", [True, False])\n@pytest.mark.parametrize(\"degree\", [3, 5])\ndef test_spline_transformer_n_features_out(n_knots, include_bias, degree):\n \"\"\"Test that transform results in n_features_out_ features.\"\"\"\n splt = SplineTransformer(\n n_knots=n_knots,\n degree=degree,\n include_bias=include_bias\n )\n X = np.linspace(0, 1, 10)[:, None]\n splt.fit(X)\n\n assert splt.transform(X).shape[1] == splt.n_features_out_\n","sub_path":"sklearn/preprocessing/tests/test_polynomial.py","file_name":"test_polynomial.py","file_ext":"py","file_size_in_byte":14540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"318388476","text":"# -*- encoding: utf-8 -*-\n#\n# Copyright 2013 Jay Pipes\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport sys\n\nimport mock\nimport testtools\nfrom testtools import matchers\n\nfrom procession import config\nfrom procession import log\n\n\n# We cannot use tests.base.UnitTest, because that creates a logging\n# fixture that manipulates the log handling for root logger.\nclass TestLogging(testtools.TestCase):\n\n def test_log_config_file(self):\n with mock.patch('logging.config.fileConfig') as fc_mock:\n options = {\n 'log': {\n 'conf_file': '/some/path'\n },\n }\n conf = config.Config(**options)\n log.init(conf)\n fc_mock.assert_called_once_with('/some/path')\n\n def test_null_logger_removed_from_root(self):\n sh = logging.StreamHandler(sys.stderr)\n nh = logging.NullHandler()\n rl = logging.getLogger()\n rl.setLevel(logging.DEBUG)\n rl.addHandler(nh)\n rl.addHandler(sh)\n self.assertThat(rl.handlers, matchers.Contains(nh))\n self.assertThat(rl.handlers, matchers.Contains(sh))\n conf = config.Config()\n log.init(conf)\n self.assertThat(rl.handlers, matchers.Not(matchers.Contains(nh)))\n self.assertThat(rl.handlers, matchers.Contains(sh))\n","sub_path":"tests/test_log.py","file_name":"test_log.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"75863924","text":"import pygame\n\nWIDTH, HEIGHT = 900, 500 #высота и ширина окна\n\nWIN = pygame.display.set_mode((WIDTH, HEIGHT)) #создание окна\npygame.display.set_caption(\"Let's learn Python\") #заголовок окна\nBORDER = pygame.Rect(0, 0, WIDTH, HEIGHT)\nFPS = 60 #фпс\nVEL = 5 #движение 5 пикселей\nCHANGE = True\nCHANGE2 = True\nCAT_IMAGE_WIDTH = 128\nCAT_IMAGE_HEIGHT = 128 #размеры кота\nCAT_IMAGE = pygame.image.load(\"cat.png\") #обозначить картинку\nCAT = pygame.transform.scale(CAT_IMAGE, (128, 128)) #изменить картинку\n#CAT = pygame.transform.rotate(CAT, 90) #повернуть на 90 градусов\n\ndef cat_move(hit_box):\n global CHANGE #движение лево право\n global CHANGE2 #верх низ\n if hit_box.x >= 0 and hit_box.x + VEL < WIDTH - CAT_IMAGE_WIDTH and CHANGE == True:\n hit_box.x += VEL\n else:\n CHANGE = False\n if hit_box.x - VEL > 0 and CHANGE == False:\n hit_box.x -= VEL\n else:\n CHANGE = True\n if hit_box.y >= 0 and hit_box.y + VEL < HEIGHT - CAT_IMAGE_HEIGHT and CHANGE2 == True:\n hit_box.y += VEL\n else:\n CHANGE2 = False\n if hit_box.y - VEL > 0 and CHANGE2 == False:\n hit_box.y -= VEL\n else:\n CHANGE2 = True\n\ndef draw_windows(hit_box): #функция для отрисовки, последовательно\n WIN.fill((125, 125, 125)) #заполнить цветом\n WIN.blit(CAT, (hit_box.x, hit_box.y)) #загрузить картинку\n #pygame.draw.rect(WIN, (125, 0, 0), (0, 0, 100, 100)) #нарисовать квадрат\n #окно, цвет, координаты x и y, ширина высота\n #pygame.draw.rect(WIN, (0, 0, 0), BORDER) #квадрат границы\n pygame.display.update() #обновить окно\n\ndef main():\n hit_box = pygame.Rect(0, 0, CAT_IMAGE_WIDTH, CAT_IMAGE_HEIGHT)\n clock = pygame.time.Clock() #для работы фпс\n condition = True\n while condition:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT: #тип\n condition = False\n #keys_pressed = pygame.key.get_pressed() #кнопки, нажатые одновременно\n cat_move(hit_box)\n draw_windows(hit_box) #рисуем\n pygame.quit() #цикл обновления окна, обязательно\n\n\nif __name__ == \"__main__\": #для импортирования\n main()","sub_path":"PyGame2.py","file_name":"PyGame2.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"603390351","text":"from abtsract_robot import AbstractRobot\n\nclass Robot(AbstractRobot):\n def __init__(self):\n self.arms = 2\n self.legs = 2\n self.os = 'Linux Cyborg'\n self.ai = 'BrainSim V3.91'\n self.price = 25000\n\n def clone(self):\n robot = Robot()\n\n robot.arms = self.arms\n robot.legs = self.legs\n robot.os = self.os\n robot.ai = self.ai\n robot.price = self.price\n\n return robot","sub_path":"creational/prototype/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"582339250","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 27 23:11:33 2018\n@author: tom\n@author: joe\n\"\"\"\nimport pandas as pd\nimport random\nimport numpy as np\nfrom operator import itemgetter\nfrom datetime import datetime, timedelta\nimport math\nimport csv\nimport os.path\n\n#import numpy as np\n#from sklearn.feature_selection import VarianceThreshold\n\n# def getWeek(DataFrame):\n\n\ndef getLocation(DataFrame):\n '''\n Grabs the day and location from a dataframe generated by the csv file stored in the amazon s3 bucket.\n '''\n # filters the Day and Place column only\n filtered = DataFrame[['Place', 'Time']].copy()\n # remove rows with Nan in any column\n df = filtered.dropna()\n return df\n\n\ndef getActivity(DataFrame):\n '''\n Gets time and activity from the csv file store in the amazon s3 bucket\n '''\n # filters the Day and Place column only\n filtered = DataFrame[['Day', 'Time', 'Activity']]\n\n # remove rows with Nam in any column\n df = filtered.dropna()\n\n final = df[(df.Activity != 'walk')]\n final = final[(final.Activity != 'lesson')]\n final = final[(final.Activity != 'home time')]\n final = final[(final.Activity != 'vehicle')]\n final = final[(final.Activity != 'groceries')]\n final = final[(final.Activity != 'sleep')]\n final = final[(final.Activity != 'drinks')]\n final = final[(final.Activity != 'religion')]\n final = final[(final.Activity != 'exhibition')]\n return df\n\n\ndef getTodayLoc(DataFrame):\n '''\n This returns a dataframe of location data for one day\n grabs the last index in the file (indicating today's date)\n use index to return all the locations from today as Dataframe\n from the csv file stored in the amazon s3 bucket. \n '''\n day = int(datetime.strftime(datetime.now(), '%Y%m%d')) # \"\"\"FIXME\"\"\"\n df = DataFrame[DataFrame.Day == day]\n df = getLocation(df)\n\n return df\n\n\ndef getYesterdayLoc(DataFrame):\n '''\n get all data from yesterday\n uses datetime library to grab the all data from yesterday\n returns all the location from yesterday as a dataframe \n '''\n day = int(datetime.strftime(datetime.now(), '%Y%m%d')) # \"\"\"FIXME\"\"\"\n # print(day)\n df = DataFrame[DataFrame.Day == day]\n df = getLocation(df)\n return df\n\n\ndef checkLocList(DataFrame):\n ''' \n We return geolocations that are not from today, these locations get cleaned and double cheked in the udivs question set.\n steps------------------------------------------------------------------------------------------------# \n 1 creates a list of all the places visited in yesterday in placesVistedList\n 2 make an empty list that stores incorrect locations called inCorrect_loc\n iterate untill you have a list of 3\n 3 grab a random place from the data set, check it against the placesvisitedList\n if the random place does not exitst inside the place visted list \n append it to the inCorrect_loc list:\n else continue \n '''\n\n df = getYesterdayLoc(DataFrame)\n df = df.drop_duplicates(subset='Place', keep='first')\n df = df['Place']\n return df\n\n\n#this returns the time of place in the format HH:MM AM/PM----------------------------------------------#\ndef getHourTime(DataFrame):\n ''' This is a helper function that returns the time from a geolocation in Hours and Minutes and AM or PM'''\n date_time = DataFrame['Time'].iloc[0]\n time = datetime.strptime(date_time, '%a %b %d %H:%M:%S %Z %Y')\n hour_time = time.strftime('%I:%M %p')\n return hour_time\n\n# This grabs location 
--------------------------------------------------------------------------------- #\n\n\ndef getData(DataFrame, Amount):\n ''' This is a helper function that returns the most recent entry for the UDIVS system'''\n lastday = DataFrame.iloc[:, 1]\n lastindex = len(lastday.index)\n #count = o\n #lastIndex = Activities\n return lastday.iloc[-1]\n\n\ndef getDuration(DataFrame):\n '''function returns an array of applications used in a day each with a total duration '''\n day = int(datetime.strftime(datetime.now(), '%Y%m%d')) # \"\"\"FIXME\"\"\"\n df = data[data.Day == day]\n df = df[['Time', 'Activity', 'Duration ms']].copy()\n df = df.dropna()\n df = df[df['Activity'].str.contains(\"phone:\")]\n group = df.groupby('Activity').sum()\n return group\n\n\ndef convertms(ms):\n ''' This helper function converts the milliseconds into minutes for a question in the UDIVS system. It returns the floor minute'''\n minutes = (ms/(1000*60))\n minutes = math.floor(minutes)\n return minutes\n\n# -----------------------------------------------------------------------------------------------------#\n\n\ndef getRecentApp():\n ''' This helper function returns the most recent app used for the UDIVS system'''\n day = somDay_df['Activity'].dropna()\n for x in day[::-1]:\n # print(x)\n if \"phone:\" not in x:\n continue\n ans = x\n break\n return ans\n\n#-------------------------------------------------------------------------------------------------------#\n# get the first location that is not the current location, generate incorrect answers\n\n\ndef getRecentLocation():\n ''' Returns the most recent prior location visited by the user for the UDIVS system'''\n x = 1\n while(True):\n curLoc = somDay_df['Place'].iloc[-x]\n if pd.isna(curLoc):\n x = x+1\n else:\n break\n # print(\"curLock:\",curLoc)\n\n locData = somDay_df['Place'].dropna()\n # print(locData)\n ans = \"\"\n for x in locData[::-1]:\n if x != curLoc:\n ans = x\n break\n return ans\n\n\ndef getOptions(n):\n '''\n This is the logic to produce the questions, the incorrect answers, and the actual answer for the \n UDIVS survey.\n '''\n options = []\n q_string = '' # empty string to be returned\n # question options for \"which app did you use most recently\n if n == 0:\n # Which app did you use most recently?\n ans = getRecentApp()\n options.append(ans)\n count = 1\n q_string = 'Which app did you use most recently ?'\n # this loop gives an array of answers called options for the user to choose from\n day = somDay_df['Activity'].dropna()\n for x in day:\n flag = 0\n if \"phone:\" in x:\n for y in options:\n if x == y:\n flag = 1\n if flag == 0:\n options.append(x)\n count = count + 1\n if count == 4:\n break\n random.shuffle(options, random.random)\n return q_string, ans, options\n\n elif n == 1:\n # What place were you at most recently?\n ans = getRecentLocation()\n options.append(ans)\n count = 1\n locData = somDay_df['Place'].dropna()\n q_string = 'What place were you at most recently ?'\n # This loop gives an array of answers called options for the user to choose from\n for x in locData:\n flag = 0\n for y in options:\n if x == y:\n flag = 1\n if flag == 0:\n options.append(x)\n count = count + 1\n if count == 4:\n break\n random.shuffle(options, random.random)\n return q_string, ans, options\n\n elif n == 2:\n # which place were you at around:(time) ?\n time_loc = getTodayLoc(data)\n ans_data = time_loc.sample(n=1)\n ans = ans_data['Place'].iloc[0]\n options.append(ans)\n q_string = 'Which place were you at around ' + getHourTime(ans_data) + ' today ?'\n dummy_data = 
getLocation(data)\n        count = 1\n        while count < 4:\n            random_day = dummy_data.sample(n=1)\n            place = random_day['Place'].iloc[0]\n            flag = 0\n            for y in options:\n                if y == place:\n                    flag = 1\n            if flag == 1:\n                pass\n            else:\n                options.append(place)\n                count = count + 1\n        random.shuffle(options)\n        return q_string, ans, options\n\n    elif n == 3:\n        # Which of these places did you go to yesterday ?\n        time_loc = getYesterdayLoc(data)\n        ans_data = time_loc.sample(n=1)\n        ans = ans_data['Place'].iloc[0]\n        options.append(ans)\n        placesVisited = checkLocList(data)\n        q_string = 'Which of these places did you go to yesterday ?'\n        dummy_data = getLocation(data)\n        count = 1\n        while count < 4:\n            random_day = dummy_data.sample(n=1)\n            place = random_day['Place'].iloc[0]\n            flag = 0\n            for z in placesVisited:\n                if z == place:\n                    flag = 1\n            for y in options:\n                if y == place:\n                    flag = 1\n            if flag == 1:\n                pass\n            else:\n                options.append(place)\n                count = count + 1\n        random.shuffle(options)\n        return q_string, ans, options\n\n    elif n == 4:\n        # About how long did you use __ for ?\n        options = ['0-10 minutes', '11-20 minutes',\n                   '21-30 minutes', '+30 minutes']\n        groups = getDuration(data)\n        activity = groups.sample(n=1)\n        miliseconds = int(activity['Duration ms'])\n        minutes = convertms(miliseconds)\n        app = activity.index[0]\n\n        q_string = 'About how long did you use {} today ?'.format(app.replace('phone: ', '', 1))\n\n        if minutes <= 10:\n            ans = options[0]\n        elif minutes <= 20:\n            ans = options[1]\n        elif minutes <= 30:\n            ans = options[2]\n        else:\n            ans = options[3]\n\n        # return the same (question, answer, options) triple as every other branch;\n        # the original printed the question and returned only (ans, options)\n        return q_string, ans, options\n    elif n == 5:\n        # which app did you use most frequently today ?\n        q_string = 'Which app did you use most frequently today ?'\n        applicationList = []\n        count = 1\n        day = somDay_df['Activity'].dropna()\n        for x in day:\n            if \"phone:\" in x:\n                applicationList.append(x)\n        app_df = pd.DataFrame(data=applicationList)\n        ans = app_df[0].value_counts().idxmax()\n\n        options.append(ans)\n        for x in day:\n            flag = 0\n            if \"phone:\" in x:\n                for y in options:\n                    if x == y:\n                        flag = 1\n                if flag == 0:\n                    options.append(x)\n                    count = count + 1\n            if count == 4:\n                break\n        random.shuffle(options)\n        return q_string, ans, options\n\n\n''' \ndata = pd.read_csv('../../userdevice_data/Joe_Data/Smarter_time/timeslots.csv')\n\n# new version of filter to one day without hardcoding\nlast_index = len(data) - 1\nday = data.loc[last_index, 'Day']\nsomDay_df = data[data.Day == day]\n#-------------------------------------------------------------------------------------------------------------------------#\n'''\n'''\nThis is where the actual survey begins: we ask the user three questions from our question set.\nThis is a score fusion with random questions drawn from features of the data set.\n'''\n'''\n#-------------------------------------------------------------------------------------------------------------------------#\n\nprint(\"Welcome to Joe's Device ! 
See if you can enter!\")\nquestions=['Which app did you use most recently?','What place were you at most recently?','which place were you at around ','Which of these places did you go to yesterday?', \n 'How long were you on this app?','Which app did you use most frequently today?']\nrandomNums=random.sample(range(0,6),3)\nprint(randomNums)\n# Ask the user if they are genuine or an imposter to collect the data properly\nuser = 2\ngenuine = True\nwhile(user !=1 and user !=0):\n print(\"Are you a genuine(1)user or an imposter(0)?\")\n user =int(input(\"0: imposter\\n1: genuine\\n\"))\n print(user)\n if (user == 0):\n genuine = False\n else:\n genuine =True\n \nscore = 0\ncount = 1\nfor n in randomNums:\n ans,options = getOptions(n)\n #print(ans) # This is where we normaly print the answer for debugging\n for o in options:\n print(count,\". \",o)\n count = count+1\n userAns=int(input(\"input answer here: \")) # Utilize Switch CasegetOptions(n)\n if genuine:\n user = 'genuine'\n else:\n user = 'imposter'\n Q_Num = n + 1\n file = open('../raw_scores/question' + str(Q_Num) + '_' + user + '.csv','a')\n writer = csv.writer(file)\n if ans == options[userAns-1]:\n score = score + 1\n Qdata = [1] \n writer.writerow(Qdata)\n else:\n Qdata = [0]\n writer.writerow(Qdata)\n file.close() \n count = 1\n\nif genuine:\n user = 'genuine'\nelse:\n user = 'imposter'\n\n# This will write the score to the appropriate file\nscores = [score]\nfile = open('../raw_scores/survey_score_'+user+'.csv','a')\nwriter = csv.writer(file)\nwriter.writerow(scores)\nfile.close()\n\n#------------------------------------------------------------------------------ This is where the data analysis goes-------------------------------------------#\n'''\n\n'''\nThis section of code is to to produce the False Reject Rate, The False Acceptance Rate,\nand True Reject Rate, True Accept Rate for the total system as well as analysis on each question'''\n'''\n\n# Generate genuine and imposter scores with the seed at 1\ngenuine_scores = pd.read_csv('../raw_scores/survey_score_genuine.csv')\nimposter_scores = pd.read_csv('../raw_scores/survey_score_imposter.csv')\n\nQ1_gen = pd.read_csv('../raw_scores/question1_genuine.csv')\nQ1_imp = pd.read_csv('../raw_scores/question1_imposter.csv')\n\nQ2_gen = pd.read_csv('../raw_scores/question2_genuine.csv')\nQ2_imp = pd.read_csv('../raw_scores/question2_imposter.csv')\n\nQ3_gen = pd.read_csv('../raw_scores/question3_genuine.csv')\nQ3_imp = pd.read_csv('../raw_scores/question3_imposter.csv')\n\nQ4_gen = pd.read_csv('../raw_scores/question4_genuine.csv')\nQ4_imp = pd.read_csv('../raw_scores/question4_imposter.csv')\n\nQ5_gen = pd.read_csv('../raw_scores/question5_genuine.csv')\nQ5_imp = pd.read_csv('../raw_scores/question5_imposter.csv')\n\n'''\n","sub_path":"questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":14343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"427275024","text":"# coding: utf-8\n# Author: 阿财(Rgveda@github)(11652964@qq.com)\n# Created date: 2020-02-27\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2018 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nBitfinex api\n具体api文档参考:https://docs.bitfinex.com/docs\n\"\"\"\nimport requests\nimport json\nimport datetime\nimport time\nfrom dateutil.tz import tzutc\nimport pandas as pd\nimport numpy as np\nfrom dateutil.parser import parse\nfrom dateutil.relativedelta import relativedelta\nfrom requests.exceptions import ConnectTimeout, SSLError, ReadTimeout, ConnectionError\nfrom retrying import retry\nfrom urllib.parse import urljoin\n\nfrom QUANTAXIS.QAUtil.QADate_Adv import (\n QA_util_timestamp_to_str,\n QA_util_datetime_to_Unix_timestamp,\n QA_util_timestamp_to_str,\n QA_util_print_timestamp,\n)\nfrom QUANTAXIS.QAUtil import (\n QA_util_log_info,\n)\n\nTIMEOUT = 10\nILOVECHINA = \"同学!!你知道什么叫做科学上网么? 
如果你不知道的话,那么就加油吧!蓝灯,喵帕斯,VPS,阴阳师,v2ray,随便什么来一个!我翻墙我骄傲!\"\nBitfinex_base_url = \"https://api-pub.bitfinex.com/\"\n\ncolumn_names = [\n    'start_time',\n    'open',\n    'high',\n    'low',\n    'close',\n    'volume',\n    'close_time',\n    'quote_asset_volume',\n    'num_trades',\n    'buy_base_asset_volume',\n    'buy_quote_asset_volume',\n    'Ignore'\n]\n\n\"\"\"\nMapping between the QUANTAXIS and Bitfinex frequency constants\n\"\"\"\nBitfinex2QA_FREQUENCY_DICT = {\n    \"1m\": '1min',\n    \"5m\": '5min',\n    \"15m\": '15min',\n    \"30m\": '30min',\n    \"1h\": '60min',\n    \"1d\": 'day',\n}\n\"\"\"\nBitfinex only returns up to 200 bars per request; a time range beyond that yields only the latest 200 bars\n\"\"\"\nFREQUENCY_SHIFTING = {\n    \"60\": 12000,\n    \"300\": 60000,\n    \"900\": 180000,\n    \"1800\": 360000,\n    \"3600\": 720000,\n    \"86400\": 17280000\n}\n\n\ndef format_bitfinex_data_fields(datas, symbol, frequency):\n    \"\"\"\n    Normalize the data fields: convert and fill the required fields, drop the extras.\n    Field       Type    Description\n    start_time  String  opening time\n    open        String  open price\n    high        String  high price\n    low         String  low price\n    close       String  close price\n    volume      String  traded volume\n    \"\"\"\n    frame = pd.DataFrame(datas, columns=column_names)\n    frame['symbol'] = 'BITFINEX.{}'.format(symbol)\n    # Convert UTC to Beijing time. The incoming data is sometimes tz-aware and sometimes\n    # non tz-aware; neither a plain tz_localize nor a plain tz_convert works on its own.\n    # dt.tz_localize(None) is the Stack Overflow workaround; keep an eye on how it behaves.\n    frame['datetime'] = pd.to_datetime(\n        frame['start_time']\n    ).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai')\n    frame['date'] = frame['datetime'].dt.strftime('%Y-%m-%d')\n    frame['datetime'] = frame['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')\n    # convert the GMT+0 string to a UTC timestamp\n    frame['time_stamp'] = pd.to_datetime(frame['start_time']\n                                         ).astype(np.int64) // 10**9\n    frame['date_stamp'] = pd.to_datetime(\n        frame['date']\n    ).dt.tz_localize('Asia/Shanghai').astype(np.int64) // 10**9\n    frame['created_at'] = int(\n        time.mktime(datetime.datetime.now().utctimetuple())\n    )\n    frame['updated_at'] = int(\n        time.mktime(datetime.datetime.now().utctimetuple())\n    )\n    # the source column is named 'start_time' in column_names, not 'time'\n    frame.drop(['start_time'], axis=1, inplace=True)\n    frame['trade'] = 1\n    frame['amount'] = frame.apply(\n        lambda x: float(x['volume']) *\n        (float(x['open']) + float(x['close'])) / 2,\n        axis=1\n    )\n    if (frequency not in ['1day', 'day', '86400', '1d']):\n        frame['type'] = Bitfinex2QA_FREQUENCY_DICT[frequency]\n    return frame\n\n\n@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)\ndef QA_fetch_bitfinex_symbols():\n    \"\"\"\n    Fetch the list of trading pairs along with each pair's trading limits, price step and so on.\n    Rate limit: 20 requests / 2s\n    HTTP request: GET /api/v1/exchangeInfo\n    \"\"\"\n    url = urljoin(Bitfinex_base_url, \"/api/v1/exchangeInfo\")\n    retries = 1\n    datas = list()\n    while (retries != 0):\n        try:\n            req = requests.get(url, timeout=TIMEOUT)\n            retries = 0\n        except (ConnectTimeout, ConnectionError, SSLError, ReadTimeout):\n            retries = retries + 1\n            if (retries % 6 == 0):\n                print(ILOVECHINA)\n            print(\"Retry /api/v1/exchangeInfo #{}\".format(retries - 1))\n            time.sleep(0.5)\n\n    if (retries == 0):\n        # only process the data after a successful fetch; otherwise keep retrying\n        symbol_lists = json.loads(req.content)\n        if len(symbol_lists) == 0:\n            return []\n        for symbol in symbol_lists:\n            datas.append(symbol)\n\n    return datas\n\n\n@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)\ndef QA_fetch_bitfinex_kline_with_auto_retry(\n    symbol,\n    start_time,\n    end_time,\n    frequency,\n):\n    \"\"\"\n    Get the symbol's candlestick data, raw method.\n    K-line data comes back grouped by the requested granularity, at most 200 bars per\n    request (the 2000 in the official docs is wrong). Rate limit: 20 requests / 2s\n    \"\"\"\n    # NOTE: base url fixed from the undefined OKEx_base_url; the endpoint path itself\n    # is still the one inherited from the OKEx variant this module was adapted from\n    url = urljoin(\n        Bitfinex_base_url,\n        \"/api/spot/v3/instruments/{:s}/candles\".format(symbol)\n    )\n    retries = 1\n    while (retries != 0):\n        try:\n            
start_epoch = datetime.datetime.fromtimestamp(\n                start_time,\n                tz=tzutc()\n            )\n            end_epoch = datetime.datetime.fromtimestamp(end_time, tz=tzutc())\n            req = requests.get(\n                url,\n                params={\n                    \"granularity\": frequency,\n                    \"start\": start_epoch.isoformat().replace(\"+00:00\", \"Z\"),  # ISO time string ending in Z\n                    \"end\": end_epoch.isoformat().replace(\"+00:00\", \"Z\")  # ISO time string ending in Z\n                },\n                timeout=TIMEOUT\n            )\n            # brief pause so the connection is not dropped for requesting too fast\n            time.sleep(0.5)\n            retries = 0\n        except (ConnectTimeout, ConnectionError, SSLError, ReadTimeout):\n            retries = retries + 1\n            if (retries % 6 == 0):\n                print(ILOVECHINA)\n            print(\"Retry /api/spot/v3/instruments #{}\".format(retries - 1))\n            time.sleep(0.5)\n\n    if (retries == 0):\n        # only process the data after a successful fetch; otherwise keep retrying\n        msg_dict = json.loads(req.content)\n\n        if ('error_code' in msg_dict):\n            print('Error', msg_dict)\n            return None\n\n        return msg_dict\n\n    return None\n\n\ndef QA_fetch_bitfinex_kline(\n    symbol,\n    start_time,\n    end_time,\n    frequency,\n    callback_func=None\n):\n    \"\"\"\n    Get the symbol's candlestick data.\n    Reverse-time slicing is the workhorse for fetching 1min data from the big exchanges:\n    a direct request for 1min data spanning months or years comes back empty, and Huobi,\n    Binance and OKEx alike return only the most recent 200 bars for such a request. Only\n    by slicing [start_epoch, end_epoch] into windows of fewer than 200/300 bars does the\n    kline come back correctly.\n    \"\"\"\n    datas = list()\n    reqParams = {}\n    reqParams['from'] = end_time - FREQUENCY_SHIFTING[frequency]\n    reqParams['to'] = end_time\n\n    while (reqParams['to'] > start_time):\n        if ((reqParams['from'] > QA_util_datetime_to_Unix_timestamp())) or \\\n            ((reqParams['from'] > reqParams['to'])):\n            # a 'future' timestamp usually means a default-timezone issue or a window that slid the wrong way\n            QA_util_log_info(\n                'An unexpected \\'Future\\' timestamp got, Please check self.missing_data_list_func param \\'tzlocalize\\' set. More info: {:s}@{:s} at {:s} but current time is {}'\n                .format(\n                    symbol,\n                    frequency,\n                    QA_util_print_timestamp(reqParams['from']),\n                    QA_util_print_timestamp(\n                        QA_util_datetime_to_Unix_timestamp()\n                    )\n                )\n            )\n            # skip to the next time slice\n            reqParams['to'] = int(reqParams['from'] - 1)\n            reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])\n            continue\n\n        # fixed from the undefined QA_fetch_okex_kline_with_auto_retry\n        klines = QA_fetch_bitfinex_kline_with_auto_retry(\n            symbol,\n            reqParams['from'],\n            reqParams['to'],\n            frequency,\n        )\n        if (klines is None) or \\\n            (len(klines) == 0) or \\\n            ('error' in klines):\n            # give up on error\n            break\n\n        reqParams['to'] = int(reqParams['from'] - 1)\n        reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])\n\n        if (klines is None) or \\\n            ((len(datas) > 0) and (klines[-1][0] == datas[-1][0])):\n            # no more data\n            break\n\n        datas.extend(klines)\n\n        if (callback_func is not None):\n            frame = format_bitfinex_data_fields(klines, symbol, frequency)\n            callback_func(frame, Bitfinex2QA_FREQUENCY_DICT[frequency])\n\n    if len(datas) == 0:\n        return None\n\n    # normalize the data fields: convert and fill the required fields, drop the extras\n    frame = format_bitfinex_data_fields(datas, symbol, frequency)\n    return frame\n\n\ndef QA_fetch_bitfinex_kline_min(\n    symbol,\n    start_time,\n    end_time,\n    frequency,\n    callback_func=None\n):\n    \"\"\"\n    Get the symbol's candlestick data with time slices.\n    Same reverse-time slicing algorithm as above; requesting tens of thousands of minute\n    bars across a month or year boundary in one call never returns anything useful.\n    \"\"\"\n    reqParams = {}\n    reqParams['from'] = end_time - FREQUENCY_SHIFTING[frequency]\n    reqParams['to'] = end_time\n\n    requested_counter = 1\n    datas = list()\n    while (reqParams['to'] > start_time):\n        if ((reqParams['from'] > QA_util_datetime_to_Unix_timestamp())) or \\\n            ((reqParams['from'] > reqParams['to'])):\n            # a 'future' timestamp usually means a default-timezone issue or a window that slid the wrong way\n            
QA_util_log_info(\n                'An unexpected \\'Future\\' timestamp got, Please check self.missing_data_list_func param \\'tzlocalize\\' set. More info: {:s}@{:s} at {:s} but current time is {}'\n                .format(\n                    symbol,\n                    frequency,\n                    QA_util_print_timestamp(reqParams['from']),\n                    QA_util_print_timestamp(\n                        QA_util_datetime_to_Unix_timestamp()\n                    )\n                )\n            )\n            # skip to the next time slice\n            reqParams['to'] = int(reqParams['from'] - 1)\n            reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])\n            continue\n\n        # fixed from the undefined QA_fetch_okex_kline_with_auto_retry\n        klines = QA_fetch_bitfinex_kline_with_auto_retry(\n            symbol,\n            reqParams['from'],\n            reqParams['to'],\n            frequency,\n        )\n        if (klines is None) or \\\n            (len(klines) == 0) or \\\n            ('error' in klines):\n            # give up on error\n            break\n\n        reqParams['to'] = int(reqParams['from'] - 1)\n        reqParams['from'] = int(reqParams['from'] - FREQUENCY_SHIFTING[frequency])\n\n        if (callback_func is not None):\n            frame = format_bitfinex_data_fields(klines, symbol, frequency)\n            callback_func(frame, Bitfinex2QA_FREQUENCY_DICT[frequency])\n\n        if (len(klines) == 0):\n            return None\n\n\nif __name__ == '__main__':\n    # url = urljoin(Bitfinex_base_url, \"/api/v1/exchangeInfo\")\n    # print(url)\n    # a = requests.get(url)\n    # print(a.content)\n    # print(json.loads(a.content))\n    import pytz\n    from dateutil.tz import *\n\n    tz = pytz.timezone(\"Asia/Shanghai\")\n    url = urljoin(Bitfinex_base_url, \"/api/v1/klines\")\n    start = time.mktime(\n        datetime.datetime(2018,\n                          6,\n                          13,\n                          tzinfo=tzutc()).timetuple()\n    )\n    end = time.mktime(\n        datetime.datetime(2018,\n                          6,\n                          14,\n                          tzinfo=tzutc()).timetuple()\n    )\n    print(start * 1000)\n    print(end * 1000)\n    # FREQUENCY_SHIFTING is keyed by seconds, so pass '86400' rather than '1d'\n    data = QA_fetch_bitfinex_kline(\"ETHBTC\", start, end, '86400')\n    print(len(data))\n    print(data.iloc[0])   # the result is a DataFrame, so index rows with iloc\n    print(data.iloc[-1])\n","sub_path":"QUANTAXIS/QAFetch/QABitfinex.py","file_name":"QABitfinex.py","file_ext":"py","file_size_in_byte":13813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
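# Illustrative sketch (annotation, not part of the dataset record above). The docstrings
# in QABitfinex.py describe reverse-time slicing: because the exchange caps one request
# at ~200 bars, the [start, end] range is walked backwards one fixed-size window at a
# time. A minimal dependency-free version with a stubbed fetch (fetch_window is
# hypothetical, standing in for the real HTTP call):
WINDOW = 12000  # seconds covered by one request at 1min granularity (200 bars * 60s)

def fetch_window(frm: int, to: int) -> list:
    # stand-in for the network call; returns fake (timestamp, close) bars
    return [(t, 0.0) for t in range(frm, to + 1, 60)]

def fetch_sliced(start: int, end: int) -> list:
    bars = []
    frm, to = end - WINDOW, end
    while to > start:
        bars.extend(fetch_window(max(frm, start), to))
        to = frm - 1           # slide the window one step further into the past
        frm = frm - WINDOW
    return bars

# Example: fetch_sliced(0, 36000) walks three windows of 1min bars, newest first.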
+{"seq_id":"445605970","text":"import os\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom typing import Union, Optional, Dict, Any\n\nimport numpy as np\nfrom boardgame2 import BoardGameEnv\nfrom stable_baselines3 import PPO\n\nfrom multi_process_mcts import MultiProcessMonteCarlo, model_policy\nfrom reversi_state import CustomReversiState\n\n\nclass BasePlayer(ABC):\n def __init__(self,\n player: int = 1,\n env: BoardGameEnv = None,\n flatten_action: bool = False,\n name: str = None\n ):\n self.id = uuid.uuid4()\n self.name = name if name is not None else self.__class__.__name__\n self.env = env\n self.player = player # player number. 1 o -1\n self.flatten_action = flatten_action\n self.board_shape = self.env.board.shape[0]\n\n @abstractmethod\n def predict(self, board: np.ndarray) -> Union[int, np.ndarray]:\n \"\"\"\n Returns the action to play given a board.\n :param board: Numpy array of board_shape x board_shape with current board\n :return: Numpy array of dimension 2 with row and column to play if flatten_action is False.\n If flatten_action is True, it returns an int with the slot number.\n \"\"\"\n\n def __str__(self):\n return self.name\n\n def __eq__(self, other):\n return self.id == other.id\n\n\nclass GreedyPlayer(BasePlayer):\n def __init__(self,\n player: int = 1,\n env: BoardGameEnv = None,\n flatten_action: bool = False,\n **custom_kwargs: Optional[Dict[str, Any]] # Make subclass constructor generic\n ):\n super().__init__(player, env, flatten_action)\n\n def predict(self, board: np.ndarray) -> Union[int, np.ndarray]:\n valid_actions = np.argwhere(self.env.get_valid((board, self.player)) == 1)\n if len(valid_actions) == 0:\n action = self.env.PASS\n else:\n moves_score = []\n for a in valid_actions:\n next_state, _, _, _ = self.env.next_step((board, self.player), a)\n moves_score.append(next_state[0].sum() * self.player)\n best_score = max(moves_score)\n best_actions = valid_actions[np.array(moves_score) == best_score]\n action = best_actions[np.random.randint(len(best_actions))]\n if self.flatten_action:\n return action[0] * self.board_shape + action[1]\n else:\n return action\n\n\nclass RandomPlayer(BasePlayer):\n def __init__(self,\n player: int = 1,\n env: BoardGameEnv = None,\n flatten_action: bool = False,\n **custom_kwargs: Optional[Dict[str, Any]] # Make subclass constructor generic\n ):\n super().__init__(player, env, flatten_action)\n\n def predict(self, board: np.ndarray) -> Union[int, np.ndarray]:\n valid_actions = np.argwhere(self.env.get_valid((board, self.player)) == 1)\n if len(valid_actions) == 0:\n action = self.env.PASS\n else:\n action = valid_actions[np.random.randint(len(valid_actions))]\n if self.flatten_action:\n return action[0] * self.board_shape + action[1]\n else:\n return action\n\n\nclass DictPolicyPlayer(BasePlayer):\n def __init__(self,\n player: int = 1,\n env: BoardGameEnv = None,\n flatten_action: bool = False,\n dict_folder: str = 'mdp/pi_func_only_winner.npy',\n **custom_kwargs: Optional[Dict[str, Any]] # Make subclass constructor generic\n ):\n super().__init__(player, env, flatten_action)\n self.pi_dict = np.load(dict_folder, allow_pickle=True).item()\n\n def predict(self, board: np.ndarray) -> Union[int, np.ndarray]:\n board_tuple = tuple((board * self.player).reshape(-1))\n action = self.pi_dict[board_tuple]\n if self.flatten_action:\n return action\n else:\n return np.array([action // self.board_shape, action % self.board_shape])\n\n\nclass TorchPlayer(BasePlayer):\n def __init__(self,\n player: int = 1,\n env: BoardGameEnv = 
None,\n flatten_action: bool = False,\n model_path: str = None,\n deterministic: bool = True,\n only_valid: bool = True,\n mcts: bool = False,\n levelLimit: int = None,\n device: str = 'auto',\n mtcs_n_processes: int = None,\n **custom_kwargs: Optional[Dict[str, Any]] # Make subclass constructor generic\n ):\n\n if model_path is None:\n raise Exception(\"model_path cannot be None\")\n\n super().__init__(player, env, flatten_action, os.path.splitext(os.path.basename(model_path))[0])\n\n self.model = PPO.load(model_path, device=device)\n self.model_path = model_path\n self.deterministic = deterministic\n self.only_valid = only_valid\n self.mcts = mcts\n self.levelLimit = levelLimit\n self.mtcs_n_processes = mtcs_n_processes\n\n def predict(self, board: np.ndarray) -> Union[int, np.ndarray]:\n if self.mcts:\n action = self._get_action_with_mcts(board)\n action = action.action\n if self.flatten_action:\n return action[0] * self.board_shape + action[1]\n else:\n return action\n else:\n obs = self.player * board\n if self.only_valid:\n obs = [obs, self.env.get_valid((obs, 1))]\n # The model expects a batch of observations.\n # Make a batch of 1 obs\n obs = [obs]\n action = self.model.predict(obs, deterministic=self.deterministic)[0]\n\n if self.flatten_action:\n return action\n else:\n return np.array([action // self.board_shape, action % self.board_shape])\n\n def get_model_path(self):\n return self.model_path\n\n def _get_action_with_mcts(self, board: np.ndarray) -> Union[int]:\n searcher = MultiProcessMonteCarlo(levelLimit=self.levelLimit,\n n_processes=self.mtcs_n_processes,\n explorationConstant=0.0,\n rolloutPolicy=model_policy(self.model))\n\n state = CustomReversiState(self.env, (board, self.player))\n return searcher.search(initialState=state)\n\n def __str__(self):\n monte_carlo = f\"- MCTS\" if self.mcts else \"\"\n return f\"{self.__class__.__name__}({self.name}{monte_carlo})\"\n","sub_path":"players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
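# Illustrative sketch (annotation, not part of the dataset record above). GreedyPlayer
# in players.py scores each legal move by the resulting piece differential and breaks
# ties uniformly at random. The same selection pattern in isolation, with precomputed
# scores instead of env.next_step():
import numpy as np

def pick_greedy(actions: np.ndarray, scores: list) -> np.ndarray:
    """Return one of the highest-scoring actions, chosen uniformly at random."""
    best = max(scores)
    best_actions = actions[np.array(scores) == best]   # boolean mask keeps all ties
    return best_actions[np.random.randint(len(best_actions))]

# Example: pick_greedy(np.array([[0, 1], [2, 3], [1, 1]]), scores=[5, 5, 2])
# returns either [0, 1] or [2, 3], never the lower-scoring [1, 1].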
+{"seq_id":"417833821","text":"import sys\nimport logging\n\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom bokeh.embed import components\n\nimport content.data\n\napp = Flask(__name__)\napp.debug = True\napp.vars = dict()\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n@app.route('/')\ndef main():\n return redirect('/index')\n\n\n@app.route('/index', methods=['GET','POST'])\ndef index():\n if request.method == 'GET':\n return render_template('index.html')\n else:\n ticker = request.form['ticker']\n columns = request.form.getlist('features')\n\n if ticker and columns:\n app.logger.info('requested symbol: ' + ticker)\n app.logger.info('requested columns: ' + ', '.join(columns))\n app.vars['symbol'] = ticker\n app.vars['columns'] = columns\n return redirect(url_for('graph'))\n else:\n return redirect(url_for('error'))\n\n\n@app.route('/error')\ndef error():\n return render_template('error.html')\n\n\n@app.route('/graph')\ndef graph():\n symbol = app.vars['symbol']\n columns = app.vars['columns']\n try:\n status, result = content.data.fetch(symbol)\n if status:\n chart = content.data.plot(result, symbol, columns)\n script, div = components(chart)\n table = content.data.summarize(result, columns).to_html()\n return render_template('graph.html', script=script, div=div, table=table,\n message='Generated graph for ' + symbol)\n else:\n app.logger.error('Data for symbol {0} could not be downloaded: {1}'.format(symbol, result))\n return redirect(url_for('error'))\n except Exception as e:\n app.logger.error('Unexpected Error: ' + e.message)\n return redirect(url_for('error'))\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"597182740","text":"import socket\nimport threading\nimport sys\n\ndef read_msg(sock_cli):\n while True:\n data = sock_cli.recv(65535)\n if len(data) ==0:\n break\n print(data)\n\nsock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock_client.connect((\"127.0.0.1\",6666))\n\nsock_client.send(bytes(sys.argv[1], \"utf-8\"))\n\nthread_client = threading.Thread(target=read_msg, args=(sock_client,))\nthread_client.start()\n\nwhile True:\n dest = input(\"Masukkan username tujuan (ketikkan bcast untuk broadcast):\")\n msg = input(\"Masukkan pesan:\")\n\n if msg == \"exit\":\n sock_client.close()\n break\n sock_client.send(bytes(\"{}|{}\".format(dest, msg), \"utf-8\"))\n","sub_path":"chat_client.py","file_name":"chat_client.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"413125576","text":"from random import randint, random, sample\nfrom time import sleep\nfrom cqc.pythonLib import CQCConnection, qubit\n\n\nM = 8\nN = 2\nwait = 2 \nresults_of_qubit = [ [] for i in range(M) ]\naccepted_qubits_from_client = []\n\ndef merchants():\n with CQCConnection(\"Bob\") as Bob:\n \n for serial in range(M): \n for j in range(N):\n q1 = Bob.recvQubit()\n q2 = Bob.recvQubit()\n random_bit = randint(0,1)\n if random_bit == 1:\n q1.H()\n q2.H()\n m1 = q1.measure()\n m2 = q2.measure()\n results_of_qubit[j].append((m1, m2,random_bit))\n for serial in range(M): \n for j in range(N):\n sleep(wait) \n Bob.sendClassical(\"Alice\", results_of_qubit[j][serial]) \n print(\"Now the merchant sent the outcomes and basis to the bank: \",results_of_qubit[j][serial] )\n sleep(wait)\n print(\"Now the merchant sent all\") \n \n \nif __name__ == \"__main__\":\n merchants()\n","sub_path":"QuantumToken/QuantumTokenMerchant.py","file_name":"QuantumTokenMerchant.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"8589204","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport PIL.Image as Image\nimport tensorflow_hub as hub\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# mobilenet_v2\ndef get_image_classifier():\n url = 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4'\n\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Input(shape=[224, 224, 3]))\n model.add(hub.KerasLayer(url))\n model.summary()\n\n labels_path = tf.keras.utils.get_file(\n 'ImageNetLabels.txt',\n 'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt'\n )\n\n # labels = open(labels_path).read().split() # 두 단어로 된 레이블이 있어서 실패\n # labels = open(labels_path).read().split('\\n') # 마지막에 빈 줄 있어서 실패\n # print(labels[:3], labels[-3:]) # ['background', 'tench', 'goldfish'] ['ear', 'toilet tissue', '']\n # print(len(labels)) # 1002\n\n labels = open(labels_path).read().splitlines()\n # print(len(labels)) # 1001\n # labels = open(labels_path).readlines() # 개행문자 포함\n # labels = [w.strip() for w in labels]\n # print(labels) # ['background', 'tench', 'goldfish', ...]\n\n return model, np.array(labels)\n\n\ndef classify_image():\n img_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg'\n img_path = tf.keras.utils.get_file('grace_hopper.jpg', img_url)\n # print(img_path) # /Users/jeonghoonkim/.keras/datasets/grace_hopper.jpg\n\n # 문제\n # 다운로드한 이미지를 그래프로 그려보세요\n img_hopper = Image.open(img_path).resize([224, 224])\n # print(img_hopper) # \n\n # plt.imshow(img_hopper)\n # plt.show()\n\n array_hopper = np.array(img_hopper)\n print(array_hopper.shape) # (224, 224, 3)\n\n plt.subplot(1, 2, 1)\n plt.title('original')\n plt.imshow(array_hopper)\n\n print(np.min(array_hopper), np.max(array_hopper)) # 0 255\n\n scaled_hopper = array_hopper / 255 # minmax scaling\n # scaled_hopper = array_hopper / 510\n # scaled_hopper = array_hopper / 127\n\n model, labels = get_image_classifier()\n # preds = model.predict([array_hopper]) # error\n # preds = model.predict(array_hopper[np.newaxis])\n # preds = model.predict(array_hopper.reshape(1, 224, 224, 3))\n # print(preds.shape) # (1, 1001)\n #\n # preds_arg = np.argmax(preds[0])\n # print(preds_arg, labels[preds_arg]) # 722 pillow\n\n preds = model.predict(scaled_hopper.reshape(1, 224, 224, 3))\n preds_arg = np.argmax(preds[0])\n print(preds_arg, labels[preds_arg]) # 653 military uniform\n\n plt.subplot(1, 2, 2)\n plt.title('scaled: {}'.format(labels[preds_arg]))\n plt.imshow(scaled_hopper)\n plt.show()\n\n\n# 문제\n# 제너레이터를 사용해서 꽃 데이터 32개를 예측하세요\ndef classify_by_generator():\n img_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'\n img_path = tf.keras.utils.get_file('flower_photos', img_url, untar=True)\n # print(img_path) # /Users/jeonghoonkim/.keras/datasets/flower_photos\n\n data_gen = ImageDataGenerator(rescale=1/255)\n\n batch_size = 32\n data_flow = data_gen.flow_from_directory(\n img_path,\n batch_size=batch_size,\n target_size=(224, 224),\n class_mode='sparse'\n )\n\n # for take in data_flow:\n # print(type(take), len(take)) # 2\n\n # for xx, yy in data_flow:\n # print(xx.shape, yy.shape) # (32, 224, 224, 3) (32,)\n\n xx, yy = data_flow.next()\n # print(xx.shape, yy.shape) # (32, 224, 224, 3) (32,)\n\n model, labels = get_image_classifier()\n preds = model.predict(xx)\n print(preds.shape) # (32, 1001)\n\n preds_arg = np.argmax(preds, axis=1)\n print(preds_arg) # [946 884 717 986 986 320 ...]\n 
print(labels[preds_arg])  # ['bell pepper' 'vase' 'picket fence' 'daisy' ...]\n    print(yy[:5])  # [3. 4. 1. 1. 0.]\n\n    # Exercise:\n    # draw the prediction results in one figure, 8 per row over 4 rows;\n    # show each image used for prediction with its predicted label above it\n\n    for i, (img, pred) in enumerate(zip(xx, preds_arg)):\n        # print(i, img.shape, pred)\n        plt.subplot(4, 8, i+1)\n        plt.title(labels[pred])\n        plt.axis('off')\n        plt.imshow(img)\n\n    plt.show()\n\n\n# classify_image()\nclassify_by_generator()\n\n\n# Workaround when the following error occurs\n# OSError: SavedModel file does not exist at:\n# C:\\Users\\308\\AppData\\Local\\Temp\\tfhub_modules\\426589ad685896ab7954855255a52db3442cb38d/{saved_model.pbtxt|saved_model.pb}\n\n# Cause (the folder below is the cache folder the downloaded model uses, always the same one)\n# no files were created inside the 426589ad685896ab7954855255a52db3442cb38d folder\n\n# Fix\n# copy that folder from a machine where it was populated correctly and paste it in\n\n# Folder location (308 is the user id)\n# C:\\Users\\308\\AppData\\Local\\Temp\\tfhub_modules\n","sub_path":"Keras - DeepLearning/26. tfhub.py","file_name":"26. tfhub.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
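# Illustrative sketch (annotation, not part of the dataset record above). The
# grace_hopper experiment in the record hinges on two preprocessing details: MobileNetV2
# expects pixels scaled to [0, 1], and predict() wants a leading batch axis. In
# isolation, with a random stand-in image:
import numpy as np

img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)  # fake image
scaled = img / 255.0                 # minmax scaling into [0, 1]
batch = scaled[np.newaxis]           # (224, 224, 3) -> (1, 224, 224, 3)
print(batch.shape, batch.min(), batch.max())
# Feeding the raw uint8 array instead of `scaled` is exactly what produced the
# wrong 'pillow' label in the record above.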
+{"seq_id":"476751148","text":"# Author: Sai Uday Shankar\n# Email: skorlimarla@unomaha.edu\n\nfrom pymongo import MongoClient\nfrom datetime import date\nimport shutil\nimport os\nimport sys\nimport re\n\n\n# Pop goes the database\n# casual reference to\n# James Patterson's pop goes the weasel\n# Do read that book, not this code!\n'''\nmongo_location:\n Picks mongo URL dynamically from current user's home\n directory. It is expected that the user has\n a directory .netscan with a file netScan.conf.\n Also it is expected that netScan.conf has mongo db url\n'''\ndef mongo_location():\n mongo_url = ''\n with open(os.path.expanduser(\"~/\"+'/.netScan/netScan.conf')) as conf:\n for line in conf:\n if 'mongo_url' in line:\n mongo_url= line.split(\" \")[1]\n else:\n mongo_url = 'mongodb://localhost:27017/'\n return mongo_url\n\n# client that will talk to the database\nclient = MongoClient(mongo_location())\n\n# netScanDB Database for netScan\ncpe_db = client['netScanDB']\n\n# The above mongoDB will have it's first cpe_collection\n# CPEs from NVD - Let's call it cpe_collection\ncpe_collection = cpe_db.cpe_collection\n\n# Method for dropping CPE collection\n'''\ndrop_collection:\n Method for dropping collections\n pass collection name to be dropped\n Ex: drop_collection(cpe_collection)\n'''\ndef drop_cpe_collection():\n cpe_collection.drop()\n if get_cpes_count() == 0:\n print(\"[+] {} dropped\".format(cpe_collection))\n\n# Generic method for inserting elements into a colleciton\n# use bulk inserts for inserting many things at once\n'''\ndb_insert(cpe_collection, \"cpe:/a:apache:http_server:2.2.0\")\n pass a collections name and what is to be posted into\n the collection\n'''\n\ndef db_insert(post):\n preq = cpe_collection.find_one(post)\n post = post\n if preq is None:\n post_id = cpe_collection.insert_one(post).inserted_id\n return post_id\n else:\n print(\"[-] Existing record\")\n print(preq)\n return None\n\ndef db_insert_bulk(multi_posts):\n multi_posts = multi_posts\n post_ids = cpe_collection.insert_many(multi_posts)\n # Huge list will be retured\n # If lot of posts are inserted into\n # the database\n return post_ids.inserted_ids\n\n# Count items in a collection\n'''\nget_cpes_count\n posts in cpe_collection\n'''\ndef get_cpes_count():\n return cpe_collection.count()\n","sub_path":"netscan/dbSchema.py","file_name":"dbSchema.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"352279961","text":"from collections import deque\nfrom queue import PriorityQueue\nfrom Node import Node\n\nfrom itertools import count\n\nclass MyException(Exception):\n '''Raising my own exception to handle depth limited search max depth reached conditions'''\n\nclass UninformedSearch:\n def __init__(self, start_node: Node):\n super().__init__()\n self.start_node = start_node\n\n def depth_first(self):\n frontier = deque()\n explored_set = set()\n\n frontier.append(self.start_node)\n while(len(frontier) > 0):\n top = frontier.pop()\n explored_set.add(top.get_string())\n\n for action in top.actions:\n newNode = top.child_node(action)\n if newNode.get_string() in explored_set:\n continue\n if newNode.is_goalstate():\n return newNode.path_to_goal()\n frontier.append(newNode)\n print(\"DepthFirstSearch failed to reach the given goal state.\")\n return [], []\n\n def depth_limited(self, max_depth):\n frontier = deque()\n frontier.append((self.start_node, 0))\n explored_set = {}\n max_depth_reached = False\n\n while(len(frontier) > 0):\n top, depth = frontier.pop()\n explored_set[top.get_string()] = depth\n if(depth >= max_depth):\n max_depth_reached = True\n continue\n \n for action in top.actions:\n newNode = top.child_node(action)\n newNode_str = newNode.get_string()\n if newNode_str in explored_set and explored_set[newNode_str] < (depth+1):\n continue\n if newNode.is_goalstate():\n return newNode.path_to_goal()\n frontier.append((newNode, depth+1))\n if max_depth_reached:\n raise MyException('Maximum Depth reached while searching. Try again with bigger max_depth.')\n else:\n print(\"DepthLimitedSearch failed to reach the given goal state.\")\n return [], []\n\n def iterative_deepening(self, stop_depth=31, verbose=False):\n depth = 1\n while depth:\n if(depth > stop_depth): break\n try:\n if verbose: print(\"Running depth limited search with depth=\", depth)\n states, actions = self.depth_limited(depth)\n except MyException as e:\n depth += 1\n continue\n return states, actions\n print(\"Stopped deepening search after stop_depth is reached. Current stop_depth= \", stop_depth)\n return [], []\n \nclass InformedSearch:\n valid_metrics = ('manhattan_distance', 'num_wrong_tiles')\n def __init__(self, start_node: Node):\n super().__init__()\n self.start_node = start_node\n self.unique = count()\n\n def queue_push(self, pqueue, node, distance_metric):\n priority_value = getattr(node, distance_metric)() + node.path_cost\n pqueue.put((priority_value, next(self.unique), node))\n\n def astar(self, distance_metric='manhattan_distance'): \n if distance_metric not in self.valid_metrics:\n raise ValueError('Invalid distance metric specified. Valid metrics are:', self.valid_metrics)\n \n frontier = PriorityQueue()\n explored_set = set()\n\n self.queue_push(frontier, self.start_node, distance_metric)\n while not frontier.empty():\n _, _, top = frontier.get()\n explored_set.add(top.get_string())\n if top.is_goalstate():\n return top.path_to_goal()\n for action in top.actions:\n newNode = top.child_node(action)\n if newNode.get_string() in explored_set:\n continue\n self.queue_push(frontier, newNode, distance_metric)\n print(\"A-star search failed to reach the given goal state.\")\n return [], []\n","sub_path":"Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"649388960","text":"from django.db import models\nfrom django.db.models.query import QuerySet\n\nfrom .item_system.containers import *\nfrom .item_system.cont_items import *\n\nfrom game_module.models import GroupConfigure, Subject, QuestionStar\n\nfrom player_module.models import Player\nfrom exermon_module.models import ExerSkill, HitType, TargetType\nfrom record_module.models import QuestionSetRecord, PlayerQuestion, RecordSource\n\nfrom utils.calc_utils import ExerciseSingleRewardCalc, BattleResultRewardCalc\nfrom utils.model_utils import CacheableModel, Common as ModelUtils\nfrom utils.exception import ErrorType, GameException\n\nfrom enum import Enum\nimport random, datetime\n\n# Create your models here.\n\n\n# =======================\n# 对战评价表,记录对战评价所需分数以及增加/扣除星星数的关系\n# =======================\nclass BattleResultJudge(GroupConfigure):\n\t\"\"\"\n\t对战评价表,记录对战评价所需分数以及增加/扣除星星数的关系\n\t\"\"\"\n\n\tclass Meta:\n\t\tverbose_name = verbose_name_plural = \"对战评价表\"\n\n\tNOT_EXIST_ERROR = ErrorType.ResultJudgeNotExist\n\n\t# 评价要求分数\n\tscore = models.PositiveSmallIntegerField(default=0, verbose_name=\"评价分数\")\n\n\t# 胜利增加星星\n\twin = models.SmallIntegerField(default=0, verbose_name=\"胜利增星数\")\n\n\t# 失败增加星星(负数为减少)\n\tlose = models.SmallIntegerField(default=0, verbose_name=\"失败增星数\")\n\n\tdef convert(self, type: str = None, **kwargs) -> dict:\n\t\t\"\"\"\n\t\t转化为字典\n\t\tArgs:\n\t\t\ttype (str): 转化类型\n\t\t\t**kwargs (**dict): 子类重载参数\n\t\tReturns:\n\t\t\t转化后的字典数据\n\t\t\"\"\"\n\t\tres = super().convert()\n\n\t\tres['score'] = self.score\n\t\tres['win'] = self.win\n\t\tres['lose'] = self.lose\n\n\t\treturn res\n\n\n# ===================================================\n# 对战类型枚举\n# ===================================================\nclass BattleMode(Enum):\n\tNormal = 0 # 经典模式\n\n\n# ===================================================\n# 对战记录表\n# ===================================================\nclass BattleRecord(CacheableModel):\n\tclass Meta:\n\t\tverbose_name = verbose_name_plural = \"对战记录\"\n\n\t# 常量声明\n\tMODES = [\n\t\t(BattleMode.Normal.value, '经典模式'),\n\t]\n\n\t# 对战玩家缓存键\n\tBATTLE_PLAYERS_CACHE_KEY = \"players\"\n\n\t# 对战玩家缓存键\n\tBATTLE_ROUNDS_CACHE_KEY = \"rounds\"\n\n\t# 对战模式\n\tmode = models.PositiveSmallIntegerField(default=BattleMode.Normal.value,\n\t\t\t\t\t\t\t\t\t\t\tchoices=MODES, verbose_name=\"对战模式\")\n\n\t# 赛季\n\tseason = models.ForeignKey('season_module.CompSeason', on_delete=models.CASCADE,\n\t\t\t\t\t\t\t verbose_name=\"赛季\")\n\n\t# 对战时间\n\tcreate_time = models.DateTimeField(auto_now_add=True, verbose_name=\"对战时间\")\n\n\t# 结算时间\n\tresult_time = models.DateTimeField(null=True, verbose_name=\"结算时间\")\n\n\tdef __str__(self):\n\t\treturn \"%s. 
%s\" % (str(self.id), self.generateName())\n\n\tdef generateName(self):\n\t\t\"\"\"\n\t\t生成名称\n\t\tReturns:\n\t\t\t返回对战记录名称\n\t\t\"\"\"\n\t\treturn \"%s VS %s\" % (self.adminPlayer1(), self.adminPlayer2())\n\n\t# admin 显示玩家1\n\tdef adminPlayer1(self):\n\t\tplayer = self.firstPlayer()\n\t\treturn '-' if player is None else str(player)\n\n\tadminPlayer1.short_description = \"玩家1\"\n\n\t# admin 显示玩家2\n\tdef adminPlayer2(self):\n\t\tplayer = self.secondPlayer()\n\t\treturn '-' if player is None else str(player)\n\n\tadminPlayer2.short_description = \"玩家2\"\n\n\t@classmethod\n\tdef create(cls, player1: Player, player2: Player, mode: int) -> 'BattleRecord':\n\t\t\"\"\"\n\t\t创建对战记录实例\n\t\tArgs:\n\t\t\tplayer1 (Player): 玩家1\n\t\t\tplayer2 (Player): 玩家2\n\t\t\tmode (int): 对战模式\n\t\tReturns:\n\t\t\t本对战记录实例\n\t\t\"\"\"\n\t\tfrom season_module.runtimes import SeasonManager\n\n\t\trec = cls()\n\t\trec.mode = mode\n\t\trec.season_id = SeasonManager.getCurrentSeason().id\n\n\t\trec.save()\n\n\t\trec.start(player1, player2)\n\n\t\treturn rec\n\n\tdef convert(self, type: str = None, **kwargs) -> dict:\n\t\t\"\"\"\n\t\t转化为字典\n\t\tArgs:\n\t\t\ttype (str): 转化类型\n\t\t\t**kwargs (**dict): 子类重载参数\n\t\tReturns:\n\t\t\t转化后的字典数据\n\t\t\"\"\"\n\t\tcreate_time = ModelUtils.timeToStr(self.create_time)\n\t\tresult_time = ModelUtils.timeToStr(self.result_time)\n\n\t\tplayers = ModelUtils.objectsToDict(self.battlePlayers(), type=type)\n\t\trounds = ModelUtils.objectsToDict(self.battleRounds(), type=type)\n\n\t\tres = {\n\t\t\t'id': self.id,\n\t\t\t'mode': self.mode,\n\t\t\t'season_id': self.season_id,\n\t\t\t'create_time': create_time,\n\t\t\t'result_time': result_time,\n\n\t\t\t'players': players,\n\t\t}\n\n\t\tif type == \"record\" or type == \"result\":\n\t\t\tres['rounds'] = rounds\n\n\t\treturn res\n\n\tdef start(self, player1: Player, player2: Player):\n\t\t\"\"\"\n\t\t对战开始\n\t\tArgs:\n\t\t\tplayer1 (Player): 玩家1\n\t\t\tplayer2 (Player): 玩家2\n\t\t\"\"\"\n\t\tself._initCaches()\n\n\t\tself.addPlayer(player1)\n\t\tself.addPlayer(player2)\n\n\tdef terminate(self, battle):\n\t\t\"\"\"\n\t\t结束对战\n\t\tArgs:\n\t\t\tbattle (RuntimeBattle): 运行时对战\n\t\t\"\"\"\n\t\t# self.save()\n\t\t# self._saveCache(self.BATTLE_ROUNDS_CACHE_KEY)\n\n\t\tself.firstPlayer().terminate(battle=battle)\n\t\tself.secondPlayer().terminate(battle=battle)\n\n\t\tself.result_time = datetime.datetime.now()\n\n\t\tself.save()\n\n\tdef _initCaches(self):\n\t\t\"\"\"\n\t\t初始化所有缓存数据\n\t\t\"\"\"\n\t\t# self._cache(self.BATTLE_PLAYERS_CACHE_KEY, [])\n\t\t# self._cache(self.BATTLE_ROUNDS_CACHE_KEY, [])\n\n\t# region 玩家操作\n\n\tdef battlePlayers(self) -> QuerySet:\n\t\t\"\"\"\n\t\t获取对战玩家\n\t\tReturns:\n\t\t\t对战玩家 QuerySet 对象\n\t\t\"\"\"\n\t\t# 结算时间为空,表示正在对战中\n\t\tif self.result_time is None:\n\t\t\treturn self._getCachedBattlePlayers()\n\n\t\treturn self.battleplayer_set.all()\n\n\tdef firstPlayer(self) -> 'BattlePlayer':\n\t\t\"\"\"\n\t\t获取第一个玩家实例\n\t\tReturns:\n\t\t\t如果有第一个玩家,则返回其实例,否则返回 None\n\t\t\"\"\"\n\t\t# 结算时间为空,表示正在对战中\n\t\tif self.result_time is None:\n\t\t\tplayers = self._getCachedBattlePlayers()\n\t\t\treturn players[0] if players is not None else None\n\n\t\tplayers = self.battlePlayers()\n\t\tif players.count() >= 1:\n\t\t\treturn players[0]\n\t\treturn None\n\n\tdef secondPlayer(self) -> 'BattlePlayer':\n\t\t\"\"\"\n\t\t获取第二个玩家实例\n\t\tReturns:\n\t\t\t如果有第二个玩家,则返回其实例,否则返回 None\n\t\t\"\"\"\n\t\tif self.result_time is None:\n\t\t\tplayers = self._getCachedBattlePlayers()\n\t\t\treturn players[1] if players is not None else None\n\n\t\tplayers = 
self.battlePlayers()\n\t\tif players.count() >= 2:\n\t\t\treturn players[1]\n\t\treturn None\n\n\tdef getBattlePlayer(self, player: Player = None, battle_player: 'BattlePlayer' = None):\n\t\t\"\"\"\n\t\t获取对战玩家\n\t\tArgs:\n\t\t\tplayer (Player): 玩家实例\n\t\t\tbattle_player (BattlePlayer): 对战玩家实例\n\t\tReturns:\n\t\t\t返回自身对战玩家\n\t\t\"\"\"\n\t\tbattle_player1 = self.firstPlayer()\n\t\tbattle_player2 = self.secondPlayer()\n\n\t\tif player and player.id == battle_player1.player_id:\n\t\t\treturn battle_player1\n\t\tif player and player.id == battle_player2.player_id:\n\t\t\treturn battle_player2\n\n\t\tif battle_player and battle_player == battle_player1:\n\t\t\treturn battle_player1\n\t\tif battle_player and battle_player == battle_player2:\n\t\t\treturn battle_player2\n\n\t\treturn None\n\n\tdef getOppoBattlePlayer(self, player: Player = None, battle_player: 'BattlePlayer' = None):\n\t\t\"\"\"\n\t\t获取对方对战玩家\n\t\tArgs:\n\t\t\tplayer (Player): 玩家实例\n\t\t\tbattle_player (BattlePlayer): 对战玩家实例\n\t\tReturns:\n\t\t\t返回自身对战玩家\n\t\t\"\"\"\n\t\tbattle_player1 = self.firstPlayer()\n\t\tbattle_player2 = self.secondPlayer()\n\n\t\tif player and player.id == battle_player1.player_id:\n\t\t\treturn battle_player2\n\t\tif player and player.id == battle_player2.player_id:\n\t\t\treturn battle_player1\n\n\t\tif battle_player and battle_player == battle_player1:\n\t\t\treturn battle_player2\n\t\tif battle_player and battle_player == battle_player2:\n\t\t\treturn battle_player1\n\n\t\treturn None\n\n\tdef addPlayer(self, player: Player) -> 'BattlePlayer':\n\t\t\"\"\"\n\t\t添加一个对战玩家\n\t\tArgs:\n\t\t\tplayer (Player): 玩家\n\t\t\"\"\"\n\t\tplayer = BattlePlayer.create(player, record=self)\n\t\tself._addBattlePlayerCache(player)\n\n\t\treturn player\n\n\tdef _addBattlePlayerCache(self, player: 'BattlePlayer'):\n\t\t\"\"\"\n\t\t添加对战玩家到缓存中\n\t\tArgs:\n\t\t\tplayer (BattlePlayer): 对战玩家\n\t\t\"\"\"\n\t\tcache = self._getCachedBattlePlayers()\n\t\tcache.append(player)\n\n\tdef _getCachedBattlePlayers(self) -> list:\n\t\t\"\"\"\n\t\t获取缓存对战玩家数组\n\t\tReturns:\n\t\t\t返回当前缓存对战玩家数组\n\t\t\"\"\"\n\t\treturn self._getCache(self.BATTLE_PLAYERS_CACHE_KEY)\n\n\t# endregion\n\n\t# region 回合操作\n\n\tdef battleRounds(self) -> QuerySet:\n\t\t\"\"\"\n\t\t获取所有对战回合数据\n\t\tReturns:\n\t\t\t对战回合 QuerySet 对象\n\t\t\"\"\"\n\t\treturn self.battleround_set.all()\n\n\tdef currentRound(self) -> 'BattleRound':\n\t\t\"\"\"\n\t\t获取当前回合\n\t\tReturns:\n\t\t\t若对战未结束,返回最后一个回合(当前回合),否则返回空\n\t\t\"\"\"\n\t\tif self.result_time is not None: return None\n\t\tcache = self._getCachedBattleRounds()\n\n\t\tif len(cache) > 0: return cache[-1]\n\t\treturn None\n\n\tdef addRound(self) -> 'BattleRound':\n\t\t\"\"\"\n\t\t添加一个对战回合\n\t\t\"\"\"\n\t\tcache = self._getCachedBattleRounds()\n\t\tround = BattleRound.create(self, len(cache))\n\n\t\tplayers = self._getCachedBattlePlayers()\n\t\tfor player in players: player.addRound(round)\n\n\t\tself._addBattleRoundCache(round)\n\n\t\treturn round\n\n\tdef startCurrentRound(self):\n\t\t\"\"\"\n\t\t开始当前回合(答题用)\n\t\t\"\"\"\n\t\tplayers = self._getCachedBattlePlayers()\n\t\tfor player in players: player.startCurrentRound()\n\n\tdef _addBattleRoundCache(self, round: 'BattleRound'):\n\t\t\"\"\"\n\t\t添加对战回合到缓存中\n\t\tArgs:\n\t\t\tround (BattleRound): 对战回合\n\t\t\"\"\"\n\t\tcache = self._getCachedBattleRounds()\n\t\tcache.append(round)\n\n\tdef _getCachedBattleRounds(self) -> list:\n\t\t\"\"\"\n\t\t获取缓存对战回合数组\n\t\tReturns:\n\t\t\t返回当前缓存对战回合数组\n\t\t\"\"\"\n\t\treturn self._getCache(self.BATTLE_ROUNDS_CACHE_KEY)\n\n\t# endregion\n\n\tdef 
subjects(self) -> set:\n\t\t\"\"\"\n\t\t获取对战玩家所选的科目数据\n\t\tReturns:\n\t\t\t所选科目数组\n\t\t\"\"\"\n\t\tplayer = self.firstPlayer()\n\t\tif player is None: return []\n\t\tplayer = player.player\n\n\t\treturn player.subjects()\n\n\n# ===================================================\n# 对战回合\n# ===================================================\nclass BattleRound(models.Model):\n\n\tclass Meta:\n\n\t\tverbose_name = verbose_name_plural = \"对战回合\"\n\n\t# 回合序号\n\torder = models.PositiveSmallIntegerField(default=0, verbose_name=\"回合号\")\n\n\t# 关联的对战记录\n\trecord = models.ForeignKey('BattleRecord', on_delete=models.CASCADE, verbose_name=\"对战记录\")\n\n\t# 题目\n\tquestion = models.ForeignKey('question_module.Question', null=True, on_delete=models.CASCADE, verbose_name=\"题目\")\n\n\tdef __str__(self):\n\t\treturn str(self.record)+\" 回合 \"+str(self.order)\n\n\tdef convert(self, type: str = None) -> dict:\n\t\t\"\"\"\n\t\t转化为字典\n\t\tArgs:\n\t\t\ttype (str): 转化类型\n\t\tReturns:\n\t\t\t转化后的字典数据\n\t\t\"\"\"\n\t\treturn {\n\t\t\t'order': self.order,\n\t\t\t'subject_id': self.question.subject_id,\n\t\t\t'star_id': self.question.star_id,\n\t\t\t'question_id': self.question_id\n\t\t}\n\n\t# 创建对象\n\t@classmethod\n\tdef create(cls, battle: BattleRecord, order: int) -> 'BattleRound':\n\t\t\"\"\"\n\t\t创建一个对战回合\n\t\tArgs:\n\t\t\tbattle (BattleRecord): 对战记录\n\t\t\torder (int): 回合序号(从0开始)\n\t\tReturns:\n\t\t\t新创建的对战回合对象\n\t\t\"\"\"\n\t\tround = cls()\n\t\tround.order = order\n\t\tround.record = battle\n\n\t\tround.save()\n\n\t\tround.generateQuestion()\n\n\t\treturn round\n\n\t# 生成科目和星级\n\tdef _generateSubjectAndStar(self) -> (Subject, QuestionStar):\n\t\t\"\"\"\n\t\t生成随机科目和星级\n\t\tReturns:\n\t\t\t随机生成的科目, 题目星级\n\t\t\"\"\"\n\t\treturn random.choice(list(self.record.subjects())), \\\n\t\t\t random.choice(QuestionStar.objs())\n\n\tdef _generateConfigurePlayer(self) -> Player:\n\t\t\"\"\"\n\t\t生成本回合题目生成配置时所需的玩家\n\t\tReturns:\n\t\t\t题目生成配置所需的玩家对象\n\t\t\"\"\"\n\t\tbattler = self.record.firstPlayer() if self.order % 2 == 1 \\\n\t\t\telse self.record.secondPlayer()\n\n\t\treturn battler.player\n\n\tdef generateQuestion(self):\n\t\t\"\"\"\n\t\t生成题目,赋值到 question 中\n\t\t\"\"\"\n\n\t\tfrom utils.calc_utils import QuestionGenerateConfigure, QuestionGenerateType, QuestionGenerator\n\n\t\tplayer = self._generateConfigurePlayer()\n\t\tsubject, star = self._generateSubjectAndStar()\n\n\t\tconfigure = QuestionGenerateConfigure(self, player, subject, ques_star=star, count=1,\n\t\t\t\t\t\t\t\t\t\t\t gen_type=QuestionGenerateType.NotOccurFirst.value)\n\n\t\tgen = QuestionGenerator.generate(configure, True)\n\t\tresult = gen.result\n\n\t\tif len(result) > 0: self.question_id = result[0]\n\t\t# 没有题目生成\n\t\telse: raise GameException(ErrorType.GenerateError)\n\n\n# ===================================================\n# 对战玩家状态枚举\n# ===================================================\nclass BattlePlayerStatus(Enum):\n\tNormal = 0\n\tDisconnected = 1\n\tCancelled = 2\n\n\n# ===================================================\n# 玩家对战结果枚举\n# ===================================================\nclass BattlePlayerResult(Enum):\n\tWin = 1 # 胜利\n\tLose = 2 # 失败\n\tTie = 3 # 平局\n\n\n# ===================================================\n# 对战玩家表\n# ===================================================\nclass BattlePlayer(QuestionSetRecord):\n\n\tclass Meta:\n\t\tverbose_name = verbose_name_plural = \"对战玩家\"\n\n\tSTATUSES = [\n\t\t(BattlePlayerStatus.Normal.value, \"正常\"),\n\t\t(BattlePlayerStatus.Disconnected.value, 
\"掉线\"),\n\t\t(BattlePlayerStatus.Cancelled.value, \"退出\"),\n\t]\n\n\tRESULT_TYPES = [\n\t\t(BattlePlayerResult.Win.value, \"胜利\"),\n\t\t(BattlePlayerResult.Lose.value, \"失败\"),\n\t\t(BattlePlayerResult.Tie.value, \"平局\"),\n\t]\n\n\tLIST_DISPLAY_APPEND = ['adminScores']\n\n\t# 关联的记录\n\trecord = models.ForeignKey('BattleRecord', on_delete=models.CASCADE, verbose_name=\"对战记录\")\n\n\t# 积分变更\n\tscore_incr = models.SmallIntegerField(null=True, verbose_name=\"积分变更\")\n\n\t# 用时评分(*100)\n\ttime_score = models.PositiveSmallIntegerField(null=True, verbose_name=\"用时评分\")\n\n\t# 伤害评分(*100)\n\thurt_score = models.PositiveSmallIntegerField(null=True, verbose_name=\"伤害评分\")\n\n\t# 承伤评分(*100)\n\tdamage_score = models.PositiveSmallIntegerField(null=True, verbose_name=\"承伤评分\")\n\n\t# 恢复评分(*100)\n\trecovery_score = models.PositiveSmallIntegerField(null=True, verbose_name=\"恢复评分\")\n\n\t# 正确评分(*100)\n\tcorrect_score = models.PositiveSmallIntegerField(null=True, verbose_name=\"行动评分\")\n\n\t# 奖励分数(*100)\n\tplus_score = models.PositiveSmallIntegerField(null=True, verbose_name=\"奖励分数\")\n\n\t# 战斗结果\n\tresult = models.PositiveSmallIntegerField(null=True, choices=RESULT_TYPES, verbose_name=\"战斗结果\")\n\n\t# 战斗标志\n\tstatus = models.PositiveSmallIntegerField(null=True, choices=STATUSES, verbose_name=\"战斗状态标志\")\n\n\tdef __str__(self):\n\t\treturn str(self.player)\n\n\t# admin 用\n\tdef adminScores(self):\n\t\tfrom django.utils.html import format_html\n\n\t\tres = \"用时:%.2f,伤害:%.2f
\" \\\n\t\t\t \"承伤:%.2f,回复:%.2f
\" \\\n\t\t\t \"正确:%.2f,附加:%.2f
\" \\\n\t\t\t \"总分:%.2f\" % \\\n\t\t\t (self.time_score, self.hurt_score, self.damage_score,\n\t\t\t self.recovery_score, self.correct_score, self.plus_score,\n\t\t\t self.battleScore())\n\n\t\treturn format_html(res)\n\n\t# region 配置\n\n\t@classmethod\n\tdef rewardCalculator(cls) -> BattleResultRewardCalc:\n\t\t\"\"\"\n\t\t奖励计算类\n\t\tReturns:\n\t\t\t返回对应的奖励计算类类对象\n\t\t\"\"\"\n\t\treturn BattleResultRewardCalc\n\n\t@classmethod\n\tdef playerQuesClass(cls) -> 'BattleRoundResult':\n\t\t\"\"\"\n\t\t该类对应的玩家题目关系类,用于 addQuestion 中创建一个题目关系\n\t\tReturns:\n\t\t\t返回 BattleRoundResult 本身\n\t\t\"\"\"\n\t\treturn BattleRoundResult\n\n\t@classmethod\n\tdef rewardClass(cls):\n\t\t\"\"\"\n\t\t该类对应的奖励记录类\n\t\tReturns:\n\t\t\t返回为空\n\t\t\"\"\"\n\t\treturn None\n\n\tdef _playerQuestions(self) -> QuerySet:\n\t\t\"\"\"\n\t\t获取所有题目关系(数据库)\n\t\tReturns:\n\t\t\t题目关系 QuerySet 对象\n\t\t\"\"\"\n\t\treturn self.battleroundresult_set.all()\n\n\tdef _rewards(self) -> QuerySet:\n\t\t\"\"\"\n\t\t获取所有奖励(数据库)\n\t\tReturns:\n\t\t\t题目集奖励 QuerySet 对象\n\t\t\"\"\"\n\t\treturn []\n\n\t# endregion\n\n\tdef generateName(self) -> str:\n\t\t\"\"\"\n\t\t生成题目集记录的名字\n\t\tReturns:\n\t\t\t生成的名字\n\t\t\"\"\"\n\t\treturn self.record.generateName()\n\n\tdef _create(self, record: 'BattleRecord'):\n\t\t\"\"\"\n\t\t创建实例后用于配置具体属性\n\t\tArgs:\n\t\t\trecord (BattleRecord): 子类中定义参数\n\t\t\"\"\"\n\t\tself.record = record\n\n\tdef convert(self, type: str = None) -> dict:\n\t\t\"\"\"\n\t\t转化为字典\n\t\tArgs:\n\t\t\ttype (str): 转化类型\n\t\tReturns:\n\t\t\t转化后的字典数据\n\t\t\"\"\"\n\t\tres = super().convert(type)\n\n\t\tres['pid'] = self.player_id\n\t\tres['score_incr'] = self.score_incr\n\n\t\t# res['sum_hurt'] = self.sumHurt()\n\t\t# res['sum_damage'] = self.sumDamage()\n\t\t# res['sum_recover'] = self.sumRecover()\n\n\t\tres['time_score'] = self.time_score/100\n\t\tres['hurt_score'] = self.hurt_score/100\n\t\tres['damage_score'] = self.damage_score/100\n\t\tres['recovery_score'] = self.recovery_score/100\n\t\tres['correct_score'] = self.correct_score/100\n\t\tres['plus_score'] = self.plus_score/100\n\n\t\tres['result'] = self.result\n\t\tres['status'] = self.status\n\n\t\treturn res\n\n\tdef battleScore(self) -> int:\n\t\t\"\"\"\n\t\t获取最终对战评分\n\t\tReturns:\n\t\t\t对战评分\n\t\t\"\"\"\n\t\treturn (self.time_score + self.hurt_score + self.damage_score +\n\t\t\t\tself.recovery_score + self.correct_score) / 5 + self.plus_score\n\n\tdef currentRound(self) -> 'BattleRoundResult':\n\t\t\"\"\"\n\t\t获取当前回合对象\n\t\tReturns:\n\t\t\t返回当前回合对象(BattleRoundResult)(从缓存)\n\t\t\"\"\"\n\t\tif self.finished: return None\n\n\t\trounds = list(self.playerQuestions())\n\t\tif len(rounds) > 0: return rounds[-1]\n\t\treturn None\n\n\t# region 回合操作\n\n\tdef addRound(self, round: BattleRound):\n\t\t\"\"\"\n\t\t添加回合\n\t\tArgs:\n\t\t\tround (BattleRound): 对战回合\n\t\t\"\"\"\n\t\tself.addQuestion(round.question_id, round=round)\n\n\tdef startCurrentRound(self):\n\t\t\"\"\"\n\t\t开始当前回合\n\t\t\"\"\"\n\t\tcur_round = self.currentRound()\n\t\tif cur_round is None: return\n\n\t\tself.startQuestion(player_ques=cur_round)\n\n\tdef answerCurrentRound(self, selection: list, timespan: int):\n\t\t\"\"\"\n\t\t作答当前回合\n\t\tArgs:\n\t\t\tselection (list): 选择情况\n\t\t\ttimespan (int): 作答时长\n\t\t\"\"\"\n\t\tcur_round = self.currentRound()\n\t\tif cur_round is None: return\n\n\t\tself.answerQuestion(selection, timespan, player_ques=cur_round)\n\n\t# endregion\n\n\t# region 对战结束\n\n\tdef _applyBaseResult(self, calc: BattleResultRewardCalc):\n\t\t\"\"\"\n\t\t应用基本结果\n\t\tArgs:\n\t\t\tcalc (BattleResultRewardCalc): 
结果\n\t\t\"\"\"\n\t\tsuper()._applyBaseResult(calc)\n\n\t\tself.result = calc.result.value\n\t\tself.status = calc.status.value\n\t\tself.score_incr = calc.score_incr\n\n\t\tself.time_score = calc.battle_scores.time_score*100\n\t\tself.hurt_score = calc.battle_scores.hurt_score*100\n\t\tself.damage_score = calc.battle_scores.damage_score*100\n\t\tself.recovery_score = calc.battle_scores.recovery_score*100\n\t\tself.correct_score = calc.battle_scores.correct_score*100\n\t\tself.plus_score = calc.battle_scores.plus_score*100\n\n\tdef _applyPlayerResult(self, calc: BattleResultRewardCalc):\n\t\t\"\"\"\n\t\t应用玩家结果\n\t\tArgs:\n\t\t\tcalc (BattleResultRewardCalc): 结果\n\t\t\"\"\"\n\t\tsuper()._applyPlayerResult(calc)\n\n\t\tplayer = self.exactlyPlayer()\n\n\t\tseason_record = player.currentSeasonRecord()\n\n\t\tseason_record.adjustCredit(calc.credit_incr)\n\t\tseason_record.adjustPoint(calc.score_incr)\n\t\tseason_record.adjustStarNum(calc.star_incr)\n\n\t\tseason_record.save()\n\n\t# endregion\n\n\t# region 统计数据\n\n\tdef sumHurt(self, player_queses: QuerySet = None) -> int:\n\t\t\"\"\"\n\t\t获取对战总伤害\n\t\tArgs:\n\t\t\tplayer_queses (QuerySet): 玩家题目关系集合,默认情况下为所有题目关系\n\t\tReturns:\n\t\t\t返回对战总伤害\n\t\t\"\"\"\n\t\treturn self._sumData('hurt', lambda d: d.hurtPoint(), player_queses)\n\n\tdef sumDamage(self, player_queses: QuerySet = None) -> int:\n\t\t\"\"\"\n\t\t获取对战总承伤\n\t\tArgs:\n\t\t\tplayer_queses (QuerySet): 玩家题目关系集合,默认情况下为所有题目关系\n\t\tReturns:\n\t\t\t返回对战总承伤\n\t\t\"\"\"\n\t\treturn self._sumData('damage', lambda d: d.damagePoint(), player_queses)\n\n\tdef sumRecovery(self, player_queses: QuerySet = None) -> int:\n\t\t\"\"\"\n\t\t获取对战总回复\n\t\tArgs:\n\t\t\tplayer_queses (QuerySet): 玩家题目关系集合,默认情况下为所有题目关系\n\t\tReturns:\n\t\t\t返回对战总回复\n\t\t\"\"\"\n\t\treturn self._sumData('recovery', lambda d: d.recovery, player_queses)\n\n\t# endregion\n\n\t\"\"\"占位符\"\"\"\n\n\n# ===================================================\n# 对战回合结果类型枚举\n# ===================================================\nclass HitResultType(Enum):\n\tUnknown = 0 # 未知\n\tHit = 1 # 命中\n\tCritical = 2 # 命中\n\tMiss = 3 # 回避\n\n\n# ===================================================\n# 对战回合结果表\n# ===================================================\nclass BattleRoundResult(PlayerQuestion):\n\tclass Meta:\n\t\tverbose_name = verbose_name_plural = \"对战回合结果\"\n\n\tRESULT_TYPES = [\n\t\t(HitResultType.Unknown.value, \"未知\"),\n\t\t(HitResultType.Hit.value, \"命中\"),\n\t\t(HitResultType.Critical.value, \"暴击\"),\n\t\t(HitResultType.Miss.value, \"回避\"),\n\t]\n\n\tLIST_EDITABLE_EXCLUDE = ['round', 'battle_player']\n\n\t# 关联的回合\n\tround = models.ForeignKey('BattleRound', on_delete=models.CASCADE, verbose_name=\"回合\")\n\n\t# 对战玩家\n\tbattle_player = models.ForeignKey('BattlePlayer', on_delete=models.CASCADE, verbose_name=\"对战玩家\")\n\n\t# 是否进攻\n\tattack = models.BooleanField(null=True, verbose_name=\"是否进攻\")\n\n\t# 使用技能(为 None 则是普通攻击)(本回合攻击方的技能)\n\tskill = models.ForeignKey(\"exermon_module.ExerSkill\", null=True,\n\t\t\t\t\t\t\t on_delete=models.SET_NULL, verbose_name=\"使用技能\")\n\n\t# 目标(本回合攻击方的目标)\n\ttarget_type = models.PositiveSmallIntegerField(default=TargetType.Enemy.value,\n\t\t\t\t\t\t\t\t\t\t\t\t choices=ExerSkill.TARGET_TYPES, verbose_name=\"目标\")\n\n\t# 回合结果(本回合攻击方的结果)\n\tresult_type = models.PositiveSmallIntegerField(default=HitResultType.Unknown.value,\n\t\t\t\t\t\t\t\t\t\t\t\t choices=RESULT_TYPES, verbose_name=\"回合结果\")\n\n\t# 伤害点数(自己对目标造成的HP伤害,小于0为恢复)\n\thurt = models.SmallIntegerField(default=0, verbose_name=\"伤害点数\")\n\n\t# 
承伤点数(任何自己遭受的HP伤害,小于0为恢复)\n\tdamage = models.SmallIntegerField(default=0, verbose_name=\"承伤点数\")\n\n\t# 回复点数(通过物品的HP回复,若物品需要扣除HP则不算入内)\n\trecovery = models.PositiveSmallIntegerField(default=0, verbose_name=\"回复点数\")\n\n\t@classmethod\n\tdef rewardCalculator(cls) -> ExerciseSingleRewardCalc:\n\t\t\"\"\"\n\t\t获取对应的奖励计算类\n\t\tReturns:\n\t\t\t对应奖励计算类本身(继承自 QuestionSetSingleRewardCalc)\n\t\t\"\"\"\n\t\treturn ExerciseSingleRewardCalc\n\n\t@classmethod\n\tdef source(cls) -> RecordSource:\n\t\t\"\"\"\n\t\t题目来源\n\t\tReturns:\n\t\t\t题目来源枚举成员\n\t\t\"\"\"\n\t\treturn RecordSource.Battle\n\n\tdef convert(self, type: str = None,\n\t\t\t\t\t runtime_battler: 'RuntimeBattlePlayer' = None) -> dict:\n\t\t\"\"\"\n\t\t转化为字典\n\t\tArgs:\n\t\t\ttype (str): 转化类型\n\t\t\truntime_battler (RuntimeBattlePlayer): 运行时对战玩家对象\n\t\tReturns:\n\t\t\t转化后的字典数据\n\t\t\"\"\"\n\t\tres = super().convert(type)\n\n\t\tres['order'] = self.round.order\n\t\tres['attack'] = self.attack\n\t\tres['skill_id'] = self.skill_id\n\t\tres['target_type'] = self.target_type\n\t\tres['result_type'] = self.result_type\n\t\tres['hurt'] = self.hurt\n\t\tres['damage'] = self.damage\n\t\tres['recovery'] = self.recovery\n\n\t\tif runtime_battler is not None:\n\t\t\truntime_battler.convert(res)\n\n\t\treturn res\n\n\tdef _create(self, round: BattleRound):\n\t\t\"\"\"\n\t\t内部创建函数\n\t\tArgs:\n\t\t\tround (BattleRound): 站都回合对象\n\t\t\"\"\"\n\t\tself.round = round\n\n\tdef setQuestionSet(self, question_set: BattlePlayer):\n\t\t\"\"\"\n\t\t设置题目集(对战玩家)\n\t\tArgs:\n\t\t\tquestion_set (BattlePlayer): 对战玩家\n\t\t\"\"\"\n\t\tself.battle_player = question_set\n\n\tdef questionSet(self) -> BattlePlayer:\n\t\t\"\"\"\n\t\t获取题目集记录(对战玩家)\n\t\tReturns:\n\t\t\t对战玩家\n\t\t\"\"\"\n\t\treturn self.battle_player\n\n\tdef start(self):\n\t\tsuper().start()\n\t\tself.hurt = self.damage = self.recovery = 0\n\n\tdef processRecovery(self, recovery: int):\n\t\t\"\"\"\n\t\t处理道具回复\n\t\tArgs:\n\t\t\trecovery (int): 道具回复量\n\t\t\"\"\"\n\t\tif recovery > 0: self.recovery += recovery\n\n\tdef processAttack(self, skill: ExerSkill, target_type: TargetType,\n\t\t\t\t result_type: HitResultType, hurt: int, attacker: bool):\n\t\t\"\"\"\n\t\t处理回合攻击\n\t\tArgs:\n\t\t\tskill (ExerSkill): 技能(为 None 则为普通攻击)\n\t\t\ttarget_type (TargetType): 实际目标类型(有可能与技能的目标类型不一致)\n\t\t\tresult_type (HitResultType): 命中结果类型\n\t\t\thurt (int): 伤害点数\n\t\t\tattacker (bool): 自己是否为攻击方\n\t\t\"\"\"\n\t\tself.skill = skill\n\t\tself.target_type = target_type.value\n\t\tself.result_type = result_type.value\n\n\t\tself._processHurt(hurt, attacker)\n\n\tdef _processHurt(self, hurt: int, attacker: bool):\n\t\t\"\"\"\n\t\t处理伤害(保存记录)\n\t\tArgs:\n\t\t\thurt (int): 伤害值\n\t\t\tattacker (bool): 自己是否为攻击方\n\t\t\"\"\"\n\t\tif self.skill is None:\n\t\t\thit_type = HitType.HPDamage\n\t\telse:\n\t\t\thit_type = HitType(self.skill.hit_type)\n\n\t\tif hit_type == HitType.MPDamage or \\\n\t\t\thit_type == HitType.HPRecover or \\\n\t\t\thit_type == HitType.MPRecover or \\\n\t\t\thit_type == HitType.MPDrain: return\n\n\t\ttarget_type = TargetType(self.target_type)\n\n\t\t# 对敌攻击/双方攻击,如果是攻击方则计入伤害点数,否则计入承伤点数\n\t\tif target_type == TargetType.Enemy or target_type == TargetType.Both:\n\t\t\tif attacker: self.hurt += hurt\n\t\t\telse: self.damage += hurt\n\n\t\t# 对己攻击,如果是攻击方计入承伤点数,不计入伤害点数\n\t\tif target_type == TargetType.Self:\n\t\t\tif attacker: self.damage += hurt\n\n\tdef hurtPoint(self) -> int:\n\t\t\"\"\"\n\t\t实际对敌伤害值\n\t\tReturns:\n\t\t\t返回实际的对敌伤害值\n\t\t\"\"\"\n\t\tskill = self.skill\n\n\t\tif skill is None: return self.hurt\n\t\tif skill.hit_type == 
HitType.HPDamage.value and \\\n\t\t\tTargetType(self.target_type) in (TargetType.Enemy, TargetType.Both):\n\t\t\treturn self.hurt\n\t\treturn 0\n\n\tdef damagePoint(self) -> int:\n\t\t\"\"\"\n\t\t实际受到的伤害点数\n\t\tReturns:\n\t\t\t获取实际受到的伤害\n\t\t\"\"\"\n\t\tskill = self.skill\n\n\t\tif skill is None: return self.damage\n\t\tif skill.hit_type == HitType.HPDamage.value and \\\n\t\t\tTargetType(self.target_type) in (TargetType.Enemy, TargetType.Both):\n\t\t\treturn self.damage\n\t\treturn 0\n","sub_path":"Server/ExermonServer/battle_module/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":26494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
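The hurt/damage split implemented by _processHurt above is easy to sanity-check in isolation. A minimal sketch in plain Python — the TargetType enum here is a stand-in with assumed values, and split_hit is a hypothetical helper mirroring the model method, not part of the module:

    from enum import Enum

    class TargetType(Enum):  # stand-in values; the real enum lives in the game modules
        Enemy = 1
        Self = 2
        Both = 3

    def split_hit(target_type, hurt, attacker):
        # Enemy/Both hits: the attacker accrues hurt, the defender accrues damage.
        if target_type in (TargetType.Enemy, TargetType.Both):
            return (hurt, 0) if attacker else (0, hurt)
        # Self hits: only the attacker accrues damage.
        if target_type == TargetType.Self and attacker:
            return (0, hurt)
        return (0, 0)

    assert split_hit(TargetType.Enemy, 10, attacker=True) == (10, 0)
    assert split_hit(TargetType.Enemy, 10, attacker=False) == (0, 10)
    assert split_hit(TargetType.Self, 10, attacker=True) == (0, 10)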
+{"seq_id":"319196440","text":"from base64 import b64encode\nimport requests\n\n\nAPI_URL = 'https://api.infusionsoft.com/crm/rest/v1/'\n\n\ndef get_access_token_response(request, code):\n params = {\n 'client_id': 'nndwt7ass8w95kqfgh2utw9h',\n 'client_secret': 'fWew3CY7zp',\n 'code': code,\n 'grant_type': 'authorization_code',\n 'redirect_uri': request.build_absolute_uri('?'),\n }\n response = requests.post(\n 'https://api.infusionsoft.com/token',\n params,\n )\n return response\n\n\ndef get_refresh_token_response(refresh_token):\n b64_cred = b64encode(b'nndwt7ass8w95kqfgh2utw9h:fWew3CY7zp')\n authorization = 'Basic ' + b64_cred.decode('utf-8')\n params = {\n 'grant_type': 'refresh_token',\n 'refresh_token': refresh_token,\n }\n headers = {\n 'Authorization': authorization\n }\n response = requests.post(\n 'https://api.infusionsoft.com/token',\n params,\n headers=headers,\n )\n return response\n","sub_path":"services/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"65605989","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport argparse\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\n\ndef read_file(file_path):\n print(\"Read dataset: %s\" % args.input_file)\n X = []\n Y = []\n with open(file_path, 'r') as dataset:\n for line in dataset.readlines():\n toks = line.split(' ')\n Y.append(int(toks[0]))\n x = dict()\n for key, value in [pair.split(':') for pair in toks[1:]]:\n key, value = int(key), float(value)\n x[key - 1] = value\n X.append(x)\n\n X_np = np.zeros((len(X), len(X[0]),), dtype=float)\n for i, x in enumerate(X):\n for j, value in x.items():\n X_np[i, j] = value\n\n return X_np, np.array(Y)\n\ndef tuning(X, Y):\n print('Tuning...')\n clf = GridSearchCV(SVC(), parameters, cv=5, scoring='f1_micro', verbose=2)\n clf.fit(X, Y)\n print('Best parameters:', clf.best_params_)\n\n print(\"Training Micro-F1: %.5f\" % cross_val_score(SVC(**clf.best_params_), X, Y, cv=5, scoring='f1_micro').mean())\n return clf.best_params_\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Tuning Script')\n parser.add_argument('input_file')\n args = parser.parse_args()\n\n parameters = [\n {\n 'kernel': ['rbf'],\n 'gamma': np.logspace(-7, -1, num=8, base=2.0),\n 'C': np.arange(1, 25, 4.5)\n }\n ]\n\n X, Y = read_file(args.input_file)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=1)\n param = tuning(X_train, Y_train)\n\n # param = {'C': 10.0, 'gamma': 0.0078125, 'kernel': 'rbf'}\n\n svm_best = SVC(**param)\n svm_best.fit(X_train, Y_train)\n print(\"Testing Micro-F1: %.5f\" % f1_score(Y_test, svm_best.predict(X_test), average='micro'))","sub_path":"Assignment-3/tuning-2.py","file_name":"tuning-2.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"322614310","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass TreeMethod:\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n lcount = rcount = 0\n if root:\n lcount = rcount = 1\n if root.left:\n lcount += self.maxDepth(root.left)\n if root.right:\n rcount += self.maxDepth(root.right)\n return max(lcount, rcount)\n\n\n\ndef print_node(node):\n li = []\n if node:\n li.append(node.val)\n if node.left or node.right:\n if node.left:\n li.append(print_node(node.left))\n else:\n li.append([])\n if node.right:\n li.append(print_node(node.right))\n else:\n li.append([])\n return li\n\n\ndef get_root(li):\n if len(li) != 15:\n print(\"暂时只支持长度为15的列表\")\n return\n a, b, c, d, e, f, g, h, i, j, k, l, m, n, o = TreeNode(li[0]) if li[0] is not None else None, \\\n TreeNode(li[1]) if li[1] is not None else None, \\\n TreeNode(li[2]) if li[2] is not None else None, \\\n TreeNode(li[3]) if li[3] is not None else None, \\\n TreeNode(li[4]) if li[4] is not None else None, \\\n TreeNode(li[5]) if li[5] is not None else None, \\\n TreeNode(li[6]) if li[6] is not None else None, \\\n TreeNode(li[7]) if li[7] is not None else None, \\\n TreeNode(li[8]) if li[8] is not None else None, \\\n TreeNode(li[9]) if li[9] is not None else None, \\\n TreeNode(li[10]) if li[10] is not None else None, \\\n TreeNode(li[11]) if li[11] is not None else None, \\\n TreeNode(li[12]) if li[12] is not None else None, \\\n TreeNode(li[13]) if li[13] is not None else None, \\\n TreeNode(li[14]) if li[14] is not None else None\n if a:\n a.left = b\n a.right = c\n if b:\n b.left = d\n b.right = e\n if c:\n c.left = f\n c.right = g\n if d:\n d.left = h\n d.right = i\n if e:\n e.left = j\n e.right = k\n if f:\n f.left = l\n f.right = m\n if g:\n g.left = n\n g.right = o\n return a\n\n\ndef stringToTreeNode(string):\n string = string.strip()\n string = string[1:-1]\n if not string:\n return None\n\n inputValues = [s.strip() for s in string.split(',')]\n root = TreeNode(int(inputValues[0]))\n nodeQueue = [root]\n front = 0\n index = 1\n while index < len(inputValues):\n node = nodeQueue[front]\n front = front + 1\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n leftNumber = int(item)\n node.left = TreeNode(leftNumber)\n nodeQueue.append(node.left)\n\n if index >= len(inputValues):\n break\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n rightNumber = int(item)\n node.right = TreeNode(rightNumber)\n nodeQueue.append(node.right)\n return root","sub_path":"tools/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"77860430","text":"# Module for handling query source code\n\nfrom __future__ import annotations\n\nimport functools\nimport logging\nfrom typing import Union, Dict, List, Tuple\n\nimport sqlparse\n\nlogging.basicConfig(\n format='%(asctime)s %(levelname)s %(name)s %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef hash_reference(cte_reference: str) -> str:\n return f\"`{cte_reference}`\" # f\"( SELECT * FROM `{cte_reference}`)\"\n\n\nclass Source:\n\n def __init__(self,\n source: str):\n self._source = source\n\n def source(self) -> str:\n return self._source\n\ndef clean(tokens: sqlparse.tokens, recurse=False) -> sqlparse.tokens:\n ret_tokens = []\n # trim extra whitespace\n prev_token = None\n for token in tokens:\n if not isinstance(token, sqlparse.sql.Token):\n ret_tokens.extend(clean(token, recurse=recurse))\n elif token.is_group and recurse:\n ret_tokens.extend(clean(token.tokens, recurse=recurse))\n elif ((not token.is_whitespace or not prev_token or not prev_token.is_whitespace)\n # trim comments\n and not token.match(sqlparse.sql.Comment, None)\n and not token.match(sqlparse.tokens.Comment.Single, None)\n and not token.match(sqlparse.tokens.Comment.Multiline, None)): #isinstance(token, sqlparse.sql.Comment)):\n ret_tokens.append(token)\n prev_token = token\n return ret_tokens\n\ndef serialize_tokens(tokens: sqlparse.tokens):\n\n out = \"\"\n cleaned = clean(tokens)\n for token in cleaned:\n if token.is_group:\n out += serialize_tokens(token.tokens)\n else:\n out += token.value\n return out\n\nclass ParsedSource:\n\n def __init__(self,\n source: Source):\n self._source = source\n self._parsed_statements = self.__parse()\n\n def source(self) -> Source:\n return self._source\n\n def parsed_statements(self) -> List[sqlparse.sql.Statement]:\n return self._parsed_statements\n\n def serialize(self, reindent=False) -> str:\n raw_string = \";\".join([serialize_tokens(statement.tokens) for statement in self._parsed_statements])\n return sqlparse.format(raw_string, reindent=reindent, keyword_case='upper')\n\n def __parse(self) -> List[sqlparse.sql.Statement]:\n split_statements = []\n for split in sqlparse.split(self._source.source()):\n parsed_split = sqlparse.parse(split) # sqlparse.format(split, reindent=True, keyword_case='upper'))\n for statement in parsed_split:\n serialized = serialize_tokens(statement.tokens)\n stripped_tokens = sqlparse.parse(serialized)\n split_statements.extend(stripped_tokens)\n return split_statements\n\n def extract_statements(self) -> List[Tuple[str, sqlparse.tokens]]:\n statements = []\n for statement in self.parsed_statements():\n sub_statements = []\n for sub_statement in extract_statements(statement.tokens):\n sub_statements.append(sub_statement)\n statements.append(sub_statements)\n return statements\n\n\nclass DecomposedSource:\n\n def __init__(self,\n parsed_source: ParsedSource,\n known_dependencies: Dict[str, DecomposedSource] = None,\n extract_statements = True,\n alias: str = None):\n self._alias = alias\n self._dependencies = []\n self._parsed_sources = []\n self._known_dependencies = known_dependencies\n if extract_statements:\n for statements in parsed_source.extract_statements():\n for name, tokens in statements:\n sub_source = ParsedSource(Source(serialize_tokens(tokens)))\n decomposed_dependencies = self._decompose_dependencies(\n name,\n sub_source,\n top_level_statements=self._known_dependencies or statements)\n self._dependencies.extend(decomposed_dependencies)\n 
self._parsed_sources.append(sub_source)\n else:\n for statement in parsed_source.parsed_statements():\n sub_source = ParsedSource(Source(serialize_tokens(statement.tokens)))\n decomposed_dependencies = self._decompose_dependencies(\n self._alias,\n sub_source,\n top_level_statements=self._known_dependencies)\n self._dependencies.extend(decomposed_dependencies)\n self._parsed_sources.append(sub_source)\n\n def parsed_sources(self) -> List[ParsedSource]:\n return self._parsed_sources\n\n def statements(self) -> List[sqlparse.tokens]:\n return self._parsed_sources.statements()\n\n def dependencies(self, recurse: bool = False) -> List[Dict[str, DecomposedSource]]:\n dependencies = self._dependencies\n if recurse:\n for dependency_map in dependencies:\n for name, dependency in dependency_map.items():\n dependencies.extend(dependency.dependencies())\n return dependencies\n\n def alias(self) -> str:\n return self._alias\n\n def serialize(self, recurse: bool = False, top_level: bool = True) -> str:\n raw_string = \";\".join([parsed_source.serialize() for parsed_source in self._parsed_sources])\n return sqlparse.format(raw_string, keyword_case='upper')\n\n def has_dependency(self, potential_dependency: DecomposedSource, recurse: bool = True) -> bool:\n\n ret_val = False\n alias = potential_dependency.alias()\n if alias:\n ret_val = self._has_dependency(potential_dependency)\n if recurse:\n for dependency_mapping in self._dependencies:\n for decomposed_source in dependency_mapping.values():\n if decomposed_source.has_dependency(potential_dependency):\n ret_val = True\n break\n return ret_val\n\n def _has_dependency(self, potential_dependency: DecomposedSource) -> bool:\n return potential_dependency.alias() and \\\n next((dep for dep in self.dependencies() if potential_dependency.alias() in dep.keys()), None) is not None\n\n\n def _decompose_dependencies(self,\n name: str,\n parsed_source: ParsedSource,\n top_level_statements: Dict[str, DecomposedSource]) -> List[Dict[str, DecomposedSource]]:\n\n all_statement_dependencies = []\n # recursively decompose statements by dependency\n for statement in parsed_source.parsed_statements():\n statement_dependencies = {}\n aliases = [statement_pair[0] for statement_pair in top_level_statements if statement_pair[0]]\n for dependency in map_dependencies(name, statement, known_aliases=aliases):\n dependency_tokens = next(statement_pair[1] for statement_pair in top_level_statements if statement_pair[0] == dependency)\n sub_source = ParsedSource(Source(serialize_tokens(dependency_tokens)))\n statement_dependencies[dependency] = DecomposedSource(\n sub_source,\n known_dependencies=top_level_statements,\n alias=dependency,\n extract_statements=False)\n all_statement_dependencies.append(statement_dependencies)\n return all_statement_dependencies\n\n\nclass EncodedSource:\n\n def __init__(self,\n decomposed_source: DecomposedSource,\n known_dependencies: Dict[str, EncodedSource] = None,\n prefix: str = \"\"):\n assert(isinstance(decomposed_source, DecomposedSource))\n self._alias = decomposed_source.alias()\n self._decomposed_source = decomposed_source\n self._aliased_source = []\n self._hashed_sources = []\n self._encoded_sources = []\n self._encoded_dependencies = []\n self._known_dependencies = known_dependencies or {}\n for parsed_source, dependencies in zip(decomposed_source.parsed_sources(), decomposed_source.dependencies()):\n # recursively encode dependencies first\n sub_encoded_dependencies = []\n include_source_dependencies = []\n serialized = \"\"\n 
unencoded_dependencies_by_name = {}\n for alias, dependency in dependencies.items():\n if alias:\n if dependency.alias().startswith(prefix):\n encoded_dependency = EncodedSource(dependency, known_dependencies=self._known_dependencies, prefix=prefix)\n sub_encoded_dependencies.append(encoded_dependency)\n #all_encoded_dependencies[alias] = encoded_dependency\n #include_source_dependencies.append(f\"{alias} AS (SELECT * FROM `{encoded_dependency.hashed_sources()[-1]}`)\")\n else:\n unencoded_dependencies_by_name[alias] = dependency\n dependency_map_list = dependency.dependencies(recurse=True)\n for dependency_map in dependency_map_list:\n unencoded_dependencies_by_name.update(dependency_map)\n #include_source_dependencies.extend(f\"{alias} AS ({dependency.serialize(recurse=True)})\")\n self._encoded_dependencies.append(sub_encoded_dependencies)\n\n # determine ordering of dependencies to include.\n for encoded_dependency in sub_encoded_dependencies:\n # if we have encoded, prefer that. Remove from non-encoded\n unencoded_dependencies_by_name.pop(encoded_dependency.alias(), None)\n include_source_dependencies.append(encoded_dependency)\n # now add non-encoded\n for alias, dependency in unencoded_dependencies_by_name.items():\n include_source_dependencies.append(dependency)\n\n # https://stackoverflow.com/questions/47192626/deceptively-simple-implementation-of-topological-sorting-in-python\n # def iterative_topological_sort(graph, start):\n # seen = set()\n # stack = [] # path variable is gone, stack and order are new\n # order = [] # order will be in reverse order at first\n # q = [start]\n # while q:\n # v = q.pop()\n # if v not in seen:\n # seen.add(v) # no need to append to path any more\n # q.extend(graph[v])\n #\n # while stack and v not in graph[stack[-1]]:\n # order.append(stack.pop())\n # stack.append(v)\n #\n # return stack + order[::-1]\n\n if include_source_dependencies:\n #logger.info(f\"BEFORE include_source_dependencies:{[dep.alias() for dep in include_source_dependencies]}\")\n dep_graph = Graph(len(include_source_dependencies))\n #start = [dep for dep in include_source_dependencies if dep.alias() in dependencies.keys()]\n #logger.info(f\"start deps:{[dep.alias() for dep in start]}\")\n idx_source = 0\n for source in include_source_dependencies:\n if isinstance(source, EncodedSource):\n decomposed_source = source.decomposed_source()\n else:\n decomposed_source = source\n idx_dep = 0\n #for dep in [dep for dep in include_source_dependencies if dep.alias() in source_dep_keys]:\n for target in include_source_dependencies:\n if decomposed_source is not target and decomposed_source.has_dependency(target):\n #logger.info(f\"adding edge:source: {source.alias()} dep: {target.alias()}\")\n dep_graph.addEdge(idx_dep, idx_source)\n idx_dep += 1\n idx_source += 1\n sorted_indices = dep_graph.topologicalSort()\n #logger.info(f\"sorted_indices:{sorted_indices}\")\n include_source_dependencies_new = [include_source_dependencies[idx] for idx in sorted_indices]\n include_source_dependencies = include_source_dependencies_new\n\n\n #logger.info(f\"AFTER self:{self.alias()} include_source_dependencies:{[dep.alias() for dep in include_source_dependencies]}\")\n\n # render out source with its dependencies\n if include_source_dependencies:\n serialized += \"WITH \"\n serialized += \",\\n\".join([f\" {dep.alias()} AS ({dep.serialize()})\" for dep in include_source_dependencies]) + \"\\n\"\n serialized += f\"{parsed_source.serialize()}\"\n self._encoded_sources.append(serialized)\n import hashlib\n 
hasher = hashlib.sha1()\n hasher.update(serialized.encode('utf-8'))\n hashed = hasher.hexdigest()\n self._hashed_sources.append(hashed)\n self._known_dependencies[hashed] = self\n\n def alias(self) -> str:\n return self._alias\n\n def decomposed_source(self) -> DecomposedSource:\n return self._decomposed_source\n\n # still retains source unencoded, but with encoded dependency references\n def encoded_sources(self) -> List[str]:\n return self._encoded_sources\n\n # hashes all sources, included encoded dependency references\n def hashed_sources(self) -> List[str]:\n return self._hashed_sources\n\n # direct encoded dependencies\n def encoded_dependencies(self) -> List[List[EncodedSource]]:\n return self._encoded_dependencies\n\n # all encoded sources known by this source structure\n def all_encoded_sources_by_name(self) -> Dict[str, EncodedSource]:\n return self._known_dependencies\n\n def serialize(self, reindent=False) -> str:\n return sqlparse.format(f\"SELECT * FROM `{self._hashed_sources[-1]}`\", reindent=reindent, keyword_case='upper')\n\n @staticmethod\n def from_str(source_str: str, prefix=\"\"):\n return EncodedSource(DecomposedSource(ParsedSource(Source(source_str))), prefix=prefix)\n\n\ndef map_dependencies(name: str, statement: sqlparse.sql.Statement, known_aliases: List[str]) -> List[str]:\n single_dependencies = map_dependencies_single(name=name, known_aliases=known_aliases, tokens=statement.tokens)\n return single_dependencies\n\n\ndef map_dependencies_single(name: str, known_aliases: List[str], tokens: sqlparse.tokens) -> List[str]:\n dependency_list = []\n for token in tokens:\n # TODO: might need recursive flatten here\n for flat_token in token.flatten():\n dependency = flat_token.value\n # see if we have a query which maps to this name\n if (not name or dependency != name) and dependency in known_aliases:\n dependency_list.append(dependency)\n # dependencies[cte_name] = dependency_list\n return dependency_list\n\n\ndef extract_statements(tokens: sqlparse.tokens) -> Union[str, sqlparse.tokens]:\n remaining_tokens = []\n found_with = False\n expect_comma = False\n encountered_non_whitespace = False\n for token in tokens:\n # from https://www.programcreek.com/python/?code=dbcli%2Flitecli%2Flitecli-master%2Flitecli%2Fpackages%2Fparseutils.py\n if found_with and not expect_comma and (isinstance(token, sqlparse.sql.IdentifierList) or isinstance(token, sqlparse.sql.Identifier)):\n item_list = token.get_identifiers() if isinstance(token, sqlparse.sql.IdentifierList) else [token]\n for identifier in item_list:\n # Sometimes Keywords (such as FROM ) are classified as\n # identifiers which don't have the get_real_name() method.\n try:\n real_name = identifier.get_real_name()\n except AttributeError:\n continue\n # we are starting a new identifier. 
return what we have so far and clear it for after the id\n if remaining_tokens:\n yield None, remaining_tokens\n remaining_tokens = []\n cte_tokens = identifier.tokens\n\n # yield real_name, identifier\n\n found_as = False\n for cte_token in cte_tokens:\n # are we defining a CTE identifier?\n if found_as:\n if type(cte_token) == sqlparse.sql.Parenthesis:\n found_as = False\n expect_comma = True\n # get everything between parens, the identifiers internals, to replace\n # return everything up to, including the opening paren, but but not including\n # the identifier internals, to replace\n between_parens = list(cte_token.tokens)[1:-1]\n remaining_tokens.append(between_parens)\n # now add everything after the internals, including trailing paren, and continue\n yield real_name, remaining_tokens\n remaining_tokens = []\n break # stop extracting this cte\n if not found_as and cte_token.value.upper() == 'AS':\n found_as = True\n\n\n else:\n # if we are expecting a comma and see one, we expect another cte\n # if expect_comma and token.value == \",\":\n # expect_comma = False\n # el\n if token.value.upper() == \"WITH\":\n found_with = True\n elif not token.is_whitespace or encountered_non_whitespace:\n encountered_non_whitespace = True\n remaining_tokens.append(token)\n\n if remaining_tokens:\n yield None, remaining_tokens\n\n\n# https://www.geeksforgeeks.org/topological-sorting/\n# Class to represent a graph\nfrom collections import defaultdict\nclass Graph:\n def __init__(self, vertices):\n self.graph = defaultdict(list) # dictionary containing adjacency List\n self.V = vertices # No. of vertices\n\n # function to add an edge to graph\n def addEdge(self, u, v):\n self.graph[u].append(v)\n\n # A recursive function used by topologicalSort\n def topologicalSortUtil(self, v, visited, stack):\n\n # Mark the current node as visited.\n visited[v] = True\n\n # Recur for all the vertices adjacent to this vertex\n for i in self.graph[v]:\n if visited[i] == False:\n self.topologicalSortUtil(i, visited, stack)\n\n # Push current vertex to stack which stores result\n stack.append(v)\n\n # The function to do Topological Sort. It uses recursive\n # topologicalSortUtil()\n def topologicalSort(self):\n # Mark all the vertices as not visited\n visited = [False]*self.V\n stack = []\n\n # Call the recursive helper function to store Topological\n # Sort starting from all vertices one by one\n for i in range(self.V):\n if visited[i] == False:\n self.topologicalSortUtil(i, visited, stack)\n\n # Print contents of the stack\n return stack[::-1] # return list in reverse order\n\n# def sort_by_dependence(x, y):\n# if isinstance(x, EncodedSource):\n# x = x.decomposed_source()\n# if isinstance(y, EncodedSource):\n# y = y.decomposed_source()\n# # x a dependency of y?\n# ret_val = 0\n# if y.has_dependency(x, recurse=True):\n# ret_val = -1\n# elif x.has_dependency(y, recurse=True):\n# ret_val = 1\n# logger.info(\"--\")\n# logger.info(f\"{ret_val} x.alias(): {x.alias()} x:{x} y.alias():{y.alias()} y:{y}\")\n# return ret_val\n# include_source_dependencies.sort(key=functools.cmp_to_key(sort_by_dependence))\n# logger.info(f\"dependency order:{','.join([dep.alias() for dep in include_source_dependencies])}\")\n\n\n\n\n","sub_path":"src/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":20270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
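Graph.topologicalSort above is the classic DFS-based ordering that EncodedSource uses to emit CTE dependencies before their dependents; vertices are bare list indices, so aliases are mapped to positions first. A minimal standalone run of the same class (addEdge(u, v) means u must precede v):

    g = Graph(4)
    g.addEdge(0, 1)
    g.addEdge(0, 2)
    g.addEdge(1, 3)
    g.addEdge(2, 3)
    print(g.topologicalSort())  # [0, 2, 1, 3] with this insertion order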
+{"seq_id":"415227577","text":"import json\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_noop, ugettext_lazy as _\n\n\nclass Gig(models.Model):\n title = models.CharField(verbose_name=_(\"Gig name\"),\n max_length=60, blank=False)\n slug = models.SlugField(verbose_name=_(\"Slug\"), blank=False)\n date = models.DateField(verbose_name=_(\"Gig date\"), blank=False)\n description = models.TextField(verbose_name=_(\"Description\"),\n null=False, blank=True)\n\n def __init__(self, *args, **kwargs):\n super(Gig, self).__init__(*args, **kwargs)\n self._pristine = {\n field.name: getattr(self, field.name)\n for field in self._meta.fields\n }\n\n def __str__(self):\n return \"%s (%s)\" % (self.title, self.date)\n\n def track_changes(self, user_making_changes):\n changes = []\n track_changes_of = [\n ('title', ugettext_noop('Gig name')),\n ('date', ugettext_noop('Gig date')),\n ('description', ugettext_noop('Description')),\n ]\n for field, verbose_name in track_changes_of:\n oldval = str(self._pristine[field] or '')\n newval = str(getattr(self, field))\n if oldval != newval:\n changes.append({\n 'title': verbose_name,\n 'title_translatable': True,\n 'prev': oldval,\n 'new': newval\n })\n if not changes:\n return\n action = (ugettext_noop('%(who)s (f) edited gig %(when)s')\n if user_making_changes.profile.gender == 'f' else\n ugettext_noop('%(who)s (m) edited gig %(when)s'))\n info = {'action': action, 'changes': changes}\n Comment.objects.create(\n gig=self, song=None, author=user_making_changes,\n text=json.dumps(info), comment_type=Comment.CT_GIG_EDIT,\n )\n\n\nclass CommentManager(models.Manager):\n def get_queryset(self):\n qs = super(CommentManager, self).get_queryset()\n return qs.select_related('song', 'author', 'gig')\n\n\nclass Comment(models.Model):\n objects = CommentManager()\n\n CT_SONG_COMMENT = 'song_comment'\n CT_SONG_EDIT = 'song_changed'\n CT_GIG_COMMENT = 'gig_comment'\n CT_GIG_EDIT = 'gig_changed'\n\n COMMENT_TYPE_CHOICES = (\n (CT_SONG_COMMENT, CT_SONG_COMMENT),\n (CT_GIG_COMMENT, CT_GIG_COMMENT),\n (CT_SONG_EDIT, CT_SONG_EDIT),\n )\n GIG_ONLY_COMMENTS = (CT_GIG_COMMENT, CT_GIG_EDIT)\n\n gig = models.ForeignKey(Gig, on_delete=models.CASCADE,\n blank=False, related_name='comments')\n song = models.ForeignKey('sbsong.Song', on_delete=models.CASCADE,\n blank=True, null=True, related_name='comments')\n comment_type = models.CharField(max_length=20, null=False, blank=False,\n choices=COMMENT_TYPE_CHOICES)\n author = models.ForeignKey(User, on_delete=models.PROTECT,\n null=True, blank=True,\n related_name='comments')\n datetime = models.DateTimeField(auto_now_add=True)\n text = models.TextField(null=False, blank=False)\n\n class Meta:\n ordering = ['-datetime']\n index_together = [\n ['author', 'datetime'],\n ['gig', 'datetime'],\n ['song', 'datetime'],\n ]\n","sub_path":"sbgig/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"63986010","text":"import time\n\nimport HtmlTestRunner\nfrom selenium import webdriver\nimport unittest\nfrom Project_05.Pages.test_Pages import TestPages\nimport sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"...\", \"...\"))\n\n\nclass TestForms(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Chrome(\"/home/jackdaniel/PycharmProjects/selenium/Project_05/Driver/chromedriver\")\n cls.driver.implicitly_wait(10)\n cls.driver.maximize_window()\n\n def test_01_form_filling(self):\n driver = self.driver\n driver.get(\"https://demoqa.com/automation-practice-form/\")\n\n homepage = TestPages(driver)\n homepage.enter_firstname(\"A\")\n homepage.enter_last_name(\"Daniel\")\n homepage.enter_email(\"daniel@gmail.com\")\n homepage.click_gender()\n homepage.enter_mobile_number(\"8946087040\")\n homepage.click_date()\n homepage.click_hobbies()\n homepage.enter_address(\"rc-street Ramagiri\")\n homepage.click_submit()\n homepage.click_close()\n time.sleep(10)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.close()\n cls.driver.quit()\n\n\nif __name__ == \"__main__\":\n unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(\n output=\"/home/jackdaniel/PycharmProjects/selenium/Project_05/Reports\"))\n","sub_path":"Automated form filling Test/Tests/Forms.py","file_name":"Forms.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"566415360","text":"#cluster and visualize\nimport dataClass as dat\nimport numpy as np\nfrom sklearn import mixture\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\n\n# Since the python gmm does not sort its means, we need to do that ourselves\n# otherwise, the given predictions will not match the true classes\ndef getSortedPredictions(predictions, means):\n\t#put means next to indices, sort, replace predictions\n\tmeans = means.reshape(means.size)\n\tindices = np.argsort(means)\n\tnewPredictions = np.zeros(predictions.size)\n\tfor ind in np.arange(0,predictions.size):\n\t\tnewPredictions[ind] = indices[predictions[ind]]\n\treturn newPredictions.astype(np.int8)\n\n# logOdds needs to already be in correct shape (length, 1)\ndef getGmm(logOdds, numComponents=2, BIC=False, BIC_Complexity_Max=8):\n\tlogOdds = logOdds.reshape((logOdds.size,1))\n\tif BIC:\n\t\tsmallest_BIC = 1000000000\n\t\tbestGm = mixture.GMM(n_components=numComponents) #placeholder default\n\t\tfor nComponents in np.arange(1,BIC_Complexity_Max+1):\n\t\t\tgm = mixture.GMM(n_components=nComponents)\n\t\t\tgm.fit(logOdds)\n\t\t\tthisBic = gm.bic(logOdds)\n\t\t\t#print \"BIC for iter \", nComponents, \" = \", thisBic\n\t\t\tif (thisBic < smallest_BIC):\n\t\t\t\tbestGm = gm\n\t\t\t\tsmallest_BIC = thisBic\n\t\t#predictions = bestGm.predict(logOdds)\n\t\treturn bestGm\n\t\t#return bestGm, getSortedPredictions(predictions, gm.means_)\n\n\telse:\n\t\tgm = mixture.GMM(n_components=numComponents)\n\t\tgm.fit(logOdds)\n\t\t#predictions = gm.predict(logOdds)\n\t\treturn gm\n\t\t#return gm, getSortedPredictions(predictions, gm.means_)\n\ndef visualizeGmm(gmm, logOdds):\n\tplt.hist(logOdds,bins=50, normed=True)\n\tplt.title(\"Log Odds Ratio Histogram\")\n\tplt.xlabel(\"Log Odds Ratio\")\n\tplt.ylabel(\"Frequency\")\n\tmeans = gmm.means_\n\tvariances = gmm.covars_\n\tx_plot = np.linspace(min(logOdds),max(logOdds),200)\n\tfor m in np.arange(0,means.size):\n\t\tmu = means[m,0]\n\t\tsigma = variances[m,0]**0.5\n\t\trv = stats.norm(mu,sigma)\n\t\tplt.plot(x_plot, rv.pdf(x_plot))\n\tplt.show()\n\ndef visualize(gmm=None, counts=None, bins=None, show=True,\n title=\"Log Odds Ratio Histogram\",\n xlabel=\"Log Odds Ratio\",\n ylabel=\"Frequency\"):\n\t#plt.hist(counts,bins=bins, normed=True)\n\twidth = bins[1]-bins[0]\n\tplt.bar(bins[:bins.size-1], counts, width=width) #How do I plot a pre-made histogram?\n\tplt.title(title)\n\tplt.xlabel(xlabel)\n\tplt.ylabel(ylabel)\n\tif (gmm is not None):\n\t\tmeans = gmm.means_\n\t\tvariances = gmm.covars_\n\t\tx_plot = np.linspace(min(bins),max(bins),200)\n\t\tfor m in np.arange(0,means.size):\n\t\t\tmu = means[m,0]\n\t\t\tsigma = variances[m,0]**0.5\n\t\t\trv = stats.norm(mu,sigma)\n\t\t\tplt.plot(x_plot, rv.pdf(x_plot))\n\tif (show):\n\t\tplt.show()\n\ndef getError(unsorted_Means, sorted_True_Means):\n\tmeans = unsorted_Means.reshape(unsorted_Means.size)\n\tmeans.sort()\n\t#print \"Learned means\", means\n\t#print \"True means\", sorted_True_Means\n\terrs = np.absolute(means-sorted_True_Means)/sorted_True_Means\n\treturn errs, np.average(errs), np.std(errs)\n\ndef getAccuracy(trueClass, predictedClass):\n\tcorrect = np.sum(trueClass==predictedClass)\n\treturn np.float64(correct) / trueClass.size\n\n# np.random.seed(4)\n\n# # delta needs to be a column vector\n# n=3\n# nPatients = 100000\n# ourdelta = np.array([[1.5],[3],[4.5]])#,[0.5],[-0.25],[-2],[3],[5]])\n# datagen = dat.DataSim(nCovariates = 100, nHeterogenous = n,\n# \t\t\t\t\t\ttreatmentEffect=1, delta=ourdelta)\n# means = 
datagen.getTrueMeans()\n# #print \"True means:\", means\n# X, Z, Y = datagen.generate(nPatients)\n# oddsRatios = dat.getIndividualOdds(X,Z,Y)\n# logOdds = np.log(oddsRatios)\n# logOdds = logOdds.reshape((nPatients,1))\n\n# #gm, predictions = getGmm(logOdds, numComponents = 2**n)\n# gm = getGmm(logOdds, BIC = True)\n# predictions = gm.predict(logOdds)\n# sortedPredictions = getSortedPredictions(predictions, gm.means_)\n# trueClasses = datagen.getTrueClass(X)\n\n# print \"Number of components\", gm.means_.size\n\n# if gm.means_.size == means.size:\n# \t# Don't forget to exp the log means!\n# \terrs, avgErr, stdErr = getError(np.exp(gm.means_), means)\n# \tprint \"Errors:\", errs\n# \tprint \"average Error:\", avgErr\n# \tprint \"std err\", stdErr\n# \tprint \"predictions\", predictions[0:10]\n# \tprint \"true classes\", trueClasses[0:10]\n# \tprint \"accuracy on predictions\", getAccuracy(trueClasses, predictions)\n# \tprint \"accuracy on sorted predictions\", getAccuracy(trueClasses, sortedPredictions)\n\n# print \"true means\", means\n# print \"gm means\", np.exp(gm.means_)\n# visualizeGmm(gm, logOdds)\n#print logOdds[0:30]\n\n\n#visualizeGmm(gm, logOdds)\n\n# Using log odd ratios is a smashing success!\n# Next step: Get true means\n# Figure out clustering\n# Then do experiments! Some with bayesian information criterion, some not!\n\n# Functions to write:\n# function that returns the GMM model and fit\n# Function that plots the gmm model\n# function that saves the gmm model and other stuff to files\n# Function that loads stuff from files and plots it\n# Function that predicts class, and gets true class, of patients\n\n# Need to join true means to learned means, sort by learned means\n","sub_path":"clusterVisualize.py","file_name":"clusterVisualize.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
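A caveat for reuse: sklearn.mixture.GMM as called throughout this script was deprecated in scikit-learn 0.18 and removed in 0.20; the modern equivalent of getGmm's BIC branch uses GaussianMixture, whose covariances live in covariances_ rather than covars_. A sketch of the same selection under that newer API:

    import numpy as np
    from sklearn.mixture import GaussianMixture

    def get_gmm_bic(log_odds, max_components=8):
        X = np.asarray(log_odds).reshape(-1, 1)
        fits = [GaussianMixture(n_components=k).fit(X)
                for k in range(1, max_components + 1)]
        return min(fits, key=lambda m: m.bic(X))  # smallest BIC wins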
+{"seq_id":"223038381","text":"#!/usr/bin/env python\n\n# Ensure that row padding works properly with split streams.\n\nfrom icecube import icetray, dataclasses, dataio, tableio, phys_services\nimport I3Tray\nimport os, sys, random, unittest\n\ntry:\n\tfrom icecube import hdfwriter\n\timport tables\nexcept ImportError:\n\tsys.exit()\n\t\ndef headerfaker(frame):\n\theader = dataclasses.I3EventHeader()\n\theader.run_id = 0\n\theader.event_id = headerfaker.event\n\theaderfaker.event += 1\n\tframe['I3EventHeader'] = header\nheaderfaker.event = 0\n\ndef emitter(frame, label, prob=0.5):\n\tif (random.random() < prob):\n\t\tparticle = dataclasses.I3Particle()\n\t\tframe[label] = particle\n\ndef streampick(stream):\n\tdef pick(frame):\n\t\treturn frame.Stop != icetray.I3Frame.Physics or frame['I3EventHeader'].sub_event_stream == stream\n\treturn pick\n\n\nclass SubeventTest(unittest.TestCase):\n\tfname = os.environ['I3_BUILD'] + '/hdfwriter/subevent_test.hdf5'\n\t@classmethod\n\tdef runtray(cls, fname):\n\t\ttray = I3Tray.I3Tray()\n\t\t\n\t\ttray.AddModule(\"I3InfiniteSource\", \"source\", stream=icetray.I3Frame.DAQ)\n\t\t\n\t\ttray.AddModule(headerfaker, 'headers', Streams=[icetray.I3Frame.DAQ])\n\t\t\n\t\ttray.AddModule(\"I3NullSplitter\", \"s1\")\n\t\ttray.AddModule(\"I3NullSplitter\", \"s2\")\n\t\t\n\t\tfor i in range(10):\n\t\t\ttray.AddModule(emitter, 's1e%d' % i, label='s1e%d' % i, prob=0.1, If=streampick(\"s1\"))\n\t\t\t\n\t\tfor i in range(10):\n\t\t\ttray.AddModule(emitter, 's2e%d' % i, label='s2e%d' % i, prob=0.1, If=streampick(\"s2\"))\n\n\t\ttabler = hdfwriter.I3HDFTableService(fname)\n\t\ttray.AddModule(tableio.I3TableWriter, 'scribe',\n\t\t\ttableservice=tabler,\n\t\t\ttypes=[dataclasses.I3Particle],\n\t\t\tSubEventStreams=['s1','s2'],\n\t\t\t)\n\t\t\n\t\t\n\t\ttray.Execute(100)\n\t\t\n\tdef setUp(self):\n\t\tself.__class__.runtray(self.fname)\n\tdef tearDown(self):\n\t\tos.unlink(self.fname)\n\tdef testRowAlignment(self):\n\t\thdf = tables.open_file(self.fname)\n\t\ttabs = []\n\t\tfor i in range(10):\n\t\t\tfor j in range(2):\n\t\t\t\ttry:\n\t\t\t\t\ttabs.append(hdf.get_node('/s%de%d' % (j,i)))\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\tnrows = tabs[0].nrows\n\t\tfor tab in tabs[1:]:\n\t\t\tself.assertEquals(nrows, tab.nrows)\n\t\tfor i in range(nrows):\n\t\t\tcanonical = tabs[0][i]\n\t\t\tfor tab in tabs[1:]:\n\t\t\t\trow = tab[i]\n\t\t\t\tfor field in ['Run', 'Event', 'SubEvent', 'SubEventStream']:\n\t\t\t\t\tself.assertEquals(canonical[field], row[field],\n\t\t\t\t\t \"'%s' are equal in row %d (%d != %d)\" % (field, i, \n\t\t\t\t\t canonical[field], row[field]))\n\t\thdf.close()\n\nclass SubeventMergingTest(unittest.TestCase):\n\tfname1 = os.environ['I3_BUILD'] + '/hdfwriter/subevent_test_1.hdf5'\n\tfname2 = os.environ['I3_BUILD'] + '/hdfwriter/subevent_test_2.hdf5'\n\tfname_merged = os.environ['I3_BUILD'] + '/hdfwriter/subevent_test_merged.hdf5'\n\tdef setUp(self):\n\t\tSubeventTest.runtray(self.fname1)\n\t\tSubeventTest.runtray(self.fname2)\n\t\tfrom subprocess import call\n\t\tcall([os.environ['I3_BUILD'] + \"/hdfwriter/resources/scripts/merge.py\", \"-o\", self.fname_merged, self.fname1, self.fname2])\n\tdef tearDown(self):\n\t\tfor f in [self.fname1, self.fname2, self.fname_merged]:\n\t\t\tos.unlink(f)\n\tdef testMergedAlignment(self):\n\t\thdf1 = tables.open_file(self.fname1)\n\t\thdf2 = tables.open_file(self.fname2)\n\t\thdfmerge = tables.open_file(self.fname_merged)\n\n\t\ttabs = []\n\t\tfor i in range(10):\n\t\t\tfor j in 
range(2):\n\t\t\t\ttry:\n\t\t\t\t\ttabs.append(hdf1.get_node('/s%de%d' % (j,i)))\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\tnrows1 = tabs[0].nrows\n\t\ttabs = []\n\t\tfor i in range(10):\n\t\t\tfor j in range(2):\n\t\t\t\ttry:\n\t\t\t\t\ttabs.append(hdf2.get_node('/s%de%d' % (j,i)))\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\tnrows2 = tabs[0].nrows\n\t\tfor tab in tabs:\n\t\t\titab = hdf2.get_node('/__I3Index__/%s' % tab.name)\n\t\t\timtab = hdfmerge.get_node('/__I3Index__/%s' % tab.name)\n\t\t\tmtab = hdfmerge.get_node('/%s' % tab.name)\n\t\t\tself.assertEquals(mtab.nrows, nrows1+nrows2)\n\t\t\tfor i in range(nrows2):\n\t\t\t\trow = tab[i]\n\t\t\t\tmrow = mtab[i+nrows1]\n\t\t\t\tfor field in ['Run', 'Event', 'SubEvent', 'SubEventStream', 'exists']:\n\t\t\t\t\tself.assertEquals(row[field], mrow[field],\n\t\t\t\t\t \"'%s' are equal in row %d (%d != %d)\" % (field, i, \n\t\t\t\t\t row[field], mrow[field]))\n\t\t\t\tirow = itab[i]\n\t\t\t\timrow = imtab[i+nrows1]\n\t\t\t\tfor field in ['Run', 'Event', 'SubEvent', 'SubEventStream', 'exists']:\n\t\t\t\t\tself.assertEquals(irow[field], imrow[field],\n\t\t\t\t\t \"'%s' are equal in row %d (%d != %d)\" % (field, i, \n\t\t\t\t\t irow[field], imrow[field]))\n\t\thdf1.close()\n\t\thdf2.close()\n\t\thdfmerge.close()\n\nif __name__ == \"__main__\":\n\tunittest.main()\n\t\n\n","sub_path":"tableio/resources/test/test_subevents.py","file_name":"test_subevents.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"533231834","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nЗагрузка JSON файла\n\npython filem/samples/load_json.py --file путь_к_файлу_JSON [--lines 0 --create --no_clear_shell]\n\"\"\"\n\n# ######################################################################################################################\n# Импорт необходимых инструментов\n# ######################################################################################################################\nimport argparse # Парсинг аргументов и параметров командной строки\nimport itertools # Итераторы зацикливания\n\n# Персональные\nfrom trml.shell import Shell # Работа с Shell\nfrom filem.json import Json # Работа с JSON\n\n\n# ######################################################################################################################\n# Выполняем только в том случае, если файл запущен сам по себе\n# ######################################################################################################################\ndef main():\n # Построение аргументов командой строки\n ap = argparse.ArgumentParser()\n\n # Добавление аргументов в парсер командной строки\n ap.add_argument('--file', required=True, help='Путь к файлу JSON')\n ap.add_argument('--lines', type=int, default=0, help='Количество строк для отображения')\n ap.add_argument('--create', action='store_true', help='Создание файла в случае его отсутствия')\n ap.add_argument('--no_clear_shell', action='store_false', help='Не очищать консоль перед выполнением')\n\n args = vars(ap.parse_args()) # Преобразование списка аргументов командной строки в словарь\n\n # Очистка консоли перед выполнением\n if args['no_clear_shell'] is True:\n Shell.clear() # Очистка консоли\n\n _json = Json() # Работа с JSON\n data = _json.load(args['file'], args['create']) # Загрузка JSON файла\n\n # JSON файл не загружен\n if data is None:\n return False\n\n # Количество строк для отображения меньше 0\n if args['lines'] < 0:\n return None\n\n # Количество строк для отображения больше значений в загружаемом файле или равно 0\n if args['lines'] is 0 or args['lines'] > len(data):\n args['lines'] = len(data)\n\n data_out = dict(itertools.islice(data.items(), args['lines'])) # Срез элементов словаря\n\n _json.recursive_data_display(data_out) # Рекурсивное отображение данные из словаря\n\n print() # Разрыв\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"filem/filem/samples/load_json.py","file_name":"load_json.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"23620303","text":"import glob\nimport pandas as pd\nimport numpy as np\nimport os\nimport argparse\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"vcf_path\", type=Path)\nparser.add_argument(\"output_path\", type=Path)\nparser.add_argument(\"job\")\nargs = parser.parse_args()\n\n\nvcf_output=glob.glob(os.path.join(args.vcf_path,'*_annotated_snp_mito.vcf'))\n\ncsv_name_annot=args.job+'_annotated_snp_mito.csv'\n\ncsv_output_annot=os.path.join(args.output_path,csv_name_annot)\n\nwith open(vcf_output[0],'r') as f:\n lines = f.readlines()\n\n#Preseving comments from vcf file\n\ncomments = []\nheader=[]\nfor i in range(len(lines)):\n if lines[i].startswith('##'):\n comments.append(lines[i])\n elif lines[i].startswith('#'):\n header.append(lines[i])\n\n#Opening .vcf file as a dataframe\nin_df=pd.read_csv(vcf_output[0], delimiter='\\t', quotechar='\"', quoting=2, comment='#', header=None)\n\n#Dividing the dataframe into the VEP annotation part and the sample part, with vcf formnat data\nin_df.columns=header[0].split()\ndf_A= in_df[['#CHROM','POS','REF','ALT','INFO','FORMAT']].copy()\ndf_B=in_df[in_df.columns[9:]]\n\n#Parsing the VEP annotation\n\ndf_A[['INFO','VEP']]=in_df['INFO'].str.split('CSQ=', expand=True)\ndf_A[['Gene','Feature','SYMBOL','Existing_variation','VARIANT_CLASS','Consequence','cDNA_position','CDS_position','Protein_position','Amino_acids','HGVSc','HGVSp','BIOTYPE','IMPACT','CLIN_SIG','PolyPhen','SIFT','gnomAD_AF','CADD_PHRED','CADD_RAW','MutationTaster_pred']]=df_A['VEP'].str.split('|',expand=True)\n\n\n#Replacing GnomAD empty cells with '.'\n\ndf_A['gnomAD_AF']=df_A['gnomAD_AF'].replace(r'\\s+',np.nan,regex=True).replace('','.')\n\n\n\n#Separating heteroplasmy levels of the variants \n\nflexcols = df_B.columns.tolist()\nnew_cols = []\n\n\nfor col in flexcols:\n new_cols.append(in_df[col].str.split(':').str[1].rename(col))\n\n\n#Write the output file into output directory \n\ncombined=pd.concat([df_A]+new_cols,axis=1)\n\n\n\nwith open(csv_output_annot, 'w') as f:\n combined.to_csv(f, index=False)\n\n\n","sub_path":"format_mito_ex.py","file_name":"format_mito_ex.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"249884769","text":"'''\nCreated on Feb 28, 2019\n\n@author: mrane\n'''\n\nimport pytest\nfrom core.configuration import CONFIG\nfrom core.browsers.web_drivers import WebDrivers\n\n@pytest.mark.usefixtures(\"web_driver\")\nclass WebTest():\n \n @pytest.fixture(scope=CONFIG.get(\"tests.browser.scope\", \"class\"))\n def web_driver(self, request):\n '''\n This fixture contains the set up and tear down code for each test.\n \n '''\n self.driver = WebDrivers().get()\n request.cls.driver = self.driver \n yield \n # Close browser window:\n self.driver.quit()\n","sub_path":"core/web/webtest.py","file_name":"webtest.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"345773722","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom django.db.models import Max\n\nfrom operator import itemgetter\n\nfrom .models import Player, Round, Team\nfrom .forms import RoundsForm, JointScoreFormPlayer1, JointScoreFormPlayer2\n\nclass tableEntry():\n name = ''\n wins = 0\n losses = 0\n draws = 0\n points = 0\n played = 0\n pct = 0\n diff = 0\n\n def __init__(self, wname):\n self.name = wname\n\ndef boolToText(b):\n if b == 'True':\n return 'Pobeda'\n else:\n return 'Kita'\n\ndef getScore(wplayers, wrounds):\n score = list()\n\n for p in wplayers:\n pTableEntry = tableEntry(p.name)\n\n for r in wrounds:\n if p.id in r.team1.players or p.id in r.team2.players:\n if r.teamWon is None:\n pTableEntry.draws = pTableEntry.draws + 1\n elif p.id in r.teamWon.players:\n pTableEntry.wins = pTableEntry.wins + 1\n else:\n pTableEntry.losses = pTableEntry.losses + 1\n\n pTableEntry.points = 5 * pTableEntry.wins + 3 * pTableEntry.draws + 1 * pTableEntry.losses\n pTableEntry.played = pTableEntry.wins + pTableEntry.draws + pTableEntry.losses\n pTableEntry.diff = pTableEntry.wins - pTableEntry.losses\n\n if pTableEntry.played == 0:\n pTableEntry.pct = 0\n else:\n pTableEntry.pct = round(100 * (pTableEntry.wins + pTableEntry.draws * 0.5) / pTableEntry.played, 2)\n\n score.append(pTableEntry)\n\n score = sorted(score, key = lambda x: (x.points, x.name), reverse=True)\n\n return score\n\ndef getJointScore(wplayers, wrounds):\n score = list()\n\n p0 = wplayers[0]\n p1 = wplayers[1]\n\n pTableEntry = tableEntry(p0.name)\n\n for r in wrounds:\n if (p0.id in r.team1.players and p1.id in r.team1.players) or (p0.id in r.team2.players and p1.id in r.team2.players):\n if r.teamWon is None:\n pTableEntry.draws = pTableEntry.draws + 1\n elif p0.id in r.teamWon.players:\n pTableEntry.wins = pTableEntry.wins + 1\n else:\n pTableEntry.losses = pTableEntry.losses + 1\n\n pTableEntry.points = 5 * pTableEntry.wins + 3 * pTableEntry.draws + 1 * pTableEntry.losses\n pTableEntry.played = pTableEntry.wins + pTableEntry.draws + pTableEntry.losses\n pTableEntry.diff = pTableEntry.wins - pTableEntry.losses\n\n if pTableEntry.played == 0:\n pTableEntry.pct = 0\n else:\n pTableEntry.pct = round(100 * (pTableEntry.wins + pTableEntry.draws * 0.5) / pTableEntry.played, 2)\n\n score.append(pTableEntry)\n\n return score\n\ndef getRound(r):\n t1 = Team.objects.get(id = r.team1.id)\n t2 = Team.objects.get(id = r.team2.id)\n\n names = [str(Player.objects.get(id = x)) for x in t1.players] + [str(Player.objects.get(id = x)) for x in t2.players]\n\n if r.teamWon is None:\n strings = ['Pola' for x in names]\n else:\n bools = [str(x in r.teamWon.players) for x in t1.players] + [str(x in r.teamWon.players) for x in t2.players]\n strings = [boolToText(x) for x in bools]\n\n return [(x[0], x[1]) for x in list(zip(names, strings))]\n\n# Create your views here.\ndef index(request):\n currentSeason = Round.objects.latest('date').season\n players = Player.objects.all()\n rounds = Round.objects.filter(season = currentSeason)\n\n currentRound = [x for x in sorted(rounds, key = lambda y: (y.id), reverse = True)][0]\n jointPlayer1Id = Player.objects.get(id = 1)\n jointPlayer2Id = Player.objects.get(id = 6)\n\n roundSelectForm = RoundsForm()\n jointScoreForm1 = JointScoreFormPlayer1()\n jointScoreForm2 = JointScoreFormPlayer2()\n\n if request.method == 'POST':\n if 'selectedPlayer1' in request.POST and 'selectedPlayer2' in request.POST:\n jointScoreForm1 = 
JointScoreFormPlayer1(request.POST)\n jointScoreForm2 = JointScoreFormPlayer2(request.POST)\n jointPlayer1Id = Player.objects.get(name = request.POST['selectedPlayer1'])\n jointPlayer2Id = Player.objects.get(name = request.POST['selectedPlayer2'])\n\n if 'selectedRound' in request.POST:\n roundSelectForm = RoundsForm(request.POST)\n currentRound = Round.objects.get(id = request.POST['selectedRound'])\n\n r = getRound(currentRound)\n score = getScore(players, rounds)\n\n jointPlayers = [Player.objects.get(id = jointPlayer1Id.id), Player.objects.get(id = jointPlayer2Id.id)]\n jointScore = getJointScore(jointPlayers, rounds)\n\n template = loader.get_template('tabela/index.html')\n context = {\n 'score': score,\n 'roundSelectForm': roundSelectForm,\n 'jointScoreForm1': jointScoreForm1,\n 'jointScoreForm2': jointScoreForm2,\n 'jointScore': jointScore,\n 'roundTable': r\n }\n\n return HttpResponse(template.render(context, request))","sub_path":"tabela/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
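getScore's league table awards 5 points per win, 3 per draw and 1 per loss, and pct counts a draw as half a win. A quick check of that arithmetic:

    wins, draws, losses = 3, 1, 2
    points = 5 * wins + 3 * draws + 1 * losses           # 20
    played = wins + draws + losses                       # 6
    pct = round(100 * (wins + draws * 0.5) / played, 2)  # 58.33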
+{"seq_id":"372036846","text":"from __future__ import unicode_literals\n\nfrom copy import deepcopy\nimport datetime\nimport logging\nimport re\nimport string\n\nfrom django.db import transaction\nfrom django.db.models import Q, Count\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nimport numpy as np\nfrom sklearn.datasets.base import Bunch\nfrom sklearn.feature_extraction.text import (\n CountVectorizer,\n TfidfTransformer,\n)\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom easy_pdf.rendering import render_to_pdf_response\nfrom xlsxwriter.workbook import Workbook\n\nfrom core.forms import UploadExcelFileForm\nfrom core.models import (\n Amenity,\n Business,\n Sector_DTI_Files,\n Sector_DTI_NCCP,\n Status,\n Sector_DTI_NCCP_Dataset,\n)\n\ntry:\n import cStringIO as StringIO\nexcept ImportError:\n import StringIO\n\nlogger = logging.getLogger(__name__)\n\ndef upload_xls(request):\n if request.method == 'POST':\n form = UploadExcelFileForm(request.POST, request.FILES)\n if form.is_valid():\n filehandle = request.FILES.get('file')\n xls_array = np.array(filehandle.get_array())\n\n logger.info(\"Uploading and parsing file \\'{}\\'.\".format(filehandle._name))\n\n year_issued = None\n start_create = False\n current_establishment = None\n file_sector = None\n file_sector_code = None\n file_status = None\n\n file_sector_code = get_sector_code_from_file(filehandle._name)[0]\n file_sector = Sector_DTI_Files.objects.filter(code=file_sector_code).first()\n\n # For fetching the status defined from the filename. Defaults to \"New\" status\n if 'renewal' in filehandle._name.split('.')[0].lower():\n file_status = Status.objects.get(id=2)\n else:\n file_status = Status.objects.get(id=1)\n\n with transaction.atomic():\n for row in xls_array:\n if not start_create:\n secstat_cell = re.split('[\\s()]+', unicode(row[0]).lower())\n\n if string.join(secstat_cell[:2], '') == 'listof':\n year_issued = [int(x) for x in secstat_cell if x.isdigit() and (1000 < int(x)< 9999)]\n\n # For fetching the sector defined inside the file\n if not file_sector:\n file_sector = get_sector_from_file(secstat_cell, file_sector_code)\n\n check_taxpayer = unicode(row[1]).translate(dict.fromkeys(map(ord, string.punctuation))).lower()\n if check_taxpayer == 'taxpayers name':\n start_create = True\n logger.info('Processing the XLS file \\'{}\\''.format(filehandle._name))\n continue\n\n current_establishment = create_business_and_amenity(\n row,\n file_sector,\n file_status,\n current_establishment,\n year_issued\n )\n\n return HttpResponseRedirect(reverse('admin:core_business_changelist'))\n\n else:\n form = UploadExcelFileForm()\n\n return render(\n request,\n 'upload_xls_file.html',\n {'form': form}\n )\n\n\ndef export_excel(request, filters):\n output = StringIO.StringIO()\n\n book = Workbook(output)\n sheet = book.add_worksheet('LIST OF BUSINESSES')\n\n bold = book.add_format({'bold': True})\n money = book.add_format({'num_format': '\"Php\" #,##0.00'})\n\n\n sheet.write(0, 0, 'LIST OF BUSINESSES', bold)\n columns = [\"Taxpayer's Name\", \"Business Name\", \"Telephone Number\", \"Business Address\",\n \"Barangay\", \"Type of Business\", \"Type of Business Ownership\", \"Capital\",\n \"Year Issued\", \"Status\", \"Sector From DTI Files\", \"Sector From DTI-NCCP\"\n ]\n for item in xrange(0,len(columns)):\n sheet.write(2, item, columns[item],bold)\n\n businesses = Business.objects.all()\n businesses = 
filter_businesses(businesses, filters)\n\n column_width = compare_column_width([0]*12,columns)\n for index, business in enumerate(businesses):\n business_object = [\n business.taxpayer_name,\n business.business_name,\n business.tel_number,\n business.address,\n business.barangay,\n business.get_business_type_display() if business.business_type else \"\",\n business.get_ownership_type_display() if business.ownership_type else \"\",\n business.capital,\n business.year,\n business.status.name if business.status else \"\",\n business.sector_dti_files.name if business.sector_dti_files else \"\",\n business.sector_dti_nccp.name if business.sector_dti_nccp else \"\",\n ]\n for object_index in xrange(len(business_object)):\n if object_index == 7:\n sheet.write(index+3,object_index,business_object[object_index], money)\n else:\n sheet.write(index+3,object_index,business_object[object_index])\n column_width = compare_column_width(column_width, business_object)\n\n for column in xrange(len(column_width)):\n column_name = [\"A:A\", \"B:B\", \"C:C\", \"D:D\", \"E:E\", \"F:F\", \"G:G\", \"H:H\", \"I:I\", \"J:J\", \"K:K\", \"L:L\"]\n sheet.set_column(column_name[column], column_width[column])\n sheet.protect()\n book.close()\n\n logger.info(\"Exporting list to XLS file.\")\n\n # construct response\n filename = \"dti-sordas-list-of-businesses-{}\".format(datetime.datetime.now().date())\n output.seek(0)\n response = HttpResponse(output.read(), content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n response['Content-Disposition'] = \"attachment; filename={}.xlsx\".format(filename)\n\n return response\n\n\ndef export_pdf(request, filters):\n businesses = Business.objects.all()\n businesses = filter_businesses(businesses, filters)\n\n business_list = []\n for business in businesses:\n business_object = [\n business.taxpayer_name,\n business.business_name,\n business.tel_number,\n business.address,\n business.barangay,\n business.get_business_type_display() if business.business_type else \"\",\n business.get_ownership_type_display() if business.ownership_type else \"\",\n unicode('Php {:,.2f}'.format(business.capital)) if business.capital else \"\",\n business.year,\n business.status.name if business.status else \"\",\n business.sector_dti_files.name if business.sector_dti_files else \"\",\n business.sector_dti_nccp.name if business.sector_dti_nccp else \"\",\n ]\n business_list.append(business_object)\n\n logger.info(\"Exporting list to PDF (read-only) file.\")\n\n return render_to_pdf_response(request, 'pdf/pdf_business_list.html', {'business_list':business_list})\n\n\ndef classify_business_to_sectors(request, filters):\n businesses = Business.objects.filter(is_verified=False)\n businesses = filter_businesses(businesses, filters)\n\n if not businesses:\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n data = []\n target = []\n target_names = []\n sector_nccp = Sector_DTI_NCCP.objects.all()\n\n for target_count, sector in enumerate(sector_nccp, start=0):\n dataset = Sector_DTI_NCCP_Dataset.objects.filter(sector_dti_nccp=sector).prefetch_related()\n\n for entry in dataset:\n data.append(entry.text)\n target.append(target_count)\n target_names.append(sector.name)\n\n amenities = Amenity.objects.filter(establishment__is_verified=True, establishment__sector_dti_nccp=sector)\n amenities_str = \" \".join(unicode(amenity) for amenity in amenities)\n\n data.append(amenities_str)\n target.append(target_count)\n target_names.append(sector.name)\n\n section_data = 
Bunch(data=data, target=np.array(target), target_names=target_names)\n\n    classifier = Pipeline([('vect', CountVectorizer()),\n                           ('tfidf', TfidfTransformer()),\n                           ('clf', SGDClassifier(loss='hinge', penalty='l2',\n                                                 alpha=1e-3, n_iter=5, random_state=42)),\n    ])\n\n    business_details = []\n    for business in businesses:\n        amenities = Amenity.objects.filter(establishment=business)\n        amenities_str = \" \".join(unicode(amenity) for amenity in amenities)\n\n        business_details.append(\"{} {}\".format(\n            unicode(amenities_str),\n            unicode(business.sector_dti_files)\n        ))\n\n    section_data_clf = classifier.fit(section_data.data, section_data.target)\n    predicted = classifier.predict(business_details)\n\n    with transaction.atomic():\n        for business, predicted_sector in zip(businesses, predicted):\n            business.sector_dti_nccp = sector_nccp[predicted_sector]\n            business.save()\n\n            logger.info('\\'{}\\' assigned to {}: \\'{}\\''.format(business.business_name, sector_nccp[predicted_sector].code, sector_nccp[predicted_sector].name))\n\n    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef get_sector_code_from_file(file_name):\n    if str(file_name).find('completed') != -1:\n        return re.findall('(.+?)completed', str(file_name))\n\n    elif str(file_name).find('renewal') != -1:\n        return re.findall('(.+?)renewal', str(file_name))\n\n\ndef compare_column_width(current_width, item):\n    for column in xrange(len(current_width)):\n        if item[column]:\n            if not isinstance(item[column], basestring):\n                if column == 7:\n                    item[column] = 'Php {:,.2f}'.format(item[column])\n                item[column] = unicode(item[column])\n            if len(item[column]) > current_width[column]:\n                current_width[column] = len(item[column])\n    return current_width\n\n\ndef filter_businesses(business_list, filters):\n    filters = [item.split('=') for item in filters.strip('?').split('&')]\n    default_sorting = True\n\n    if len(filters[0][0]):\n        logger.info(\"Filtering the list of businesses according to {}\".format(\", \".join([x for (x,y) in filters])))\n        for item in filters:\n            if item[0] == 'q' and item[1]:\n                business_filter_fields = ['taxpayer_name', 'business_name', 'tel_number', 'address', 'barangay',\n                    'business_type', 'ownership_type', 'capital', 'year', 'status__name', 'sector_dti_files__name',\n                    'sector_dti_nccp__name',]\n                entry_query = get_search_query(item[1], business_filter_fields)\n                business_list = business_list.filter(entry_query)\n            if '__exact' in item[0]:\n                if '__id__exact' in item[0]:\n                    field = {item[0].replace('__id__exact',''):item[1]}\n                    business_list = business_list.filter(**field)\n                else:\n                    field = {item[0].replace('__exact',''):item[1]}\n                    business_list = business_list.filter(**field)\n            if item[0] == 'year':\n                field = {'year': item[1]}\n                business_list = business_list.filter(**field)\n            if item[0] == 'barangay':\n                field = {'barangay': item[1].replace('+', ' ')}\n                business_list = business_list.filter(**field)\n            if item[0] == 'capital':\n                if item[1] == 'micro':\n                    business_list = business_list.filter(capital__lt=3000000)\n                elif item[1] == 'small':\n                    business_list = business_list.filter(capital__gte=3000000,capital__lte=15000000)\n                elif item[1] == 'medium':\n                    business_list = business_list.filter(capital__gt=15000000,capital__lte=100000000)\n                elif item[1] == 'large':\n                    business_list = business_list.filter(capital__gt=100000000)\n            if item[0] == 'o':\n                sort_fields = item[1].split(\".\")\n                business_sort_fields = ['taxpayer_name', 'business_name', 'tel_number', 'address', 'barangay',\n                    'business_type', 'ownership_type', 'capital', 'year', 'status', 
'sector_dti_files',\n 'sector_dti_nccp', 'is_verified',]\n for index in xrange(len(sort_fields)):\n if sort_fields[index]:\n default_sorting = False\n if int(sort_fields[index]) > 0:\n sort_fields[index] = business_sort_fields[int(sort_fields[index]) - 1]\n else:\n sort_fields[index] = \"-{}\".format(business_sort_fields[abs(int(sort_fields[index])) - 1])\n business_list = business_list.order_by(*sort_fields)\n\n logger.info(\"Finished filtering.\")\n\n if default_sorting:\n business_list = business_list.order_by('taxpayer_name')\n logger.info(\"No filters were specified.\")\n\n return business_list\n\n\ndef create_business_and_amenity(file_line, file_sector, file_status, current_establishment, year_issued):\n try:\n if file_line[0].isdigit():\n\n business_capital = unicode(file_line[5]).translate(dict.fromkeys(map(ord, ',')))\n\n address_number = unicode(file_line[3]).split(' ')\n business_address = deepcopy(address_number)\n business_number = address_number.pop()\n\n if not business_number.replace('-', '').isdigit():\n business_number = None\n business_address = string.join(business_address)\n\n else:\n if len(business_number.replace('-', '')) < 7:\n business_number = None\n business_address = string.join(business_address)\n else:\n business_address = string.join(address_number)\n\n business = Business.objects.create(\n taxpayer_name = file_line[1],\n business_name = file_line[2],\n address = business_address,\n tel_number = business_number,\n barangay = file_line[4],\n capital = business_capital,\n sector_dti_files = file_sector,\n status= file_status\n )\n\n if year_issued:\n business.year = year_issued[0]\n business.save()\n\n logger.info('Creating Business \\'{}({})\\''.format(file_line[1], file_line[2]))\n\n current_establishment = business\n\n elif file_line[0] == '*':\n Amenity.objects.create(\n name= file_line[1],\n establishment = current_establishment\n )\n\n logger.info('Creating Amenity \\'{}\\' for Business \\'{}\\''.format(file_line[1], current_establishment.business_name))\n\n return current_establishment\n\n except ValueError:\n logger.error('Error while creating the database entry for {}'.format(file_line[1]), exc_info=True)\n\n\ndef get_status_from_file(file_desc):\n from_file_status = re.findall('\\d*\\s*\\((.+?)\\)', str(file_desc))[0]\n number_of_matches = 0\n file_status_words = from_file_status.split()\n file_status = None\n\n for word in file_status_words:\n file_status = Status.objects.filter(name__icontains=word).first()\n if file_status:\n number_of_matches += 1\n\n if not float(number_of_matches)/len(file_status_words) > 0.5:\n file_status = None\n\n if not file_status:\n file_status = Status.objects.create(\n name=from_file_status.title()\n )\n\n return file_status\n\n\ndef get_sector_from_file(file_desc_array, file_sector_code):\n pointer_start = 2\n if file_desc_array[2] == 'registered':\n pointer_start = 3\n\n pointer_end = pointer_start\n for word in file_desc_array[pointer_start:]:\n if word == 'in':\n break\n pointer_end += 1\n\n file_sector_name = unicode.title(string.join(file_desc_array[pointer_start:pointer_end], \" \"))\n file_sector = Sector_DTI_Files.objects.filter(name=file_sector_name).first()\n\n if not file_sector:\n file_sector = Sector_DTI_Files.objects.create(\n name = file_sector_name,\n code = file_sector_code\n )\n\n return file_sector\n\ndef normalize_query(query_string,\n findterms=re.compile(r'\"([^\"]+)\"|(\\S+)').findall,\n normspace=re.compile(r'\\s{2,}').sub):\n\n return [normspace('',(t[0] or t[1]).strip()) for t in 
findterms(query_string)]\n\n\ndef get_search_query(query_string, search_fields):\n\n    '''\n    Returns a query, that is a combination of Q objects.\n    That combination aims to search keywords within a model by testing the given search fields.\n    '''\n\n    query = None # Query to search for every search term\n    terms = normalize_query(query_string)\n    for term in terms:\n        or_query = None # Query to search for a given term in each field\n        for field_name in search_fields:\n            q = Q(**{\"%s__icontains\" % field_name: term})\n            if or_query is None:\n                or_query = q\n            else:\n                or_query = or_query | q\n        if query is None:\n            query = or_query\n        else:\n            query = query & or_query\n    return query\n\n\ndef fetch_upper_bound_of_median(businesses, total_count):\n    sorted_businesses = sorted(businesses, key=lambda business: business['num_businesses'], reverse=True)\n    median = int(total_count * 0.5)\n    upper_half_sum = 0\n    upper_bound_businesses = []\n\n    for business in sorted_businesses:\n        upper_half_sum += business['num_businesses']\n        if (median - upper_half_sum) <= 0:\n            break\n\n        business['percent_from_total'] = ( float(business['num_businesses']) / total_count ) * 100\n        upper_bound_businesses.append(business)\n\n    return upper_bound_businesses\n\n\ndef display_analytics(request, **kwargs):\n\n    years = Business.objects.values('year').distinct().order_by('-year')\n\n    if 'year' in kwargs:\n        if not kwargs.get('year').isdigit():\n            return render(request, 'analytics-error.html', {'years':years})\n        year_filter = int(kwargs.get('year'))\n    else:\n        year_filter = int(years.first().get('year'))\n\n    if not {'year':year_filter} in years:\n        return render(request, 'analytics-error.html', {'years':years})\n\n    logger.info(\"Processing Data Analytics for Businesses in the Year {}\".format(year_filter))\n\n    sector_dti_files_data = Sector_DTI_Files.objects.filter(business__year=year_filter).annotate(num_businesses=Count('business')).order_by('-num_businesses').exclude(num_businesses=0).values('name','num_businesses')\n    sector_dti_nccp_data = Sector_DTI_NCCP.objects.filter(business__year=year_filter).annotate(num_businesses=Count('business')).order_by('-num_businesses').exclude(num_businesses=0).values('name','num_businesses')\n    status_data = Status.objects.filter(business__year=year_filter).annotate(num_businesses=Count('business')).order_by('-num_businesses').exclude(num_businesses=0).values('name','num_businesses')\n\n    filtered_businesses = Business.objects.filter(year=year_filter)\n    filtered_business_count = filtered_businesses.count()\n    barangay_data = fetch_upper_bound_of_median(filtered_businesses.values('barangay').annotate(num_businesses=Count('barangay')), filtered_business_count)\n\n    capital_data = [\n        {'capital': 'Micro', 'value': Business.objects.filter(year=year_filter, capital__lt=3000000).count()},\n        {'capital': 'Small', 'value': Business.objects.filter(year=year_filter, capital__gte=3000000,capital__lte=15000000).count()},\n        {'capital': 'Medium', 'value': Business.objects.filter(year=year_filter, capital__gt=15000000,capital__lte=100000000).count()},\n        {'capital': 'Large', 'value': Business.objects.filter(year=year_filter, capital__gt=100000000).count()},\n    ]\n    capital_data = [value for value in capital_data if value['value']>0]\n\n    logger.info(\"Analytics processing done. 
Rendering Analytics...\")\n\n return render(\n request,\n 'analytics.html',\n {\n 'years': years,\n 'sector_dti_files_data': sector_dti_files_data,\n 'sector_dti_nccp_data': sector_dti_nccp_data,\n 'status_data': status_data,\n 'barangay_data': barangay_data,\n 'barangay_size': ( len(barangay_data) * 60 ),\n 'capital_data': capital_data,\n 'year_filter': year_filter,\n }\n )\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"521374101","text":"from base import *\nimport dj_database_url\nimport settings\n\nDEBUG = True\n\nDATABASES = {\n\t'default': {\n\t'ENGINE' : 'django.db.backends.sqlite3',\n\t'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n\t}\n}\n \nDATABASES['default'] = dj_database_url.config(\"CLEARDB_DATABASE_URL\")\n\n\n# Stripe environment variables\nSTRIPE_PUBLISHABLE = os.getenv('STRIPE_PUBLISHABLE', 'pk_test_JIybarkxbD1CZcxAsMjnfZ9a')\nSTRIPE_SECRET = os.getenv('STRIPE_SECRET', 'sk_test_957Asq4H5KfOr53tIVnzoFLJ')\n\n\n#paypal settings\nSITE_URL = 'https://cryptic-savannah-40745.herokuapp.com/'\nPAYPAL_NOTIFY_URL = 'http://1e325a84.ngrok.io/a-very-hard-to-guess-url/'\nPAYPAL_RECEIVER_EMAIL = 'sellsyoustuff@cheep.com'\n\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\n","sub_path":"settings/staging.py","file_name":"staging.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"508837333","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 28 10:29:02 2020\n\n@author: user\n\"\"\"\n\n\n'''\nGiven a m * n matrix grid which is sorted in \nnon-increasing order both row-wise and column-wise. \n\nReturn the number of negative numbers in grid.\n\n \n\nExample 1:\n\nInput: grid = [[4,3,2,-1],[3,2,1,-1],[1,1,-1,-2],[-1,-1,-2,-3]]\nOutput: 8\nExplanation: There are 8 negatives number in the matrix.\nExample 2:\n\nInput: grid = [[3,2],[1,0]]\nOutput: 0\nExample 3:\n\nInput: grid = [[1,-1],[-1,-1]]\nOutput: 3\nExample 4:\n\nInput: grid = [[-1]]\nOutput: 1\n \n\nConstraints:\n\nm == grid.length\nn == grid[i].length\n1 <= m, n <= 100\n-100 <= grid[i][j] <= 100\n'''\nimport numpy as np\nclass Solution(object):\n def countNegatives(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n a=np.array(grid)\n return np.count_nonzero(a < 0)\ny=Solution()\ngrid = [[4,3,2,-1],[3,2,1,-1],[1,1,-1,-2],[-1,-1,-2,-3]]\nprint(y.countNegatives(grid))\ngrid = [[3,2],[1,0]]\nprint(y.countNegatives(grid))\ngrid = [[1,-1],[-1,-1]]\nprint(y.countNegatives(grid))\ngrid = [[-1]]\nprint(y.countNegatives(grid))\n","sub_path":"countNegativeinMatrix.py","file_name":"countNegativeinMatrix.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"331511977","text":"\n\nfrom xai.brain.wordbase.nouns._blackberry import _BLACKBERRY\n\n#calss header\nclass _BLACKBERRIES(_BLACKBERRY, ):\n\tdef __init__(self,): \n\t\t_BLACKBERRY.__init__(self)\n\t\tself.name = \"BLACKBERRIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"blackberry\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_blackberries.py","file_name":"_blackberries.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"647663608","text":"import pandas as pd\n\npath = './data/create_feature.csv'\n\n\n\ndef df_preprocess(path):\n df = pd.read_csv(path, index_col=0, header=0)\n df['trade_date'] = df['trade_date'].astype('datetime64')\n df = df[df['trade_date'] <= pd.datetime.strptime('20190809', '%Y%m%d')]\n df['trade_date'] = df['trade_date'].dt.date\n df = df.set_index('trade_date')\n colnames = df.columns.to_list()\n colnames = list(set(colnames) - set(['000001.SH_pe_y', '000300.SH_pe_y', '000905.SH_pe_y', '399006.SZ_pe_y']))\n colnames = [col for col in colnames if (col[:6] != '399016')]\n df = df[colnames].dropna(axis=0, how='all').fillna(method='ffill', axis=0).dropna(axis=0, how='any')\n for ind in [5, 10, 20, 30, 40, 60, 70, 125, 250, 500, 750]:\n df[[col + '_m' + str(ind) for col in colnames]] = df[colnames].rolling(window=ind, min_periods=1).mean()\n df[[col + '_q' + str(ind) for col in colnames]] = df[colnames].rolling(window=ind, min_periods=1).apply(\n lambda x: len(x[x <= x[-1]]) / len(x), raw=True)\n price_columns = [col for col in colnames if (col[-5:] == 'close')]\n return df, price_columns.to_list()\n\n\n","sub_path":"algorithm/data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"610049504","text":"import sys\nimport usb.core\nimport usb.util\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n \n# procurar o usb cypress fx2\ndev = usb.core.find(idVendor=0x04b4, idProduct=0x8613)\n\n# encontrado?\nif dev is None:\n raise ValueError('Dispositivo nao encontrado')\n\n# ativar config default\ndev.set_configuration()\ndev.set_interface_altsetting(interface = 0, alternate_setting = 1)\n#dev.set_interface_altsetting(interface = 0, alternate_setting = 0)\n\nrst_ep = 1 # EP1 (output endpoint 1, host-2usb) -- usado para reset da FPGA\ncfg_ep = 4 # EP4 (output endpoint 4, host-2usb) -- usado para config da FPGA\n\n# gera reset na FPGA (opional)\n\n#dev.write(rst_ep, bytearray([0x00, 0x01, 0x00])) # reset OFF, ON, OFF\n\n\n#dev.write(cfg_ep, chr(0)) # porque ?\n\nout_ep = 2 # EP2 (output endpoint 2, host-2-usb)\nin_ep = 6 + 128 # EP6 (input endpoint 6, usb-2-host) o bit 7 tem que estar setado\n\n##############################################\n\nwrite_samples = 512\n\nfs = 128000\n\nfreq = 4000 #int(fs/write_samples)\n\nampl = 32767\n\ntheta = 0\nthetax = 0\nthetay = 1*np.pi/2\n\nbuf_len = 512\n\nt = np.arange(0, write_samples)* 1/fs;\ntp = np.linspace(0, write_samples, fs)\n\n# sin_cos\ns_cos = 1*ampl/2 * np.cos(2 * np.pi * freq * (t + theta))+ampl/2\ns_sin = 1*ampl/2 * np.sin(2 * np.pi * freq * (t + theta))+ampl/2\n\nx = s_cos; y = s_sin\n\n# rampa\nr_cos = 1*ampl/2 * signal.sawtooth(2 * np.pi * freq * (t + thetax))+ampl/2\nr_sin = 1*ampl/2 * signal.sawtooth(2 * np.pi * freq * (t + thetay))+ampl/2\n\n#x = r_cos; y = r_sin\n\n#plt.plot(x)\n#plt.plot(y)\n\ndata_out_xy = bytearray(buf_len)\n\nfor i in range(int(buf_len/4)):\n\t\n\ttempx = (int(x[i])).to_bytes(2,byteorder=\"big\")\t\n\ttempy = (int(y[i])).to_bytes(2,byteorder=\"big\")\n\n\tdata_out_xy[4*i] = tempx[0]\n\tdata_out_xy[4*i+1] = tempx[1]\n\tdata_out_xy[4*i+2] = tempy[0]\n\tdata_out_xy[4*i+3] = tempy[1]\n\t\t\n\n#print(len(data_out_xy))\n\n##############################################\n\ndev.write(out_ep, data_out_xy)\n\n#plt.plot(x)\n#plt.plot(y)\n\n#plt.show()\n\n##############################################\n\n#read_samples = 512\nread_samples = write_samples\n\n#data_in = dev.read(in_ep,read_samples) # clear buffer\n\ndata_in = dev.read(in_ep,read_samples)\n\n#print(data_in)\n\ndata_read = np.zeros((int(read_samples/2),), dtype=int)\n\ndata_real = np.zeros((int(read_samples/4),), dtype=int)\ndata_imag = np.zeros((int(read_samples/4),), dtype=int)\n\nfor i in range(int(read_samples/4)):\n\tdata_real[i] = int(data_in[4*i])*256+int(data_in[4*i+1])*1\n\tdata_imag[i] = int(data_in[4*i+2])*256+int(data_in[4*i+3])*1\n\t#print(i)\n\n# remove o offset (centraliza em zero)\n\noffset = 16384*1\n\n#for i in range(len(data_real)):\n#\tif data_real[i] > offset:\n#\t\tdata_real[i] = data_real[i] - offset*2\n\ndata_real = data_real - offset\n\n#for i in range(len(data_imag)):\n#\tif data_imag[i] > offset:\n#\t\tdata_imag[i] = data_imag[i] - offset*2\n\ndata_imag = data_imag - offset\n\n# gera arquivo texto (talvez para o octave)\n\n#f= open(\"adc.txt\",\"w+\")\n\n#for i in data_in:\n#\tf.write(\"%d\\r\\n\" % i)\n#\t#print(i)\n\t\n#f.close()\n\nplt.plot(data_real)\nplt.plot(data_imag)\n#plt.plot(data_read)\n#plt.plot(data_in)\n\nplt.show()\n\n","sub_path":"pyusb/fpga_loop_back.py","file_name":"fpga_loop_back.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"630922358","text":"import torch\nfrom network import ConvTasNet\nfrom uPIT_Loss import uPIT\nimport matplotlib.pyplot as plt\nimport soundfile as sf\nimport yaml\nimport tensorboardX as tbx\nimport os\nfrom tqdm import tqdm\n\n\nclass Trainer():\n def __init__(self,model,config,time):\n self.model = model\n self.cur_epoch = 0\n self.name = config['name']\n self.C = config['network']['C']\n self.config = config\n self.dir_save = os.path.join('./checkpoint',self.name,time)\n os.makedirs(self.dir_save)\n\n # setting about optimizer\n opt_name = config['optim']['name']\n weight_decay = config['optim']['weight_decay']\n lr = config['optim']['lr']\n momentum = config['optim']['momentum']\n\n optimizer = getattr(torch.optim, opt_name)\n if opt_name == 'Adam':\n self.optimizer = optimizer(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n else:\n self.optimizer = optimizer(self.model.parameters(), lr=lr, weight_decay=weight_decay, momentum=momentum)\n self.clip_norm = config['optim']['clip_norm'] if config['optim']['clip_norm'] else 0\n # setting about machine\n self.device = torch.device(config['gpu'])\n self.parallel = config['parallel']\n if config['training']['resume']['state']: \n self.load_checkpoint(config)\n\n self.model = self.model.to(self.device)\n self.model = torch.nn.DataParallel(self.model, device_ids=[0,1,2])\n self.total_epoch = config['training']['total_epoch']\n self.early_stop = config['training']['early_stop']\n\n def train(self, epoch, dataloader):\n self.model.train()\n num_batchs = len(dataloader)\n total_loss = 0\n for mix, s in tqdm(dataloader):\n mix = mix.to(self.device).detach()\n s = s.to(self.device).detach()\n est_s = self.model(mix)\n epoch_loss = uPIT(est_s,s)\n\n self.optimizer.zero_grad()\n epoch_loss.backward()\n if self.clip_norm:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(),self.clip_norm)\n self.optimizer.step()\n total_loss += epoch_loss.detach()\n\n total_loss = total_loss/num_batchs\n\n return total_loss\n\n def validation(self, dataloader):\n self.model.eval()\n num_batchs = len(dataloader)\n total_loss = 0\n with torch.no_grad():\n for mix, s in tqdm(dataloader):\n mix = mix.to(self.device)\n s = s.to(self.device)\n est_s = self.model(mix)\n total_loss += uPIT(est_s,s)\n return total_loss/num_batchs\n\n def est_test(self,mixture):\n est_s = self.model(mix)\n print(est_s.shape)\n\n\n \n def run(self,train_dataloader,valid_dataloader):\n train_loss = []\n val_loss = []\n print('cur_epoch',self.cur_epoch)\n\n writer = tbx.SummaryWriter(self.dir_save)\n self.save_checkpoint(self.cur_epoch,best=False)\n v_loss = self.validation(valid_dataloader)\n best_loss = 1e10\n no_improve = 0\n # starting training part\n while self.cur_epoch < self.total_epoch:\n self.cur_epoch += 1\n t_loss = self.train(self.cur_epoch, train_dataloader)\n print('epoch{0}:train_loss{1}'.format(self.cur_epoch,t_loss))\n v_loss = self.validation(valid_dataloader)\n print('epoch{0}:valid_loss{1}'.format(self.cur_epoch,v_loss))\n\n writer.add_scalar('t_loss', t_loss, self.cur_epoch)\n writer.add_scalar('v_loss', v_loss, self.cur_epoch)\n\n if v_loss >= best_loss:\n no_improve += 1\n else:\n best_loss = v_loss\n no_improve = 0\n self.save_checkpoint(self.cur_epoch,best=True)\n \n if no_improve == self.early_stop:\n break\n self.save_checkpoint(self.cur_epoch,best=False)\n \n writer.close()\n \n\n \n def save_checkpoint(self, epoch, best=True):\n self.model.to('cpu')\n print('save model epoch:{0} as {1}'.format(epoch,\"best\" if best else \"last\"))\n 
path_save_model = os.path.join(self.dir_save,'{0}.pt'.format('best' if best else 'last'))\n\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.module.state_dict(),\n 'optim_state_dict': self.optimizer.state_dict()\n },\n path_save_model)\n\n self.model.to(self.device)\n\n with open(os.path.join(self.dir_save,'config_backup.yaml'),mode='w') as f:\n f.write(yaml.dump(self.config))\n\n\n def load_checkpoint(self,config):\n print('load on:',self.device)\n\n ckp = torch.load(config['training']['resume']['path'],map_location=torch.device('cpu'))\n self.cur_epoch = ckp['epoch']\n self.model.load_state_dict(ckp['model_state_dict'])\n self.optimizer.load_state_dict(ckp['optim_state_dict'])\n\n self.model = self.model.to(self.device)\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.device)\n\n print('training resume epoch:',self.cur_epoch)\n\nif __name__ == \"__main__\":\n with open('./config.yaml', 'r') as yml:\n config = yaml.safe_load(yml)\n\n s1,_ = sf.read(\"/data1/h_munakata/wsj0/2speakers/min/tr/s1/01aa010b_0.97482_209a010p_-0.97482.wav\")\n s2,_ = sf.read(\"/data1/h_munakata/wsj0/2speakers/min/tr/s2/01aa010b_0.97482_209a010p_-0.97482.wav\")\n \n s1 = torch.tensor(s1,dtype=torch.float32).view([1,-1])\n s2 = torch.tensor(s2,dtype=torch.float32).view([1,-1])\n\n s = torch.cat([s1.view([1,-1,1]), s2.view([1,-1,1])],dim=2)\n mixture = s1+s2\n\n ctn = ConvTasNet(config)\n\n optimizer = torch.optim.Adam(ctn.parameters(),lr=0.001)\n\n for i in tqdm(range(100)):\n est_s = ctn(mixture)\n loss = uPIT(est_s, s)\n print(loss)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n plt.subplot(2,3,1)\n plt.plot(s[:,:,0].view(-1).detach().numpy())\n plt.subplot(2,3,2)\n plt.plot(s[:,:,1].view(-1).detach().numpy())\n\n plt.subplot(2,3,4)\n plt.plot(est_s[:,:,0].view(-1).detach().numpy())\n plt.subplot(2,3,5)\n plt.plot(est_s[:,:,1].view(-1).detach().numpy())\n\n plt.savefig('./test.png')","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"230976057","text":"'''# In[making the matrix --> and practicing]:\n\nimport os\n\nFoldList = os.popen('ls -p /home/flucas/Documents/Bioinformatics/week3/Riboswitch_sequences/llmg_0079 | grep /').read().split()\n\nmatrix=[]\nfor i in range(len(FoldList)):\n os.chdir(\"/home/flucas/Documents/Bioinformatics/week3/Riboswitch_sequences/llmg_0079/{0}\".format(FoldList[i]))\n pngList = os.popen('ls | grep png').read().split()\n if len(pngList) == 0:\n continue\n else:\n matrix.append(pngList)\n\nfor i in range(len(matrix)):\n for j in range(len(matrix[i])):\n print(matrix[i][j], end=\" \")\n print()'''\n\n# In[Display Result with the pygame lib]:\n\n'''use as a base to write a scritp that can change the folders.'''\n\"\"\"\nSample Python/Pygame Programs\nSimpson College Computer Science\nhttp://programarcadegames.com/\nhttp://simpson.edu/computer-science/\n\"\"\"\nimport pygame\nimport os\n\n # Define some colors in RGB format\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\n#make the matrix here to prevent doing this 60* per sec in the main while loop\nFoldList = os.popen('ls -p /home/manager/Bioinformatics-Project-Ribofind/Riboswitch_sequences/llmg_0079 | grep /').read().split()\nmatrix=[]\nfor i in range(len(FoldList)):\n os.chdir(\"/home/manager/Bioinformatics-Project-Ribofind/Riboswitch_sequences/llmg_0079/{0}\".format(FoldList[i]))\n pngList = os.popen('ls | grep png').read().split()\n if len(pngList) == 0:\n continue\n else:\n matrix.append(pngList)\n\n#draw matrix function. to draw one pic when defined here image should be called only once instead of 60* per sec\ndef draw_matrix(i, j):\n background_image = pygame.image.load(\"/home/manager/Bioinformatics-Project-Ribofind/Riboswitch_sequences/llmg_0079/{0}{1}\".format(FoldList[i+10], matrix[i][j])).convert()\n screen.blit(background_image, [0,0])\n\n# Setup pygame \npygame.init()\n# Set the width and height of the screen [width,height]\nsize = [591, 800]\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Bioinformatics project\")\n# Loop until the user clicks the close button.\ndone = False\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n# Hide the mouse cursor\npygame.mouse.set_visible(True)\n# base speed of changing images\niadd = 0\njadd = 0\n# start of image in the list\ni = 0\nj = 0\ndrawinfo = 0\n# -------- Main Program Loop -----------\\\nwhile not done:\n# --- Event Processing\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n # User pressed down on a key\n elif event.type == pygame.KEYDOWN:\n # Figure out if it was an arrow key. If so adjust image.\n if event.key == pygame.K_LEFT:\n iadd = -1\n elif event.key == pygame.K_RIGHT:\n iadd = 1\n elif event.key == pygame.K_UP:\n jadd = 1\n elif event.key == pygame.K_DOWN:\n jadd = -1\n elif event.key == pygame.K_j:\n drawinfo = 1\n # User let up on a key'''\n elif event.type == pygame.KEYUP:\n # If it is an arrow key, reset vector back to zero\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n iadd = 0\n elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n jadd = 0\n elif event.key == pygame.K_j:\n drawinfo = 0\n \n # --- Program Logic\n # change pos in list.\n i = i + iadd\n if i <= 0:\n i = 0\n if i >= (len(matrix)-1):\n i = (len(matrix)-1)\n j = j + jadd\n if j <= 0:\n j = 0\n if j >= (len(matrix[i])-1):\n j = (len(matrix[i])-1)\n # --- Drawing Code\n # First, clear the screen to WHITE. 
Don't put other drawing commands\n    # above this, or they will be erased with this command.\n    screen.fill(WHITE)\n    draw_matrix(i, j)\n    \n    #showing where you are in the list and which file it is\n    font = pygame.font.SysFont('Calibri', 20, True, False)\n    text1 = font.render(\"SeqLen: {0}/{1}\".format(i,(len(matrix)-1)),True, BLACK)\n    text2 = font.render(\"Fold: {0}/{1}\".format(j,(len(matrix[i])-1)),True, BLACK)\n    text3 = font.render(\"{0}\".format(FoldList[i + 10]),True, BLACK)\n    screen.blit(text1, [350,2])\n    screen.blit(text2, [350,24])\n    screen.blit(text3, [350,46])\n    #draw additional info of sequence, nothing important yet\n    if drawinfo == 1:\n        pygame.draw.rect(screen, BLACK, [0, 600, 591, 800])\n        text4 = font.render(\"info about this folding, gene, locus tag\",True, WHITE)\n        text5 = font.render(\"additional info: calculated structures e.g. basenr-basnr = terminator\",True, WHITE)\n        screen.blit(text4, [10, 610])\n        screen.blit(text5, [10, 635])\n    # update the screen with what we've drawn.\n    pygame.display.flip()\n    # Limit frames per second\n    clock.tick(10)\n# Close the window and quit.\npygame.quit()\n","sub_path":"DisplayRiboswitch.py","file_name":"DisplayRiboswitch.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"341551781","text":"# -*- coding: utf-8 -*-\n# @COPYRIGHT_begin\n#\n# Copyright [2015] Michał Szczygieł, M4GiK Software\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @COPYRIGHT_end\n\nfrom __future__ import unicode_literals\n\nfrom os.path import join\nfrom PIL import Image, ImageOps\nimport StringIO\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom core.utils.exception import DevCloudException\n\n\ndef get_upload_path(instance, filename):\n return join('pictures', str(instance.id), filename)\n\n\nclass Users(models.Model):\n id = models.AutoField(primary_key=True)\n login = models.CharField(unique=True, max_length=45)\n password = models.CharField(max_length=255)\n name = models.CharField(max_length=45, blank=True)\n lastname = models.CharField(max_length=45, blank=True)\n email = models.CharField(unique=True, max_length=255)\n create_time = models.DateTimeField(blank=True, null=True, default=timezone.now)\n language = models.CharField(max_length=45, blank=True)\n picture = models.ImageField(blank=True, upload_to=get_upload_path)\n activation_key = models.CharField(max_length=255, blank=True)\n is_active = models.IntegerField(blank=True)\n is_superuser = models.BooleanField(blank=True)\n last_activity = models.DateTimeField(blank=True, null=True, default=timezone.now)\n\n class Meta:\n managed = False\n db_table = 'Users'\n # app_label = 'database'\n\n @property\n def dict(self):\n \"\"\"\n @returns{dict} this User's data\n \\n fields:\n @dictkey{user_id,int} id of this User\n @dictkey{first,string} first name\n @dictkey{last,string} last name\n @dictkey{login,string} login\n @dictkey{email,string} email\n @dictkey{picture, path} path to image\n @dictkey{act_key,string} activation key's content\n @dictkey{is_active,bool} true for active User\n @dictkey{is_superuser,bool} true for User with admin privilidges\n @dictkey{activation_date,datetime.datetime} activation's date\n @dictkey{last_activity,datetime.datetime} last login's date\n \"\"\"\n d = {'user_id': self.id, 'first': self.name, 'last': self.lastname, 'login': self.login, 'email': self.email,\n 'picture': self.picture, 'act_key': self.activation_key or '', 'is_active': self.is_active or 0,\n 'is_superuser': self.is_superuser or 0, 'activation_date': self.create_time or '',\n 'last_activity': self.last_activity or ''}\n return d\n\n @property\n def short_dict(self):\n \"\"\"\n @returns{dict} very short version of User's data\n \\n fields:\n @dictkey{user_id,int} id of this User\n @dictkey{first,string} first name\n @dictkey{last,string} last name\n \"\"\"\n d = {'user_id': self.id, 'first': self.name, 'last': self.lastname}\n\n return d\n\n @property\n def ajax_dict(self):\n \"\"\"\n @returns{dict} this User's data for ajax response\n \\n fields:\n @dictkey{user_id,int} id of this User\n @dictkey{first,string} first name\n @dictkey{last,string} last name\n @dictkey{login,string} login\n @dictkey{email,string} email\n 
@dictkey{act_key,string} activation key's content\n        @dictkey{is_active,bool} true for active User\n        @dictkey{is_superuser,bool} true for User with admin privileges\n        \"\"\"\n        d = {'user_id': self.id, 'first': self.name, 'last': self.lastname, 'login': self.login, 'email': self.email,\n             'act_key': self.activation_key or '', 'is_active': self.is_active or 0,\n             'is_superuser': self.is_superuser or 0}\n\n        return d\n\n    @staticmethod\n    def get(user_id):\n        \"\"\"\n        @parameter{id,int} primary index of the @type{User}\n        @returns{User} instance of requested @type{User}\n        @raises{user_get,CLMException}\n        \"\"\"\n        try:\n            user = Users.objects.get(pk=user_id)\n        except:\n            raise DevCloudException('user_get')\n        return user\n\n    @staticmethod\n    def superuser(user_id):\n        \"\"\"\n        @param user_id: User's id, int\n        @return: {bool}\n        @avail{True} - User is superuser\n\n        @raises{user_permission,DevCloudException} User isn't superuser\n        \"\"\"\n        user = Users.get(user_id)\n        if not user.is_superuser:\n            raise DevCloudException('user_permission')\n        return True\n\n    def set_password(self, password):\n        self.password = password\n\n    def delete(self, *args, **kwargs):\n        self.picture.delete()\n        super(Users, self).delete(*args, **kwargs)\n\n    @staticmethod\n    def save_picture(user, request):\n        upload_picture = request.FILES['image']\n        if user.picture is not None:\n            user.picture.delete()\n\n        user.picture.save(upload_picture.name, upload_picture)\n\n        if user.picture:\n            try:\n                image = Image.open(StringIO.StringIO(user.picture.read()))\n                image = ImageOps.fit(image, (140, 140), Image.ANTIALIAS)\n                output = StringIO.StringIO()\n                image.save(output, format='JPEG', quality=75)\n                output.seek(0)\n                user.picture = InMemoryUploadedFile(output, 'ImageField', user.picture.name, 'image/jpeg',\n                                                    output.len, None)\n            except Exception as e:\n                raise DevCloudException(e)\n\n    @staticmethod\n    def parse_user(user):\n        \"\"\"\n        Helper function that returns \\c User object based on the provided dictionary.\n        @param user:\n        @return:\n        \"\"\"\n        return Users(id=user['user_id'], name=user['first'], lastname=user['last'], login=user['login'],\n                     password='', email=user['email'], is_active=user['is_active'], is_superuser=user['is_superuser'])\n","sub_path":"dev_cloud/database/models/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"251843986","text":"from __future__ import absolute_import\nimport itertools\nfrom nltk.corpus import wordnet\n\nfrom . import normalization\n\n\"\"\"\ndef getRecursiveSynsets(word, base_case=4):\n\nTODO ADDME\n\nperhaps this can have a threshold argument\nthat determines how recursively down the\n*nym should go within a given synset,\nand have a default that is crafted over practice\n\nwill obtain a synset, grab that synsets\nvalue and continue recursively until a\nbase case is reached, defaulting to depth 4\n\n# base = 0\n# if not base_case >= base:\n# return getRecursiveSynsets(word, base_case=base+=1)\n # stem and remove punctuation, if any\n\nE.G:\nfor k in new_array:\nt = synset.hyponyms()\nfor sub_t in t:\n#print \"sub sub hyponyms\", sub_t.hyponyms()\nx = sub_t.hyponyms()\nfor sub_sub_t in x:\ny = sub_sub_t.hyponyms()\nfor sub_sub_sub_t in y:\nprint \"sub sub sub sub hyponyms\",sub_sub_sub_t.hyponyms()\n\n\"\"\"\n\n\ndef print_all_synset_categories():\n \"\"\"\n Prints all domains and\n categories for research purposes\n \"\"\"\n categories = []\n for synset in list(wordnet.all_synsets('n')):\n categories.append(synset)\n return categories\n\n\ndef get_synsets(words=None, use_definitions=False):\n \"\"\"This is a brute force method of getting as many related words\n to a given set as possible. You are expected to filter or remove any\n that are not relevant separately, if the resultant set is too long.\n The scoring module provides tools to filter based on pronunciation,\n but you can write your own and extend the functionality.\"\"\"\n results = {\n 'words': {}\n }\n\n for word in words:\n synsets = wordnet.synsets(\n word.encode(\"utf-8\"),\n pos=None)\n\n for synset in synsets:\n results['synset_original'] = []\n results['synset_original'].append(synset.lemma_names)\n if use_definitions:\n results['synset_original'].append(\n synset.definition.split())\n\n \"\"\"\n More Specific *nyms (deep)\n \"\"\"\n\n if synset.hyponyms():\n results['hyponyms'] = []\n for v in synset.hyponyms():\n results['hyponyms'].append(v.lemma_names)\n if use_definitions:\n results['hyponyms'].append(\n v.definition.split())\n\n if synset.instance_hyponyms():\n results['instance_hyponyms'] = []\n for v in synset.instance_hyponyms():\n results['instance_hyponyms'].append(v.lemma_names)\n if use_definitions:\n results['instance_hyponyms'].append(\n v.definition.split())\n\n if synset.member_meronyms():\n results['member_meronyms'] = []\n for v in synset.member_meronyms():\n results['member_meronyms'].append(v.lemma_names)\n if use_definitions:\n results['member_meronyms'].append(\n v.definition.split())\n\n if synset.substance_meronyms():\n results['substance_meronyms'] = []\n for v in synset.substance_meronyms():\n results['substance_meronyms'].append(\n v.lemma_names)\n if use_definitions:\n results.append(\n v.definition.split())\n\n if synset.part_meronyms():\n results['part_meronyms'] = []\n for v in synset.part_meronyms():\n results['part_meronyms'].append(v.lemma_names)\n if use_definitions:\n results['part_meronyms'].append(\n v.definition.split())\n\n if synset.substance_holonyms():\n results['substance_holonyms'] = []\n for v in synset.substance_holonyms():\n results['substance_holonyms'].append(\n v.lemma_names)\n if use_definitions:\n results['substance_holonyms'].append(\n v.definition.split())\n\n \"\"\"\n More Generic *nyms (shallow)\n \"\"\"\n\n if synset.member_holonyms():\n results['causes'] = []\n for v in synset.member_holonyms():\n results['causes'].append(v.lemma_names)\n 
if use_definitions:\n results['causes'].append(\n v.definition.split())\n\n if synset.part_holonyms():\n results['part_holonyms'] = []\n for v in synset.part_holonyms():\n results['part_holonyms'].append(v.lemma_names)\n if use_definitions:\n results['part_holonyms'].append(\n v.definition.split())\n\n if synset.instance_hypernyms():\n results['instance_hypernyms'] = []\n for v in synset.instance_hypernyms():\n results['instance_hypernyms'].append(\n v.lemma_names)\n if use_definitions:\n results['instance_hypernyms'].append(\n v.definition.split())\n\n if synset.hypernyms():\n results['hypernyms'] = []\n for v in synset.hypernyms():\n results['hypernyms'].append(v.lemma_names)\n if use_definitions:\n results['hypernyms'].append(\n v.definition.split())\n\n \"\"\"\n Other types\n (need classification) TODO\n \"\"\"\n\n if synset.topic_domains():\n results['topic_domains'] = []\n for v in synset.topic_domains():\n results['topic_domains'].append(v.lemma_names)\n if use_definitions:\n results['topic_domains'].append(\n v.definition.split())\n\n if synset.region_domains():\n results['region_domains'] = []\n for v in synset.region_domains():\n results['region_domains'].append(v.lemma_names)\n if use_definitions:\n results['region_domains'].append(\n v.definition.split())\n\n if synset.usage_domains():\n results['usage_domains'] = []\n for v in synset.usage_domains():\n results['usage_domains'].append(v.lemma_names)\n if use_definitions:\n results['usage_domains'].append(\n v.definition.split())\n\n if synset.attributes():\n results['attributes'] = []\n for v in synset.attributes():\n results['attributes'].append(v.lemma_names)\n if use_definitions:\n results['attributes'].append(\n v.definition.split())\n\n if synset.entailments():\n results['entailments'] = []\n for v in synset.entailments():\n results['entailments'].append(v.lemma_names)\n if use_definitions:\n results['entailments'].append(\n v.definition.split())\n\n if synset.causes():\n results['causes'] = []\n for v in synset.causes():\n results['causes'].append(v.lemma_names)\n if use_definitions:\n results['causes'].append(\n v.definition.split())\n\n if synset.also_sees():\n results['also_sees'] = []\n for v in synset.also_sees():\n results['also_sees'].append(v.lemma_names)\n if use_definitions:\n results['also_sees'].append(\n v.definition.split())\n\n if synset.verb_groups():\n results['verb_groups'] = []\n for v in synset.verb_groups():\n results['verb_groups'].append(v.lemma_names)\n if use_definitions:\n results['verb_groups'].append(\n v.definition.split())\n\n if synset.similar_tos():\n results['similar_tos'] = []\n for v in synset.similar_tos():\n results['similar_tos'].append(v.lemma_names)\n if use_definitions:\n results['similar_tos'].append(\n v.definition.split())\n\n \"\"\"\n 1. get words back\n 2. flatten nested array\n 3. split up words\n 4. filter, clean, stem, uniquify\n \"\"\"\n\n for nlp_type in results:\n results[nlp_type] = sorted(\n normalization.uniquify(\n normalization.clean_sort(\n normalization.remove_stop_words(\n normalization.stem_words(\n normalization.remove_bad_words(\n list(itertools.chain(\n *results[nlp_type]))))))))\n\n return results\n","sub_path":"namebot/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"588357298","text":"#!/usr/bin/env python3\n\n#https://codeforces.com/problemset/problem/665/C\n\ns = list(input())\nfor i in range(1,len(s)-1):\n if s[i]==s[i-1]:\n if s[i-1]!='a' and s[i+1]!='a':\n s[i]='a'\n elif s[i-1]!='b' and s[i+1]!='b':\n s[i]='b'\n else:\n s[i]='c'\nif len(s)>1 and s[-1]==s[-2]:\n s[-1] = 'a' if s[-2]=='b' else 'b'\nprint(''.join(s))\n","sub_path":"codeforces/dp动态规划/1300/665C简单字符串.py","file_name":"665C简单字符串.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"132429618","text":"import time\n\nimport os\n\nfrom commands import commands_git\nfrom datetime import datetime\nfrom threading import Thread\n\n\nclass SecRet(Thread):\n def __init__(self, bus, git_repo):\n # event bus\n self.bus = bus\n # git repo\n self.git_repo = git_repo\n # get and hold last commit sha for auto update\n self.last_commit_sha = commands_git.get_last_commit(git_repo).sha\n\n Thread.__init__(self)\n\n def run(self):\n # sleep until 00\n now = datetime.now()\n c_min = now.minute\n c_sec = now.second\n l_min = 59 - c_min\n l_sec = 59 - c_sec + (60 * l_min)\n\n time.sleep(l_sec)\n\n while True:\n self.secret_hourly_task()\n time.sleep(60 * 60)\n\n def secret_hourly_task(self):\n self.ding()\n self.auto_update()\n\n def ding(self):\n now = datetime.now()\n s = ''\n for i in range(0, now.hour):\n s += 'DING '\n self.bus.emit('secret_send', message=s)\n\n def auto_update(self):\n commits = self.git_repo.get_commits()\n commit = commits[0]\n if commit.sha != self.last_commit_sha:\n self.bus.emit('secret_send', message='**updating secRet sources**')\n while commit.sha != self.last_commit_sha:\n self.bus.emit('secret_send', message='**merging:**' + ' ' + commit.commit.message +\n '\\n**from** ' + commit.author.name)\n # use pipe and shell.. feel free to code a better way\n os.system(\"git fetch origin master\")\n os.system(\"git reset --h FETCH_HEAD\")\n self.bus.emit('secret_restart')\n\n\n\n","sub_path":"secret.py","file_name":"secret.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"460861077","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import models\n\n\ndef weights_init(init_type='gaussian'):\n def init_fun(m):\n classname = m.__class__.__name__\n if (classname.find('Conv') == 0 or classname.find(\n 'Linear') == 0) and hasattr(m, 'weight'):\n if init_type == 'gaussian':\n nn.init.normal_(m.weight, 0.0, 0.02)\n elif init_type == 'xavier':\n nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))\n elif init_type == 'kaiming':\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n nn.init.orthogonal_(m.weight, gain=math.sqrt(2))\n elif init_type == 'default':\n pass\n else:\n assert 0, \"Unsupported initialization: {}\".format(init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias, 0.0)\n\n return init_fun\n\n\nclass VGG16FeatureExtractor(nn.Module):\n def __init__(self):\n super(VGG16FeatureExtractor, self).__init__()\n vgg16 = models.vgg16(pretrained=True)\n self.enc_1 = nn.Sequential(*vgg16.features[:5])\n self.enc_2 = nn.Sequential(*vgg16.features[5:10])\n self.enc_3 = nn.Sequential(*vgg16.features[10:17])\n\n # fix the encoder\n for i in range(3):\n for param in getattr(self, 'enc_{:d}'.format(i + 1)).parameters():\n param.requires_grad = False\n\n def forward(self, image):\n results = [image]\n for i in range(3):\n func = getattr(self, 'enc_{:d}'.format(i + 1))\n results.append(func(results[-1]))\n return results[1:]\n\n\n\n\nclass PartialConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True):\n super(PartialConv, self).__init__()\n\n self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,\n stride, padding, dilation, groups, bias)\n\n self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,\n stride, padding, dilation, groups, False)\n #\n # up = nn.UpsamplingBilinear2d(scale_factor=2)\n # self.input_conv = up(self.input_conv)\n # self.mask_conv = up(self.mask_conv)\n\n #self.input_conv = F.upsample(self.input_conv, scale_factor=2)\n # self.input_conv = nn.Sequential(\n # nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias),\n # nn.Upsample(scale_factor=2, mode='bilinear'),\n # )\n #\n # self.mask_conv = nn.Sequential(\n # nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, False),\n # nn.Upsample(scale_factor=2, mode='bilinear'),\n # )\n\n self.input_conv.apply(weights_init('kaiming'))\n\n torch.nn.init.constant_(self.mask_conv.weight, 1.0)\n\n # mask is not updated\n for param in self.mask_conv.parameters():\n param.requires_grad = False\n\n def forward(self, input, mask):\n\n output = self.input_conv(input * mask)\n if self.input_conv.bias is not None:\n output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(\n output)\n else:\n output_bias = torch.zeros_like(output)\n\n with torch.no_grad():\n output_mask = self.mask_conv(mask)\n\n no_update_holes = output_mask == 0\n mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)\n\n output_pre = (output - output_bias) / mask_sum + output_bias\n output = output_pre.masked_fill_(no_update_holes, 0.0)\n\n new_mask = torch.ones_like(output)\n new_mask = new_mask.masked_fill_(no_update_holes, 0.0)\n\n return output, new_mask\n\nclass PCBActiv(nn.Module):\n def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ='relu',\n conv_bias=False, dilation=3):\n super(PCBActiv, self).__init__()\n 
#in_channels, out_channels, kernel_size,stride, padding, dilation, groups, bias\n if sample == 'layer-1':\n # print(\"layer 1 Check!\")\n self.conv = PartialConv(in_ch, out_ch, 7, 2, 3, bias=conv_bias, dilation= dilation)\n\n elif sample == 'layer-2':\n # print(\"layer 2 Check!\")\n self.conv = PartialConv(in_ch, out_ch, 12, 3, 0, bias=conv_bias, dilation= dilation)\n\n elif sample == 'layer-3':\n # print(\"layer 3 Check!\")\n self.conv = PartialConv(in_ch, out_ch, 9, 1, 0, bias=conv_bias, dilation= dilation)\n\n elif sample == 'layer-4':\n # print(\"layer 4 Check!\")\n self.conv = PartialConv(in_ch, out_ch, 3, 2, 4, bias=conv_bias, dilation= dilation)\n\n elif sample == 'layer-5':\n # print(\"layer 5 Check!\")\n self.conv = PartialConv(in_ch, out_ch, 1, 4, 0, bias=conv_bias, dilation= dilation)\n\n elif sample == 'layer-6':\n # print(\"layer 6 Check!\")\n self.conv = PartialConv(in_ch, out_ch, 1, 4, 0, bias=conv_bias, dilation= dilation)\n\n elif sample == 'layer-7':\n # print(\"layer 6 Check!\")\n self.conv = PartialConv(in_ch, out_ch, 3, 2, 1, bias=conv_bias, dilation= dilation)\n else:\n self.conv = PartialConv(in_ch, out_ch, 3, 1, 1, bias=conv_bias, dilation= dilation)\n # print(\"Other Layer Check!\")\n\n if bn:\n self.bn = nn.BatchNorm2d(out_ch)\n if activ == 'relu':\n self.activation = nn.ReLU()\n elif activ == 'leaky':\n self.activation = nn.LeakyReLU(negative_slope=0.2)\n\n def forward(self, input, input_mask):\n h, h_mask = self.conv(input, input_mask)\n if hasattr(self, 'bn'):\n h = self.bn(h)\n if hasattr(self, 'activation'):\n h = self.activation(h)\n return h, h_mask\n\n\n\nclass PConvUNet(nn.Module):\n def __init__(self, layer_size=7):\n super(PConvUNet, self).__init__()\n self.freeze_enc_bn = False\n # Brilliant - Also works for fewer layers\n self.layer_size = layer_size\n self.enc_1 = PCBActiv(3, 64, bn=False, sample='layer-1', dilation = 1)\n self.enc_2 = PCBActiv(64, 128, sample='layer-2', dilation = 3)\n self.enc_3 = PCBActiv(128, 256, sample='layer-3', dilation = 6)\n self.enc_4 = PCBActiv(256, 512, sample='layer-4', dilation = 12)\n self.enc_5 = PCBActiv(512, 512, sample='layer-5', dilation = 18)\n self.enc_6 = PCBActiv(512, 512, sample='layer-6', dilation = 24)\n\n for i in range(6, self.layer_size):\n name = 'enc_{:d}'.format(i + 1)\n setattr(self, name, PCBActiv(512, 512, sample='layer-7', dilation = 1))\n\n for i in range(4, self.layer_size):\n name = 'dec_{:d}'.format(i + 1)\n setattr(self, name, PCBActiv(512 + 512, 512, activ='leaky', dilation = 1))\n self.dec_4 = PCBActiv(512 + 256, 256, activ='leaky', dilation = 1)\n self.dec_3 = PCBActiv(256 + 128, 128, activ='leaky', dilation = 1)\n self.dec_2 = PCBActiv(128 + 64 , 64 , activ='leaky', dilation = 1)\n self.dec_1 = PCBActiv(64 + 3, 3, bn=False, activ=None, conv_bias=True, dilation = 1)\n\n def forward(self, input, input_mask):\n h_dict = {} # for the output of enc_N\n h_mask_dict = {} # for the output of enc_N\n h_dict['h_0'], h_mask_dict['h_0'] = input, input_mask\n\n\n h_key_prev = 'h_0'\n for i in range(1, self.layer_size + 1):\n l_key = 'enc_{:d}'.format(i)\n h_key = 'h_{:d}'.format(i)\n h_dict[h_key], h_mask_dict[h_key] = getattr(self, l_key)(\n h_dict[h_key_prev], h_mask_dict[h_key_prev])\n # print('--------------------')\n # print(\"ENC h_key:\", h_key)\n # print(\"ENC l_key:\", l_key)\n # print(\"ENC h:h_dict[h_key]\",h_dict[h_key].size())\n\n h_key_prev = h_key\n\n if h_key in ['h_1','h_2','h_3','h_4','h_5','h_6']:\n h_dict[h_key] = F.upsample(h_dict[h_key], scale_factor=2)\n h_mask_dict[h_key] = 
F.upsample(h_mask_dict[h_key], scale_factor=2)\n # print(\"ENC post_upsample:\", h_key)\n # print('ENC post_upsample: h_dict[h_key]', h_dict[h_key].size())\n\n h_key = 'h_{:d}'.format(self.layer_size)\n # print('----------------------------------------')\n # print(\"MID h_key:\", h_key)\n h, h_mask = h_dict[h_key], h_mask_dict[h_key]\n # print(\"MID h.size()\", h.size())\n # print('----------------------------------------')\n\n\n # adds the DECODER layers to the dict\n for i in range(self.layer_size, 0, -1):\n enc_h_key = 'h_{:d}'.format(i - 1)\n dec_l_key = 'dec_{:d}'.format(i)\n\n ## included in the original\n if enc_h_key!='h_0':\n h = F.upsample(h, scale_factor=2)\n h_mask = F.upsample(h_mask, scale_factor=2)\n\n # print(\"DEC enc_h_key:\", enc_h_key)\n # print(\"DEC enc_h_key:\", dec_l_key)\n # print(\"DEC h.size()\",h.size())\n\n # print(\"DEC h_dict[enc_h_key]:\", h_dict[enc_h_key].size())\n # print('--------------------')\n\n h = torch.cat([h, h_dict[enc_h_key]], dim=1)\n h_mask = torch.cat([h_mask, h_mask_dict[enc_h_key]], dim=1)\n h, h_mask = getattr(self, dec_l_key)(h, h_mask)\n\n return h, h_mask\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() to freeze the BN parameters\n \"\"\"\n super(PConvUNet, self).train(mode)\n if self.freeze_enc_bn:\n for name, module in self.named_modules():\n if isinstance(module, nn.BatchNorm2d) and 'enc' in name:\n module.eval()\n\n\n\n\n","sub_path":"3. PCONV/PCONV-pca-in-dilation-INCREASING/net_dilation_new.py","file_name":"net_dilation_new.py","file_ext":"py","file_size_in_byte":9997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"350697854","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pygame\nimport random\nimport sys\nimport os\nimport time\n\nos.environ['SDL_VIDEO_WINDOW_POS'] = '600, 30'\n\n\nclass Maze(object):\n\n def __init__(self):\n self.mv_chk = lambda x, y: x + y\n self.lt_chk = lambda x, y: x - y\n \n self.prev = [20, 700]\n self.pos = [20, 680]\n\n self.explored = [self.prev]\n self.unexplored = [None]\n\n self.prev_type = 'vert' \n self.curr_type = 'vert'\n\n self.horz = [i for i in self.h_mov if i not in self.explored]\n self.vert = [i for i in self.v_mov if i not in self.explored]\n self.edges = self.horz + self.vert\n\n @property\n def h_mov(self):\n horizontal = {1:[-20, 0], 2:[20, 0]}\n moves = [v for v in [map(self.mv_chk, self.pos, horizontal[i]) \n for i in horizontal] if all(j>0 and j<700 for j in v)]\n return moves\n\n @property\n def v_mov(self):\n vertical = {1:[0, -20], 2:[0, 20]}\n moves = [v for v in [map(self.mv_chk, self.pos, vertical[i])\n for i in vertical] if all(j>0 and j<700 for j in v)]\n return moves\n\n @property\n def vec_chk(self):\n diff = tuple(map(self.lt_chk, self.pos, self.prev))\n nodes = {(0, 20,):[[-20, 0], [20, 0], [20, 20], [-20, 20], [0, 20]],\n (0, -20,):[[-20, 0], [20, 0], [-20, -20], [20, -20], [0, -20]],\n (20, 0,):[[20, 0], [20, -20], [20, 20], [0, -20], [0, 20]],\n (-20, 0,):[[-20, 0], [-20, -20], [-20, 20], [0, -20], [0, 20]]\n }\n moves = [v for v in [map(self.mv_chk, self.pos, i) for i in nodes[diff]]]\n return moves\n\n def next_wall(self, walls):\n move_pos = None\n while True:\n if walls:\n if isinstance(walls[0], dict):\n move_pos = walls.pop(0) \n self.prev, self.pos = move_pos.items()[0]\n else:\n self.pos = random.choice(walls)\n walls.remove(self.pos)\n if not [i for i in self.vec_chk if i in self.explored]:\n self.prev_type = self.curr_type \n if self.prev[0] == self.pos[0]:\n self.curr_type = 'vert'\n else:\n self.curr_type = 'horz' \n if not move_pos: \n for i in self.edges:\n self.unexplored.append({tuple(self.prev):i})\n return \n else:\n self.pos = self.prev\n return\n\n def gen_maze(self):\n while self.unexplored:\n if not self.unexplored[0]:\n self.unexplored.pop(0)\n if (all(i > 0 and i < 700 for i in self.pos) and self.pos not in self.explored):\n self.explored.append(self.pos) \n self.horz = [i for i in self.h_mov if i not in self.explored]\n self.vert = [i for i in self.v_mov if i not in self.explored]\n self.edges = self.horz + self.vert\n self.prev = self.pos\n if self.prev_type != self.curr_type and self.curr_type == 'horz':\n self.next_wall(self.horz)\n elif self.prev_type != self.curr_type and self.curr_type == 'vert':\n self.next_wall(self.vert)\n else:\n self.next_wall(self.edges)\n else:\n self.next_wall(self.unexplored)\n return self.explored\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode((720, 720))\n home = os.getcwd()\n wall = pygame.image.load(home + '/images/sm_wall.png')\n wall.convert_alpha()\n indicator = pygame.image.load(home + '/images/sm_position.png')\n indicator.convert_alpha()\n location = [20, 720]\n\n screen.blit(wall, (20, 700))\n screen.blit(wall, [700, 20])\n screen.blit(indicator, location)\n\n maze = Maze()\n maze_structure = maze.gen_maze()\n maze_structure.extend([[700, 20], [720, 20]])\n\n while True:\n for event in pygame.event.get():\n if (event.type == pygame.KEYDOWN and \n event.key == pygame.K_ESCAPE):\n sys.exit()\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_UP):\n if [location[0], location[1] - 20] in maze_structure:\n 
location[1] -= 20\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_DOWN):\n if [location[0], location[1] + 20] in maze_structure:\n location[1] += 20\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_RIGHT):\n if [location[0] + 20, location[1]] in maze_structure:\n location[0] += 20\n if (event.type == pygame.KEYDOWN and\n event.key == pygame.K_LEFT):\n if [location[0] - 20, location[1]] in maze_structure:\n location[0] -= 20\n for path in maze_structure:\n screen.blit(wall, path)\n screen.blit(indicator, location)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lg_maze.py","file_name":"lg_maze.py","file_ext":"py","file_size_in_byte":5269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"489917471","text":"#-*-coding:UTF-8 -*-\nimport pymysql\n\nconn = pymysql.connect(\n host='localhost',\n user='yuan',\n password='test1234',\n database='testdb'\n)\ncursor = conn.cursor()\n\n#--sql資料表 球員 的好像有少人 Q_Q\n\n#--需要一個函式沒有輸入值,回傳所有球員的姓名跟學號\ndef show_all_player():\n sql='SELECT 學號,名字 FROM 球員'\n try:\n cursor.execute(sql)\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#--需要一個函示沒有輸入值,回傳所有比賽的日期盃賽名對手學校跟對手系名\ndef game_info():\n sql='SELECT 日期,盃賽名稱,對手學校,對手系名 FROM 比賽'\n try:\n cursor.execute(sql)\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#依球員學號顯示背號、比賽場數、先發次數和顯示是否為退休球員和是否為隊長\ndef player_info(stu_id):\n sql1='SELECT * FROM (SELECT * FROM 球員 LEFT JOIN 退休球員 USING (學號) LEFT JOIN 隊長 USING (學號)) t1 LEFT JOIN (SELECT 學號,COUNT(學號) as 出賽場次 FROM 球員比賽表現 GROUP BY 學號) t2 USING(學號) WHERE 學號=%s;'\n try:\n cursor.execute(sql1,(stu_id))\n data = cursor.fetchall()\n return data\n except:\n return None\ndef playerfix(stu_id):\n sql='SELECT * FROM 球員 LEFT JOIN 退休球員 USING (學號) LEFT JOIN 隊長 USING (學號) WHERE 學號=%s;'\n try:\n cursor.execute(sql,(stu_id))\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#依球員學號顯示列出球員各項數據平均(得分、進攻籃板數、防守籃板數、助攻數、阻攻數、抄截數、犯規數、失誤數)\ndef player_data_average(stu_id):\n sql2 ='SELECT 學號,名字,背號,得分率,進攻籃板率,防守籃板率,助攻率,阻攻率,抄截率,犯規率,失誤率 FROM(SELECT 球員比賽表現.學號, (sum(表現.二分球中)*2 + sum(表現.三分球中)*3 + sum(表現.罰球中)*1)/count(球員比賽表現.學號) as 得分率, sum(表現.進攻籃板)/count(球員比賽表現.學號) as 進攻籃板率, sum(表現.防守籃板)/count(球員比賽表現.學號) as 防守籃板率, sum(表現.助攻)/count(球員比賽表現.學號) as 助攻率, sum(表現.阻攻)/count(球員比賽表現.學號) as 阻攻率, sum(表現.抄截)/count(球員比賽表現.學號) as 抄截率, sum(表現.犯規)/count(球員比賽表現.學號) as 犯規率, sum(表現.失誤)/count(球員比賽表現.學號) as 失誤率 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 學號,名字,背號 FROM 球員)t2 USING(學號) WHERE 學號=%s'\n try:\n cursor.execute(sql2,(stu_id))\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#依球員學號顯示列出球員命中率(三分球、投籃、罰球)\ndef player_hit_rate(stu_id):\n sql3 ='SELECT 學號,名字,背號,三分球命中率,投球命中率,罰球命中率 FROM(SELECT 球員比賽表現.學號, (sum(表現.三分球中)*100/sum(表現.三分球投)) as 三分球命中率, ((sum(表現.三分球中)+sum(表現.二分球中))*100/(sum(表現.三分球投)+sum(表現.二分球投))) as 投球命中率, (sum(表現.罰球中)*100/sum(表現.罰球投)) as 罰球命中率 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 LEFT JOIN (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) WHERE 學號=%s;'\n try:\n cursor.execute(sql3,(stu_id))\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#列出球隊所有比賽的比數\ndef game_score():\n sql4 = \"SELECT * FROM (SELECT 球員比賽表現.日期,sum(表現.二分球中)*2 + sum(表現.三分球中)*3 + sum(表現.罰球中)*1 as 我方得分 FROM 球員比賽表現 LEFT JOIN 表現 USING(編號) GROUP BY 球員比賽表現.日期) t1 LEFT JOIN (SELECT * FROM 比賽) t2 USING(日期);\"\n try:\n cursor.execute(sql4)\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#列出球隊各項數據平均(得分、籃板數、助攻數、阻攻數、抄截數、犯規數、失誤數)\ndef data_average():\n sql5 ='SELECT (round(cast(sum(二分球中*2)+sum(三分球中*3)+sum(罰球中)as float)))/count(DISTINCT 日期) as 平均得分, (round(cast(sum(防守籃板)+sum(進攻籃板)as float)))/count(DISTINCT 日期) as 籃板平均, round(cast(sum(助攻)as float))/count(DISTINCT 日期) as 助攻平均, round(cast(sum(阻攻)as float))/count(DISTINCT 日期) as 阻攻平均, round(cast(sum(抄截)as float))/count(DISTINCT 日期) as 抄截平均, round(cast(sum(犯規)as float))/count(DISTINCT 日期) as 犯規平均, round(cast(sum(失誤)as float))/count(DISTINCT 日期) as 失誤平均 FROM 球員比賽表現 LEFT JOIN 表現 ON 表現.編號=球員比賽表現.編號;'\n try:\n cursor.execute(sql5)\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#列出球隊命中率(三分球、投籃、罰球)\ndef team_hit_rate():\n sql6 ='SELECT sum(三分球中)/sum(三分球投)*100 as 二分命中率, 
(sum(三分球中)+sum(二分球中))/(sum(三分球投)+sum(二分球投))*100 as 三分命中率, sum(罰球中)/sum(罰球投)*100 as 罰球命中率 FROM 表現;'\n try:\n cursor.execute(sql6)\n data = cursor.fetchall()\n return data\n except:\n return None\n\n#--完蛋我只有用學號 但是要印出姓名跟背號 _|:o_/|=\ndef score_mvp():\n sql7 ='SELECT 學號,名字,背號,得分 FROM(SELECT 球員比賽表現.學號, (sum(表現.二分球中)*2 + sum(表現.三分球中)*3 + sum(表現.罰球中)*1) as ��分 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 得分 DESC;'\n try:\n cursor.execute(sql7)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef backboard_mvp():\n sql8 ='SELECT 學號,名字,背號,籃板 FROM(SELECT 球員比賽表現.學號, (sum(表現.防守籃板) + sum(表現.進攻籃板)) as 籃板 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 籃板 DESC;'\n try:\n cursor.execute(sql8)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef assist_mvp():\n sql9 ='SELECT 學號,名字,背號,助攻 FROM(SELECT 球員比賽表現.學號, sum(表現.助攻) as 助攻 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 助攻 DESC;'\n try:\n cursor.execute(sql9)\n data = cursor.fetchall()\n return data\n except:\n return None\n \ndef block_mvp():\n sql10='SELECT 學號,名字,背號,阻攻 FROM(SELECT 球員比賽表現.學號, sum(表現.阻攻) as 阻攻 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 阻攻 DESC;'\n try:\n cursor.execute(sql10)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef intercept_mvp():\n sql11='SELECT 學號,名字,背號,抄截 FROM(SELECT 球員比賽表現.學號, sum(表現.抄截) as 抄截 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 抄截 DESC;'\n try:\n cursor.execute(sql11)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef three_point_rate():\n sql12='SELECT 學號,名字,背號,三分球命中率 FROM(SELECT 球員比賽表現.學號, (sum(表現.三分球中)/sum(表現.三分球投)*100) as 三分球命中率 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 三分球命中率 DESC;'\n try:\n cursor.execute(sql12)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef shoot_rate_mvp():\n sql13='SELECT 學號,名字,背號,投球命中率 FROM(SELECT 球員比賽表現.學號, ((sum(表現.三分球中)+sum(表現.二分球中))/(sum(表現.三分球投)+sum(表現.二分球投))*100) as 投球命中率 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號)t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 投球命中率 DESC;'\n try:\n cursor.execute(sql13)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef penalty_mvp():\n sql14='SELECT 學號,名字,背號,罰球命中率 FROM(SELECT 球員比賽表現.學號, (sum(表現.罰球中)/sum(表現.罰球投)*100) as 罰球命中率 FROM 球員比賽表現 LEFT JOIN 表現 ON 球員比賽表現.編號 = 表現.編號 GROUP BY 球員比賽表現.學號 )t1 left join (SELECT 名字,學號,背號 FROM 球員)t2 USING (學號) ORDER BY 罰球命中率 DESC;'\n try:\n cursor.execute(sql14)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef show_record(date,game_name,competitor_school,competitor_dept_name):#有條件輸入\n sql15='SELECT * FROM 球員比賽表現 LEFT JOIN 球員 USING(學號) LEFT JOIN 表現 USING(編號) WHERE 日期=%s and 盃賽名稱=%s and 對手學校=%s and 對手系名=%s;'\n try:\n cursor.execute(sql15, (date,game_name,competitor_school,competitor_dept_name))\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef online_player():\n sql16 = 'SELECT 學號,背號,入隊學年 FROM 球員 LEFT JOIN 退休球員 USING(學號) WHERE 退休學年 is NULL ORDER BY 入隊學年 ASC;'\n try:\n cursor.execute(sql16)\n data = cursor.fetchall()\n return data\n 
except:\n return None\n\ndef pastonline_player():\n sql = 'SELECT 名字,學號 FROM 球員 LEFT JOIN 退休球員 USING(學號) WHERE 1;'\n try:\n cursor.execute(sql)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef show_year():\n sql='SELECT DISTINCT 入隊學年 FROM 球員 LEFT JOIN 退休球員 USING(學號) WHERE 退休學年 is NULL ORDER BY 入隊學年 ASC;'\n try:\n cursor.execute(sql)\n data = cursor.fetchall()\n return data\n except:\n return None\n\ndef new_data(name,stuid,num,in_year): #53\n sql = \"INSERT INTO 球員(名字, 學號, 背號, 入隊學年)VALUES(%s, %s, %s, %s)\"\n try:\n cursor.execute(sql,(name,stuid,num,in_year))\n conn.commit()\n except: \n conn.rollback()\n\ndef fix_data(new_name,new_stuid,new_num,new_in_year,odd_name,odd_stuid,odd_num,odd_in_year): #53\n sql = \"UPDATE 球員 SET 名字 = %s, 學號= %s ,背號 = %s,入隊學年= %s WHERE 球員.名字 = %s and 球員.學號 = %s and 球員.背號 = %s and 球員.入隊學年=%s \"\n try:\n cursor.execute(sql,(new_name,new_stuid,new_num,new_in_year,odd_name,odd_stuid,odd_num,odd_in_year))\n conn.commit()\n\n except:\n conn.rollback()\n\ndef out_fix1(stuid,outyear):#修改的新增退隊\n sql='INSERT INTO 退休球員(學號,退休學年)VALUES(%s,%s)'\n try:\n cursor.execute(sql,(stuid,outyear))\n conn.commit()\n except: \n conn.rollback() \n \ndef out_fix2(newoutyear,stuid,oddoutyear):#修改的更新退隊\n sql='UPDATE 退休球員 SET 退休學年=%s WHERE 學號=%s and 退休學年=%s'\n try:\n cursor.execute(sql,(newoutyear,stuid,oddoutyear))\n conn.commit()\n except: \n conn.rollback()\n\ndef leader_fix1(stuid,isleader):#修改的新增隊長\n sql='INSERT INTO 隊長(學號,任期年分)VALUES(%s,%s)'\n try:\n cursor.execute(sql,(stuid,isleader))\n conn.commit()\n return 1\n except: \n conn.rollback() \n return 0\n\ndef leader_fix2(newisleader,stuid,oddisleader):#修改的更新隊長\n sql='UPDATE 隊長 SET 任期年分=%s WHERE 學號=%s and 任期年分=%s'\n try:\n cursor.execute(sql,(newisleader,stuid,oddisleader))\n conn.commit()\n except: \n conn.rollback()\n#新增球員比賽表現\ndef player_performance(date,game_name,competitor_school,competitor_dept_name,Id,num):\n sql='INSERT INTO 球員比賽表現(日期,盃賽名稱,對手學校,對手系名,學號,編號)VALUES(%s,%s,%s,%s,%s,%s);'\n try:\n cursor.execute(sql,(date,game_name,competitor_school,competitor_dept_name,Id,num))\n conn.commit()\n except:\n conn.rollback()\n#新增比賽\ndef new_game(date,game,oppschool,oppdep,opppoint): #53\n sql = \"INSERT INTO 比賽( 日期, 盃賽名稱, 對手學校, 對手系名, 對手得分)VALUES(%s, %s, %s, %s, %s);\"\n try:\n cursor.execute(sql,(date,game,oppschool,oppdep,opppoint))# 執行SQL语句\n conn.commit() # 提交到資料庫系統執行\n except: # 發生異常錯誤時回復\n conn.rollback()\n#新增表現\ndef player_ingamedata(num,twopoint,twopointin,threepoint,threepointin,faball,faballin,backboard,goboard,gohelp,block,cut,mistake,foul,fouled): #53\n sql = \"INSERT INTO 表現(編號, 二分球投, 二分球中, 三分球投, 三分球中, 罰球投, 罰球中, 防守籃板, 進攻籃板, 助攻, 阻攻, 抄截, 失誤, 犯規, 被犯)VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n try:\n cursor.execute(sql,(num,twopoint,twopointin,threepoint,threepointin,faball,faballin,backboard,goboard,gohelp,block,cut,mistake,foul,fouled))\n conn.commit()\n except:\n conn.rollback()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":14334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
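Every reader in the record above repeats the same try/execute/fetchall/except block. A hedged sketch of how that boilerplate could be factored into one helper; the helper name and the sample student ID are invented, and it assumes the same module-level conn.

def run_query(sql, args=None):
    # One parameterized SELECT; %s placeholders let the driver escape
    # values, so user input is never pasted into the SQL string itself.
    try:
        with conn.cursor() as cur:
            cur.execute(sql, args)
            return cur.fetchall()
    except Exception:
        return None

# e.g. the playerfix lookup becomes (ID invented):
# run_query('SELECT * FROM 球員 WHERE 學號=%s;', ('B10612345',))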
+{"seq_id":"586660463","text":"# 时间复杂度:O(?)有点难评估\n# 空间复杂度:O(n)\nfrom typing import List\n\n\n# 递归,回溯思路。dfs方法\nclass Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n def dfs(n, k, start, stack, result):\n # terminator\n if k == 0:\n result.append(stack[:])\n return\n # process\n for i in range(start, n - k + 1):\n stack.append(i)\n # drill down\n dfs(n, k - 1, i + 1, stack, result)\n # reverse state\n stack.pop()\n\n result, stack = [], []\n dfs(n, k, 1, stack, result)\n return result\n","sub_path":"Week3/Solution-77.py","file_name":"Solution-77.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"25535841","text":"\"\"\"\nScript to simulate an M/M/1 and M/M/n queues using simpy.\n\"\"\"\nimport sys\nimport pandas as pd\nimport os\n\nimport simpy\nimport random\nimport csv\nimport numpy as np\n\n# seed = 42 # dit is een seed\nmean_joblength = 1 # Minutes it takes to serve, either fixed or from distribution (known as mu)\nsim_time = 10000 # Simulation time in minutes\n\ncustomers_in_line = []\n\nclass Store(object):\n \"\"\"A Store has a limited number of cashiers (``SERVERS``) to\n help customers.\n\n Customers request a server. When there is no queue, they go to a server which takes joblength to help.\n\n \"\"\"\n def __init__(self, env, capacity):\n self.env = env\n self.capacity = capacity\n self.resource = simpy.Resource(env, capacity)\n\n def serving(self, joblength):\n \"\"\"yield timeout for servicing a customer\"\"\"\n\n yield self.env.timeout(joblength)\n\nclass Customer():\n def __init__(self, env, name, joblength):\n self.env = env\n self.name = name\n self.joblength = joblength\n\n def get_help(self, env, run, rho, store):\n \"\"\"\n Request a service at the store, predetermined joblength. FIFO system\n \"\"\"\n\n arrival = env.now\n customers_in_line.append(self.name)\n\n with store.resource.request() as req:\n yield req\n\n #we wait till he's helped\n enter = env.now\n customers_in_line.remove(self.name)\n\n yield env.process(store.serving(self.joblength))\n\n leave = env.now\n\n with open(\"data/mm\" + str(store.capacity) + \"_temp.csv\", 'a') as resultsFile:\n writer = csv.writer(resultsFile)\n\n # we are interested in the waiting time of the customers\n waitingtime = enter - arrival\n\n writer.writerow([arrival, waitingtime, len(customers_in_line)])\n\ndef setup(env, run, rho, capacity):\n \"\"\"\n Create a store and generate customers while simulation is running.\n T_inter is dependent on joblength in order to keep workload the same.\n \"\"\"\n\n # Create the store\n store = Store(env, capacity)\n\n customer_no = 0\n\n # stop at a certain simulation time\n while True:\n\n joblength = random.expovariate(mean_joblength)\n customer = Customer(env, f'customer{customer_no}', joblength)\n\n t_inter = random.expovariate(capacity*rho)\n\n env.process(customer.get_help(env, run=run, rho=rho, store=store))\n yield env.timeout(t_inter)\n customer_no += 1\n\ndef batch(rho, capacity):\n df = pd.read_csv(\"data/mm\" + str(capacity) + \"_temp.csv\")\n df.columns = [\"arrive\", \"waitingtime\", \"len_queue\"]\n\n # divide the dataframe into 20 batches\n for i in range(20):\n df[(df[\"arrive\"] > i * 500 + 200) & (df[\"arrive\"] < (i + 1) * 500)][\"waitingtime\"].mean()\n\n # write mean to csvfile\n with open(\"data/mm\" + str(capacity) + \"_means_results.csv\", 'a') as resultsFile:\n writer = csv.writer(resultsFile)\n\n writer.writerow([rho,\n df[(df[\"arrive\"] > i * 500 + 200) & (df[\"arrive\"] < (i + 1) * 500)][\"waitingtime\"].mean(),\n df[(df[\"arrive\"] > i * 500 + 200) & (df[\"arrive\"] < (i + 1) * 500)][\"len_queue\"].mean()])\n\n os.remove(\"data/mm\" + str(capacity) + \"_temp.csv\")\n\n\ndef main():\n\n if not len(sys.argv) == 4:\n print(\"Usage python mmn_queue.py \")\n\n capacity = int(sys.argv[1])\n rho = float(sys.argv[2])\n run = int(sys.argv[3])\n\n # Setup and start the simulation\n # random.seed(seed)\n\n # Create an environment and start the setup process\n env = simpy.Environment()\n env.process(setup(env, run, rho, capacity))\n\n # Execute!\n env.run(until=sim_time)\n\n batch(rho, capacity)\n\n\nif __name__ == \"__main__\":\n 
main()","sub_path":"assignment2/mmn_queue.py","file_name":"mmn_queue.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"488884799","text":"# source: https://www.hackerrank.com/challenges/encryption\n# title: Encryption\n\nfrom math import sqrt, floor, ceil\n\nmsg = input()\nL = len(msg)\nrows = floor(sqrt(L))\ncolumns = ceil(sqrt(L))\nif rows * columns < L:\n rows += 1\nencrypted = '';\nfor i in range(columns):\n for j in range(rows):\n if (i + j*columns < L):\n encrypted += msg[i + j*columns]\n encrypted += ' '\nprint(encrypted)\n","sub_path":"HackerRank/algorithms/Implementation/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"243218736","text":"#! /usr/bin/python3\n\nimport os\nimport csv\nimport re\nfrom os import path\nimport json\nimport requests\nimport weasyprint\nimport zipfile\nimport argparse\n\nimport canvas\n\ndef start_file(file_name):\n if path.exists(file_name):\n os.rename(file_name, file_name + '~')\n htmlfile = open(file_name, 'w')\n htmlfile.write('''\n \n \n \n ''')\n htmlfile_list.append(file_name)\n return htmlfile\n\ndef save_raw_answer(answer, identification):\n question = questions[answer['question_id']]\n if question['question_type'] == 'essay_question':\n raw_file_name = 'answer_%s.html' % identification\n rawanswers_file.writestr(raw_file_name, answer['text'])\n elif question['question_type'] == 'file_upload_question':\n answer['text'] = 'See file(s):
'\n for file in [canvas.file(a) for a in answer['attachment_ids']]:\n raw_file_name = 'answer_%s_%s' % \\\n (identification, file['display_name'])\n data = requests.get(file['url'])\n if data:\n rawanswers_file.writestr(raw_file_name, data.content)\n answer['text'] += '- %s
' % raw_file_name\n answer['text'] += '
'\n\n\ndef write_exam_file(htmlfile, questions, qs = None):\n acct = ''\n snum = ''\n sname = ''\n answers = {}\n sub_questions = {}\n num_attempts = 0\n if qs != None:\n sub = submissions[qs['submission_id']]\n snum = sub['user']['sis_user_id']\n sname = sub['user']['name']\n if args.classlist:\n if snum in student_accounts:\n acct = student_accounts[snum]\n else:\n print('Account not found for student: %s' % snum)\n else:\n acct = snum\n\n sub_questions = quiz.submission_questions(qs)\n\n previous_score = -1\n previous_attempt = -1\n variation = {}\n for attempt in sub['submission_history']:\n if 'submission_data' in attempt:\n num_attempts += 1\n update_answer = False\n if attempt['score'] > previous_score:\n previous_score = attempt['score']\n previous_attempt = attempt['attempt']\n update_answer = True\n elif attempt['score'] == previous_score and \\\n attempt['attempt'] > previous_attempt:\n previous_attempt = attempt['attempt']\n update_answer = True\n if attempt['attempt'] in variation.keys():\n variation[attempt['attempt']] += 'x'\n else:\n variation[attempt['attempt']] = ''\n for answer in attempt['submission_data']:\n if question_included(answer['question_id']):\n save_raw_answer(answer, '%d_%s_v%d%s' % \\\n (answer['question_id'], acct,\n attempt['attempt'],\n variation[attempt['attempt']]))\n if update_answer:\n answers[answer['question_id']] = answer\n\n if args.classlist:\n htmlfile.write('''\n Account:\n %s\n
''' % acct)\n else:\n htmlfile.write('''\n Student Number:\n %s\n Name:\n %s\n
''' % (snum, sname))\n\n qn = 1\n for (question_id, question) in questions.items():\n question_name = question['question_name']\n question_text = question['question_text']\n question_type = question['question_type']\n if question_id in sub_questions and question_type == 'calculated_question':\n question_text = sub_questions[question_id]['question_text']\n if question_type == 'text_only_question':\n htmlfile.write('''\n \n %s\n
\n ''' % question_text)\n continue\n\n worth = question['points_possible']\n answer = None\n answer_text = ''\n points = ''\n \n if question_id in answers:\n answer = answers[question_id]\n answer_text = answer['text'] if 'text' in answer else ''\n points = answer['points']\n elif qs != None:\n question_type = None # To avoid formatting of multiple-choice\n answer_text = '''\n *** NO SUBMISSION ***
\n This typically means that this question is part of a question\n group, and the student did not receive this question in the\n group (i.e., the student answered a different question in\n this set).\n '''\n\n if question_type == 'calculated_question' or \\\n question_type == 'short_answer_question' or \\\n question_type == 'essay_question' or \\\n question_type == 'numerical_question':\n pass # use answer exactly as provided\n elif question_type == 'true_false_question' or \\\n question_type == 'multiple_choice_question' or \\\n question_type == 'multiple_answers_question':\n answer_text = ''\n for pa in question['answers']:\n if question_type == 'multiple_answers_question':\n key = 'answer_%s' % pa['id']\n choice = answer[key] if answer != None and key in answer else ''\n if choice == '0': choice = ''\n else:\n choice = 'X' if answer != None and 'answer_id' in answer and pa['id'] == answer['answer_id'] else ''\n answer_text += ' %s %s
' % (choice, pa['text'])\n \n elif question_type == 'fill_in_multiple_blanks_question' or \\\n question_type == 'multiple_dropdowns_question':\n answer_text = ''\n tokens = []\n dd_answers = {}\n for pa in question['answers']:\n if pa['blank_id'] not in tokens: tokens.append(pa['blank_id'])\n dd_answers[pa['id']] = pa['text']\n for token in tokens:\n key = 'answer_for_%s' % token\n choice = answer[key] if answer != None and key in answer else ''\n if choice != '' and question_type == 'multiple_dropdowns_question' and choice in dd_answers:\n choice = dd_answers[choice]\n answer_text += '| %s | => | %s |
' % (token, choice)\n answer_text += '
'\n \n elif question_type == 'matching_question':\n answer_text = ''\n matches = {}\n for match in question['matches']:\n matches['%d' % match['match_id']] = match['text']\n for pa in question['answers']:\n key = 'answer_%s' % pa['id']\n choice = matches[answer[key]] if answer != None and key in answer and answer[key] in matches else ''\n answer_text += '| %s | => | %s |
' % (pa['text'], choice)\n answer_text += '
'\n \n elif question_type == 'file_upload_question':\n pass # This is handled in the processing of history above.\n elif question_type != None:\n raise ValueError('Invalid question type: \"%s\"' % question_type)\n \n htmlfile.write('''\n \n
Question %d [%s]:
\n
%s
\n
\n %s \n %s \n
\n
Answer%s:
\n
%s
\n
\n ''' % (question_id, question_id, question_id, question_name,\n question_text, worth, points,\n '' if num_attempts <= 1 else ' (%d attempts)' % num_attempts,\n answer_text))\n qn += 1\n\ndef flatten_list(l):\n if isinstance(l, list):\n for x in [x for x in l if isinstance(x, list)]:\n l.remove(x)\n l.extend(x)\n return l\n \ndef end_file(htmlfile):\n htmlfile.write('\\n')\n htmlfile.close()\n\ndef question_included(qid):\n if args.not_question and qid in args.not_question:\n return False\n elif args.only_question:\n return qid in args.only_question\n else:\n return True\n\nparser = argparse.ArgumentParser()\ncanvas.Canvas.add_arguments(parser, quiz=True)\nparser.add_argument(\"-l\", \"--classlist\",\n type=str, #type=argparse.FileType('r', newline=''),\n help=\"CSV file containing student number and account. If used, account is provided on the front page, otherwise it will include name and student number.\")\nparser.add_argument(\"-p\", \"--output-prefix\",\n help=\"Path/prefix for output files\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\"--only-question\", action='append', nargs='+', type=int,\n metavar=\"QUESTIONID\", help=\"Questions to include\")\ngroup.add_argument(\"--not-question\", action='append', nargs='+', type=int,\n metavar=\"QUESTIONID\", help=\"Questions to exclude\")\nparser.add_argument(\"--css\",\n help=\"Additional CSS file to use in PDF creation.\")\nparser.add_argument(\"--template-only\", action='store_true',\n help=\"Create only the template, without students.\")\nparser.add_argument(\"-d\", \"--debug\", help=\"Enable debugging mode\",\n action='store_true')\nargs = parser.parse_args()\n\nflatten_list(args.only_question)\nflatten_list(args.not_question)\n\ncanvas = canvas.Canvas(args=args)\n\nstudent_accounts = {}\nhtmlfile_list = []\n\nif args.classlist:\n print('Reading classlist...')\n \n with open(args.classlist, 'r', newline='') as file:\n reader = csv.DictReader(file)\n if 'SNUM' not in reader.fieldnames:\n raise ValueError('Classlist CSV file does not contain student number.')\n if 'ACCT' not in reader.fieldnames:\n raise ValueError('Classlist CSV file does not contain account.')\n for row in reader:\n student_accounts[row['SNUM']] = row['ACCT']\n\nprint('Reading data from Canvas...')\ncourse = canvas.course(args.course, prompt_if_needed=True)\nprint('Using course: %s / %s' % (course['term']['name'],\n course['course_code']))\n\nquiz = course.quiz(args.quiz, prompt_if_needed=True)\nprint('Using quiz: %s' % (quiz['title']))\n\nif not args.output_prefix:\n args.output_prefix = re.sub(r'[^A-Za-z0-9-_]+', '', quiz['title'])\n print('Using prefix: %s' % args.output_prefix);\n\n# Reading questions\nprint('Retrieving quiz questions...')\n(questions, groups) = quiz.questions(question_included)\n\nprint('Retrieving quiz submissions...')\nif args.template_only:\n quiz_submissions = []\n submissions = {}\nelse:\n (quiz_submissions, submissions) = quiz.submissions(debug=args.debug)\n\nprint('Generating HTML files...')\n\nfile_no = 1;\ntemplate_file = start_file(args.output_prefix + '_template.html')\nif not args.template_only:\n exams_file = start_file(args.output_prefix + '_exams_%d.html' % file_no)\n rawanswers_file = zipfile.ZipFile(args.output_prefix + '_raw_answers.zip', 'w')\n\nwrite_exam_file(template_file, questions)\n\nif args.debug:\n with open('debug.json', 'w') as file:\n data = {}\n data['quiz'] = quiz.data\n data['questions'] = questions\n data['quiz_submissions'] = quiz_submissions\n data['submissions'] = submissions\n 
json.dump(data, file, indent=2)\n\nnum_exams = 0\nfor qs in quiz_submissions:\n print(\"Exporting student %d out of %d...\" %\n (num_exams + 1, len(quiz_submissions)), end='\\r');\n write_exam_file(exams_file, questions, qs)\n num_exams += 1\n if num_exams % 20 == 0:\n end_file(exams_file)\n file_no += 1\n exams_file = start_file(args.output_prefix + '_exams_%d.html' % file_no)\n\nend_file(template_file)\nif not args.template_only:\n end_file(exams_file)\n rawanswers_file.close()\n\nprint('\\nConverting to PDF...')\ncss = [weasyprint.CSS(path.join(path.dirname(__file__),'canvasquiz.css'))]\nif args.css:\n css.append(weasyprint.CSS(args.css))\n\nfor file in htmlfile_list:\n print(file + '... ', end='\\r');\n weasyprint.HTML(filename=file).write_pdf(file + '.pdf', stylesheets=css)\n\nprint('\\nDONE. Created files:')\nfor file in htmlfile_list:\n print('- ' + file + '.pdf')\nif not args.template_only:\n print('- ' + args.output_prefix + '_raw_answers.zip')\n","sub_path":"quiz2pdf.py","file_name":"quiz2pdf.py","file_ext":"py","file_size_in_byte":13419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
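The script above ends by feeding each generated HTML file through weasyprint. A minimal self-contained sketch of that conversion call, with made-up content and stylesheet standing in for the exam files and canvasquiz.css:

import weasyprint

css = [weasyprint.CSS(string='h1 { color: navy; }')]  # stands in for canvasquiz.css
weasyprint.HTML(string='<h1>Exam template</h1><p>Question 1 ...</p>').write_pdf(
    'template.pdf', stylesheets=css)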
+{"seq_id":"24117847","text":"#!/usr/bin/env python3\n# -*- encoding utf-8 -*-\n\nimport argparse\nfrom lxml import etree\nimport os\nimport sys\nimport re\n\n# TODO: Tones\n# TODO: Format\n\ndef check_cex(cexxml, possetxml, phonexml):\n good_cex = True\n if not cex_has_allpos(cexxml, possetxml):\n sys.stderr.write('cex does not match posset\\n')\n good_cex = False\n\n if not cex_has_allphones(cexxml, phonexml):\n sys.stderr.write('cex does not match posset\\n')\n good_cex = False\n\n return good_cex \n\ndef cex_has_allpos(cexxml, possetxml):\n \"\"\" The CEX file must have all the POS tags in the 'pos' set \"\"\"\n posset_pos = set()\n for tag in possetxml.xpath('//tag'):\n name = tag.get('name', '')\n name = name.strip(\"'\").strip('\"')\n if len(name):\n posset_pos.add(name)\n\n if len(posset_pos):\n cexpos = cexxml.xpath('//set[@name=\"pos\"]')\n if not cexpos:\n sys.stderr.write('cex does not have a set for pos\\n')\n return False\n cex_pos = set()\n for item in cexpos[0].xpath('item'):\n cex_pos.add(item.get('name'))\n\n missing_pos = list(posset_pos - cex_pos)\n missing_pos.sort()\n if missing_pos:\n sys.stderr.write('POS tags are missing in CEX:\\n\\t' + '\\n\\t'.join(missing_pos) + '\\n')\n\n extra_pos = list(cex_pos - posset_pos)\n extra_pos.sort()\n if extra_pos:\n sys.stderr.write('Extra POS tags in CEX:\\n\\t' + '\\n\\t'.join(extra_pos) + '\\n')\n\n if missing_pos or extra_pos:\n return False\n\n return True\n\ndef cex_has_allphones(cexxml, phonexml):\n\n cex_phones = []\n has_pau = False\n has_sil = False\n has_X = False\n no_bad_names = True\n for phone in cexxml.xpath('//set[@name=\"phone\"]/item'):\n name = phone.get('name')\n if name is None:\n sys.stderr.write('Phone is missing name\\n')\n no_bad_names = False\n continue\n if name == 'pau':\n has_pau = True\n elif name == 'sil':\n has_sil = True\n elif name == 'X':\n has_X = True\n elif not name:\n sys.stderr.write('Phone name is blank\\n')\n no_bad_names = False\n elif name != name.strip():\n sys.stderr.write('Phone name has spaces\\n')\n no_bad_names = False\n else:\n cex_phones.append(name)\n if not has_pau: sys.stderr.write('Missing required phone \"pau\"\\n')\n if not has_sil: sys.stderr.write('Missing required phone \"sil\"\\n')\n if not has_X: sys.stderr.write('Missing required phone \"X\"\\n')\n\n no_duplicate_phones = True\n if len(cex_phones) != len(set(cex_phones)):\n sys.stderr.write('Missing required phone \"X\"\\n')\n duplicate_phones = False\n\n cex_phones = set(cex_phones)\n\n phoneset = set()\n for phone in phonexml.xpath('//phone'):\n name = phone.get('name')\n if name != '_':\n phoneset.add(name)\n\n missing_phones = list(phoneset - cex_phones)\n missing_phones.sort()\n if missing_phones:\n sys.stderr.write('Phones are missing in CEX:\\n\\t' + '\\n\\t'.join(missing_phones) + '\\n')\n\n extra_phones = list(cex_phones - phoneset)\n extra_phones.sort()\n if extra_phones:\n sys.stderr.write('Extra phones in CEX:\\n\\t' + '\\n\\t'.join(extra_phones) + '\\n')\n \n if missing_phones or extra_phones or not all([no_bad_names, has_pau, has_sil, has_X]):\n return False\n \n return True\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--cex', required = True, type = argparse.FileType('rb'),\n help = 'cex xml to check')\n parser.add_argument('-p', '--phoneset', type = argparse.FileType('rb'),\n help = 'phoneset to check against')\n parser.add_argument('-P', '--posset', required = True, type = argparse.FileType('rb'),\n help = 'posset to check against')\n\n args = 
parser.parse_args()\n\n cexxml = etree.parse(args.cex)\n possetxml = etree.parse(args.posset)\n phonexml = etree.parse(args.phoneset)\n\n if not check_cex(cexxml, possetxml, phonexml):\n sys.stderr.write('errors in cex file\\n')\n exit(1)\n exit(0)\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"idlak-misc/validators/check_cex.py","file_name":"check_cex.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
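check_cex boils down to comparing two name sets in both directions and reporting each difference; the same pattern, stripped to its core with inline XML (all element names here are invented for illustration):

from lxml import etree

ref = etree.fromstring('<phones><phone name="a"/><phone name="b"/></phones>')
cex = etree.fromstring('<set name="phone"><item name="a"/><item name="x"/></set>')

ref_names = {p.get('name') for p in ref.xpath('//phone')}
cex_names = {i.get('name') for i in cex.xpath('//set[@name="phone"]/item')}

print('missing in cex:', sorted(ref_names - cex_names))  # ['b']
print('extra in cex:', sorted(cex_names - ref_names))    # ['x']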
+{"seq_id":"22661017","text":"#!/usr/bin/env python\n\nfrom sklearn.linear_model import SGDClassifier as _SGDClassifier\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nimport numpy as np\nfrom codec import codecs_manager\nfrom codec.codecs import NoopCodec\n\nfrom base import EstimatorMixin\nfrom util.param_util import convert_params\n\n\nclass SGDClassifier(EstimatorMixin):\n def __init__(self, options):\n self.handle_options(options)\n\n out_params = convert_params(\n options.get('params', {}),\n bools=['fit_intercept'],\n ints=['random_state', 'n_iter'],\n floats=['l1_ratio', 'alpha', 'eta0', 'power_t'],\n strs=['loss', 'penalty', 'learning_rate'],\n )\n\n if 'loss' in out_params:\n try:\n assert (out_params['loss'] in ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'])\n except AssertionError:\n raise RuntimeError(\n 'Value for parameter \"loss\" has to be one of \"hinge\", \"log\", \"modified_huber\", \"squared_hinge\", or \"perceptron\"')\n self.scaler = StandardScaler()\n self.estimator = _SGDClassifier(**out_params)\n self.columns = None\n self.classes = None\n self.is_classifier = True\n\n def fit(self, X):\n X, y, self.columns = self.preprocess_fit(X)\n scaled_X = self.scaler.fit_transform(X.values)\n self.estimator.fit(scaled_X, y.values)\n\n def partial_fit(self, X, handle_new_cat):\n X, y, columns = self.preprocess_fit(X)\n if self.classes is None:\n self.classes = np.unique(y)\n self.scaler.partial_fit(X.values)\n scaled_X = self.scaler.transform(X.values)\n self.estimator.partial_fit(scaled_X, y, classes=self.classes)\n self.columns = columns\n else:\n self.handle_categorical(X, y, handle_new_cat, self.columns, self.classes)\n if X.empty:\n return\n self.scaler.partial_fit(X.values)\n scaled_X = self.scaler.transform(X.values)\n self.estimator.partial_fit(scaled_X, y)\n\n def predict(self, X, options=None, output_name=None):\n X = self.preprocess_predict(X)\n\n # Allocate output DataFrame\n output_name = 'predicted(%s)' % self.response_variable\n output = pd.DataFrame({output_name: np.empty(len(X))})\n output[output_name] = np.nan\n\n nans = self.drop_na_rows(X)\n scaled_X = self.scaler.transform(X.values)\n y_hat = self.estimator.predict(scaled_X)\n\n output.ix[~nans, output_name] = y_hat\n self.rename_columns(output, options)\n\n return output\n\n def summary(self):\n df = pd.DataFrame()\n n_classes = len(self.estimator.classes_)\n limit = 1 if n_classes == 2 else n_classes\n\n for i, c in enumerate(self.estimator.classes_[:limit]):\n cdf = pd.DataFrame({'feature': self.columns,\n 'coefficient': self.estimator.coef_[i].ravel()})\n cdf = cdf.append(pd.DataFrame({'feature': ['_intercept'],\n 'coefficient': [self.estimator.intercept_[i]]}))\n cdf['class'] = c\n df = df.append(cdf)\n\n return df\n\n @staticmethod\n def register_codecs():\n from codec.codecs import SimpleObjectCodec\n codecs_manager.add_codec('algos.SGDClassifier', 'SGDClassifier', SimpleObjectCodec)\n codecs_manager.add_codec('sklearn.linear_model.stochastic_gradient', 'SGDClassifier', SimpleObjectCodec)\n codecs_manager.add_codec('sklearn.preprocessing.data', 'StandardScaler', SimpleObjectCodec)\n codecs_manager.add_codec('sklearn.linear_model.sgd_fast', 'Hinge', NoopCodec)\n codecs_manager.add_codec('sklearn.linear_model.sgd_fast', 'Log', NoopCodec)\n codecs_manager.add_codec('sklearn.linear_model.sgd_fast', 'ModifiedHuber', NoopCodec)\n codecs_manager.add_codec('sklearn.linear_model.sgd_fast', 'SquaredHinge', 
NoopCodec)\n","sub_path":"apps/Splunk_ML_Toolkit/bin/algos/SGDClassifier.py","file_name":"SGDClassifier.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
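The partial_fit path above has one contract worth remembering: the full label set must be supplied on the first incremental call, because SGD never sees all classes at once. A standalone sklearn sketch of that pattern on synthetic data (this is not the Splunk wrapper, just the underlying idiom):

import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X, y = rng.randn(200, 3), rng.randint(0, 2, 200)

scaler, clf = StandardScaler(), SGDClassifier(random_state=0)
classes = np.unique(y)  # required on the first partial_fit call

for start in range(0, len(X), 50):  # stream the data in mini-batches
    Xb, yb = X[start:start + 50], y[start:start + 50]
    scaler.partial_fit(Xb)  # scaler statistics update incrementally too
    clf.partial_fit(scaler.transform(Xb), yb, classes=classes)

print(clf.predict(scaler.transform(X[:5])))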
+{"seq_id":"118466990","text":"\"\"\"djangolearn URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path,include\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('Students/', views.Students, name='students'),\n path('Createstudents/', views.Createstudents, name='Createstudents'),\n path('Createproducts/', views.Createproducts, name='Createproducts'),\n \n]\n","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"634356331","text":"# Copyright (c) 2017 Intel Corp. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport os\n\nfrom aria.parser.loading import UriLocation\nfrom aria.parser.consumption import (\n ConsumptionContext,\n ConsumerChain,\n Read,\n Validate,\n ServiceTemplate,\n ServiceInstance\n)\n\nfrom vnfsdk_pkgtools import validator\n\n\nclass AriaValidator(validator.ValidatorBase):\n def validate(self, reader):\n context = ConsumptionContext()\n service_template_path = os.path.join(reader.destination,\n reader.entry_definitions)\n context.presentation.location = UriLocation(service_template_path)\n print(reader.entry_definitions_yaml)\n chain = ConsumerChain(context, (Read, Validate, ServiceTemplate, ServiceInstance))\n chain.consume()\n if context.validation.dump_issues():\n raise RuntimeError('Validation failed')\n dumper = chain.consumers[-1]\n dumper.dump()\n\n","sub_path":"vnfsdk_pkgtools/validator/aria_validator.py","file_name":"aria_validator.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"429435873","text":"import os\nfrom threading import Thread\n\nimport telegram\nfrom flask import Flask, request\n\napp = Flask(__name__)\napp.config.from_object(os.environ['APP_SETTINGS'])\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\nglobal bot\nbot = telegram.Bot(token=app.config['BOT_TOKEN'])\n\n\n@app.route('/ya', methods=['POST'])\ndef webhook_handler():\n try:\n if request.method == \"POST\":\n update = telegram.Update.de_json(request.get_json(force=True))\n chat_id = update.message.chat.id\n text = '*`' + update.message.text.encode('utf-8') + '`*'\n bot.sendMessage(\n chat_id=chat_id, text=text, parse_mode=telegram.ParseMode.MARKDOWN)\n return 'ok'\n except Exception as e:\n raise\n\n\n@app.route('/set_webhook', methods=['GET', 'POST'])\ndef set_webhook():\n with open(os.path.join(__location__, 'ngrok.host'), 'r') as ngrok_host:\n webhook_url = ngrok_host.read()\n s = bot.setWebhook(webhook_url='https://{}/ya'.format(webhook_url))\n if s:\n return \"webhook setup ok\"\n else:\n return \"webhook setup failed\"\n\n\ndef unset_webhook():\n bot.setWebhook(webhook_url=None)\n\n\n@app.route('/')\ndef index():\n return '.'\n\n\ndef get_last_update_id():\n new_updates = bot.getUpdates(timeout=10)\n if new_updates:\n return new_updates[0].update_id\n\n\ndef get_updates():\n unset_webhook()\n last_update_id = get_last_update_id()\n while True:\n if last_update_id:\n for update in bot.getUpdates(offset=last_update_id, timeout=10):\n with app.test_request_context('/ya', method='POST', data=update.to_json()):\n app.dispatch_request()\n text = update.message.text\n chat_id = update.message.chat_id\n update_id = update.update_id\n\n bot.sendMessage(chat_id=chat_id, text=text)\n last_update_id = update_id + 1\n if text == 'exit':\n bot.getUpdates(offset=last_update_id)\n return\n else:\n last_update_id = get_last_update_id()\n\nif not app.config['BOT_TOKEN']:\n set_webhook()\n\n\ndef main():\n if app.config['BOT_TOKEN']:\n unset_webhook()\n updates_thread = Thread(target=get_updates, args=())\n updates_thread.setDaemon(True)\n updates_thread.start()\n app.run(host='0.0.0.0', use_reloader=False)\n else:\n app.run(host='0.0.0.0')\n\nif __name__ == '__main__':\n main()\n","sub_path":"wundle.py","file_name":"wundle.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"28339532","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport json\nimport random\n\nfrom config import CLASSES_FILE, RACES_FILE\n\n\nclass Character:\n\n def __init__(self, char_class, char_race):\n # текстовое отображение\n self.char_class = char_class\n self.char_race = char_race\n\n # внутреннее обозначение\n self.int_class = \"\"\n self.int_race = \"\"\n\n # initial char mods\n self.communication = 0\n self.reputation = 0\n self.skill = 0\n self.knowledge = 0\n self.responsibility = 0\n self.connections = 0\n\n self.skills = [self.communication, self.reputation, self.skill, self.knowledge,\n self.responsibility, self.connections]\n\n self.map_class_race_to_internal()\n self.set_class_mods()\n self.set_race_mods()\n self.set_random_mods()\n\n def map_class_race_to_internal(self):\n with open(CLASSES_FILE, encoding=\"utf-8\") as jfile:\n classes_json = json.loads(jfile.read())\n with open(RACES_FILE, encoding=\"utf-8\") as jfile:\n races_json = json.loads(jfile.read())\n for item in races_json:\n if races_json[item][\"external_name\"] == self.char_race:\n self.int_race = item\n for item in classes_json:\n if classes_json[item][\"external_name\"] == self.char_class:\n self.int_class = item\n if self.int_class == \"\" or self.int_race == \"\":\n print(\"RACE or CLASS stated is unknown. Race, class stated: {}, {}\".format(self.char_race, self.char_class))\n\n def modify_chars(self, json_data, key):\n self.communication += json_data[key][\"mods\"][0]\n self.reputation += json_data[key][\"mods\"][1]\n self.skill += json_data[key][\"mods\"][2]\n self.knowledge += json_data[key][\"mods\"][3]\n self.responsibility += json_data[key][\"mods\"][4]\n self.connections += json_data[key][\"mods\"][5]\n\n def simple_modify_chars(self, mod_list):\n self.communication += mod_list[0]\n self.reputation += mod_list[1]\n self.skill += mod_list[2]\n self.knowledge += mod_list[3]\n self.responsibility += mod_list[4]\n self.connections += mod_list[5]\n\n def set_class_mods(self):\n with open(CLASSES_FILE, encoding=\"utf-8\") as jfile:\n classes_json = json.loads(jfile.read())\n self.modify_chars(classes_json, self.int_class)\n\n def set_race_mods(self):\n with open(RACES_FILE, encoding=\"utf-8\") as jfile:\n races_json = json.loads(jfile.read())\n self.modify_chars(races_json, self.int_race)\n\n def set_random_mods(self):\n random_mods = [0, 0, 0, 0, 0, 0]\n while sum(random_mods) < 3:\n index = random.randint(0, 5)\n if random_mods[index] == 0:\n random_mods[index] = 1\n self.simple_modify_chars(random_mods)\n\n def get_character_mods(self):\n return [self.communication, self.reputation, self.skill, self.knowledge, self.responsibility, self.connections]\n\n def get_char_description(self):\n description = {\n \"class\": self.char_class,\n \"race\": self.char_race,\n \"communication\": self.communication,\n \"reputation\": self.reputation,\n \"skill\": self.skill,\n \"knowledge\": self.knowledge,\n \"responsibility\": self.responsibility,\n \"connections\": self.connections\n }\n # print(json.dumps(description))\n return description","sub_path":"character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"330545452","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 22 10:21:52 2019\n\n@author: Lucia\n\"\"\"\n\nfrom PyQt5 import Qt\nimport pyqtgraph.parametertree.parameterTypes as pTypes\nimport pyqtgraph.parametertree.Parameter as pParams\n\nimport numpy as np\nimport copy\nimport time\nimport re\n\nimport Pyxi.NifGenerator as NifGen\nimport Pyxi.NiScope as NiScope\n\nSweepsParam = {'name':'SweepsConfig',\n 'type':'group',\n 'children':({'name':'Enable',\n 'type':'bool',\n 'value':False},\n {'name':'VgsSweep',\n 'type':'group',\n 'children':({'name':'Start',\n 'type': 'float',\n 'value': 0,\n 'siPrefix': True,\n 'suffix': 'V'},\n {'name':'Stop',\n 'type': 'float',\n 'value': -0.5,\n 'siPrefix': True,\n 'suffix': 'V'},\n {'name':'nSweeps',\n 'type': 'int',\n 'value': 4},\n {'name':'timeXsweep',\n 'type':'int',\n 'value':15,\n 'siPrefix':True,\n 'suffix':'sec'},)\n },\n {'name':'AcSweep',\n 'type':'group',\n 'children':({'name':'Start',\n 'type': 'float',\n 'value': 0.5,\n 'siPrefix': True,\n 'suffix': 'V'},\n {'name':'Stop',\n 'type': 'float',\n 'value': 0.5,\n 'siPrefix': True,\n 'suffix': 'V'},\n {'name':'nSweeps',\n 'type': 'int',\n 'value': 1},\n ) \n })\n }\n \n##############################Sweeps########################################## \nclass SweepsParameters(pTypes.GroupParameter): \n \n def __init__(self, **kwargs):\n pTypes.GroupParameter.__init__(self, **kwargs)\n \n self.addChild(SweepsParam)\n \n self.SweepsConfig = self.param('SweepsConfig')\n self.VgsConfig = self.SweepsConfig.param('VgsSweep')\n self.AcConfig = self.SweepsConfig.param('AcSweep')\n \n self.AcConfig.sigTreeStateChanged.connect(self.on_Ac_Sweep_Changed)\n self.on_Ac_Sweep_Changed()\n self.VgsConfig.sigTreeStateChanged.connect(self.on_Vgs_Sweep_Changed)\n self.on_Vgs_Sweep_Changed()\n \n def on_Ac_Sweep_Changed(self):\n self.AcSweepValues = np.linspace(self.AcConfig.param('Start').value(),\n self.AcConfig.param('Stop').value(),\n self.AcConfig.param('nSweeps').value())\n \n def on_Vgs_Sweep_Changed(self):\n self.VgsSweepValues = np.linspace(self.VgsConfig.param('Start').value(),\n self.VgsConfig.param('Stop').value(),\n self.VgsConfig.param('nSweeps').value()\n )\n self.VgsTime = self.VgsConfig.param('timeXsweep').value()\n \n if self.VgsTime > 0.2:\n if self.VgsTime % 2 != 0:\n self.VgsTime += 1\n self.CountTime = self.VgsTime/0.2\n print(self.VgsTime, self.CountTime)\n \n else:\n self.CountTime = 0\n print(self.VgsTime, self.CountTime)\n \n def GetSweepParams(self):\n self.Sweeps = {'VgsSweep':{},\n 'AcSweep':{}\n }\n for Config in self.VgsConfig.children():\n self.Sweeps['VgsSweep'][Config.name()] = Config.value()\n \n for Config in self.AcConfig.children():\n self.Sweeps['AcSweep'][Config.name()] = Config.value()\n\n return self.Sweeps\n\n# def ChangeVCols(self, ColsConfig, FsGen, GenSize, CMVoltage):\n# \n# if self.IterAcSweep >= len(self.AcSweepValues):\n# CMVoltage=self.VgsSweepValues[self.IterVgsSweep]\n# \n# self.IterAcSweep = 0\n# self.IterVgsSweep = self.IterVgsSweep+1\n# for Col, val in ColsConfig.items():\n# ColsConfig[Col]['Amplitude']=self.AcSweepValues[self.IterAcSweep]\n# \n# self.IterAcSweep = self.IterAcSweep+1\n# self.Generator = {'ColsConfig':ColsConfig,\n# 'FsGen':FsGen,\n# 'GenSize':GenSize,\n# 'CMVoltage':CMVoltage\n# }\n# \n# if self.IterVgsSweep >= len(self.VgsSweepValues):\n# EndOfSweeps = True\n# self.IterAcSweep=0\n# self.IterVgsSweep=0\n# if self.IterVgsSweep < len(self.VgsSweepValues):\n# EndOfSweeps = False\n# \n# return EndOfSweeps, self.Generator\n \n def 
NextSweep(self, nAcSw, nVgsSw, ColsConfig, FsGen, GenSize, CMVoltage):\n        # update the common-mode (Vgs) voltage\n        CMVoltage = self.VgsSweepValues[nVgsSw]\n        print(CMVoltage, nVgsSw)\n        print(self.AcSweepValues, nAcSw)\n        # update the AC amplitudes\n        for Col, val in ColsConfig.items():\n            ColsConfig[Col]['Amplitude'] = self.AcSweepValues[nAcSw]\n\n        # update the generator dictionary\n        self.Generator = {'ColsConfig': ColsConfig,\n                          'FsGen': FsGen,\n                          'GenSize': GenSize,\n                          'CMVoltage': CMVoltage\n                          }\n\n        return self.Generator\n","sub_path":"PyFreqMux/Sweep.py","file_name":"Sweep.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
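NextSweep is addressed by an (nAcSw, nVgsSw) index pair; if a caller walks the sweeps as one flat counter with the AC index advancing fastest (as the commented-out ChangeVCols logic suggests), divmod recovers the pair. A small sketch using the tree's default sweep ranges, with everything else assumed:

import numpy as np

vgs = np.linspace(0, -0.5, 4)  # VgsSweep defaults: Start, Stop, nSweeps
ac = np.linspace(0.5, 0.5, 1)  # AcSweep defaults

for step in range(len(vgs) * len(ac)):
    n_vgs, n_ac = divmod(step, len(ac))  # inner (AC) index advances fastest
    print(step, round(vgs[n_vgs], 3), ac[n_ac])  # the pair passed to NextSweep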
+{"seq_id":"110727399","text":"class Search_problem(object):\n def start_node(self):\n raise NotImplementedError(\"start_node\")\n\n def is_goal(self, node):\n raise NotImplementedError(\"is_goal\")\n\n def neighbors(self, node):\n raise NotImplementedError(\"neighbors\")\n \n def heuristic(self, n):\n return 0\n\n \n\nclass Arc(object):\n \n def __init__(self, from_node, to_node, cost = 1, action = None):\n assert cost >= 0, (\"Cost cannot be negative for \" + str(from_node) + \" -> \" + str(to_node) + \"cost: \" + str(cost))\n self.from_node = from_node\n self.to_node = to_node\n self.action = action\n self.cost = cost\n\n def __repr__(self):\n if self.action:\n return str(self.from_node) + \" -- \" + str(self.action) + \" --> \" + str(self.to_node)\n else:\n return str(self.from_node) + \" --> \" + str(self.to_node)\n\nclass Search_problem_from_explicit_graph(Search_problem):\n\n def __init__(self, nodes, arcs, start = None, goals = set(), hmap={}):\n self.neighs = {}\n self.nodes = nodes\n for node in nodes:\n self.neighs[node] = []\n self.arcs = arcs\n for arc in arcs:\n self.neighs[arc.from_node].append(arc)\n self.start = start\n self.goals = goals\n self.hmap = hmap\n\n def start_node(self):\n return self.start\n \n def is_goal(self, node):\n return node in self.goals\n \n def neighbors(self, node):\n return self.neighs[node]\n\n def heuristic(self, node):\n if node in self.hmap:\n return self.hmap[node]\n else:\n return 0\n\n def __repr__(self):\n res = \"\"\n for arc in self.arcs:\n res += str(arc) + \". \"\n \n return res\n\n def neighbor_nodes(self, node):\n return (path.to_node for path in self.neighs[node])\n\nclass Path(object):\n\n def __init__(self, initial, arc = None):\n self.initial = initial\n self.arc = arc\n if arc is None:\n self.cost = 0\n else:\n self.cost = initial.cost + arc.cost\n\n def end(self):\n if self.arc is None:\n return self.initial\n else:\n self.arc.to_node\n \n def nodes(self):\n current = self\n while current.arc is not None:\n yield current.arc.to_node\n current = current.initial\n yield current.initial\n\n def initial_nodes(self):\n if self.arc is None:\n for nd in self.initial.nodes(): yield nd\n\n def __repr__(self):\n if self.arc is None:\n return str(self.initial)\n elif self.arc.action:\n return (str(self.initial) + \"\\n -- \" + str(self.arc.action) + \" --> \" + str(self.arc.to_node))\n else:\n return str(self.initial) + \" --> \" + str(self.arc.to_node)\n\nproblem1 = Search_problem_from_explicit_graph(\n {'a', 'b', 'c', 'd', 'g'},\n [\n Arc('a', 'b', 1),\n Arc('a', 'c', 3),\n Arc('b', 'd', 3),\n Arc('b','c',1),\n Arc('c','d',1),\n Arc('c','g',3),\n Arc('d','g',1),\n ],\n start = 'a',\n goals = { 'g' }\n)\n\nprint(problem1)","sub_path":"demos/python-for-AI/search-for-solutions/search-for-solutions.py","file_name":"search-for-solutions.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"23984271","text":"from django.conf.urls import url\n\nfrom users.views import ActiveUserView\nfrom . import views\n\napp_name = 'users'\nurlpatterns = [\n url(r'^register/', views.register, name='register'),\n url(r'^balabala/', views.rejson, name='rejson'),\n url(r'^relation/', views.uuuu, name='relation'),\n url(r'^profile/', views.nnnn, name='profile'),\n url(r'^message/', views.message, name='message'),\n url(r'^hhhh/', views.hhhh, name='hhhh'),\n url(r'^active/(?P.*)/$', ActiveUserView.as_view(), name=\"user_active\")\n]\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"112733462","text":"\"\"\"\nGeneral test of some SLA features: CPU pinning, CPU host, delete protection,\ncount threads as cores and placement policy\n\"\"\"\nimport random\n\nimport pytest\nfrom rhevmtests.compute.sla.fixtures import ( # noqa: F401\n create_vms,\n start_vms,\n stop_vms,\n update_cluster,\n update_vms,\n update_vms_to_default_parameters,\n update_vms_cpus_to_hosts_cpus,\n update_cluster_to_default_parameters\n)\n\nimport art.rhevm_api.tests_lib.low_level.hosts as ll_hosts\nimport art.rhevm_api.tests_lib.low_level.sla as ll_sla\nimport art.rhevm_api.tests_lib.low_level.vms as ll_vms\nimport config as conf\nimport helpers\nfrom art.test_handler.tools import polarion\nfrom art.unittest_lib import testflow, tier1, tier2, SlaTest\n\n\nclass BasicSlaSanity(SlaTest):\n \"\"\"\n Base class for all sla sanity tests\n \"\"\"\n\n @staticmethod\n def _update_vm_vcpu_pinning(vcpu_pinning, positive=True, compare=True):\n \"\"\"\n Update the VM VCPU pinning\n\n Args:\n vcpu_pinning (list): VCPU pinning\n positive (bool): Positive test behaviour\n compare (bool): Enable validator\n \"\"\"\n testflow.step(\n \"Set VM %s VCPU pinning to %s\", conf.VM_NAME[0], vcpu_pinning\n )\n assert ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n vcpu_pinning=vcpu_pinning,\n compare=compare\n ) == positive\n\n\n@pytest.mark.usefixtures(\n create_vms.__name__,\n update_vms_to_default_parameters.__name__\n)\nclass TestProtectedVm(BasicSlaSanity):\n \"\"\"\n 1) Remove the protected VM\n 2) Force remove the protected VM\n \"\"\"\n vms_create_params = {\n conf.PROTECTED_VM_NAME: {\n conf.VM_CLUSTER: conf.CLUSTER_NAME[0],\n conf.VM_STORAGE_DOMAIN: conf.STORAGE_NAME[0],\n conf.VM_DISK_SIZE: conf.GB,\n conf.VM_NIC: conf.NIC_NAME[0],\n conf.VM_NETWORK: conf.MGMT_BRIDGE,\n conf.VM_PROTECTED: True\n }\n }\n update_to_default_params = [conf.PROTECTED_VM_NAME]\n\n @tier2\n @polarion(\"RHEVM3-9512\")\n def test_remove_protected_vm(self):\n \"\"\"\n Remove the protected VM\n \"\"\"\n testflow.step(\n \"Remove the protected VM %s\", conf.PROTECTED_VM_NAME\n )\n assert not ll_vms.removeVm(positive=True, vm=conf.PROTECTED_VM_NAME)\n\n @tier2\n @polarion(\"RHEVM3-9519\")\n def test_force_remove_protected_vm(self):\n \"\"\"\n Attempt to force remove the protected VM\n \"\"\"\n testflow.step(\n \"Force remove the protected VM %s\", conf.PROTECTED_VM_NAME\n )\n assert not ll_vms.removeVm(\n positive=True, vm=conf.PROTECTED_VM_NAME, force=True\n )\n\n\n@pytest.mark.usefixtures(update_vms_to_default_parameters.__name__)\nclass TestCPUHostCase1(BasicSlaSanity):\n \"\"\"\n Update the migratable VM to use CPU host\n \"\"\"\n update_to_default_params = conf.VM_NAME[:1]\n\n @tier2\n @polarion(\"RHEVM3-9527\")\n def test_update_migratable_vm_to_use_cpu_host(self):\n \"\"\"\n Update the migratable VM to use CPU host\n \"\"\"\n testflow.step(\n \"Update the migratable VM %s CPU passthrough to 'host'\",\n conf.VM_NAME[0]\n )\n assert not ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n cpu_mode=conf.VM_HOST_PASS_THROUGH\n )\n\n\n@pytest.mark.usefixtures(update_vms_to_default_parameters.__name__)\nclass TestCPUHostCase2(BasicSlaSanity):\n \"\"\"\n Update the user migratable VM to use CPU host\n \"\"\"\n update_to_default_params = conf.VM_NAME[:1]\n\n @tier2\n @polarion(\"RHEVM3-9531\")\n def test_update_user_migratable_vm_to_use_cpu_host(self):\n \"\"\"\n Update the user migratable VM to use CPU host\n \"\"\"\n testflow.step(\n \"Update the user migratable VM %s CPU passthrough to 'host'\",\n 
conf.VM_NAME[0]\n )\n assert not ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n placement_affinity=conf.VM_USER_MIGRATABLE,\n cpu_mode=conf.VM_HOST_PASS_THROUGH\n )\n\n\n@pytest.mark.usefixtures(update_vms.__name__)\nclass TestCPUHostCase3(BasicSlaSanity):\n \"\"\"\n Update the VM with host_passthrough to migratable\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_CPU_MODE: conf.VM_HOST_PASS_THROUGH\n }\n }\n\n @tier2\n @polarion(\"RHEVM3-9523\")\n def test_update_vm_with_host_passthrough_to_migratable(self):\n \"\"\"\n Update the VM with host_passthrough to migratable\n \"\"\"\n testflow.step(\n \"Update the VM %s with host_passthrough to migratable\",\n conf.VM_NAME[0]\n )\n assert not ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n placement_affinity=conf.VM_MIGRATABLE\n )\n\n\n@pytest.mark.usefixtures(update_vms.__name__)\nclass TestCPUHostCase4(BasicSlaSanity):\n \"\"\"\n Unpin the VM with host_passthrough\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_CPU_MODE: conf.VM_HOST_PASS_THROUGH\n }\n }\n\n @tier1\n @pytest.mark.skipif(conf.PPC_ARCH, reason=conf.PPC_SKIP_MESSAGE)\n @polarion(\"RHEVM3-9533\")\n def test_unpin_vm_with_host_passthrough(self):\n \"\"\"\n Unpin the VM with host_passthrough\n \"\"\"\n testflow.step(\n \"Unpin the VM %s with host_passthrough\", conf.VM_NAME[0]\n )\n assert ll_vms.updateVm(\n positive=True, vm=conf.VM_NAME[0], placement_host=conf.VM_ANY_HOST\n )\n\n\n@pytest.mark.usefixtures(update_vms.__name__)\nclass TestCPUHostCase5(BasicSlaSanity):\n \"\"\"\n Update the VM with host_passthrough to user migratable\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_CPU_MODE: conf.VM_HOST_PASS_THROUGH\n }\n }\n\n @tier1\n @pytest.mark.skipif(conf.PPC_ARCH, reason=conf.PPC_SKIP_MESSAGE)\n @polarion(\"RHEVM3-9535\")\n def test_update_vm_with_host_passthrough_to_user_migratable(self):\n \"\"\"\n Update the VM with host_passthrough to user migratable\n \"\"\"\n testflow.step(\n \"Update the VM %s with host_passthrough to user migratable\",\n conf.VM_NAME[0]\n )\n assert not ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n placement_affinity=conf.VM_USER_MIGRATABLE\n )\n\n\n@pytest.mark.usefixtures(\n update_vms.__name__,\n start_vms.__name__\n)\nclass TestCPUHostCase6(BasicSlaSanity):\n \"\"\"\n Check that VM with CPU host is running with correct QEMU values\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_CPU_MODE: conf.VM_HOST_PASS_THROUGH\n }\n }\n vms_to_start = conf.VM_NAME[:1]\n wait_for_vms_ip = False\n\n @tier1\n @pytest.mark.skipif(conf.PPC_ARCH, reason=conf.PPC_SKIP_MESSAGE)\n @polarion(\"RHEVM3-9536\")\n def test_check_qemu_params(self):\n \"\"\"\n Check that VM runs with the correct \"-cpu\" value on QEMU\n \"\"\"\n expected_value = \"host\"\n testflow.step(\n \"Check that the VM %s QEMU process has arg '-cpu' equal to '%s'\",\n conf.VM_NAME[0], expected_value\n )\n value = helpers.get_vm_qemu_argument_from_host(\n host_resource=conf.VDS_HOSTS[0],\n vm_name=conf.VM_NAME[0],\n qemu_arg_name=\"cpu\"\n )\n assert value == expected_value\n\n\n@pytest.mark.usefixtures(\n update_cluster.__name__,\n update_vms_cpus_to_hosts_cpus.__name__,\n update_vms.__name__,\n 
stop_vms.__name__\n)\nclass BasicThreadSla(BasicSlaSanity):\n \"\"\"\n Base class for all tests connected to the threads_as_core option\n \"\"\"\n cluster_to_update_params = None\n vms_to_hosts_cpus = {conf.VM_NAME[0]: 0}\n double_vms_cpus = None\n threads_on = None\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0]\n }\n }\n vms_to_stop = conf.VM_NAME[:1]\n\n\nclass TestThreadsOff(BasicThreadSla):\n \"\"\"\n Check that a VM with a number of CPUs equal to\n the host number of CPUs (without threads) starts successfully\n in a cluster with the threads_as_core option disabled\n \"\"\"\n cluster_to_update_params = {\n conf.CLUSTER_THREADS_AS_CORE: False\n }\n double_vms_cpus = False\n threads_on = False\n\n @tier1\n @polarion(\"RHEVM3-9518\")\n def test_cores_as_threads_off(self):\n \"\"\"\n Start the VM\n \"\"\"\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert ll_vms.startVm(positive=True, vm=conf.VM_NAME[0])\n\n\nclass TestNegativeThreadsOff(BasicThreadSla):\n \"\"\"\n Check that a VM with a number of CPUs greater than\n the host number of CPUs (without threads) fails to start\n in a cluster with the threads_as_core option disabled\n \"\"\"\n cluster_to_update_params = {\n conf.CLUSTER_THREADS_AS_CORE: False\n }\n double_vms_cpus = True\n threads_on = False\n\n @tier2\n @polarion(\"RHEVM3-9517\")\n def test_cores_as_threads_off(self):\n \"\"\"\n Start the VM\n \"\"\"\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert not ll_vms.startVm(positive=True, vm=conf.VM_NAME[0])\n\n\nclass TestThreadsOn(BasicThreadSla):\n \"\"\"\n Check that a VM with a number of CPUs equal to\n the host number of CPUs (including threads) starts successfully\n in a cluster with the threads_as_core option enabled\n \"\"\"\n cluster_to_update_params = {\n conf.CLUSTER_THREADS_AS_CORE: True\n }\n double_vms_cpus = False\n threads_on = True\n\n @tier1\n @polarion(\"RHEVM3-9515\")\n def test_cores_as_threads_on(self):\n \"\"\"\n Start the VM\n \"\"\"\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert ll_vms.startVm(positive=True, vm=conf.VM_NAME[0])\n\n\nclass TestThreadsOnNegative(BasicThreadSla):\n \"\"\"\n Check that a VM with a number of CPUs greater than\n the host number of CPUs (including threads) fails to start\n in a cluster with the threads_as_core option enabled\n \"\"\"\n cluster_to_update_params = {\n conf.CLUSTER_THREADS_AS_CORE: True\n }\n double_vms_cpus = True\n threads_on = True\n\n @tier2\n @polarion(\"RHEVM3-9516\")\n def test_cores_as_threads_on(self):\n \"\"\"\n Start the VM\n \"\"\"\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert not ll_vms.startVm(positive=True, vm=conf.VM_NAME[0])\n\n\n@pytest.mark.usefixtures(update_vms.__name__)\nclass TestCPUPinCase1(BasicSlaSanity):\n \"\"\"\n Check CPU pinning format\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0]\n }\n }\n\n @tier1\n @polarion(\"RHEVM3-9541\")\n def test_cpupin_format1(self):\n \"\"\"\n Set pinning to 0#0\n \"\"\"\n self._update_vm_vcpu_pinning(vcpu_pinning=conf.DEFAULT_VCPU_PINNING)\n\n @tier1\n @polarion(\"RHEVM3-12221\")\n def test_cpupin_format2(self):\n \"\"\"\n Set pinning to 0#0-16\n \"\"\"\n self._update_vm_vcpu_pinning(vcpu_pinning=[{\"0\": \"0-16\"}])\n\n @tier2\n @polarion(\"RHEVM3-12222\")\n def test_cpupin_format3(self):\n \"\"\"\n Negative: Set pinning to 0#^1\n \"\"\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=[{\"0\": 
\"^1\"}], positive=False\n )\n\n @tier2\n @polarion(\"RHEVM3-12223\")\n def test_cpupin_format4(self):\n \"\"\"\n Negative: Set pinning to 0#^1,^2\n \"\"\"\n vcpu_pinning = helpers.adapt_vcpu_pinning_to_cli(\n vcpu_pinning=[{\"0\": \"^1,^2\"}]\n )\n self._update_vm_vcpu_pinning(\n vcpu_pinning=vcpu_pinning, positive=False\n )\n\n @tier1\n @polarion(\"RHEVM3-12224\")\n def test_cpupin_format5(self):\n \"\"\"\n Set pinning to 0#0-3,^1\n \"\"\"\n vcpu_pinning = helpers.adapt_vcpu_pinning_to_cli(\n vcpu_pinning=[{\"0\": \"0-3,^1\"}]\n )\n compare = conf.ART_CONFIG['RUN'][\"engine\"] != \"cli\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=vcpu_pinning, compare=compare\n )\n\n @tier1\n @polarion(\"RHEVM3-12225\")\n def test_cpupin_format6(self):\n \"\"\"\n Set pinning to 0#0-3,^1,^2\n \"\"\"\n vcpu_pinning = helpers.adapt_vcpu_pinning_to_cli(\n vcpu_pinning=[{\"0\": \"0-3,^1,^2\"}]\n )\n compare = conf.ART_CONFIG['RUN'][\"engine\"] != \"cli\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=vcpu_pinning, compare=compare\n )\n\n @tier1\n @polarion(\"RHEVM3-12226\")\n def test_cpupin_format7(self):\n \"\"\"\n Set pinning to 0#1,2,3\n \"\"\"\n vcpu_pinning = helpers.adapt_vcpu_pinning_to_cli(\n vcpu_pinning=[{\"0\": \"1,2,3\"}]\n )\n compare = conf.ART_CONFIG['RUN'][\"engine\"] != \"cli\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=vcpu_pinning, compare=compare\n )\n\n @tier2\n @polarion(\"RHEVM3-12227\")\n def test_cpupin_format8(self):\n \"\"\"\n Negative: Set pinning to 0#0_0#1\n \"\"\"\n vcpu_pinning = helpers.adapt_vcpu_pinning_to_cli(\n vcpu_pinning=[{\"0\": \"0\"}, {\"0\": \"1\"}]\n )\n self._update_vm_vcpu_pinning(\n vcpu_pinning=vcpu_pinning, positive=False\n )\n\n @tier2\n @polarion(\"RHEVM3-12228\")\n def test_cpupin_format9(self):\n \"\"\"\n Negative: Letter instead of pCPU\n \"\"\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=[{\"0\": \"A\"}], positive=False\n )\n\n @tier2\n @polarion(\"RHEVM3-12229\")\n def test_cpupin_format10(self):\n \"\"\"\n Negative: Letter instead of pCPU\n \"\"\"\n try:\n self._update_vm_vcpu_pinning(\n vcpu_pinning=[{\"A\": \"0\"}], positive=False\n )\n except (TypeError, ValueError):\n pass\n\n @tier2\n @polarion(\"RHEVM3-12230\")\n def test_cpupin_format15(self):\n \"\"\"\n Negative: Pinning to empty range\n \"\"\"\n vcpu_pinning = helpers.adapt_vcpu_pinning_to_cli(\n vcpu_pinning=[{\"0\": \"0-1,^0,^1\"}]\n )\n self._update_vm_vcpu_pinning(\n vcpu_pinning=vcpu_pinning, positive=False\n )\n\n @tier2\n @polarion(\"RHEVM3-12231\")\n def test_cpupin_format16(self):\n \"\"\"\n Negative: Pinning to non-existing pCPU\n \"\"\"\n self._update_vm_vcpu_pinning(vcpu_pinning=[{\"0\": \"4096\"}])\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert not ll_vms.startVm(\n positive=True, vm=conf.VM_NAME[0], timeout=conf.CONNECT_TIMEOUT\n )\n\n @tier2\n @polarion(\"RHEVM3-12232\")\n def test_cpupin_format17(self):\n \"\"\"\n Negative: Pinning to an empty string\n \"\"\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=[{\"0\": \"\"}], positive=False\n )\n\n @tier2\n @polarion(\"RHEVM3-12233\")\n def test_cpupin_format18(self):\n \"\"\"\n Negative: Pinning non-existing vCPU\n \"\"\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=[{\"4096\": \"0\"}], positive=False\n )\n\n\n@pytest.mark.usefixtures(update_vms_to_default_parameters.__name__)\nclass TestCPUPinCase2(BasicSlaSanity):\n \"\"\"\n Set VCPU pinning on the migratable VM\n \"\"\"\n update_to_default_params = conf.VM_NAME[:1]\n\n @tier2\n @polarion(\"RHEVM3-9532\")\n def test_update_vcpu_pinning(self):\n 
\"\"\"\n Update the VM VCPU pinning\n \"\"\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=conf.DEFAULT_VCPU_PINNING, positive=False\n )\n\n\n@pytest.mark.usefixtures(update_vms.__name__)\nclass TestCPUPinCase3(BasicSlaSanity):\n \"\"\"\n Change the VM with VCPU pinning to migratable\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_CPU_PINNING: conf.DEFAULT_VCPU_PINNING\n }\n }\n\n @tier2\n @polarion(\"RHEVM3-9534\")\n def test_update_vm_to_migratable(self):\n \"\"\"\n Update the VM to migratable\n \"\"\"\n testflow.step(\"Update the VM %s\", conf.VM_NAME[0])\n assert not ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n placement_affinity=conf.VM_MIGRATABLE,\n placement_host=conf.VM_ANY_HOST\n )\n\n\n@pytest.mark.usefixtures(update_vms.__name__)\nclass TestCPUPinCase4(BasicSlaSanity):\n \"\"\"\n Set VCPU pinning on the user migratable VM\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_USER_MIGRATABLE\n }\n }\n\n @tier2\n @polarion(\"RHEVM3-9543\")\n def test_update_vcpu_pinning(self):\n \"\"\"\n Update the VM VCPU pinning\n \"\"\"\n self._update_vm_vcpu_pinning(\n vcpu_pinning=conf.DEFAULT_VCPU_PINNING, positive=False\n )\n\n\n@pytest.mark.usefixtures(update_vms.__name__)\nclass TestCPUPinCase5(BasicSlaSanity):\n \"\"\"\n Change the VM with VCPU pinning to user migratable\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_CPU_PINNING: conf.DEFAULT_VCPU_PINNING\n }\n }\n\n @tier2\n @polarion(\"RHEVM3-9542\")\n def test_update_vm_to_user_migratable(self):\n \"\"\"\n Update the VM to user migratable\n \"\"\"\n testflow.step(\"Update the VM %s\", conf.VM_NAME[0])\n assert not ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n placement_affinity=conf.VM_MIGRATABLE,\n placement_host=conf.VM_ANY_HOST\n )\n\n\n@pytest.mark.usefixtures(\n update_vms.__name__,\n stop_vms.__name__\n)\nclass TestCPUPinCase6(BasicSlaSanity):\n \"\"\"\n Check VCPU pinning to the random host CPU\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_CPU_PINNING: conf.DEFAULT_VCPU_PINNING\n }\n }\n vms_to_stop = conf.VM_NAME[:1]\n\n @tier2\n @polarion(\"RHEVM3-9529\")\n def test_random_vcpu_pinning(self):\n \"\"\"\n Update the VM with the random VCPU pinning\n \"\"\"\n online_cpus = ll_sla.get_list_of_online_cpus_on_resource(\n resource=conf.VDS_HOSTS[0]\n )\n host_cpus = online_cpus[-1] + online_cpus[1]\n for n in range(5):\n expected_pin = str(random.choice(online_cpus))\n hyp_exp = \"-\" * int(expected_pin)\n hyp_cores = \"-\" * (host_cpus - int(expected_pin) - 1)\n expected_affinity = \"%sy%s\" % (hyp_exp, hyp_cores)\n\n self._update_vm_vcpu_pinning(vcpu_pinning=[{\"0\": expected_pin}])\n\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert ll_vms.startVm(positive=True, vm=conf.VM_NAME[0])\n\n res = helpers.get_vcpu_pinning_info_from_host(\n host_resource=conf.VDS_HOSTS[0],\n vm_name=conf.VM_NAME[0],\n vcpu=0\n )\n testflow.step(\n \"Check that VCPU 0 is pinned to the CPU %s\", expected_pin\n )\n assert expected_pin == res[0]\n\n testflow.step(\n \"Check that VCPU 0 has pinning affinity %s\",\n expected_affinity\n )\n assert expected_affinity == res[1][:host_cpus]\n\n testflow.step(\"Stop the VM %s\", conf.VM_NAME[0])\n assert ll_vms.stopVm(positive=True, 
vm=conf.VM_NAME[0])\n\n\n@pytest.mark.usefixtures(\n update_vms_cpus_to_hosts_cpus.__name__,\n update_vms.__name__,\n stop_vms.__name__\n)\nclass TestCPUPinCase7(BasicSlaSanity):\n \"\"\"\n Test VCPU pinning of all VM CPUs to one host CPU\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED,\n conf.VM_PLACEMENT_HOSTS: [0]\n }\n }\n vms_to_hosts_cpus = {conf.VM_NAME[0]: 0}\n vms_to_stop = conf.VM_NAME[:1]\n\n @tier1\n @polarion(\"RHEVM3-9539\")\n def test_pinning_load(self):\n \"\"\"\n Check VCPU pinning\n \"\"\"\n host_online_cpu = str(\n ll_sla.get_list_of_online_cpus_on_resource(\n resource=conf.VDS_HOSTS[0]\n )[0]\n )\n host_topology = ll_hosts.get_host_topology(host_name=conf.HOSTS[0])\n host_cpus = host_topology.cores * host_topology.sockets\n vcpu_pinning = [\n {i: host_online_cpu} for i in range(host_cpus)\n ]\n\n testflow.step(\n \"Update the VM %s VCPU pinning\", conf.VM_NAME[0]\n )\n assert ll_vms.updateVm(\n positive=True, vm=conf.VM_NAME[0], vcpu_pinning=vcpu_pinning\n )\n\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert ll_vms.startVm(positive=True, vm=conf.VM_NAME[0])\n\n for i in range(host_cpus):\n vcpu_pinning_info = helpers.get_vcpu_pinning_info_from_host(\n host_resource=conf.VDS_HOSTS[0],\n vm_name=conf.VM_NAME[0],\n vcpu=i\n )\n testflow.step(\n \"Check that VM %s VCPU %s pinned to the host %s CPU %s\",\n conf.VM_NAME[0], i, conf.HOSTS[0], host_online_cpu\n )\n assert vcpu_pinning_info[0] == host_online_cpu\n\n\n@pytest.mark.usefixtures(update_vms_to_default_parameters.__name__)\nclass TestCPUPinCase8(BasicSlaSanity):\n \"\"\"\n Set VCPU pinning on the non-migratable VM with no specified host to run on\n \"\"\"\n update_to_default_params = conf.VM_NAME[:1]\n\n @tier2\n @polarion(\"RHEVM3-9544\")\n def test_set_pinned_cpupin_vm_a(self):\n \"\"\"\n Update the VM with VCPU pinning without a specific host to run on\n \"\"\"\n testflow.step(\"Update the VM %s\", conf.VM_NAME[0])\n assert not ll_vms.updateVm(\n positive=True,\n vm=conf.VM_NAME[0],\n placement_affinity=conf.VM_PINNED,\n placement_host=conf.VM_ANY_HOST,\n vcpu_pinning=conf.DEFAULT_VCPU_PINNING\n )\n\n\n@pytest.mark.usefixtures(\n update_vms.__name__,\n start_vms.__name__\n)\nclass TestPlacementPolicyCase1(BasicSlaSanity):\n \"\"\"\n Migrate a migratable VM\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_HOSTS: [0]\n }\n }\n vms_to_start = conf.VM_NAME[:1]\n\n @tier1\n @polarion(\"RHEVM3-9522\")\n def test_migrate_migratable_vm(self):\n \"\"\"\n Migrate a migratable VM\n \"\"\"\n testflow.step(\"Migrate the VM %s\", conf.VM_NAME[0])\n assert ll_vms.migrateVm(positive=True, vm=conf.VM_NAME[0])\n\n\n@pytest.mark.usefixtures(\n update_vms.__name__,\n start_vms.__name__\n)\nclass TestPlacementPolicyCase2(BasicSlaSanity):\n \"\"\"\n Migrate a user-migratable VM\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_HOSTS: [0],\n conf.VM_PLACEMENT_AFFINITY: conf.VM_USER_MIGRATABLE\n }\n }\n vms_to_start = conf.VM_NAME[:1]\n\n @tier1\n @polarion(\"RHEVM3-9525\")\n def test_migrate_user_migratable_vm(self):\n \"\"\"\n Migrate a user-migratable VM\n \"\"\"\n testflow.step(\"Migrate the VM %s\", conf.VM_NAME[0])\n assert ll_vms.migrateVm(positive=True, vm=conf.VM_NAME[0], force=True)\n\n\n@pytest.mark.usefixtures(\n update_vms.__name__,\n start_vms.__name__\n)\nclass TestPlacementPolicyCase3(BasicSlaSanity):\n \"\"\"\n Migrate a non-migratable VM\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_HOSTS: 
[0],\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED\n }\n }\n vms_to_start = conf.VM_NAME[:1]\n wait_for_vms_ip = False\n\n @tier1\n @polarion(\"RHEVM3-9526\")\n def test_migrate_non_migratable_vm(self):\n \"\"\"\n Migrate a non-migratable VM\n \"\"\"\n testflow.step(\"Migrate the VM %s\", conf.VM_NAME[0])\n assert not ll_vms.migrateVm(positive=True, vm=conf.VM_NAME[0])\n\n\n@pytest.mark.usefixtures(\n update_vms.__name__,\n stop_vms.__name__\n)\nclass TestPlacementPolicyCase4(BasicSlaSanity):\n \"\"\"\n Run non migratable VM with no specific host\n \"\"\"\n vms_to_params = {\n conf.VM_NAME[0]: {\n conf.VM_PLACEMENT_AFFINITY: conf.VM_PINNED\n }\n }\n vms_to_stop = conf.VM_NAME[:1]\n\n @tier1\n @polarion(\"RHEVM3-9530\")\n def test_run_non_migratable_no_specific(self):\n \"\"\"\n Start a non-migratable VM with no specific host to run on\n \"\"\"\n testflow.step(\"Start the VM %s\", conf.VM_NAME[0])\n assert ll_vms.startVm(\n positive=True, vm=conf.VM_NAME[0], wait_for_status=conf.VM_UP\n )\n","sub_path":"art/tests/rhevmtests/compute/sla/sla_sanity/sla_sanity_test.py","file_name":"sla_sanity_test.py","file_ext":"py","file_size_in_byte":25319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
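The pinning cases above exercise libvirt-style CPU set expressions such as `0-3,^1,^2`. A minimal standalone sketch of how such an expression expands into explicit pCPU indices (an illustrative helper, not part of the ART/rhevmtests framework):

```python
# Sketch: expand a libvirt-style CPU set such as "0-3,^1,^2" into explicit
# pCPU indices, mirroring the pinning formats the tests above exercise.
def expand_cpu_set(expr):
    include, exclude = set(), set()
    for token in expr.split(","):
        target = exclude if token.startswith("^") else include
        token = token.lstrip("^")
        if "-" in token:
            start, end = (int(part) for part in token.split("-"))
            target.update(range(start, end + 1))
        else:
            target.add(int(token))
    return include - exclude

assert expand_cpu_set("0-3,^1,^2") == {0, 3}
assert expand_cpu_set("1,2,3") == {1, 2, 3}
assert expand_cpu_set("0-1,^0,^1") == set()   # the "empty range" negative case
```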
+{"seq_id":"367472346","text":"class Bank:\n bank_name=\"sbt\"\n def acc_details(self,acc_no,name,mini_bal):\n self.acc_no=acc_no\n self.name=name\n self.minbal=mini_bal\n\n def deposit(self,amount):\n self.minbal+=amount\n print('Your account no of',self.acc_no,'is being credited with ',amount,'Rs, Current balance is ',self.minbal)\n def withdraw(self,debit):\n if self.minbal 0:\n query_type.append({'NumberOfRooms': {'$in': estate_types}})\n\n if len(query_type) > 0:\n query_a = {\"$and\": [query_city_a, {\"$or\": query_type}]}\n query_b = {\"$and\": [query_city_b, {\"$or\": query_type}]}\n else:\n query_a = query_city_a\n query_b = query_city_b\n\n adverts_a = adverts_db.find(query_a,\n {'_id': 0, 'Id': 1, 'Price': 1,\n 'Location': 1, 'NumberOfRooms': 1, 'Age': 1, 'LivingAreaM2': 1})\n\n adverts_b = adverts_db.find(query_b,\n {'_id': 0, 'Id': 1, 'Price': 1,\n 'Location': 1, 'NumberOfRooms': 1, 'Age': 1, 'LivingAreaM2': 1})\n\n # print(query) # Just for debuging purposes\n\n pricelist_a = getlist(adverts_a)\n pricelist_b = getlist(adverts_b)\n\n # hist, edges = np.histogram(pricelist, density=True, bins=10)\n #\n # plot = figure()\n #\n # plot.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color=\"#036564\", line_color=\"#033649\")\n # script, div = components(plot, CDN)\n\n # TODO Make this query more dynamical so it can work even if no room number is selected\n\n stats_a = adverts_stats(pricelist_a) # get statistical information\n stats_a['city'] = city_a\n stats_b = adverts_stats(pricelist_b) # get statistical information\n stats_b['city'] = city_b\n else:\n # is submitted form is not valid return to index page\n context = {'form': form}\n return render(request, 'estates_compare/index.html', context)\n\n context = {'form': form, 'selected': selected, 'stats_a': stats_a, 'stats_b': stats_b}\n return render(request, 'estates_compare/compare.html', context)\n\n\ndef adverts_stats(pricelist):\n \"\"\" Get statistics about selected adverts\"\"\"\n # TODO Add more statistics and probably convert return into tuple or dictionary\n\n stats = {}\n\n if len(pricelist) > 0:\n # Get average, min, max, median price of adverts for category\n\n stats['count'] = len(pricelist)\n stats['average'] = sum(pricelist) / stats['count']\n stats['min'] = min(pricelist)\n stats['max'] = max(pricelist)\n\n return stats\n\n\ndef getlist(adverts):\n \"\"\"Get only latest prices from adverts, make list out of them for easier processing later\"\"\"\n\n pricelist = []\n\n for advert in adverts:\n # print(advert['Price'][0]) # Just for debugging purposes\n price = int(advert['Price'][0])\n if price > 1000: # Filter out adverts with not realistic low price\n pricelist.append(price)\n\n return pricelist\n","sub_path":"estates_compare/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"631882352","text":"# from tensorflow.examples.tutorials.mnist import input_data\nfrom datasets.AbstractDataset import AbstractDataset\nfrom utils import constant\n\n\nclass MnistDataset(AbstractDataset):\n\n def __init__(self):\n\n self.name = 'mnist'\n # self.dataset = input_data.read_data_sets(constant.DATA_DIR, one_hot=True)\n self.__init_datasets__()\n\n def __init_datasets__(self):\n\n width = constant.config['mnist_img_width']\n height = constant.config['mnist_img_height']\n channel = constant.config['mnist_img_channel']\n\n # self.train_x = self.dataset.train.images.reshape([-1, width, height, channel])\n # self.train_y = self.dataset.train.labels\n # self.validate_x = self.dataset.validation.images.reshape([-1, width, height, channel])\n # self.validate_y = self.dataset.validation.labels\n # self.test_x = self.dataset.test.images.reshape([-1, width, height, channel])\n # self.test_y = self.dataset.test.labels\n\n # def train_set(self):\n #\n # return (self.train_x, self.train_y)\n #\n # def validate_set(self):\n #\n # return (self.validate_x, self.validate_y)\n #\n # def test_set(self):\n #\n # return (self.test_x, self.test_y)\n\n","sub_path":"datasets/mnist/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"213959440","text":"from fastapi.testclient import TestClient\nfrom libs.config_engine import ConfigEngine\nfrom api.models.base import Config\nfrom api.processor_api import ProcessorAPI\nimport pytest\nimport humps\nimport copy\n\nconfig_sample_path='/repo/api/models/data/config-sample.ini'\nconfig = ConfigEngine(config_sample_path)\napp_instance = ProcessorAPI(config)\napi = app_instance.app\nclient = TestClient(api)\n\n\n# read sample config file\nconfig_sample = ConfigEngine(config_sample_path)\nsections = config_sample.get_sections()\nconfig_sample_json = {}\n\nfor section in sections:\n config_sample_json[section] = config_sample.get_section_dict(section)\n\nconfig_sample_json = humps.decamelize(config_sample_json)\n\n#@pytest.mark.order1\ndef test_set_config():\n response = client.put(\n \"/config\",\n json=config_sample_json,\n )\n assert response.status_code == 200\n assert response.json() == config_sample_json\n\n#@pytest.mark.order2\ndef test_set_invalid_video_path():\n wrong_json = copy.deepcopy(config_sample_json)\n wrong_json['app']['video_path'] = 'wrong_path'\n expected_response = {'detail': [{'loc': ['body', 'app', 'video_path'], 'msg': 'Failed to load video. The video URI is not valid', 'type': 'value_error'}]}\n expected_response['body'] = wrong_json\n response = client.put(\n \"/config\",\n json=wrong_json,\n )\n assert response.status_code == 400\n assert response.json() == expected_response\n\n#@pytest.mark.order3\ndef test_get_config():\n config = ConfigEngine(config_sample_path)\n app_instance = ProcessorAPI(config)\n api = app_instance.app\n client = TestClient(api)\n\n response_get = client.get(\"/config\")\n\n assert response_get.status_code == 200\n assert response_get.json() == config_sample_json\n","sub_path":"api/tests/controllers/test_config_api.py","file_name":"test_config_api.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"264854037","text":"import pandas as pd\nimport numpy as np\nimport urllib3\nfrom bs4 import BeautifulSoup\nfrom collections import OrderedDict\n\nmapping = {\"Microsoft Corp.\": \"MSFT\",\n \"Amazon.com Inc.\": \"AMZN\",\n \"Facebook Inc. Cl A\": \"FB\",\n \"Tesla Inc.\": \"TSLA\",\n \"Under Armour Inc. Cl A\": \"UAA\",\n \"Alphabet Inc. Cl A\": \"GOOGL\",\n \"Apple Inc.\": \"AAPL\",\n \"S&P 500 Index\": \"SPX\",\n \"Dow Jones Industrial Average\": \"DJIA\"}\n#\ndef import_names():\n sheets = xw.Book(r'C:\\Users\\billies9\\OneDrive\\Documents\\Python_Screwaround\\Stock_Scraper\\Good_Project\\practice.xlsx').sheets\n\n sheet_names = []\n for name in sheets:\n name = str(name).split(']',1)[1].split('>',1)[0]\n if \"Covar\" not in name:\n sheet_names.append(name)\n return sheet_names\n\ndef construct_portfolio(dates, weights = None, close_df = None, returns_df = None):\n start_date = pd.to_datetime(dates[0])\n end_date = pd.to_datetime(dates[1])\n if 'on' in weights.values(): # synonymous with checkboxes\n # Select of securities\n _ = {}\n for key in weights.keys():\n if weights[key] == 'on':\n try:\n dfs[key].set_index('DateTime', inplace=True)\n except: pass\n _[key] = [dfs[key].loc[start_date:end_date, \"Percent Change\"].mean(),] # Need to change to reflect end - beg / beg\n df = pd.DataFrame(_)\n\n cov_matrix = np.array(covariance_matrix(df.columns))\n ret_list = df.values.tolist()\n\n num_portfolios = 4000 # maybe allow user input in later versions...\n results = np.zeros((3 + len(df.columns), num_portfolios))\n nums = np.random.random(size = (num_portfolios, len(df.columns)))\n\n days = end_date - start_date\n for i in range(num_portfolios):\n weights = np.array(nums[i] / np.sum(nums[i]))\n\n port_return = np.sum(ret_list * weights) * (252/(days.days)) # Check returns list and match with weights in std deviation\n\n port_deviation = np.sqrt(np.dot(weights.T, np.dot(cov_matrix, weights))) * np.sqrt(252/days.days)\n\n results[0, i] = port_return\n results[1, i] = port_deviation\n results[2, i] = (results[0, i] - .03) / results[1, i] #extract risk free rate?\n for j in range(len(weights)):\n results[j + 3, i] = weights[j]\n\n results_frame = pd.DataFrame(results.T, columns = ['Portfolio Return', 'Portfolio Deviation', 'Sharpe Ratio'] + list(_.keys()))\n else:\n \"\"\"Create the portfolio here, and when done, defined a new weights dictionary that houses 'on' as the signifier of a weight for a recursive definition\"\"\"\n # User has defined weights\n #results_frame = #####\n results = np.zeros((3 + len(returns_df.columns), 1))\n _ = {}\n for column in close_df.columns: # WIll run over twice becasue of close and returns - Do I need daily returns?\n ticker = column.split(' ')[0]\n if ticker != _.keys():\n return_over_pd = (close_df.loc[end_date, ticker + ' close'] - close_df.loc[start_date, ticker + ' close']) / close_df.loc[start_date, ticker + ' close']\n _[ticker] = [return_over_pd,]\n covar_df = pd.DataFrame(_)\n cov_matrix = np.array(covariance_matrix(covar_df.columns, close_df))\n\n ord_weights = OrderedDict(sorted(weights.items(), key=lambda k: k[0]))\n lst_weights = np.array([float(val) for key, val in ord_weights.items() if val != ''])\n\n results[0, 0] = returns_df.sum(axis = 1) *100 # annualize?\n results[1, 0] = np.sqrt(np.dot(lst_weights.T, np.dot(cov_matrix, lst_weights))) # Std Dev.\n results[2, 0] = (results[0, 0] - .03) / results[1, 0]\n for j in range(len(lst_weights)):\n results[j + 3, 0] = lst_weights[j]\n results_frame = pd.DataFrame(results.T, columns = 
['Portfolio Return', 'Portfolio Deviation', 'Sharpe Ratio'] + list(_.keys()))\n print(results_frame)\n return results_frame\n\ndef covariance_matrix(df_columns, price_df = None):\n price_df = price_df.filter(regex='returns').dropna()\n result = price_df.reset_index(drop = True)\n try:\n return result.cov()\n except:\n return result.var()\n \ndef check_list(lst, article):\n _ = True\n for word in lst:\n if word in article.split(' '):\n _ = False\n return _\n\nif __name__ == '__main__':\n print(link_matches)\n","sub_path":"portfolio_page_construction.py","file_name":"portfolio_page_construction.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
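The core of construct_portfolio above is a Monte Carlo loop over random weight vectors. A minimal self-contained sketch of that loop with numpy; the returns and covariance here are fabricated stand-ins, and the 3% risk-free rate mirrors the hard-coded .03 in the original:

```python
import numpy as np

# Draw random weights, normalise them, and score each portfolio by
# return, deviation and Sharpe ratio, as the loop above does.
np.random.seed(0)
n_assets, n_portfolios = 4, 1000
mean_returns = np.array([0.08, 0.12, 0.10, 0.06])  # annualised, assumed
cov_matrix = np.diag([0.04, 0.09, 0.06, 0.02])     # assumed diagonal covariance

results = np.zeros((3, n_portfolios))
for i in range(n_portfolios):
    w = np.random.random(n_assets)
    w /= w.sum()                                   # weights sum to 1
    port_return = float(np.dot(w, mean_returns))
    port_dev = float(np.sqrt(w.T @ cov_matrix @ w))
    results[:, i] = (port_return, port_dev, (port_return - 0.03) / port_dev)

best = results[:, results[2].argmax()]
print("max Sharpe portfolio: return=%.3f dev=%.3f sharpe=%.2f" % tuple(best))
```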
+{"seq_id":"624722260","text":"\n####777777\n\nimport os\nfrom time import sleep\nfrom appium import webdriver\nfrom actions import *\nfrom utils.HTMLTestRunner import HTMLTestRunner\nfrom utils.parametic import *\nfrom utils.util import *\nfrom utils.verify_items import *\nfrom utils.config import Config\nfrom pages.Xcall import Xcall\n\n\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\nclass B_XcallTests(ParametrizedTestCase):\n @classmethod\n def setUpClass(cls):\n path = Config().get('path')\n desired_caps = {}\n desired_caps['automationName'] = 'XCUITest'\n desired_caps['platformName'] = 'iOS'\n desired_caps['platformVersion'] = '10.3'\n desired_caps['deviceName'] = 'iPhone 6'\n desired_caps['app'] = PATH(\n path\n )\n desired_caps['noReset'] = True\n desired_caps['bundleId'] = \"com.ctrip.xplan.xcall\"\n #desired_caps['udid'] = \"7bf03dd3bed6d6c6826919e8de7632e641186357\"\n desired_caps['udid'] = \"7e885bc9ccd6ee1ae4297fc7505f9d48aada496f\"\n desired_caps[\"unicodeKeyboard\"] = \"True\"\n desired_caps[\"resetKeyboard\"] = \"True\"\n desired_caps[\"wdaLocalPort\"] = \"8002\"\n cls.driver = webdriver.Remote('http://localhost:4743/wd/hub', desired_caps)\n cls.pipe_num = 1\n\n def test_1_b_login(self):\n print(\"proc2 t1s rev ---:\", self.param.recv())\n print(\"do something @proc2\")\n xcall_page = Xcall(self.driver)\n xcall_page.b_login()\n sleep(2)\n print(\"proc2 t1e send---: %s\" % (self.pipe_num))\n self.param.send(self.pipe_num)\n\n#\n def test_2_b(self):\n xcall_page = Xcall(self.driver)\n self.driver.background_app(10)\n\n print(\"proc2 t1e send---: %s\" % (self.pipe_num))\n self.param.send(self.pipe_num)\n\n print(\"proc2 t1s rev ---:\", self.param.recv())\n print(\"do something @proc2\")\n\n result = verify_be_called(self)\n self.assertTrue(result)\n\n xcall_page.b_answer()\n result = verify_answer(self)\n self.assertTrue(result)\n\n\n\n\n\n\n @classmethod\n def tearDownClass(cls):\n sleep(3)\n cls.driver.quit()\n\n\nif __name__ == '__main__':\n # pipe = 1\n report = \"/Users/zoe/Desktop/Test_Framework_iOS/report\" + '/report.html'\n with open(report, 'wb') as f:\n runner = HTMLTestRunner(f, verbosity=2, title='xconnect_ios', description='报告')\n suite = unittest.TestLoader().loadTestsFromTestCase(B_XcallTests)\n runner.run(suite)\n","sub_path":"Xcall_iOS/test/test_xcall_3_2.py","file_name":"test_xcall_3_2.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"496604001","text":"\"\"\"fix the typo...\n\nRevision ID: dc7042a9ce99\nRevises: 3bd094f697e8\nCreate Date: 2021-05-15 07:14:44.381991\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\nfrom figure_hook.Models.relation_table import (product_paintwork_table,\n product_sculptor_table)\nfrom sqlalchemy import orm\n\n# revision identifiers, used by Alembic.\nrevision = 'dc7042a9ce99'\ndown_revision = '3bd094f697e8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('product_paintwork', sa.Column('product_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'product_paintwork', 'product', ['product_id'], ['id'])\n op.add_column('product_sculptor', sa.Column('product_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'product_sculptor', 'product', ['product_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('product_paintwork_product_id_fkey', 'product_paintwork', type_='foreignkey')\n op.drop_constraint('product_sculptor_product_id_fkey', 'product_sculptor', type_='foreignkey')\n\n op.drop_column('product_sculptor', 'product_id')\n op.drop_column('product_paintwork', 'product_id')\n\n # ### end Alembic commands ###\n","sub_path":"db/migrate/versions/dc7042a9ce99_fix_the_typo.py","file_name":"dc7042a9ce99_fix_the_typo.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"635344493","text":"# -*- coding: utf-8 -*-\nimport unittest\nimport ConfigParser\nfrom appium import webdriver\nimport gl\n\nm_config = gl.GL_CONFIG\ncf = ConfigParser.ConfigParser()\ncf.read(\"E:/Platform/Platform/jdjr/serivces/appium_serivces/config.conf\")\n\nm_config['packageName'] = cf.get(\"base\", \"packageName\")\nm_config['packageFullName'] = cf.get(\"base\", \"packageFullName\")\nm_config['packageActivity'] = cf.get(\"base\", \"packageActivity\")\nm_config['packagePath'] = cf.get(\"base\", \"packagePath\")\nm_config['deviceName'] = cf.get(\"base\", \"deviceName\")\n\nclass appTest(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '4.4.2'\n desired_caps['deviceName'] = ''\n desired_caps['app'] = m_config['packagePath'] + m_config['packageFullName'] # apk路径 + apk包名\n desired_caps['appPackage'] = m_config['packageName'] # 安装包名\n desired_caps['appActivity'] = m_config['packageActivity'] #启动首页面\n desired_caps['udid'] = m_config['deviceName']\n self.driver = webdriver.Remote('http://10.13.16.203:4723/wd/hub', desired_caps)\n\n def tearDown(self):\n self.driver.quit()\n\n","sub_path":"Platform/jdjr/serivces/appium_serivces/case/initInfo.py","file_name":"initInfo.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"41851420","text":"model_card={\n \"type\": \"text classification\",\n \"prediction_type\":\"multiclass classification\",\n \"dataset_used\": \"https://git.unbiased.cc/unbiased-intelligence-hub/emotion_detector_text/-/tree/master/Data\",\n \"output_labels\": {'0':'anger','1':'fear','2':'joy','3':'love','4':'sadness','5':'surprise'},\n \n \"model_unit\": \"LSTM\",\n \"used_pretrained_embeddings\": 'true',\n 'pretrained_embedding':'glove.6B.100d',\n \"val_accuracy\": '93.3',\n \"usage\": \"emotion_recognition_text\",\n \"model_size\": \"2.89 MB\",\n \"input_data_type\": \"Tensor with a shape of (No_of_sentences,45,100)\",\n \"backend\": \"Tensorflow Keras\",\n \"trained_on\": \"8 GB RAM\",\n \"classification_report\":{\n '0':{'precision':'0.97','recall':'0.89','f1_score':'0.93'},\n '1':{'precision':'0.92','recall':'0.88','f1_score':'0.90'},\n '2':{'precision':'0.91','recall':'0.99','f1_score':'0.95'},\n '3':{'precision':'0.99','recall':'0.67','f1_score':'0.80'},\n '4':{'precision':'0.97','recall':'0.97','f1_score':'0.97'},\n '5':{'precision':'0.67','recall':'0.94','f1_score':'0.78'},\n 'overall':{'precision':'0.94','recall':'0.93','f1_score':'0.93'}\n }\n}\nimport json\nwith open('model_card.json', 'w') as fp:\n json.dump(model_card, fp)","sub_path":"modelcard.py","file_name":"modelcard.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"560168743","text":"\n\n#calss header\nclass _OBFUSCATE():\n\tdef __init__(self,): \n\t\tself.name = \"OBFUSCATE\"\n\t\tself.definitions = [u'to make something less clear and harder to understand, especially intentionally: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_obfuscate.py","file_name":"_obfuscate.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"597057068","text":"# *****************************************************************************\n# Copyright (c) 2019, Intel Corporation All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\nimport operator\nimport numba\nfrom numba import types, typing\nfrom numba.typing.templates import (signature, AbstractTemplate, infer,\n ConcreteTemplate, AttributeTemplate, bound_function, infer_global)\nfrom numba.extending import typeof_impl, lower_cast\nfrom numba.extending import type_callable, box, unbox, NativeValue\nfrom numba.extending import models, register_model, infer_getattr\nfrom numba.extending import lower_builtin, overload_method, overload\nfrom numba.targets.imputils import (impl_ret_new_ref, impl_ret_borrowed,\n iternext_impl, RefType)\nfrom sdc.str_ext import string_type, gen_unicode_to_std_str, gen_std_str_to_unicode\nfrom numba import cgutils\nfrom llvmlite import ir as lir\nimport llvmlite.binding as ll\nfrom . 
import hdict_ext\nfrom sdc.utils import unliteral_all\n\nll_voidp = lir.IntType(8).as_pointer()\n\n\nclass ByteVecType(types.Opaque):\n def __init__(self):\n super(ByteVecType, self).__init__(\n name='byte_vec')\n\n\nbyte_vec_type = ByteVecType()\nregister_model(ByteVecType)(models.OpaqueModel)\n\n\nclass DictType(types.Opaque):\n def __init__(self, key_typ, val_typ):\n self.key_typ = key_typ\n self.val_typ = val_typ\n super(DictType, self).__init__(\n name='DictType{}{}'.format(key_typ, val_typ))\n\n @property\n def key(self):\n return self.key_typ, self.val_typ\n\n @property\n def iterator_type(self):\n return DictKeyIteratorType(self.key_typ, self.val_typ)\n\n def is_precise(self):\n return self.key_typ.is_precise() and self.val_typ.is_precise()\n\n\nelem_types = [\n types.int8,\n types.int16,\n types.int32,\n types.int64,\n types.uint8,\n types.uint16,\n types.uint32,\n types.uint64,\n types.boolean,\n types.float32,\n types.float64,\n string_type\n]\n\n\ndef typ_str_to_obj(typ_str):\n if typ_str == types.boolean:\n return \"types.boolean\"\n if typ_str == string_type:\n return \"string_type\"\n return \"types.{}\".format(typ_str)\n\n\ndef _add_dict_symbols(key_str, val_str):\n # init dict object\n exec(\"ll.add_symbol('dict_{0}_{1}_init', hdict_ext.dict_{0}_{1}_init)\".format(key_str, val_str))\n # setitem\n exec(\"ll.add_symbol('dict_{0}_{1}_setitem', hdict_ext.dict_{0}_{1}_setitem)\".format(key_str, val_str))\n # getitem\n exec(\"ll.add_symbol('dict_{0}_{1}_getitem', hdict_ext.dict_{0}_{1}_getitem)\".format(key_str, val_str))\n # in\n exec(\"ll.add_symbol('dict_{0}_{1}_in', hdict_ext.dict_{0}_{1}_in)\".format(key_str, val_str))\n # print\n exec(\"ll.add_symbol('dict_{0}_{1}_print', hdict_ext.dict_{0}_{1}_print)\".format(key_str, val_str))\n # get\n exec(\"ll.add_symbol('dict_{0}_{1}_get', hdict_ext.dict_{0}_{1}_get)\".format(key_str, val_str))\n # pop\n exec(\"ll.add_symbol('dict_{0}_{1}_pop', hdict_ext.dict_{0}_{1}_pop)\".format(key_str, val_str))\n # keys\n exec(\"ll.add_symbol('dict_{0}_{1}_keys', hdict_ext.dict_{0}_{1}_keys)\".format(key_str, val_str))\n # min\n exec(\"ll.add_symbol('dict_{0}_{1}_min', hdict_ext.dict_{0}_{1}_min)\".format(key_str, val_str))\n # max\n exec(\"ll.add_symbol('dict_{0}_{1}_max', hdict_ext.dict_{0}_{1}_max)\".format(key_str, val_str))\n # not_empty\n exec(\"ll.add_symbol('dict_{0}_{1}_not_empty', hdict_ext.dict_{0}_{1}_not_empty)\".format(key_str, val_str))\n\n\nfor key_typ in elem_types:\n for val_typ in elem_types:\n k_obj = typ_str_to_obj(key_typ)\n v_obj = typ_str_to_obj(val_typ)\n key_str = str(key_typ)\n val_str = str(val_typ)\n _add_dict_symbols(key_str, val_str)\n # create types\n exec(\"dict_{}_{}_type = DictType({}, {})\".format(key_str, val_str, k_obj, v_obj))\n exec_format_line = \"dict_{0}_{1}_init = types.ExternalFunction('dict_{0}_{1}_init', dict_{0}_{1}_type())\"\n exec(exec_format_line.format(key_str, val_str))\n\ndict_byte_vec_int64_type = DictType(byte_vec_type, types.int64)\ndict_byte_vec_int64_init = types.ExternalFunction('dict_byte_vec_int64_init', dict_byte_vec_int64_type())\n_add_dict_symbols('byte_vec', 'int64')\n\nll.add_symbol('byte_vec_init', hdict_ext.byte_vec_init)\nll.add_symbol('byte_vec_set', hdict_ext.byte_vec_set)\nll.add_symbol('byte_vec_free', hdict_ext.byte_vec_free)\nll.add_symbol('byte_vec_resize', hdict_ext.byte_vec_resize)\n\nbyte_vec_init = types.ExternalFunction('byte_vec_init', byte_vec_type(types.int64, types.voidptr))\nbyte_vec_set = types.ExternalFunction(\n 'byte_vec_set',\n types.void(\n 
byte_vec_type,\n types.int64,\n types.voidptr,\n types.int64))\nbyte_vec_resize = types.ExternalFunction('byte_vec_resize', types.void(byte_vec_type, types.int64))\nbyte_vec_free = types.ExternalFunction('byte_vec_free', types.void(byte_vec_type))\n\n\nclass MultiMapType(types.Opaque):\n def __init__(self, key_typ, val_typ):\n self.key_typ = key_typ\n self.val_typ = val_typ\n super(MultiMapType, self).__init__(\n name='MultiMapType{}{}'.format(key_typ, val_typ))\n\n @property\n def key(self):\n return self.key_typ, self.val_typ\n\n def is_precise(self):\n return self.key_typ.is_precise() and self.val_typ.is_precise()\n\n\nregister_model(MultiMapType)(models.OpaqueModel)\n\n\nclass MultiMapRangeIteratorType(types.SimpleIteratorType):\n def __init__(self, key_typ, val_typ):\n self.key_typ = key_typ\n self.val_typ = val_typ\n yield_type = val_typ\n super(MultiMapRangeIteratorType, self).__init__(\n 'MultiMapRangeIteratorType{}{}'.format(key_typ, val_typ), yield_type)\n\n @property\n def iterator_type(self):\n return self\n\n @property\n def key(self):\n return self.key_typ, self.val_typ\n\n def is_precise(self):\n return self.key_typ.is_precise() and self.val_typ.is_precise()\n\n\nmultimap_int64_range_iterator_type = MultiMapRangeIteratorType(types.intp, types.intp)\n\nregister_model(MultiMapRangeIteratorType)(models.OpaqueModel)\n\nmultimap_int64_type = MultiMapType(types.int64, types.int64)\nmultimap_int64_init = types.ExternalFunction(\n 'multimap_int64_init', multimap_int64_type())\nmultimap_int64_insert = types.ExternalFunction(\n 'multimap_int64_insert',\n types.void(multimap_int64_type, types.int64, types.int64))\nmultimap_int64_equal_range = types.ExternalFunction(\n 'multimap_int64_equal_range',\n multimap_int64_range_iterator_type(multimap_int64_type, types.int64))\n\n\n# store the iterator pair type in same storage and avoid repeated alloc\nmultimap_int64_equal_range_alloc = types.ExternalFunction(\n 'multimap_int64_equal_range_alloc', multimap_int64_range_iterator_type())\n\nmultimap_int64_equal_range_dealloc = types.ExternalFunction(\n 'multimap_int64_equal_range_dealloc',\n types.void(multimap_int64_range_iterator_type))\n\nmultimap_int64_equal_range_inplace = types.ExternalFunction(\n 'multimap_int64_equal_range_inplace',\n multimap_int64_range_iterator_type(multimap_int64_type, types.int64,\n multimap_int64_range_iterator_type))\n\nll.add_symbol('multimap_int64_init', hdict_ext.multimap_int64_init)\nll.add_symbol('multimap_int64_insert', hdict_ext.multimap_int64_insert)\nll.add_symbol('multimap_int64_equal_range', hdict_ext.multimap_int64_equal_range)\nll.add_symbol('multimap_int64_equal_range_alloc', hdict_ext.multimap_int64_equal_range_alloc)\nll.add_symbol('multimap_int64_equal_range_dealloc', hdict_ext.multimap_int64_equal_range_dealloc)\nll.add_symbol('multimap_int64_equal_range_inplace', hdict_ext.multimap_int64_equal_range_inplace)\nll.add_symbol('multimap_int64_it_is_valid', hdict_ext.multimap_int64_it_is_valid)\nll.add_symbol('multimap_int64_it_get_value', hdict_ext.multimap_int64_it_get_value)\nll.add_symbol('multimap_int64_it_inc', hdict_ext.multimap_int64_it_inc)\n\n\n@lower_builtin('getiter', MultiMapRangeIteratorType)\ndef iterator_getiter(context, builder, sig, args):\n it, = args\n # return impl_ret_borrowed(context, builder, sig.return_type, it)\n return it\n\n\n@lower_builtin('iternext', MultiMapRangeIteratorType)\n@iternext_impl(RefType.UNTRACKED)\ndef iternext_listiter(context, builder, sig, args, result):\n ll_bool = context.get_value_type(types.bool_) # 
lir.IntType(1)?\n\n # is valid\n fnty = lir.FunctionType(ll_bool, [ll_voidp])\n it_is_valid = builder.module.get_or_insert_function(fnty, name=\"multimap_int64_it_is_valid\")\n\n # get value\n val_typ = context.get_value_type(sig.args[0].val_typ)\n fnty = lir.FunctionType(val_typ, [ll_voidp])\n get_value = builder.module.get_or_insert_function(fnty, name=\"multimap_int64_it_get_value\")\n\n # increment\n fnty = lir.FunctionType(lir.VoidType(), [ll_voidp])\n inc_it = builder.module.get_or_insert_function(fnty, name=\"multimap_int64_it_inc\")\n\n range_it, = args\n\n # it != range.second\n is_valid = builder.call(it_is_valid, [range_it])\n result.set_valid(is_valid)\n\n with builder.if_then(is_valid):\n # it->second\n val = builder.call(get_value, [range_it])\n result.yield_(val)\n builder.call(inc_it, [range_it])\n\n\n# XXX: needs Numba #3014 resolved\n# @overload(\"in\")\n# def in_dict(key_typ, dict_typ):\n# def f(k, dict_int):\n# return dict_int_int_in(dict_int, k)\n# return f\n\n# XXX possible overload bug\n# @overload(operator.setitem)\n# def setitem_dict(dict_typ, key_typ, val_typ):\n# def f(k, dict_int):\n# return dict_int_int_in(dict_int, k)\n# return f\n\n@infer\nclass InDict(AbstractTemplate):\n key = \"in\"\n\n def generic(self, args, kws):\n _, cont = args\n if isinstance(cont, DictType):\n return signature(types.boolean, cont.key_typ, cont)\n\n\n@infer_global(operator.contains)\nclass InDictOp(AbstractTemplate):\n def generic(self, args, kws):\n # contains operator reverses the args\n cont, _ = args\n if isinstance(cont, DictType):\n return signature(types.boolean, cont, cont.key_typ)\n\n\ndict_int_int_type = DictType(types.intc, types.intc)\ndict_int32_int32_type = DictType(types.int32, types.int32)\n\n\nclass DictIntInt(object):\n def __new__(cls, *args):\n return {}\n\n\nclass DictInt32Int32(object):\n def __new__(cls, *args):\n return {}\n\n\n@typeof_impl.register(DictIntInt)\ndef typeof_dict_int(val, c):\n return dict_int_int_type\n\n\n@typeof_impl.register(DictInt32Int32)\ndef typeof_dict_int32(val, c):\n return dict_int32_int32_type\n\n\n@type_callable(DictIntInt)\ndef type_dict_int(context):\n def typer():\n return dict_int_int_type\n return typer\n\n\n@type_callable(DictInt32Int32)\ndef type_dict_int32(context):\n def typer():\n return dict_int32_int32_type\n return typer\n\n\n@infer_global(operator.setitem)\nclass SetItemDict(AbstractTemplate):\n def generic(self, args, kws):\n dict_t, _, _ = args\n if isinstance(dict_t, DictType):\n return signature(types.none, dict_t, dict_t.key_typ, dict_t.val_typ)\n\n\n@infer_global(operator.getitem)\nclass GetItemDict(AbstractTemplate):\n key = operator.getitem\n\n def generic(self, args, kws):\n dict_t, _ = args\n if isinstance(dict_t, DictType):\n return signature(dict_t.val_typ, dict_t, dict_t.key_typ)\n\n\n@infer\nclass PrintDictIntInt(ConcreteTemplate):\n key = \"print_item\"\n cases = [signature(types.none, dict_int_int_type),\n signature(types.none, dict_int32_int32_type)]\n\n\n@infer_getattr\nclass DictAttribute(AttributeTemplate):\n key = DictType\n\n @bound_function(\"dict.get\")\n def resolve_get(self, dict, args, kws):\n assert not kws\n assert len(args) == 2\n return signature(args[1], *unliteral_all(args))\n\n @bound_function(\"dict.pop\")\n def resolve_pop(self, dict, args, kws):\n assert not kws\n return signature(dict.val_typ, *unliteral_all(args))\n\n @bound_function(\"dict.keys\")\n def resolve_keys(self, dict, args, kws):\n assert not kws\n return signature(DictKeyIteratorType(dict.key_typ, 
dict.val_typ))\n\n\nregister_model(DictType)(models.OpaqueModel)\n\n\n@box(DictType)\ndef box_dict(typ, val, c):\n \"\"\"\n \"\"\"\n # interval = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)\n # lo_obj = c.pyapi.float_from_double(interval.lo)\n # hi_obj = c.pyapi.float_from_double(interval.hi)\n class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(DictIntInt))\n res = c.pyapi.call_function_objargs(class_obj, (val,))\n # c.pyapi.decref(lo_obj)\n # c.pyapi.decref(hi_obj)\n c.pyapi.decref(class_obj)\n return res\n\n\nclass DictKeyIteratorType(types.Opaque):\n def __init__(self, key_typ, val_typ):\n self.key_typ = key_typ\n self.val_typ = val_typ\n super(DictKeyIteratorType, self).__init__(\n 'DictKeyIteratorType{}{}'.format(key_typ, val_typ))\n\n\ndict_key_iterator_int_int_type = DictKeyIteratorType(types.intp, types.intp)\ndict_key_iterator_int32_int32_type = DictKeyIteratorType(\n types.int32, types.int32)\n\nregister_model(DictKeyIteratorType)(models.OpaqueModel)\n\n\n@infer_global(min)\n@infer_global(max)\nclass MinMaxDict(AbstractTemplate):\n def generic(self, args, kws):\n if len(args) == 1 and isinstance(args[0], DictKeyIteratorType):\n return signature(args[0].key_typ, *unliteral_all(args))\n\n\n# dict_int_int_in = types.ExternalFunction(\"dict_int_int_in\", types.boolean(dict_int_int_type, types.intp))\n\n@lower_builtin(DictIntInt)\ndef impl_dict_int_int(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(8).as_pointer(), [])\n fn = builder.module.get_or_insert_function(fnty, name=\"dict_int_int_init\")\n return builder.call(fn, [])\n\n\n@lower_builtin(operator.setitem, DictType, types.Any, types.Any)\ndef setitem_dict(context, builder, sig, args):\n _, key_typ, val_typ = sig.args\n dct, key, val = args\n fname = \"dict_{}_{}_setitem\".format(key_typ, val_typ)\n\n if key_typ == string_type:\n key_typ = types.voidptr\n key = gen_unicode_to_std_str(context, builder, key)\n\n if val_typ == string_type:\n val_typ = types.voidptr\n val = gen_unicode_to_std_str(context, builder, val)\n\n fnty = lir.FunctionType(lir.VoidType(),\n [lir.IntType(8).as_pointer(),\n context.get_value_type(key_typ),\n context.get_value_type(val_typ)])\n fn = builder.module.get_or_insert_function(fnty, name=fname)\n return builder.call(fn, [dct, key, val])\n\n\n@lower_builtin(\"print_item\", dict_int_int_type)\ndef print_dict(context, builder, sig, args):\n # pyapi = context.get_python_api(builder)\n # strobj = pyapi.unserialize(pyapi.serialize_object(\"hello!\"))\n # pyapi.print_object(strobj)\n # pyapi.decref(strobj)\n # return context.get_dummy_value()\n fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(fnty, name=\"dict_int_int_print\")\n return builder.call(fn, args)\n\n\n@lower_builtin(\"dict.get\", DictType, types.intp, types.intp)\ndef lower_dict_get(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(64), [lir.IntType(\n 8).as_pointer(), lir.IntType(64), lir.IntType(64)])\n fn = builder.module.get_or_insert_function(fnty, name=\"dict_int_int_get\")\n return builder.call(fn, args)\n\n\n@lower_builtin(operator.getitem, DictType, types.Any)\ndef lower_dict_getitem(context, builder, sig, args):\n dict_typ, key_typ = sig.args\n dct, key = args\n val_typ = dict_typ.val_typ\n\n fname = \"dict_{}_{}_getitem\".format(key_typ, val_typ)\n\n if key_typ == string_type:\n key_typ = types.voidptr\n key = gen_unicode_to_std_str(context, builder, key)\n\n ll_val_typ = context.get_value_type(val_typ)\n 
if val_typ == string_type:\n ll_val_typ = context.get_value_type(types.voidptr)\n\n fnty = lir.FunctionType(ll_val_typ,\n [lir.IntType(8).as_pointer(), context.get_value_type(key_typ)])\n\n fn = builder.module.get_or_insert_function(fnty, name=fname)\n val = builder.call(fn, [dct, key])\n if val_typ == string_type:\n val = gen_std_str_to_unicode(context, builder, val)\n return val\n\n\n@lower_builtin(\"dict.pop\", DictType, types.intp)\ndef lower_dict_pop(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(\n 64), [lir.IntType(8).as_pointer(), lir.IntType(64)])\n fn = builder.module.get_or_insert_function(fnty, name=\"dict_int_int_pop\")\n return builder.call(fn, args)\n\n\n@lower_builtin(\"dict.keys\", dict_int_int_type)\ndef lower_dict_keys(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(8).as_pointer(), [\n lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(fnty, name=\"dict_int_int_keys\")\n return builder.call(fn, args)\n\n\n@lower_builtin(min, dict_key_iterator_int_int_type)\ndef lower_dict_min(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(64), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(fnty, name=\"dict_int_int_min\")\n return builder.call(fn, args)\n\n\n@lower_builtin(max, dict_key_iterator_int_int_type)\ndef lower_dict_max(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(64), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(fnty, name=\"dict_int_int_max\")\n return builder.call(fn, args)\n\n\n@lower_builtin(\"in\", types.Any, DictType)\ndef lower_dict_in(context, builder, sig, args):\n key_typ, dict_typ = sig.args\n key, dct = args\n\n fname = \"dict_{}_{}_in\".format(key_typ, dict_typ.val_typ)\n\n if key_typ == string_type:\n key_typ = types.voidptr\n key = gen_unicode_to_std_str(context, builder, key)\n\n fnty = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer(),\n context.get_value_type(key_typ), ])\n fn = builder.module.get_or_insert_function(fnty, name=fname)\n val = builder.call(fn, [dct, key])\n if dict_typ.val_typ == string_type:\n val = gen_std_str_to_unicode(context, builder, val)\n return val\n\n\n@lower_builtin(operator.contains, DictType, types.Any)\ndef lower_dict_in_op(context, builder, sig, args):\n dict_typ, key_typ = sig.args\n dct, key = args\n\n fname = \"dict_{}_{}_in\".format(key_typ, dict_typ.val_typ)\n\n if key_typ == string_type:\n key_typ = types.voidptr\n key = gen_unicode_to_std_str(context, builder, key)\n\n fnty = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer(),\n context.get_value_type(key_typ), ])\n fn = builder.module.get_or_insert_function(fnty, name=fname)\n return builder.call(fn, [dct, key])\n\n\n@lower_cast(dict_int_int_type, types.boolean)\ndef dict_empty(context, builder, fromty, toty, val):\n fnty = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int_int_not_empty\")\n return builder.call(fn, (val,))\n\n\n# ------ int32 versions ------\n@lower_builtin(DictInt32Int32)\ndef impl_dict_int32_int32(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(8).as_pointer(), [])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_init\")\n return builder.call(fn, [])\n\n\n# @lower_builtin(operator.setitem, DictType, types.int32, types.int32)\n# def setitem_dict_int32(context, builder, sig, args):\n# fnty = lir.FunctionType(lir.VoidType(), 
[lir.IntType(\n# 8).as_pointer(), lir.IntType(32), lir.IntType(32)])\n# fn = builder.module.get_or_insert_function(\n# fnty, name=\"dict_int32_int32_setitem\")\n# return builder.call(fn, args)\n\n\n@lower_builtin(\"print_item\", dict_int32_int32_type)\ndef print_dict_int32(context, builder, sig, args):\n # pyapi = context.get_python_api(builder)\n # strobj = pyapi.unserialize(pyapi.serialize_object(\"hello!\"))\n # pyapi.print_object(strobj)\n # pyapi.decref(strobj)\n # return context.get_dummy_value()\n fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_print\")\n return builder.call(fn, args)\n\n\n@lower_builtin(\"dict.get\", DictType, types.int32, types.int32)\ndef lower_dict_get_int32(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(32), [lir.IntType(\n 8).as_pointer(), lir.IntType(32), lir.IntType(32)])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_get\")\n return builder.call(fn, args)\n\n\n# @lower_builtin(operator.getitem, DictType, types.int32)\n# def lower_dict_getitem_int32(context, builder, sig, args):\n# fnty = lir.FunctionType(lir.IntType(\n# 32), [lir.IntType(8).as_pointer(), lir.IntType(32)])\n# fn = builder.module.get_or_insert_function(\n# fnty, name=\"dict_int32_int32_getitem\")\n# return builder.call(fn, args)\n\n\n@lower_builtin(\"dict.pop\", DictType, types.int32)\ndef lower_dict_pop_int32(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(\n 32), [lir.IntType(8).as_pointer(), lir.IntType(32)])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_pop\")\n return builder.call(fn, args)\n\n\n@lower_builtin(\"dict.keys\", dict_int32_int32_type)\ndef lower_dict_keys_int32(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(8).as_pointer(), [\n lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_keys\")\n return builder.call(fn, args)\n\n\n@lower_builtin(min, dict_key_iterator_int32_int32_type)\ndef lower_dict_min_int32(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(32), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_min\")\n return builder.call(fn, args)\n\n\n@lower_builtin(max, dict_key_iterator_int32_int32_type)\ndef lower_dict_max_int32(context, builder, sig, args):\n fnty = lir.FunctionType(lir.IntType(32), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_max\")\n return builder.call(fn, args)\n\n\n@lower_cast(dict_int32_int32_type, types.boolean)\ndef dict_empty_int32(context, builder, fromty, toty, val):\n fnty = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer()])\n fn = builder.module.get_or_insert_function(\n fnty, name=\"dict_int32_int32_not_empty\")\n return builder.call(fn, (val,))\n","sub_path":"sdc/dict_ext.py","file_name":"dict_ext.py","file_ext":"py","file_size_in_byte":24061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
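The module above wires a C++ unordered_map into Numba through ExternalFunction symbols and lower_builtin hooks. For the common cases it covers (init/setitem/getitem/in for int64 keys and values), newer Numba releases ship a built-in typed dict that needs no external symbols; a sketch, assuming a reasonably recent numba:

```python
import numpy as np
from numba import njit, types
from numba.typed import Dict

# numba's built-in typed dict handles the int64 -> int64 case that the
# hand-rolled C++ wrappers above expose, entirely in nopython mode.
@njit
def count_values(values):
    d = Dict.empty(key_type=types.int64, value_type=types.int64)
    for v in values:
        d[v] = d.get(v, 0) + 1
    return d

counts = count_values(np.array([1, 2, 2, 3, 3, 3], dtype=np.int64))
print(dict(counts))  # {1: 1, 2: 2, 3: 3}
```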
+{"seq_id":"443207234","text":"import re\n\nclass SASArgument(object):\n '''\n SAS Argument Class\n \n Creates an object with the following properties\n\n Name: Name of the Argument given\n Type: Required or Optional\n DefaultValue: If type is optional then the default value\n DocString: Documentation String for the argument.\n '''\n\n def __init__(self,rawStr):\n\n reFlags = re.DOTALL|re.IGNORECASE\n self.name = re.sub('\\s','',re.findall('(.*?)(?:[=\\/\\*]|$)',rawStr,reFlags)[0])\n \n if re.search('=',rawStr) is not None:\n self.type='Optional'\n defaultValue = re.findall('=([^\\/]*)',rawStr,reFlags)\n\n if defaultValue is not None and len(defaultValue[0])>0:\n self.defaultValue=defaultValue[0]\n else:\n self.defaultValue='Not set'\n else:\n self.type='Required'\n self.defaultValue='Not set'\n\n if re.search('.*?\\*(.*)\\*',rawStr) is not None:\n self.docString = re.findall('.*?\\*(.*)\\*',rawStr,reFlags)[0]\n else:\n self.docString='Not set'\n\n def __str__(self):\n _ = '{}\\n - Type: {}\\n - DefaultValue: {}\\n - About: {}'.format(self.name,self.type,self.defaultValue,self.docString)\n return _\n \n def __repr__(self):\n return self.name\n","sub_path":"SASObjects/SASArgument.py","file_name":"SASArgument.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"22430364","text":"import os\n\nimport redis\nimport requests\nfrom flask import jsonify\nfrom flask_util_job_runner.api_utils import secured_with_token, with_request_params, handle\nfrom flask_util_job_runner.definitions import REDIS_ADDRESS\nfrom flask_util_job_runner.flask_utils import setup_app\n\napp = setup_app()\n\npool = redis.ConnectionPool(REDIS_ADDRESS, port=6379, db=0)\nr = redis.Redis(connection_pool=pool)\n\n\n@app.route(\"/health\", methods=['GET'])\n@secured_with_token()\ndef health():\n urls_to_ping = ['http://webservice.namespace1/ping', 'http://asyncwebservice.namespace1/ping',\n 'http://readwebservice.namespace1/ping', 'http://minio-hl.minio:9000/minio/health/live']\n status = {}\n status_up_all = True\n for utp in urls_to_ping:\n status[utp], status_up = get_status(utp)\n status_up_all = status_up_all and status_up\n status_redis_up = r.ping()\n status_up_all = status_up_all and status_redis_up\n status['redis'] = {'status': 'ok'} if status_redis_up else {'status': 'down'}\n if status_up_all:\n return jsonify(status)\n else:\n return jsonify(status), 500\n\n\ndef get_status(utp):\n status_ok_constant = {'status': 'ok'}\n status_down_constant = {'status': 'down'}\n try:\n resp = requests.get(utp, timeout=2)\n except requests.exceptions.RequestException as e:\n return status_down_constant, False\n if resp.ok:\n return status_ok_constant, True\n else:\n return status_down_constant, False\n\n\n@app.route(\"/webservice\", methods=['POST'])\n@secured_with_token()\n@with_request_params([\"data\"])\ndef webservice(data, uuid_str):\n version = 'namespace1'\n method_name = 'webservice'\n app.logger.info(\"webservice call\")\n return handle(method_name, version, uuid_str, {'data': data, 'uuid_str': uuid_str})\n\n\n@app.route(\"/readwebservice\", methods=['POST'])\n@secured_with_token()\n@with_request_params([\"uuid_requested\"])\ndef readwebservice(uuid_requested, uuid_str):\n version = 'namespace1'\n method_name = 'readwebservice'\n return handle(method_name, version, uuid_str, {'uuid_requested': uuid_requested, 'uuid_str': uuid_str})\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8081)))\n","sub_path":"api/entrypoints.py","file_name":"entrypoints.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"95453898","text":"import os\nimport utils\nfrom time import sleep\nimport json\n\n\n#Clase Color: Uso en los mensajes\nclass Color():\n BLACK = '\\033[30m'\n RED = '\\033[31m'\n GREEN = '\\033[32m'\n YELLOW = '\\033[33m'\n BLUE = '\\033[1;34m'\n MAGENTA = '\\033[35m'\n CYAN = '\\033[36m'\n WHITE = '\\033[37m'\n UNDERLINE = '\\033[4m'\n RESET = '\\033[0m'\n CEND = '\\033[0;m'\n\n#Clase Persona \nclass Persona:\n __estado = True\n\n def __init__(self, dni, nombre, apellido, edad):\n self.dni = dni\n self.nombre = nombre\n self.apellido = apellido\n self.edad = edad\n\n @property\n def estado(self):\n return self.__estado\n\n @estado.setter\n def estado(self, nuevoEstado):\n __estado = nuevoEstado\n\n def registro(self):\n self.estado = True\n print(\"La Persona se ha registrado\")\n\n def desresgistro(self):\n self.estado = False\n\n#Clase Cliente HERENCIA Persona\nclass Cliente(Persona):\n def __init__(self, dni, nombre, apellido, edad, codCliente):\n super().__init__(dni, nombre, apellido, edad)\n self.codCliente = codCliente\n\n #def comprar(self):\n # print(\"El Cliente esta comprando\")\n # print(\"El Cliente terminó de comprar\")\n def dictCliente(self):\n dc = {\n 'dni': self.dni,\n 'nombre': self.nombre,\n 'apellido': self.apellido,\n 'edad': self.edad, \n 'codCliente': self.codCliente \n }\n return dc\n \n\n#Clase Empleado HERENCIA Persona\nclass Empleado(Persona):\n def __init__(self, dni, nombre, apellido, edad, codEmpleado):\n super().__init__(dni, nombre, apellido, edad)\n self.codEmpleado = codEmpleado\n\n def marcarIngreso(self):\n print(\"El empleado esta marcando su ingreso\")\n print(\"El empleado marcó su ingreso\")\n \n def dictEmpleado(self):\n de = {\n 'dni': self.dni,\n 'nombre': self.nombre,\n 'apellido': self.apellido,\n 'edad': self.edad, \n 'codEmpleado': self.codEmpleado \n }\n return de\n#Clase Producto\nclass Producto:\n log = utils.log(\"Producto\")\n\n def __init__(self, codProducto, nombreProducto, cantidadProducto, costoProducto):\n self.codProducto = codProducto\n self.nombreProducto = nombreProducto\n self.cantidadProducto = cantidadProducto\n self.costoProducto = costoProducto\n self.log.info(\"Se creo un producto\")\n\n # def __str__(self):\n # return \"\"\"Codigo: {} \\nNombre: {}\"\"\".format(self.codProducto, self.nombreProducto)\n\n def dictProducto(self):\n dp = {\n 'codProducto': self.codProducto,\n 'nombreProducto': self.nombreProducto,\n 'cantidadProducto': self.cantidadProducto,\n 'costoProducto': self.costoProducto\n }\n return dp\n\n \n\n #def costearProducto(self):\n # print(\"Costeando producto\")\n # print(\"Producto costeado\")\n#Buscar Cliente\ndef buscarCliente(fileClient, client):\n try: \n f = open(fileClient, 'r')\n except FileNotFoundError:\n return('¡El fichero ' + fileClient + ' no existe!\\n')\n else:\n directory = f.readlines()\n f.close()\n directory = list([(line.split(',')) for line in directory])\n if client in directory:\n return directory[client]\n else:\n return('¡El cliente ' + client + ' no existe!\\n')\n\n#Buscar Empleado\ndef buscarEmpleador(fileEmplead, emplead):\n try: \n f = open(fileEmplead, 'r')\n except FileNotFoundError:\n return('¡El fichero ' + fileEmplead + ' no existe!\\n')\n else:\n directory = f.readlines()\n f.close()\n directory = list([(line.split(',')) for line in directory])\n if emplead in directory:\n return directory[emplead]\n else:\n return('¡El cliente ' + emplead + ' no existe!\\n')\n#Buscar Producto\ndef buscarProducto(fileProduct, product):\n try: \n f = open(fileProduct, 'r')\n 
def buscarProducto(fileProduct, product):\n    try: \n        f = open(fileProduct, 'r')\n    except FileNotFoundError:\n        return('¡El fichero ' + fileProduct + ' no existe!\\n')\n    else:\n        directory = f.readlines()\n        f.close()\n        directory = [line.strip().split(',') for line in directory]\n        for entry in directory:\n            if product in entry:\n                return entry\n        return('¡El producto ' + product + ' no existe!\\n')\n\n#Eliminar Producto\ndef eliminarProducto(fileProduct, product):\n\n    try: \n        f = open(fileProduct, 'r')\n    except FileNotFoundError:\n        return('¡El Producto ' + fileProduct + ' no existe!\\n')\n    else:\n        directory = f.readlines()\n        f.close()\n        directory = [line.strip().split(',') for line in directory]\n        restantes = [entry for entry in directory if product not in entry]\n        if len(restantes) == len(directory):\n            return('¡El Producto ' + product + ' no existe!\\n')\n        # reopen in write mode: the file was opened read-only, so writing to it would fail\n        f = open(fileProduct, 'w')\n        for entry in restantes:\n            f.write(','.join(entry) + '\\n')\n        f.close()\n        return ('¡El Producto se ha borrado!\\n')\n\n#Clase Menu\nclass Menu:\n    __log = utils.log(\"Menu\")\n\n    def __init__(self, nombreMenu, listaOpciones):\n        self.nombreMenu = nombreMenu\n        self.listaOpciones = listaOpciones\n\n    def mostrarMenu(self):\n        self.limpiarPantalla()\n        opSalir = True\n        while(opSalir):\n            self.limpiarPantalla()\n            print(Color.BLUE+\":::::::::::::BIENVENIDOS EMPRESA ESMR::::::::::::::\"+Color.CEND)\n            print(Color.BLUE+\":::::::::::::::::::\" +self.nombreMenu + \"::::::::::::::::::\"+Color.CEND)\n            \n            for (key, value) in self.listaOpciones.items():\n                print(key, \"\\t:: \", value)\n            #print(\"Salir \\t\\t:: 9\")\n            opcion = 100\n            try:\n                print(Color.CYAN+\"Escoge tu opcion\"+Color.CEND)\n                opcion = int(input())\n            except ValueError as error:\n                self.__log.error(error)\n                print(Color.RED+\"Opcion invalida deben ser numeros del 0 al 2\"+Color.CEND)\n            contOpciones = 0\n            for (key, value) in self.listaOpciones.items():\n                if(opcion == int(value)):\n                    contOpciones += 1\n            if(contOpciones == 0):\n                print(Color.RED+\"Escoge una opcion valida\"+Color.CEND)\n                self.__log.debug(\"No escoge opcion\")\n                sleep(3)\n            else:\n                opSalir = False\n\n        return opcion\n\n    def limpiarPantalla(self):\n        def clear():\n            #return os.system('cls')\n            return os.system('clear')\n        clear()\n\n#Variables Globales\nlog = utils.log(\"INIT\")\n#Variables Productos\nfileProducto = utils.fileManager(\"Productos.txt\")\nfileProduct = 'Productos.txt'\nlstProductos = []\nlstProductosDic = []\n\n#Variables Clientes\nfileCliente = utils.fileManager(\"Clientes.txt\")\nfileClient = 'Clientes.txt'\nlstClientes = []\nlstClientesDic = []\n\n#Variables Empleados\nfileEmpleados = utils.fileManager(\"Empleados.txt\")\nfileEmplead = 'Empleados.txt'\nlstEmpleados = []\nlstEmpleadosDic = []\n#FIN Variables Globales\n\n#Funcion Carga Inicial: Realiza La carga de datos de Productos, Clientes y Empleados\ndef cargaInicial():\n    try:\n        res = fileProducto.leerArchivo()\n        log.debug(res)\n        lstProducto = json.loads(res)\n        for dicProducto in lstProducto:\n            #codProducto, nombreProducto, cantidadProducto, costoProducto\n            objProducto = Producto(dicProducto[\"codProducto\"], dicProducto[\"nombreProducto\"],\n                                   dicProducto[\"cantidadProducto\"], dicProducto[\"costoProducto\"])\n            lstProductos.append(objProducto)\n            lstProductosDic.append(dicProducto)\n        log.debug(lstProductosDic)\n        log.debug(lstProductos)\n    except Exception as error:\n        log.error(error)\n    \n    #Try/Carga Cliente\n    try:\n        resc = fileCliente.leerArchivo()\n        log.debug(resc)\n        lstCliente = json.loads(resc)\n        for dictCliente in lstCliente:\n            #dni, nombre, apellido, edad, codCliente\n            objCliente = Cliente(dictCliente[\"dni\"],\n                                 dictCliente[\"nombre\"], dictCliente[\"apellido\"],
dictCliente[\"edad\"],dictCliente[\"codCliente\"])\n lstClientes.append(objCliente)\n lstClientesDic.append(dictCliente)\n log.debug(lstClientesDic)\n log.debug(lstClientes)\n except Exception as error:\n log.error(error)\n #FIN Try/Carga Cliente\n\n\n\ncargaInicial()\n\n#Menu de Opciones Principal \ndicOpcionesMenuPrincipal = {\"Cliente\": 1, \"Producto\": 2, \"Empleado\": 3, \"Salir \\t\": 0}\nmenuPrincipal = Menu(\"Menu de Inicio\", dicOpcionesMenuPrincipal)\nopcionMenuPrincipal = menuPrincipal.mostrarMenu()\n\n#Opciones de Crear Producto\ndicOpcionesCrearProducto = {\"Crear otro Producto\": 1, \"Mostrar todos los Productos\": 2}\nmenuProducto = Menu(\"Menu Producto\", dicOpcionesCrearProducto)\n\n#Opciones de Crear Cliente\ndicOpcionesCrearCliente = {\"Crear otro Cliente\": 1, \"Mostrar todos los Clientes\": 2}\nsubmenuCliente = Menu(\"Menu X Cliente\", dicOpcionesCrearCliente)\n\n#Opciones de Crear Empleado\ndicOpcionesCrearEmpleado = {\"Crear otro Empleado\": 1, \"Mostrar todos los Empleado\": 2}\nsubmenuEmpleado = Menu(\"Menu X Empleado\", dicOpcionesCrearEmpleado)\n\nif(opcionMenuPrincipal == 0):\n #opcionMenuPrincipal = menuPrincipal.mostrarMenu()\n print(\"Gracias, Uds salio del sistema\")\n#Opciones Menu Cliente\nelif(opcionMenuPrincipal == 1):\n dicOpcionesCliente = {\"Registrar Cliente\": 1, \"Listar Cliente \\t\": 2, \"Buscar Clientes\": 3, \"Salir\": 4}\n menuCliente = Menu(\"Menu de Cliente\", dicOpcionesCliente)\n resc = menuCliente.mostrarMenu()\n salirCreacionCliente = True\n while salirCreacionCliente:\n if(resc == 1):\n print(\"Digita el DNI del Cliente\")\n dni = input()\n print(\"Digita Nombre del Cliente\")\n nombre = input()\n print(\"Digita Apellido del Cliente\")\n apellido = input() \n print(\"Digita Edad del Cliente\")\n edad = input()\n print(\"Digita el Codigo del Cliente\")\n codCliente = input()\n cliente = Cliente(dni, nombre, apellido, edad,codCliente)\n \n print(\"Haz creado el Cliente: \", cliente)\n fileCliente.borrarArchivo()\n lstClientesDic.append(cliente.dictCliente())\n lstClientes.append(cliente)\n jsonStrcliente = json.dumps(lstClientesDic)\n fileCliente.escribirArchivo(jsonStrcliente)\n resMenuCliente = menuCliente.mostrarMenu()\n if(resMenuCliente == 1):\n log.debug(\"ingreso a la opcion 1 de menuCliente\")\n elif(resMenuCliente == 2):\n log.debug(\"ingreso a la opcion 2 de menuCliente\")\n for objCliente in lstClientes:\n print(\n f\"|{objCliente.dni} | {objCliente.nombre} | {objCliente.apellido} | {objCliente.edad} | {objCliente.codCliente}|\")\n sleep(1)\n resc = submenuCliente.mostrarMenu()\n if(resc == 2):\n log.debug(f\"ingreso a la opcion {resc}\")\n else:\n log.debug(\n f\"ingreso a la opcion {resMenuCliente} de menuCliente\")\n salirCreacionCliente = False\n break\n elif(resc == 2):\n print(f\"|{'DNI':^16}|{'NOMBRE':^17}|{'APELLIDO':^17}|{'EDAD':^18}|{'COD CLIENTE':^18}|\")\n for objCliente in lstClientes:\n print(f\"|{objCliente.dni:^15} | {objCliente.nombre:^15} | {objCliente.apellido:^15} | {objCliente.edad:^15} | {objCliente.codCliente:^15}|\")\n sleep(1)\n resc = menuCliente.mostrarMenu()\n elif (resc == 3):\n print(\"Buscar ---->\")\n name = input('Introduce el nombre del cliente: ')\n print(buscarCliente(fileClient, name))\n sleep(1) \n resc = menuCliente.mostrarMenu()\n \n # sleep(10)\n # res = menuEmpleado.mostrarMenu()\n elif (resc == 4):\n print(Color.GREEN+\"Salio con Exito del Menu Cliente\"+Color.CEND)\n sleep(1)\n resc = menuPrincipal.mostrarMenu()\n break \n\n#Opciones Menu Producto\nelif(opcionMenuPrincipal == 2):\n 
dicOpcionesProducto = {\"Registrar Productos\": 1,\"Listar Productos\": 2, \"Buscar Producto\": 3, \"Eliminar Producto\": 4, \"Inventario \\t\": 5, \"Salir \\t\\t\": 6}\n menuProducto = Menu(\"Menu de Producto\", dicOpcionesProducto)\n res = menuProducto.mostrarMenu()\n salirCreacionProducto = True\n while salirCreacionProducto:\n if(res == 1):\n print(\"Digita el Codigo del Producto\")\n codProducto = input()\n print(\"Digita el Nombre del Producto\")\n nomProducto = input()\n print(\"Digita la Cantidad del Producto\")\n cantProducto = input()\n print(\"Digita costo del Producto\")\n costProducto = input()\n producto = Producto(codProducto, nomProducto,\n cantProducto, costProducto)\n\n print(\"Haz creado el producto: \", producto)\n fileProducto.borrarArchivo()\n lstProductosDic.append(producto.dictProducto())\n lstProductos.append(producto)\n jsonStr = json.dumps(lstProductosDic)\n fileProducto.escribirArchivo(jsonStr)\n resMenuProducto = menuProducto.mostrarMenu()\n if(resMenuProducto == 1):\n log.debug(\"ingreso a la opcion 1 de menuProducto\")\n elif(resMenuProducto == 2):\n log.debug(\"ingreso a la opcion 2 de menuProducto\")\n for objProducto in lstProductos:\n print(f\"|{objProducto.nombreProducto} | {objProducto.codProducto} | {objProducto.cantidadProducto} | {objProducto.costoProducto} |\")\n sleep(5)\n res = menuProducto.mostrarMenu()\n if(res == 1):\n log.debug(f\"ingreso a la opcion {res}\")\n else:\n log.debug(\n f\"ingreso a la opcion {resMenuProducto} de menuProducto\")\n salirCreacionProducto = False\n break\n elif(res==2):\n print(f\"|{'COD PRODUCTO':^30}|{'NOMBRE':^30}|{'CANTIDAD':^30}|{'COSTO':^30}|\")\n for objProducto in lstProductos:\n print(f\"|{objProducto.codProducto:^30} | {objProducto.nombreProducto:^30} | {objProducto.cantidadProducto:^30} | {objProducto.costoProducto:^30}|\")\n sleep(4)\n res = menuProducto.mostrarMenu()\n elif (res==3):\n print(\"Buscar ---->\")\n name = input('Introduce el nombre del Producto: ')\n print(buscarProducto(fileEmplead, name))\n sleep(2)\n res = menuPrincipal.mostrarMenu() \n elif (res==4):\n print(\"Eliminar ---->\")\n #print(\"Busca en la lista el producto que deseas quitar\")\n #for objProducto in lstProductos:\n # for (key, value) in objProducto.items():\n # print(key , \" :: \", value )\n print(\"Escribe el nombre del Producto que quieres Eliminar\")\n strNombreEliminar = input()\n for objProducto in lstProductos:\n for (key, value) in objProducto.items():\n if(value == strNombreEliminar):\n print(f\"Borrar {value}?\")\n lstProductos.remove(objProducto)\n print(lstProductos)\n\n sleep(5)\n res = menuProducto.mostrarMenu()\n elif (res==5):\n totalV = 0.0\n for p in lstProductos:\n totalV +=p.total\n print(\" PRODUCTOS, TOTAL VALORIZADO: \",totalV)\n\n elif (res==6):\n pass\nelif(opcionMenuPrincipal == 3):\n dicOpcionesEmpleado = {\"Registrar Empleador\": 1, \"Listar Empelado\": 2, \"Buscar Empleado\": 3, \"Salir \\t\\t\": 4}\n menuEmpleado = Menu(\"Menu de Empleador\", dicOpcionesEmpleado)\n rese = menuEmpleado.mostrarMenu()\n salirCreacionEmpleado = True\n while salirCreacionEmpleado:\n if(rese == 1):\n print(\"Digita el DNI del Empleado\")\n dni = input()\n print(\"Digita Nombre del Empleado\")\n nombre = input()\n print(\"Digita Apellido del Empleado\")\n apellido = input() \n print(\"Digita Edad del Empleado\")\n edad = input()\n print(\"Digita el Codigo del Empleado\")\n codEmpleado = input()\n empleado = Empleado(dni, nombre, apellido, edad,codEmpleado)\n \n print(\"Haz creado el Empleado: \", empleado)\n 
            fileEmpleados.borrarArchivo()\n            lstEmpleadosDic.append(empleado.dictEmpleado())\n            lstEmpleados.append(empleado)\n            jsonStrEmpleado = json.dumps(lstEmpleadosDic)\n            fileEmpleados.escribirArchivo(jsonStrEmpleado)\n            resMenuEmpleado = menuEmpleado.mostrarMenu()\n            if(resMenuEmpleado == 1):\n                log.debug(\"ingreso a la opcion 1 de menuEmpleado\")\n            elif(resMenuEmpleado == 2):\n                log.debug(\"ingreso a la opcion 2 de menuEmpleado\")\n                for objEmpleado in lstEmpleados:\n                    print(\n                        f\"|{objEmpleado.dni} | {objEmpleado.nombre} | {objEmpleado.apellido} | {objEmpleado.edad} | {objEmpleado.codEmpleado}|\")\n                sleep(2)\n                rese = submenuEmpleado.mostrarMenu()\n                if(rese == 2):\n                    log.debug(f\"ingreso a la opcion {rese}\")\n            else:\n                log.debug(\n                    f\"ingreso a la opcion {resMenuEmpleado} de menuEmpleado\")\n                salirCreacionEmpleado = False\n                break\n        elif(rese == 2):\n            print(f\"|{'DNI':^16}|{'NOMBRE':^17}|{'APELLIDO':^17}|{'EDAD':^18}|{'COD EMPLEADO':^18}|\")\n            for objEmpleado in lstEmpleados:\n                print(f\"|{objEmpleado.dni:^15} | {objEmpleado.nombre:^15} | {objEmpleado.apellido:^15} | {objEmpleado.edad:^15} | {objEmpleado.codEmpleado:^15}|\")\n            sleep(5)\n            rese = menuEmpleado.mostrarMenu()\n        elif (rese == 3):\n            print(\"Buscar ---->\")\n            name = input('Introduce el nombre del Empleado: ')\n            print(buscarEmpleador(fileEmplead, name))\n            sleep(2)\n            rese = menuEmpleado.mostrarMenu()\n        elif (rese == 4):\n            print(Color.GREEN+\"Salio con Exito\"+Color.CEND)\n            break \n    \n    \n","sub_path":"Semana4Hackaton/emadrid/inventario.py","file_name":"inventario.py","file_ext":"py","file_size_in_byte":18769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"514118626","text":"from __future__ import print_function, absolute_import, division\n\nfrom numba.test_utils import InOtherThread\nfrom numba import ocl\nimport numpy as np\nimport numba.unittest_support as unittest\n\nclass TestSelectDevice(unittest.TestCase):\n @unittest.skip('not yet implemented')\n def test_select_device(self):\n def newthread():\n ocl.select_device(0)\n stream = ocl.stream()\n\n A = np.arange(100)\n dA = ocl.to_device(A, stream=stream)\n stream.synchronize()\n del dA\n del stream\n assert False\n ocl.close()\n\n for i in range(10):\n InOtherThread(newthread).return_value\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"numba/ocl/tests/ocldrv/test_select_device.py","file_name":"test_select_device.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"152517150","text":"from kivy.uix.screenmanager import Screen\nfrom kivy.clock import Clock\nfrom kivy.properties import BoundedNumericProperty\n\nfrom frog import Frog\nfrom snake import Snake \nfrom swamp import Swamp\nfrom hud import HUD\nfrom popups import PausePopup\nfrom events import *\n\nclass Game(Screen):\n\t\n\tcounter = BoundedNumericProperty(0, min=0)\n\t'''Counts correct answers.'''\n\t\n\tdef __init__(self, **kw):\n\t\tkw['disabled'] = kw.get('disabled', True)\n\t\tself.__active = False\n\t\tself.__current_level = None\n\t\tself.pause_popup = PausePopup(size_hint=(0.3, 0.5), pos_hint={'x':0.35, 'y':0.25})\n\t\tself.swamp = Swamp(size_hint=(1., 1.))\n\t\tself.snake = Snake(size_hint=(0.16, 0.35))\n\t\tself.frog = Frog(size_hint=(0.07, 0.05))\n\t\tself.hud = HUD(size_hint_y=0.12, pos_hint={'top':1})\n\t\tself.hud.bind(on_exit=self.on_pause)\n\t\tself.pause_popup.bind(on_continue=self.on_continue)\n\t\tself.pause_popup.bind(on_main_menu=self.on_main_menu)\n\t\tself.pause_popup.bind(on_exit=self.on_exit)\n\t\tsuper(Game, self).__init__(**kw)\n\t\tself.add_widget(self.swamp)\n\t\tself.add_widget(self.snake)\n\t\tself.add_widget(self.frog)\n\t\tself.add_widget(self.hud)\n\t\tEvtSnakeHealth.register(self.on_snake_health)\n\t\tEvtFrogPos.register(self.on_frog_pos)\n\t\tEvtLeafSingleTap.register(self.on_leaf_single_tap)\n\t\tEvtLeafPos.register(self.on_leaf_pos)\n\t\t\n\tdef init(self, level):\n\t\tself.__current_level = level\n\t\tself.counter = 0 \n\t\tself.swamp.init(level)\n\t\tself.frog.init(self.swamp.get_middle_leaf())\n\t\tEvtLeafPos.disable()\n\t\tself.snake.init()\n\t\t\n\tdef start(self):\n\t\tif self.__active:\n\t\t\treturn \n\t\tself.swamp.start()\n\t\tself.disabled = False\n\t\tEvtLeafPos.enable()\n\t\tself.__active = True\n\t\t\n\tdef stop(self):\n\t\tif not self.__active:\n\t\t\treturn \n\t\tself.swamp.stop()\n\t\tself.__active = False\n\t\tself.disabled = True\n\t\t\n\tdef is_active(self):\n\t\treturn self.__active\n\t\t\n\t@property\t\n\tdef current_level(self):\n\t\treturn self.__current_level\n\t\t\n\tdef get_next_level(self):\n\t\tif self.current_level == 6:\n\t\t\treturn 1\n\t\telse: return self.current_level + 1\n\t\t\n\tdef get_prev_level(self):\n\t\tif self.current_level == 1:\n\t\t\treturn 6\n\t\telse: return self.current_level - 1\n\t\t\n\tdef on_counter(self, w, c):\n\t\t'''\n\t\tif c == 3:\n\t\t\tself.frog.food = Food(type='bug')\n\t\telif c == 6:\n\t\t\tself.frog.food = Food(type='egg')\n\t\telif c == 9:\n\t\t\tself.frog.food = Food(type='worm')\n\t\t'''\n\t\tif c == 0:\n\t\t\treturn \n\t\tself.snake.health += c/4.\n\t\t\n\tdef on_snake_health(self, snake, value, *largs):\n\t\tself.hud.set_health(value)\n\t\tif value == 100.0:\n\t\t\tself.stop()\n\t\t\tEvtGameOver(True)\n\t\t\n\tdef on_frog_pos(self, frog, pos, *largs):\n\t\tsnake = self.snake\n\t\tsnake.center_y = frog.center_y\t\t\t# follow frog\n\t\tif frog.x <= snake.right - 20 and self.is_active():\n\t\t\tself.stop()\n\t\t\tEvtGameOver(False)\n\t\t\n\tdef on_leaf_single_tap(self, leaf, *largs):\n\t\tif leaf.has_eq():\n\t\t\tif leaf.is_true():\n\t\t\t\tself.counter += 1\n\t\t\t\tself.frog.jump(leaf)\n\t\t\telse: self.counter = 0\n\t\t\n\tdef on_leaf_pos(self, leaf, pos, *largs):\n\t\tif leaf.has_eq() and leaf.x <= self.frog.x and not self.frog.is_jumping():\n\t\t\tleaf.remove_eq()\n\t\tif leaf.has_food() and leaf.x <= self.snake.right and self.snake.collide_widget(leaf):\n\t\t\tleaf.remove_food()\n\t\t\t\n\tdef on_pause(self, hud):\n\t\tself.stop()\n\t\tself.pause_popup.open()\n\t\n\tdef 
on_continue(self, popup):\n\t\tpopup.dismiss()\n\t\tClock.schedule_once(lambda dt: self.start(), 0.5)\n\t\t\n\tdef on_main_menu(self, popup):\n\t\tpopup.dismiss()\n\t\tEvtGameStop()\n\t\t\n\tdef on_exit(self, popup):\n\t\tpopup.dismiss()\n\t\tEvtGameExit()\n\t\t\n\t\t","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"152843907","text":"from flask import Flask\nimport os\n\ndef create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_mapping(\n SECRET_KEY = 'dev'\n )\n if test_config is None:\n app.config.from_pyfile('../instance/config.cfg')\n else:\n app.config.from_pyfile('test_config.py')\n\n\n @app.route('/hello')\n def hello():\n return 'Hello, World!'\n\n from . import login\n app.register_blueprint(login.bp)\n\n from . import dashboard\n app.register_blueprint(dashboard.bp)\n\n return app","sub_path":"ExerciseTracker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"608768785","text":"import setuptools\nfrom distutils.core import setup\nLONG_DESCRIPTION =\\\n\"\"\"\nTestix is a Mocking framework for Python, meant to be used with [pytest](https://docs.pytest.org/en/latest/).\n\nread the full docs at the [project's homepage](https://github.com/haarcuba/testix).\n\nTestix is special because it allows you to specify what your mock objects do,\nand it then enforces your specifications automatically. It also reduces (albeit\nnot entirely) mock setup. Other frameworks usually have a flow like this:\n\n* setup mock\n* let code do something with mock\n* assert mock used in correct way\n\nTestix flow is a bit different\n\n* setup \"top level\" mock objects (`sock` in the following example)\n* specify exactly what should happen to them using a scenario\n\nAnd that's it. \n\"\"\"\n\nrequires = [ 'pytest>~4.3.0', ]\ntests_require = [ 'hypothesis>~4.7.19', 'pytest-asyncio' ]\nsetup(\n name=\"testix\",\n packages = [\"testix\",],\n version='6.0.0',\n description = \"Mocking framework Python with *exact* Scenarios\",\n author = \"Yoav Kleinberger\",\n author_email = \"haarcuba@gmail.com\",\n url = \"https://github.com/haarcuba/testix\",\n keywords = [\"mock\", \"mocking\", \"unittest\", \"python\", \"unit testing\"],\n install_requires=requires,\n long_description = LONG_DESCRIPTION,\n extras_require={\n 'testing': tests_require,\n },\n classifiers = [\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Testing\",\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"97084862","text":"import os\nimport sys\nimport time\nimport ntpath\nimport logging\n# import hdfdict\nfrom typing import *\n\nfrom uniparse.types import Parser\nfrom uniparse.dataprovider import ScaledBatcher, BucketBatcher\n\ntry:\n import uniparse.decoders as decoders\nexcept Exception as e:\n logging.error(\"ERROR: can't import decoders. please run 'python setup.py build_ext --inplace' from the root directory\")\n raise e\n\nimport uniparse.backend as backend_wrapper\nimport uniparse.evaluation.universal_eval as uni_eval\n\nimport numpy as np\nimport sklearn.utils\n\n\nclass ParserModel(object):\n def __init__(self, model: Parser, decoder, loss, optimizer, strategy, vocab):\n self._model_uid = time.strftime(\"%m%d%H%M%S\")\n self._parser = model\n self._optimizer = None\n self._vocab = vocab\n self._batch_strategy = strategy\n\n # retrieve backend wrapper\n self.backend = backend_wrapper.init_backend(model.get_backend_name())\n model.set_backend(self.backend)\n\n # extract optimizer / decoder / loss from strings\n if isinstance(optimizer, str):\n optimizer = self._get_optimizer(optimizer)\n self._optimizer = optimizer(model.parameters())\n else:\n self._optimizer = optimizer\n\n # extract decoder\n runtime_decoder = self._get_decoder(decoder)\n self._parser.set_decoder(runtime_decoder)\n\n # extract loss functions\n self.arc_loss, self.rel_loss = self._get_loss_functions(loss)\n\n def _get_optimizer(self, input_optimizer):\n # get setup optimizer\n backend = self.backend\n if isinstance(input_optimizer, str):\n optimizer_options = {\n \"adam\": backend.optimizers.adam,\n \"rmsprop\": backend.optimizers.rmsprop,\n \"adadelta\": backend.optimizers.adadelta,\n \"adagrad\": backend.optimizers.adagrad\n }\n\n if input_optimizer not in optimizer_options:\n raise ValueError(\"optimizer doesn't exist\")\n\n return optimizer_options[input_optimizer]\n else:\n return input_optimizer\n\n @staticmethod\n def _get_decoder(input_decoder):\n if isinstance(input_decoder, str):\n decoder_options = {\n \"eisner\": decoders.eisner,\n \"cle\": decoders.cle\n }\n\n if input_decoder not in decoder_options:\n raise ValueError(\"decoder (%s) not available\" % input_decoder)\n\n return decoder_options[input_decoder]\n else:\n return input_decoder\n\n def _get_loss_functions(self, input_loss: Union[str, Tuple[Any, Any]]):\n if isinstance(input_loss, str):\n loss = self.backend.loss\n loss_options = {\n # included for completeness\n \"crossentropy\": (loss.crossentropy, loss.crossentropy),\n \"kiperwasser\": (loss.hinge, loss.hinge),\n \"hinge\": (loss.hinge, loss.hinge)\n }\n if input_loss not in loss_options:\n raise ValueError(\"unknown loss function %s\" % input_loss)\n\n return loss_options[input_loss]\n else:\n return input_loss\n\n def _batch_data(self, samples: List, strategy: str, scale: int, shuffle: bool):\n if strategy == \"bucket\":\n dataprovider = BucketBatcher(samples, padding_token=self._vocab.PAD)\n _idx, _sentences = dataprovider.get_data(scale, shuffle)\n elif strategy == \"scaled_batch\":\n dataprovider = ScaledBatcher(samples, cluster_count=40, padding_token=self._vocab.PAD)\n _idx, _sentences = dataprovider.get_data(scale, shuffle)\n else:\n raise ValueError(\"no such data strategy\")\n\n return _idx, _sentences\n\n def run(self, samples: List, batch_size: int):\n indices, batches = self._batch_data(samples, strategy=self._batch_strategy, scale=batch_size, shuffle=False)\n backend = self.backend\n\n predictions = []\n for indicies, (x, y) in zip(indices, batches):\n 
backend.renew_cg()\n\n words, lemmas, tags, chars = x\n\n words = backend.input_tensor(words, dtype=\"int\")\n tags = backend.input_tensor(tags, dtype=\"int\")\n lemmas = backend.input_tensor(lemmas, dtype=\"int\")\n\n arc_preds, rel_preds, _, _ = self._parser((words, lemmas, tags, None, None, chars))\n\n outs = [(ind, arc[1:], rel[1:]) for ind, arc, rel in zip(indicies, arc_preds, rel_preds)]\n\n predictions.extend(outs)\n\n predictions.sort(key=lambda tup: tup[0])\n\n return predictions\n\n def train(self, train: List, dev_file: str, dev: List, epochs: int, batch_size: int, callbacks: List = None, patience:int = -1):\n callbacks = callbacks if callbacks else [] # This is done to avoid using the same list.\n \n if patience > -1:\n logging.debug(f\"...Training with patience {patience} for less than {epochs} epochs\")\n else: \n logging.debug(f\"...Training without patience for exactly {epochs} epochs\")\n\n running_patience = patience\n\n training_data = self._batch_data(train, strategy=self._batch_strategy, scale=batch_size, shuffle=True)\n '''\n i.e. in dev mode, train is a list of len 100.\n each element in train is a tuple of 6 elements: ([words], [lemmas], ...) --> ([1, 452, 12188, 3107, 19765, 5], [1, 2, 2, 2, 2, 2], [1, 3, 3, 11, 3, 4], [-1, 2, 3, 0, 3, 3], [1, 42, 19, 1, 12, 3], [[1], [18, 57, 39], [40, 52, 52, 24], [81, 15, 52, 16, 57], [11, 15, 79, 52, 27, 46, 79], [39]])\n \n training_data is a tuple of 2 elements: _idx, _sentences --> both elements are lists\n _idx = [[0, 80], [1], [92, 2, 88], [58, 3, 74], [4, 90, 35, 50, 98], [5, 20, 75], [21, 6, 76], [83, 7], [94, 8, 12], [9, 72], [10, 62, 36, 82, 81, 48], [11], [22, 13], [14, 52], [15], [16], [24, 78, 17, 57, 95, 38, 33], [68, 18], [19, 29], [71, 23, 66], [25, 39], [51, 26], [27, 91, 93, 37], [28, 31, 42], [30], [32, 49, 43, 59], [34, 65, 45, 56, 47, 60], [40, 87], [96, 41], [44], [46, 70], [53, 89], [54, 73], [55, 61], [63], [64], [67], [69], [77], [79], [84], [85], [86], [97], [99]]\n _sentences = [Batch1, Batch2, Batch3... 
]\n \n \n '''\n\n backend = self.backend\n _, samples = training_data\n global_step = 0\n max_dev_uas=0.0\n for epoch in range(1, epochs+1):\n start = time.time()\n\n samples = sklearn.utils.shuffle(samples)\n\n logging.info(f\"Epoch {epoch}\")\n logging.info(\"=====================\")\n\n for step, (x, y) in enumerate(samples):\n batch_size, global_step = self._train_step(backend, batch_size, callbacks, global_step, x, y)\n\n do_break = self._evaluate_epoch(epoch, dev, dev_file, callbacks, batch_size, patience, max_dev_uas, running_patience, global_step, start)\n if do_break:\n break\n\n logging.debug(f\"Finished at epoch {epoch}\")\n\n def train_big_datasets(self, train_file: str, dev_file: str, dev: List, epochs: int, batch_size: int, callbacks: List = None, patience: int = -1, subset_size: int = 100000):\n callbacks = callbacks if callbacks else [] # This is done to avoid using the same list.\n\n if patience > -1:\n logging.debug(f\"...Training with patience {patience} for less than {epochs} epochs\")\n else:\n logging.debug(f\"...Training without patience for exactly {epochs} epochs\")\n\n running_patience = patience\n\n backend = self.backend\n global_step = 0\n max_dev_uas = 0.0\n for epoch in range(1, epochs + 1):\n\n start = time.time()\n logging.info(\"\")\n logging.info(f\"Epoch {epoch}\")\n logging.info(\"=====================\")\n\n with open(train_file, encoding=\"UTF-8\") as f:\n\n # ---------------------------------------------------\n # I move here functionality from vocabulary.py for the sake of efficiency in the large file reading (lpmayos)\n\n tokenize = True\n\n word_root = self._vocab.ROOT\n lemma_root = self._vocab.ROOT\n tag_root = self._vocab.ROOT\n rel_root = self._vocab.ROOT\n char_root = [self._vocab.ROOT]\n root_head = -1\n\n words, lemmas, tags, heads, rels, chars = [word_root], [lemma_root], [tag_root], [root_head], [rel_root], [char_root]\n\n read_sentences = 0\n total_read_sentences = 0\n training_data = []\n for line in f.readlines():\n\n blank_line, comment_line, word, lemma, tag, head, rel, characters = self._vocab._parse_line(line, tokenize=tokenize)\n\n if comment_line:\n pass\n\n elif not blank_line:\n words.append(word)\n lemmas.append(lemma)\n tags.append(tag)\n heads.append(head)\n rels.append(rel)\n chars.append(characters)\n\n else:\n sent = (words, lemmas, tags, heads, rels, chars)\n training_data.append(sent)\n read_sentences += 1\n total_read_sentences += 1\n words, lemmas, tags, heads, rels, chars = [word_root], [lemma_root], [tag_root], [root_head], [rel_root], [char_root]\n\n if read_sentences > 0 and read_sentences % subset_size == 0: # we have read 10000 sentences, lets use them to train\n\n logging.info('train_big_datasets; epoch %s; total sentences used to train: %s; read_sentences %s' % (epoch, total_read_sentences, read_sentences))\n\n _, samples = self._batch_data(training_data, strategy=self._batch_strategy, scale=batch_size, shuffle=True)\n samples = sklearn.utils.shuffle(samples)\n\n for step, (x, y) in enumerate(samples):\n\n batch_size, global_step = self._train_step(backend, batch_size, callbacks, global_step, x, y)\n\n read_sentences = 0\n training_data = []\n\n if len(training_data) > 0: # train with the last sentences\n logging.info('train_big_datasets; epoch %s; total sentences used to train: %s; read_sentences %s' % (epoch, total_read_sentences, read_sentences))\n\n _, samples = self._batch_data(training_data, strategy=self._batch_strategy, scale=batch_size, shuffle=True)\n samples = sklearn.utils.shuffle(samples)\n\n for 
step, (x, y) in enumerate(samples):\n batch_size, global_step = self._train_step(backend, batch_size, callbacks, global_step, x, y)\n\n # we have trained with all the sentences of the training set; evaluate epoch and finish, if needed\n do_break = self._evaluate_epoch(epoch, dev, dev_file, callbacks, batch_size, patience, max_dev_uas, running_patience, global_step, start)\n if do_break:\n break\n\n f.close()\n\n logging.debug(f\"Finished at epoch {epoch}\")\n\n def _train_step(self, backend, batch_size, callbacks, global_step, x, y):\n\n # renew graph\n backend.renew_cg()\n\n words, lemmas, tags, chars = x\n gold_arcs, gold_rels = y\n\n batch_size, n = words.shape\n\n # PAD = 0; ROOT = 1; OOV = 2; UNK = 2\n # Tokens > 1 are valid tokens we want to compute loss for use for accuracy metrics\n mask = np.greater(words, self._vocab.ROOT)\n num_tokens = int(np.sum(mask))\n\n \"\"\" this is necessary for satisfy compatibility with pytorch \"\"\"\n words = backend.input_tensor(words, dtype=\"int\")\n postags = backend.input_tensor(tags, dtype=\"int\")\n lemmas = backend.input_tensor(lemmas, dtype=\"int\")\n\n arc_preds, rel_preds, arc_scores, rel_scores = self._parser((words, lemmas, postags, gold_arcs, gold_rels, chars))\n\n arc_loss = self.arc_loss(arc_scores, arc_preds, gold_arcs, mask)\n rel_loss = self.rel_loss(rel_scores, None, gold_rels, mask)\n\n loss = arc_loss + rel_loss\n loss_value = backend.get_scalar(loss)\n loss.backward()\n\n backend.step(self._optimizer)\n\n arc_correct = np.equal(arc_preds, gold_arcs).astype(np.float32) * mask\n arc_accuracy = np.sum(arc_correct) / num_tokens\n\n rel_correct = np.equal(rel_preds, gold_rels).astype(np.float32) * mask\n rel_accuracy = np.sum(rel_correct) / num_tokens\n\n training_info = {\n \"arc_accuracy\": arc_accuracy,\n \"rel_accuracy\": rel_accuracy,\n \"arc_loss\": backend.get_scalar(arc_loss),\n \"rel_loss\": backend.get_scalar(rel_loss),\n \"global_step\": global_step\n }\n\n for callback in callbacks:\n callback.on_batch_end(training_info)\n\n sys.stdout.write(\n \"\\r\\rStep #%d: Acc: arc %.2f, rel %.2f, loss %.3f\"\n % (global_step, float(arc_accuracy), float(rel_accuracy), loss_value)\n )\n sys.stdout.flush()\n\n global_step += 1\n\n return batch_size, global_step\n\n def _evaluate_epoch(self, epoch, dev, dev_file, callbacks, batch_size, patience, max_dev_uas, running_patience, global_step, start):\n logging.debug(\"Completed epoch %s in %s\" % (epoch, time.time() - start))\n\n do_break = False\n\n metrics = self.parse_and_evaluate(dev_file, dev, batch_size, None)\n no_punct_dev_uas = metrics[\"nopunct_uas\"]\n no_punct_dev_las = metrics[\"nopunct_las\"]\n punct_dev_uas = metrics[\"uas\"]\n punct_dev_las = metrics[\"las\"]\n logging.debug(f\"UAS (wo. punct) {no_punct_dev_uas:.{5}}\\t LAS (wo. punct) {no_punct_dev_las:.{5}}\")\n logging.debug(f\"UAS (w. punct) {punct_dev_uas:.{5}}\\t LAS (w. 
punct) {punct_dev_las:.{5}}\")\n\n        if patience > -1:\n            # keep the best dev UAS seen so far; patience drops only when there is no improvement\n            # (note: the caller must carry max_dev_uas/running_patience across epochs for this to accumulate)\n            if no_punct_dev_uas <= max_dev_uas:\n                running_patience -= 1\n                logging.debug(f\"Patience decremented to {running_patience}\")\n            else:\n                max_dev_uas = no_punct_dev_uas\n                running_patience = patience\n                logging.debug(f\"Patience reset to {running_patience}\")\n\n            if running_patience == 0:\n                do_break = True\n                return do_break\n\n        batch_end_info = {\n            \"dev_uas\": no_punct_dev_uas,\n            \"dev_las\": no_punct_dev_las,\n            \"global_step\": global_step,\n            \"model\": self._parser\n        }\n\n        for callback in callbacks:\n            callback.on_epoch_end(epoch, batch_end_info)\n\n        return do_break\n\n    def parse(self, test_file: str, test_data: List, batch_size: int, output_file: str):\n\n        temporal = False\n        if output_file is None:\n            stripped_filename = ntpath.basename(test_file)\n            output_file = f\"{self._model_uid}_on_{stripped_filename}\"\n            temporal = True\n\n        # run parser on data\n        predictions = self.run(test_data, batch_size)\n\n        # write to file\n        uni_eval.write_predictions_to_file(predictions, reference_file=test_file, output_file=output_file, vocab=self._vocab)\n        logging.debug('output file saved to %s' % (output_file))\n\n        return output_file, temporal\n\n    def evaluate(self, output_file, test_file):\n        metrics = uni_eval.evaluate_files(output_file, test_file)\n        return metrics\n\n    def parse_and_evaluate(self, test_file: str, test_data: List, batch_size: int, output_file: str):\n\n        output_file, temporal = self.parse(test_file, test_data, batch_size, output_file)\n        metrics = uni_eval.evaluate_files(output_file, test_file)\n\n        if temporal:\n            os.remove(output_file)\n\n        return metrics\n\n    def save_to_file(self, filename: str) -> None:\n        self._parser.save_to_file(filename)\n\n    def load_from_file(self, filename: str) -> None:\n        self._parser.load_from_file(filename)\n","sub_path":"uniparse/parser_model.py","file_name":"parser_model.py","file_ext":"py","file_size_in_byte":16250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"240492076","text":"import contextlib\r\n\r\nclass AffinityPropogation(object):\r\n num_iterations = 100\r\n\r\n def __init__(self, data_points):\r\n self.lam = 0.9\r\n self.num_cols_and_rows = len(data_points)\r\n self.similarities = [[0 for x in range(self.num_cols_and_rows)] for y in range(self.num_cols_and_rows)]\r\n self.responsibilities = [[0 for x in range(self.num_cols_and_rows)] for y in range(self.num_cols_and_rows)]\r\n self.availabilities = [[0 for x in range(self.num_cols_and_rows)] for y in range(self.num_cols_and_rows)]\r\n self.exemplars = []\r\n self.clusters = [[]]\r\n self.set_similarities(data_points)\r\n print(self.similarities)\r\n print()\r\n self.set_exemplars(data_points)\r\n for i in range(self.num_iterations):\r\n self.set_responsibilities(data_points)\r\n self.set_availabilities(data_points)\r\n self.set_exemplars(data_points)\r\n if i % 10 == 0:\r\n print(i)\r\n # print(self.responsibilities)\r\n # print(self.availabilities)\r\n # print(self.exemplars)\r\n print([self.get_exemplar_for(i) for i in range(len(data_points))])\r\n self.group_points(data_points)\r\n print(self.clusters)\r\n print(\"Number of clusters: \" + str(self.get_num_clusters()))\r\n def get_max_index(self, data_points):\r\n max_index = 0\r\n for index in range(1, len(data_points)):\r\n if data_points[index] >= data_points[max_index]:\r\n max_index = index\r\n return max_index\r\n def get_max(self, data_points):\r\n if len(data_points) > 0:\r\n return data_points[self.get_max_index(data_points)]\r\n return 0\r\n def min(self, num1, num2):\r\n if num1 < num2:\r\n return num1\r\n return num2\r\n def set_similarity(self, data_points, row, col):\r\n self.similarities[row][col] = -(pow(data_points[row][0] - data_points[col][0], 2) + pow(data_points[row][1] - data_points[col][1], 2))\r\n def set_similarities(self, data_points):\r\n similarity_values = []\r\n for row in range(0, len(data_points)):\r\n for col in range(0, len(data_points)):\r\n self.set_similarity(data_points, row, col)\r\n for row in range(0, len(data_points)):\r\n for col in range(0, len(data_points)):\r\n if row == col:\r\n break\r\n similarity_values.append(self.similarities[row][col])\r\n similarity_values.sort()\r\n size = len(data_points) * (len(data_points) - 1) / 2\r\n if (size % 2 == 0):\r\n median = (similarity_values[int(size / 2)] + similarity_values[int(size / 2 - 1)]) / 2\r\n else:\r\n median = similarity_values[int(size / 2)]\r\n for i in range(len(data_points)):\r\n self.similarities[i][i] = int(median)\r\n def set_responsibility(self, data_points, row, col):\r\n input_similarity = self.similarities[row][col]\r\n max_similarities_plus_availabilities = self.get_max([self.similarities[row][x] + self.availabilities[row][x] for x in range(len(data_points)) if x != col])\r\n self.responsibilities[row][col] = (1 - self.lam) * (input_similarity - max_similarities_plus_availabilities) + self.responsibilities[row][col] * self.lam\r\n def set_responsibilities(self, data_points):\r\n for row in range(0, len(data_points)):\r\n for col in range(0, len(data_points)):\r\n self.set_responsibility(data_points, row, col)\r\n def set_availability(self, data_points, row, col):\r\n availability = 0\r\n for index in range(0, len(data_points)):\r\n if index != row and index != col:\r\n responsibility = self.responsibilities[index][col]\r\n if responsibility > 0:\r\n availability += responsibility\r\n if row == col:\r\n self.availabilities[row][col] = (1 - self.lam) * availability + self.availabilities[row][col] * self.lam\r\n 
else:\r\n self.availabilities[row][col] = (1 - self.lam) * self.min(0, self.responsibilities[row][row] + availability) + self.availabilities[row][col] * self.lam\r\n def set_availabilities(self, data_points):\r\n for row in range(0, len(data_points)):\r\n for col in range(0, len(data_points)):\r\n self.set_availability(data_points, row, col)\r\n def set_exemplars(self, data_points):\r\n self.exemplars = []\r\n for index in range(len(data_points)):\r\n if self.availabilities[index][index] + self.responsibilities[index][index] > 0:\r\n self.exemplars.append(index)\r\n def get_exemplar_for(self, point):\r\n candidate_similarities = []\r\n for i in range(len(self.exemplars)):\r\n candidate_similarities.append(self.similarities[point][self.exemplars[i]])\r\n return self.exemplars[self.get_max_index(candidate_similarities)]\r\n def group_points(self, data_points):\r\n exemplars_for_points = [self.get_exemplar_for(i) for i in range(len(data_points))]\r\n self.clusters = [[]]\r\n self.clusters[0].append(0)\r\n for i in range(1, len(exemplars_for_points)):\r\n for j in range(len(self.clusters)):\r\n if exemplars_for_points[i] == exemplars_for_points[self.clusters[j][0]]:\r\n self.clusters[j].append(i)\r\n break\r\n elif j == len(self.clusters) - 1:\r\n self.clusters.append([i])\r\n def get_num_clusters(self):\r\n return len(self.clusters)","sub_path":"AffinityPropogation.py","file_name":"AffinityPropogation.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"134807176","text":"from random import randint\n# GLOBALS\n\nx=''\nmatgame = [\n [x,x,x],\n [x,x,x],\n [x,x,x],\n]\n\ndef printmat(m):\n for i in m:\n print(\"_________________________\")\n for x in i:\n print('|\\t' + x + '\\t', end='')\n print('|')\n print(\"_________________________\")\n\n\ndef sceltains():\n sceltax, sceltay = None, None\n ins = False\n while not ins:\n try:\n sceltay = int(input(\"Scegli una riga! -> \"))\n if not 1 <= sceltay <= 3:\n print(\"Devi inserire un valore da 1 a 3!\")\n else:\n ins = True\n except ValueError:\n print(\"Devi inserire un numero!!\")\n\n ins = False\n while not ins:\n try:\n sceltax = int(input(\"Scegli una colonna! -> \"))\n if not 1 <= sceltax <= 3:\n print(\"Devi inserire un valore da 1 a 3!\")\n else:\n ins = True\n except ValueError:\n print(\"Devi inserire un numero!!\")\n\n return [sceltay, sceltax]\n\n\ndef smarty(mat):\n flipmat = []\n for i in range(0,3):\n rows = []\n for x in range(0,3):\n rows.append(mat[x][i])\n flipmat.append(rows)\n\n for i in range(0,3):\n if mat[i].count('O') == 2 and '' in mat[i]:\n return [i, mat[i].index('')]\n\n for i in range(0, 3):\n if flipmat[i].count('O') == 2 and '' in flipmat[i]:\n return [flipmat[i].index(''), i]\n\n d1 = [mat[0][0], mat[1][1], mat[2][2]]\n d2 = [mat[2][0], mat[1][1], mat[0][2]]\n\n if d1[0] == 'O' and d1[1] == 'O' and d1[2] == '' :\n return [2, 2]\n if d1[0] == 'O' and d1[1] == '' and d1[2] == 'O' :\n return [1, 1]\n if d1[0] == '' and d1[1] == 'O' and d1[2] == 'O':\n return [0, 0]\n\n if d2[0] == 'O' and d2[1] == 'O' and d2[2] == '' :\n return [0, 2]\n if d2[0] == 'O' and d2[1] == '' and d2[2] == 'O' :\n return [1, 1]\n if d2[0] == '' and d2[1] == 'O' and d2[2] == 'O':\n return [2, 0]\n\n\ndef pcchoose(mat):\n while True:\n x, y = randint(0,2), randint(0,2)\n sm = smarty(mat)\n if sm is not None:\n x, y = sm[0], sm[1]\n if matgame[x][y] == '':\n matgame[x][y] = 'O'\n break\n return mat\n\ndef winner(mat):\n w = ''\n # diagonale 1\n if mat[0][0] == 'X' and mat[1][1] == 'X' and mat[2][2] == 'X':\n w = 'X'\n # diagonale 2\n elif mat[2][0] == 'X' and mat[1][1] == 'X' and mat[0][2] == 'X':\n w = 'X'\n # orizzontali\n elif mat[0][0] == 'X' and mat[0][1] == 'X' and mat[0][2] == 'X':\n w = 'X'\n elif mat[1][0] == 'X' and mat[1][1] == 'X' and mat[1][2] == 'X':\n w = 'X'\n elif mat[2][0] == 'X' and mat[2][1] == 'X' and mat[2][2] == 'X':\n w = 'X'\n # verticali\n elif mat[0][0] == 'X' and mat[1][0] == 'X' and mat[2][0] == 'X':\n w = 'X'\n elif mat[0][1] == 'X' and mat[1][1] == 'X' and mat[2][1] == 'X':\n w = 'X'\n elif mat[0][2] == 'X' and mat[1][2] == 'X' and mat[2][2] == 'X':\n w = 'X'\n\n if mat[0][0] == 'O' and mat[1][1] == 'O' and mat[2][2] == 'O':\n w = 'O'\n # diagonale 2\n elif mat[2][0] == 'O' and mat[1][1] == 'O' and mat[0][2] == 'O':\n w = 'O'\n # orizzontali\n elif mat[0][0] == 'O' and mat[0][1] == 'O' and mat[0][2] == 'O':\n w = 'O'\n elif mat[1][0] == 'O' and mat[1][1] == 'O' and mat[1][2] == 'O':\n w = 'O'\n elif mat[2][0] == 'O' and mat[2][1] == 'O' and mat[2][2] == 'O':\n w = 'O'\n # verticali\n elif mat[0][0] == 'O' and mat[1][0] == 'O' and mat[2][0] == 'O':\n w = 'O'\n elif mat[0][1] == 'O' and mat[1][1] == 'O' and mat[2][1] == 'O':\n w = 'O'\n elif mat[0][2] == 'O' and mat[1][2] == 'O' and mat[2][2] == 'O':\n w = 'O'\n return w\n\n\ndef aretherespaces(mat):\n space = False\n for row in mat:\n for el in row :\n if el == '':\n space = True\n return space\n\nplaying = True\nfirsttime = True\nwhile playing:\n if firsttime and randint(0,1) :\n print(\"Comincia il 
pc...\")\n matgame = pcchoose(matgame).copy()\n printmat(matgame)\n\n firsttime = False\n sceltaok = True\n while sceltaok:\n p = sceltains()\n if matgame[p[0]-1][p[1]-1] == '':\n # inserimento scelta umano\n matgame[p[0]-1][p[1]-1] = 'X'\n # inserimento scelta computer\n sceltaok = False\n matgame = pcchoose(matgame).copy()\n else:\n print(\"Posizione già occupata... Scegline un'altra!\")\n\n\n printmat(matgame)\n\n if not aretherespaces(matgame):\n print(\"NON VINCE NESSUNO!\")\n playing = False\n else:\n x = winner(matgame)\n if x != '':\n if x == 'X':\n print(\"Ha vinto il giocatore umano!\")\n playing = False\n elif x == 'O':\n print(\"HO VINTO IO BRUTTO PORCO\")\n playing = False\n\n","sub_path":"best/tictactoe/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"542613180","text":"from collections import deque\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Dense, Flatten, Input\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.models import load_model, save_model\r\nimport random\r\nimport gym\r\nimport cv2\r\ntf.compat.v1.disable_eager_execution()\r\nepisodes = 100000\r\nBATCH_SIZE = 64\r\nMIN_MEM = 5000\r\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\r\nassert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\r\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\r\n# Deep Q-learning Agent\r\nclass DQNAgent:\r\n\tdef __init__(self, state_size, action_size):\r\n\t\tself.state_size = state_size\r\n\t\tself.action_size = action_size\r\n\t\tself.memory = deque(maxlen=75_000)\r\n\t\tself.gamma = 0.99 # discount rate\r\n\t\tself.epsilon = 1.25 # exploration rate\r\n\t\tself.epsilon_min = 0.01\r\n\t\tself.epsilon_decay = 0.995\r\n\t\tself.learning_rate = 0.0001\r\n\t\tself.model = self._build_model()\r\n\r\n\tdef _build_model(self):\r\n\t\t# Neural Net for Deep-Q learning Model\r\n\t\tmodel = Sequential()\r\n\t\tmodel.add(Dense(64, input_dim=inx[0], activation='relu'))\r\n\t\tmodel.add(Dense(32, activation='relu'))\r\n\t\tmodel.add(Dense(16, activation='relu'))\r\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\r\n\t\tmodel.compile(loss='mse',\r\n\t\t\t\t\t optimizer=Adam(lr=self.learning_rate))\r\n\t\treturn model\r\n\r\n\tdef remember(self, state, action, reward, next_state, done):\r\n\t\tself.memory.append((state, action, reward, next_state, done))\r\n\r\n\tdef act(self, state):\r\n\t\tif np.random.rand() <= self.epsilon:\r\n\t\t\treturn random.randrange(self.action_size)\r\n\t\telse:\r\n\t\t\tact_values = self.model.predict(state)\r\n\t\t\treturn np.argmax(act_values) # returns action\r\n\r\n\tdef replay(self, batch_size, epochs):\r\n\t\tminibatch = random.sample(self.memory, batch_size)\r\n\t\tstates = []\r\n\t\tnext_states = []\r\n\t\ttargets = []\r\n\t\ttarget_fs = []\r\n\t\tfor state, action, reward, next_state, done in minibatch:\r\n\t\t\ttarget = reward\r\n\t\t\tstate = state.reshape((1, inx[0]))\r\n\t\t\tstates.append(state)\r\n\t\t\tnext_state = next_state.reshape((1, inx[0]))\r\n\t\t\tnext_states.append(next_state)\r\n\t\t\tif not done:\r\n\t\t\t\ttarget = reward + self.gamma * \\\r\n\t\t\t\t\t\tnp.amax(self.model.predict(next_state))\r\n\t\t\ttargets.append(target)\r\n\t\t\ttarget_f = self.model.predict(state)\r\n\t\t\ttarget_f[0][action] = target\r\n\t\t\ttarget_fs.append(target_f)\r\n\t\tself.model.fit(np.array(states).reshape((-1, inx[0])), np.array(target_fs).reshape((-1, self.action_size)), epochs=epochs, verbose=0)\r\n\t\tif self.epsilon > self.epsilon_min:\r\n\t\t\tself.epsilon *= self.epsilon_decay\r\n\tdef save_model(self):\r\n\t\t# serialize model to JSON\r\n\t\t# model_json = self.model.to_json()\r\n\t\t# with open(\"model.json\", \"w\") as json_file:\r\n\t\t# \tjson_file.write(model_json)\r\n\t\t# # serialize weights to HDF5\r\n\t\t# self.model.save_weights(\"model.h5\")\r\n\t\t# print(\"Saved model to disk\")\r\n\t\tself.model.save('breakout.h5')\r\n\tdef load_model(self):\r\n\t\t# self.model = self._build_model()\r\n\t\tself.model = load_model('breakout.h5')\r\n\r\n#\r\n# Let’s Train the Agent\r\n# The training part is even shorter. 
I’ll explain in the comments.\r\nresume = False\r\n\r\nif __name__ == \"__main__\":\r\n\t# initialize gym environment and the agent\r\n\tenv = gym.make('Breakout-ram-v0')\r\n\tinx = env.observation_space.shape\r\n\tagent = DQNAgent(env.observation_space.shape, env.action_space.n)\r\n\tif resume:\r\n\t\tagent.load_model()\r\n\t# Iterate the game\r\n\tep_rewards = []\r\n\tfor e in range(episodes):\r\n\t\t# reset state in the beginning of each game\r\n\t\tstate = env.reset()\r\n\t\tstate = state / 255\r\n\t\tstate = state.reshape((1, inx[0]))\r\n\t\t# time_t represents each frame of the game\r\n\t\t# Our goal is to score as many points as possible before losing all lives\r\n\t\tep_reward = 0\r\n\t\tframes = 0  # reset per episode so the replay phase matches this episode's length\r\n\t\tdone = False\r\n\t\twhile not done:\r\n\t\t\t# turn this on if you want to render\r\n\t\t\t# env.render()\r\n\t\t\t# Decide action\r\n\t\t\taction = agent.act(state)\r\n\t\t\t# Advance the game to the next frame based on the action.\r\n\t\t\t# Reward is the score gained on this frame (points for bricks broken)\r\n\t\t\tnext_state, reward, done, _ = env.step(action)\r\n\t\t\tnext_state = next_state / 255\r\n\t\t\tnext_state = next_state.reshape((1, inx[0]))\r\n\t\t\t# Remember the previous state, action, reward, and done\r\n\t\t\tagent.remember(state, action, reward, next_state, done)\r\n\t\t\tep_reward += reward\r\n\t\t\t# make next_state the new current state for the next frame.\r\n\t\t\tstate = next_state\r\n\t\t\t# done becomes True when the game ends\r\n\t\t\t# ex) the agent loses its last life\r\n\t\t\tif done:\r\n\t\t\t\tep_rewards.append(ep_reward)\r\n\t\t\t\ta = min(100, len(ep_rewards))\r\n\t\t\t\t# print the score\r\n\t\t\t\tprint(\"episode: {}/{}, score: {}, 100 episode trailing score: {}\"\r\n\t\t\t\t\t .format(e, episodes, ep_reward, np.mean(ep_rewards[-a:])))\r\n\t\t\tframes += 1\r\n\t\t# train the agent with the experience of the episode\r\n\t\tif len(agent.memory) > MIN_MEM:\r\n\t\t\tfor _ in range(frames):\r\n\t\t\t\tagent.replay(BATCH_SIZE, 1)\r\n\t\tif not e % 50:\r\n\t\t\tagent.save_model()","sub_path":"DQN/DQN Breakout RAM.py","file_name":"DQN Breakout RAM.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"649501703","text":"import cv2\nimport numpy as np\nimport utlis\nfrom pythonRLSA import rlsa\nimport random\nimport math\n\n\nheightImg = 640\nwidthImg = 480\nutlis.initializeTrackbars()\n\n\ndef getXFromRectx(item):\n return item[0]\n\n\ndef getXFromRecty(item):\n return item[1]\n\n\ndef getMostCommonPixel(image):\n # image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n #image1 = Image.fromarray(image, 'RGB')\n histogram = {} #Dictionary keeps count of different kinds of pixels in image\n\n\n for i in range(0, image.shape[0]):\n for j in range(0, image.shape[1]):\n pixel = image.item(j, i)\n if pixel in histogram:\n histogram[pixel] += 1 # Increment count\n else:\n histogram[pixel] = 1 # pixel_val encountered for the first time\n\ndef remove_text(img,imgOrginal,imgConters):\n img2=cv2.cvtColor(img.copy(),cv2.COLOR_GRAY2RGB)\n img3=cv2.cvtColor(img.copy(),cv2.COLOR_GRAY2RGB)\n\n #img=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n #sobelX = cv2.Sobel(img, cv2.CV_8U, 1, 0)\n #sobelY = cv2.Sobel(img, cv2.CV_8U, 0, 1)\n #sobelCombined = cv2.bitwise_and(sobelX, sobelY)\n kernel = np.ones((2,2))\n gradiant = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\n rect = cv2.morphologyEx(gradiant, cv2.MORPH_OPEN, kernel)\n kernel = np.ones((2, 2))\n dialate = cv2.morphologyEx(rect, cv2.MORPH_ERODE, kernel)\n\n\n #imgAdaptiveThre = cv2.adaptiveThreshold(gradiant, 255, 1, 1, 7, 2)\n ret3, otsu = cv2.threshold(gradiant, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n blankImage1 = cv2.resize(otsu, (widthImg, heightImg)) # RESIZE IMAGE\n cv2.imshow(\"otsu1\", blankImage1)\n for cntr in imgConters:\n x, y, w, h = cv2.boundingRect(cntr)\n if (((x == 0) & (y == 0)) | ((x == 0) & (y + h == otsu.shape[0])) | ((x + w == otsu.shape[1]) & (y == 0)) | (\n ((x + w) == otsu.shape[1]) & ((y + h) == otsu.shape[0]))) | (\n h * w < imgOrginal.shape[0] * imgOrginal.shape[1] * 0.0009) | (\n (w / h > 8 and w > 50) | (h / w > 8 and h > 50))|(h>otsu.shape[0]/2):\n continue\n\n otsu=cv2.rectangle(otsu, (x-8 if x>=8 else 0, y-8 if y>=8 else 0), (x+w+10 if x+w+10<=otsu.shape[1] else otsu.shape[1], y + h+10 if y + h+10<=otsu.shape[0] else otsu.shape[0]), (0, 0,0), -1)\n ret3, otsu = cv2.threshold(otsu, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # kernel = np.ones((1, 10))\n # otsu = cv2.morphologyEx(otsu, cv2.MORPH_CLOSE, kernel)\n # kernel = np.ones((10, 1))\n # otsu = cv2.morphologyEx(otsu, cv2.MORPH_CLOSE, kernel)\n\n\n #bw = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 51, 10)\n # cv2.imshow(\"bw\",bw)\n otsuCanny=cv2.Canny(otsu, 50, 150, apertureSize=3)\n cv2.imwrite(\"gradiant.jpg\", otsu)\n blankImage1 = cv2.resize(otsu, (widthImg, heightImg)) # RESIZE IMAGE\n cv2.imshow(\"otsu\", blankImage1)\n # blankImage2 = cv2.resize(otsuCanny, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"canny\", blankImage2)\n contours = cv2.findContours(otsuCanny, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n\n rects = []\n blankImage = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n blankImage2 = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n line_contours=[]\n for cntr in contours:\n\n x, y, w, h = cv2.boundingRect(cntr)\n if(((w/h)>8) & (w>50)):\n line_contours.append(cv2.boundingRect(cntr))\n img2=cv2.line(img2, (x, y), (x+w, y ), ( 255,0, 0), 5)\n img2=cv2.line(img2, (x, y+h), (x+w, y+h ), ( 255,0, 0), 5)\n blankImage=cv2.line(blankImage, (x, y), (x+w, y ), (255, 255, 255), 5)\n blankImage=cv2.line(blankImage, (x, y+h), 
(x+w, y+h ), (255, 255, 255), 5)\n\n\n elif (((h/w)>8) & (h>50)):\n line_contours.append(cv2.boundingRect(cntr))\n\n img2 = cv2.line(img2, (x, y), (x , y+h), (0, 255, 0), 5)\n img2 = cv2.line(img2, (x+w, y ), (x + w, y + h), (0, 255, 0), 5)\n blankImage = cv2.line(blankImage, (x, y), (x , y+h), (255, 255, 255), 5)\n blankImage = cv2.line(blankImage, (x+w, y ), (x + w, y + h), (255, 255, 255), 5)\n\n else:\n img2 = cv2.rectangle(img2, (x, y), (x + w, y + h), (255, 255, 255), -1)\n blankImage = cv2.resize(img2, (widthImg, heightImg)) # RESIZE IMAGE\n cv2.imshow(\"textRMV_R44444\", blankImage)\n\n\n\n\n\n kernel = np.ones((5, 5))\n dilate = cv2.dilate(blankImage, kernel, iterations=3) # APPLY DILATION\n dilate = cv2.cvtColor(dilate,cv2.COLOR_RGB2GRAY)\n otsuCanny = cv2.Canny(dilate, 50, 150, apertureSize=3)\n lines_H = cv2.HoughLinesP(otsuCanny, 1, np.pi / 360, 200, minLineLength=int(img.shape[0] * .008), maxLineGap=int(img.shape[0] * .05))\n # if (lines_H is not None):\n # for line in lines_H:\n # x1, y1, x2, y2 = line[0]\n # cv2.line(blankImage2, (x1, y1), (x2, y2), (255, 255, 0), 5)\n #\n # if(y1==y2):\n # lx=min(x1,x2)\n # rx=max(x1,x2)\n # nearLCol=0\n # nearRCol=collems[0]\n # i=0\n # for col in len(collems):#collems must be sorted\n # if lx > col:\n # nearLCol=col\n # else:\n # break\n # for col in len(collems):#collems must be sorted\n # if rx > col:\n # continue\n # else:\n # nearRCol = col\n # break\n\n\n\n\n #\n\n #resize = cv2.resize(imgAdaptiveThre, (widthImg, heightImg)) # RESIZE IMAGE\n # imgForHorizontalLine_Inver_r = cv2.resize(blankImage2, (widthImg, heightImg)) # RESIZE IMAGE\n # imgForVerticalLine_Invert_R = cv2.resize(otsuCanny, (widthImg, heightImg)) # RESIZE IMAGE\n\n #cv2.imshow(\"resize\",resize)\n # cv2.imshow(\"gradiant\",imgForHorizontalLine_Inver_r)\n # cv2.imshow(\"imgForVerticalLine_Invert_R\",imgForVerticalLine_Invert_R)\n\n\n\n return img2,line_contours\n\n\ndef drowLines(img,imgOriginal,thresh):\n\n cv2.waitKey(10)\n cv2.waitKey(10)\n\n imgCopy1 = img.copy()\n imgCopy2 = img.copy()\n imgCopy3 = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n imgCopy4 = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n\n rmv=remove_images(imgCopy1,imgCopy2,thresh)\n\n img_withowt_pics=rmv[3]\n img_Conters=rmv[4]\n textRMV=remove_text(rmv[0], imgOriginal,rmv[4])\n contours=textRMV[1]\n\n\n kernel = np.ones((1, 5))\n\n sobelX = cv2.Sobel(img_withowt_pics, cv2.CV_16U, 1, 0)\n sobelY = cv2.Sobel(img_withowt_pics, cv2.CV_16U, 0, 1)\n\n sobelX = np.uint8(np.absolute(sobelX))\n sobelY = np.uint8(np.absolute(sobelY))\n\n\n for cntr in contours:\n\n x, y, w, h = cntr\n if(((w/h)>8) & (w>50)):\n\n #sobelY=cv2.line(sobelY, (x, y), (x+w, y ),(255, 255, 255) , 5)\n\n sobelX=cv2.line(sobelX, (x, y), (x+w, y ),(0, 0, 0) , 5)\n\n imgCopy3 = cv2.line(imgCopy3, (x, y), (x + w, y), (255, 255, 255), 5)\n imgCopy3 = cv2.line(imgCopy3, (x, y + h), (x + w, y + h), (255, 255, 255), 5)\n\n elif (((h/w)>8) & (h>50)):\n # sobelY=cv2.line(sobelY, (x, y), (x+w, y ),(0, 0, 0), 5)\n sobelY=cv2.line(sobelY, (int(x+w/2), y ), (int(x+w/2), y + h), (0, 0, 0), 10)\n imgCopy4=cv2.line(imgCopy4, (int(x+w/2), y), (int(x+w/2), y + h),(0, 0, 255), 10)\n\n # imgCopy3=cv2.line(imgCopy3, (x, y), (x+w, y ),(0, 0, 255), 5)\n # imgCopy3=cv2.line(imgCopy3, (x+w, y ), (x + w, y + h), (0, 0, 255), 5)\n\n # sobelX=cv2.line(sobelX, (x, y), (x+w, y ),(255, 255, 255), 5)\n sobelX=cv2.line(sobelX, (int(x+w/2), y ), (int(x+w/2), y + h), (255, 255, 255), 5)\n\n # kernel = np.ones((5,50))\n #\n # imgCopy3 = 
cv2.dilate(imgCopy3, kernel, iterations=2) # APPLY DILATION\n # imgCopy3 = cv2.erode(imgCopy3, kernel, iterations=2) # APPLY DILATION\n # imgCopy3=cv2.cvtColor(imgCopy3,cv2.COLOR_RGB2GRAY)\n ############################################################################\n\n kernel = np.ones((50, 2))\n imgCopy4 = cv2.dilate(imgCopy4, kernel, iterations=2) # APPLY DILATION\n imgCopy4 = cv2.erode(imgCopy4, kernel, iterations=2) # APPLY DILATION\n kernel = np.ones((10, 10))\n\n imgCopy4 = cv2.dilate(imgCopy4, kernel, iterations=1) # APPLY DILATION\n imgCopy4 = cv2.erode(imgCopy4, kernel, iterations=1) # APPLY DILATION\n\n ret3, otsu_hori_lines = cv2.threshold(cv2.cvtColor(imgCopy4, cv2.COLOR_RGB2GRAY), 0, 255,\n cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n contours_hori_lines = cv2.findContours(otsu_hori_lines, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)\n\n contours_hori_lines = contours_hori_lines[0] if len(contours_hori_lines) == 2 else contours_hori_lines[1]\n # lines = cv2.HoughLinesP(imgCopy3, 5, np.pi/2, 100, minLineLength=int(imgCopy3.shape[0] / 7),maxLineGap=int(imgCopy3.shape[0] * .009))\n y_lines = []\n for line in contours_hori_lines:\n x1, y1, x2, y2 = cv2.boundingRect(line)\n cv2.line(imgCopy2, (int(x1 + (x2 / 2)), y1), (int(x1 + (x2 / 2)), y1 + y2), (255, 255, 255), 10)\n y_lines.append([x1, y1, x2, y2, False])\n ####################################################################################\n cv2.waitKey(10)\n cv2.waitKey(10)\n\n kernel = np.ones((2, 50))\n imgCopy3 = cv2.dilate(imgCopy3, kernel, iterations=2) # APPLY DILATION\n imgCopy3 = cv2.erode(imgCopy3, kernel, iterations=3) # APPLY DILATION\n kernel = np.ones((10, 10))\n imgCopy3 = cv2.dilate(imgCopy3, kernel, iterations=1) # APPLY DILATION\n imgCopy3 = cv2.erode(imgCopy3, kernel, iterations=1) # APPLY DILATION\n ret3, otsu = cv2.threshold(cv2.cvtColor(imgCopy3, cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n\n\n contours_l = cv2.findContours(otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)\n\n contours_l = contours_l[0] if len(contours_l) == 2 else contours_l[1]\n # lines = cv2.HoughLinesP(imgCopy3, 5, np.pi/2, 100, minLineLength=int(imgCopy3.shape[0] / 7),maxLineGap=int(imgCopy3.shape[0] * .009))\n x_line = []\n for line in contours_l:\n x1, y1, x2, y2 =cv2.boundingRect(line)\n cv2.line(imgCopy2, (x1,int( y1+(y2/2))), (x1+x2, int(y1+(y2/2))), (255, 255, 255), int(heightImg / 350))\n x_line.append([ x1, y1, x2, y2, False])\n ####################################################################################\n\n\n\n x_line.sort(key=getXFromRecty)\n i = 0\n j = 0\n maxWidth = 0\n maxWidthIndex = 0\n for line in x_line:\n x1, y1, w1, h1, s1 = line\n\n if imgCopy3.shape[0] / 4 > y1 + h1 / 2:\n if (maxWidth < w1):\n maxWidth = w1\n maxWidthIndex = int(y1 + h1 / 2)\n j = i\n else:\n break\n i = i + 1\n x_lineCopy = []\n upperBoder = 0\n\n\n if (maxWidth > (imgCopy3.shape[1] *3)/ 5):\n x_line = []\n k = 0\n upperBoder = maxWidthIndex\n\n #####################################################################################\n\n\n kernel = np.ones((1, 3))\n erodeY = cv2.erode(sobelY, kernel, iterations=1) # APPLY DILATION\n imgDial = cv2.dilate(erodeY, kernel, iterations=1) # APPLY DILATION\n\n kernelX = np.ones((3, 1))\n erodeX = cv2.erode(sobelX, kernelX, iterations=1) # APPLY DILATION\n imgDialX = cv2.dilate(erodeX, kernelX, iterations=1) # APPLY DILATION\n\n\n\n\n ret3, imgDial_I = cv2.threshold(imgDial, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n # imgDial_I = cv2.erode(imgDial_I, kernel, 
iterations=1) # APPLY DILATION\n # imgDial_I = cv2.dilate(imgDial_I, kernel, iterations=1) # APPLY DILATION\n\n kernel = np.ones((15, 15))\n\n imgDial_I = cv2.dilate(imgDial_I, kernel, iterations=1) # APPLY DILATION\n # imgDial_I = cv2.dilate(imgDial_I, kernel, iterations=1) # APPLY DILATION\n blankImageX = cv2.resize(imgCopy3, (widthImg, heightImg)) # RESIZE IMAGE\n\n cv2.imshow(\"lines\", blankImageX)\n ret3, imgDial_I = cv2.threshold(imgDial_I, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n\n image_rlsa_hori = rlsa.rlsa(image=imgDial_I, horizontal=False, vertical=True, value=imgDial_I.shape[0]/40)\n\n ret3, imgDial_IX = cv2.threshold(imgDialX, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n image_rlsa_X = rlsa.rlsa(image=imgDial_IX, horizontal=True , vertical=False, value=imgDial_I.shape[1]/40)\n\n\n #############\n\n for cntr in img_Conters:\n x, y, w, h = cv2.boundingRect(cntr)\n # epsilon = 0.01 * cv2.arcLength(cntr, True)\n # approx = cv2.approxPolyDP(cntr, epsilon, True)\n # hull = cv2.convexHull(cntr)\n\n if (((x == 0) & (y == 0)) | ((x == 0) & (y + h == image_rlsa_hori.shape[0])) | ((x+w == image_rlsa_hori.shape[1]) &(y == 0)) | (\n ((x+w) == image_rlsa_hori.shape[1]) & ((y+h) == image_rlsa_hori.shape[0]))) | (h * w < imgOriginal.shape[0] * imgOriginal.shape[1] * 0.0009):\n\n continue\n # img_with_mood_boxes=cv2.rectangle(img_with_mood_boxes, (x, y), (x + w, y + h), (mood[0], mood[0], mood[0]), -1)\n # imgDial_v=cv2.rectangle(imgDial_v, (x, y), (x + w, y + h), (0,0, 0), -1)\n image_rlsa_hori=cv2.rectangle(image_rlsa_hori, (x, y), (x + w, y + h), (0,0,0), -1)\n image_rlsa_X=cv2.rectangle(image_rlsa_X, (x, y), (x + w, y + h), (0,0,0), -1)\n # img_with_wight_boxes=cv2.rectangle(img_with_wight_boxes, (x, y), (x + w, y + h), (255, 255, 255), -1)\n\n #############\n\n\n\n # imgDial_IR = cv2.resize(imgDial, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"imgDial_IR\", imgDial_IR)\n\n kernel = np.ones((5, 5))\n\n image_rlsa_hori_dilate = cv2.erode(image_rlsa_hori, kernel, iterations=1) # APPLY DILATION\n kernelx = np.ones((15, 2))\n image_rlsa_x_dilate = cv2.erode(image_rlsa_X, kernelx, iterations=1) # APPLY DILATION\n for cntr in contours:\n\n x, y, w, h = cntr\n if(((w/h)>8) & (w>50)):\n\n #sobelY=cv2.line(sobelY, (x, y), (x+w, y ),(255, 255, 255) , 5)\n\n sobelX=cv2.line(sobelX, (x, y), (x+w, y ),(255, 255, 255) , 5)\n\n elif (((h/w)>8) & (h>img.shape[0]/50)):\n # image_rlsa_hori_dilate=cv2.line(image_rlsa_hori_dilate, (x, y), (x+w, y ),(255, 255, 255), 5)\n image_rlsa_hori_dilate=cv2.line(image_rlsa_hori_dilate, (int(x+w/2), y ), (int(x+w/2), y + h), (255, 255, 255), 10)\n\n # sobelX=cv2.line(sobelX, (x, y), (x+w, y ),(0, 0, 0), 5)\n sobelX=cv2.line(sobelX, (int(x+w/2), y ), (int(x+w/2), y + h), (0, 0, 0), 10)\n image_rlsa_hori_dilate = cv2.rectangle(img=image_rlsa_hori_dilate, pt1=(0, 0),\n pt2=(image_rlsa_hori_dilate.shape[1], upperBoder), color=(0, 0, 0),\n thickness=-1)\n blankImageX = cv2.resize(image_rlsa_hori_dilate, (widthImg, heightImg)) # RESIZE IMAGE\n cv2.imshow(\"textRMV_Rsla\", blankImageX)\n for line in y_lines:\n x1, y1, w1, h1, s1 = line\n image_rlsa_hori_dilate = cv2.line(image_rlsa_hori_dilate, (int(x1 + w1 / 2), y1), (int(x1 + w1 / 2), y1 + h1),\n (255, 255, 255), 10)\n\n lines_V = cv2.HoughLinesP(image_rlsa_hori_dilate, 5, np.pi, 100, minLineLength=int(image_rlsa_hori_dilate.shape[0] /7), maxLineGap=int(imgOriginal.shape[0] * .009))\n blankImage = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n\n\n\n # if (lines_V is not None):\n # for line in 
lines_V:\n # x1, y1, x2, y2 = line[0]\n # cv2.line(blankImage, (x1, y1), (x2, y2), (255, 255, 255), int(heightImg / 350))\n blankImageX = cv2.resize(blankImage, (widthImg, heightImg)) # RESIZE IMAGE\n cv2.imshow(\"textRMV_Rsla\", blankImageX)\n blankImage = cv2.rectangle(img=blankImage, pt1=(0, 0),\n pt2=(blankImage.shape[1], upperBoder), color=(0, 0, 0),\n thickness=-1)\n # blankImage = cv2.cvtColor(blankImage, cv2.COLOR_BGR2GRAY)\n # textRMV_R = cv2.resize(blankImage, (widthImg, heightImg)) # RESIZE IMAGE\n\n\n kernel = np.ones((5, 5))\n blankImage = cv2.dilate(blankImage, kernel, iterations=2) # APPLY EROSION\n\n blankImage = cv2.resize(blankImage, (widthImg, heightImg)) # RESIZE IMAGE\n\n blankImageX = cv2.resize(image_rlsa_x_dilate, (widthImg, heightImg)) # RESIZE IMAGE\n cv2.imshow(\"textRMV_R\", blankImage)\n ret3, blankImage = cv2.threshold(cv2.cvtColor(blankImage,cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n ret3, blankImageX = cv2.threshold(blankImageX, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n blankimCopy=blankImageX.copy()\n blankImage[blankImage == 0] = 0\n blankImage[blankImage == 255] = 1\n vertical_projection = np.sum(blankImage, axis=0) * widthImg / heightImg\n\n blankImageX[blankImageX == 0] = 0\n blankImageX[blankImageX == 255] = 1\n x_projection = np.sum(blankImageX, axis=1) * widthImg / heightImg\n\n\n blankImageforVerticle = np.zeros((heightImg, widthImg, 3), np.uint8)\n blankImageforX = np.zeros((heightImg, widthImg, 3), np.uint8)\n\n for col in range(0, widthImg):\n cv2.line(blankImageforVerticle, (col, heightImg), (col, heightImg - int(vertical_projection[col])),\n (255, 255, 255), 1)\n for row in range(heightImg):\n cv2.line(blankImageforX, (0, row),\n (int(x_projection[row]), row),\n (255, 255, 255), 1)\n # cv2.imshow(\"blankImageforVerticle\",blankImageforVerticle)\n # cv2.imshow(\"test\",blankImageforVerticle)\n\n\n\n v_projection_C=blankImageforVerticle.copy()\n x_projection_C=blankImageforX.copy()\n cv2.imshow(\"hori\",x_projection_C)\n blankImageforHorizontal=cv2.rectangle(img=x_projection_C, pt1=(0, heightImg), pt2=(int((widthImg * widthImg * 0.98) / heightImg), 0), color=(0, 0, 0), thickness=-1)\n blankImageforHorizontal = cv2.cvtColor(blankImageforHorizontal, cv2.COLOR_BGR2GRAY)\n #kernel = np.array([[1, 1], [1, 1], [1, 1]], dtype=np.uint8)\n kernel = np.ones((3, 2))\n blankImageforHorizontal = cv2.dilate(blankImageforHorizontal, kernel, iterations=2) # APPLY DILATION\n blankImageforHorizontal = cv2.erode(blankImageforHorizontal, kernel, iterations=2) # APPLY DILATION\n\n\n\n contours_h = cv2.findContours(blankImageforHorizontal, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)\n\n contours_h = contours_h[0] if len(contours_h) == 2 else contours_h[1]\n rects_h = []\n\n for cntr in contours_h:\n x, y, w, h = cv2.boundingRect(cntr)\n x, y, w, h = int(x * img.shape[1] / widthImg), int(y * img.shape[0] / heightImg), int(\n w * img.shape[1] / widthImg),int(h * img.shape[0] / heightImg)\n rects_h.append([x, y, w, h])\n rects_h.sort(key=getXFromRecty)\n\n upperConer = 0, 0, 0, 0\n bottomConer = [0, img.shape[0], 0, 0]\n if len(rects_h) > 1:\n if (rects_h[0][1] < 100):\n upperConer = [0, rects_h[0][1],0, rects_h[0][3] ]\n if (img.shape[0] - rects_h[len(rects_h) - 1][1] - rects_h[len(rects_h) - 1][3] maxWidth) and (x != 0) and ((x + w) != widthImg)) |((w > maxWidth) and (w > widthImg/5)):\n maxWidth = w\n\n ###############################################################\n ##############################################################\n 
kernel = np.ones((10, 10))\n blankImageforVerticle_2 = cv2.dilate(blankImageforVerticle_2, kernel, iterations=1) # APPLY EROSION\n blankImageforVerticle_2 = cv2.erode(blankImageforVerticle_2, kernel, iterations=1) # APPLY EROSION\n # img2=cv2.rectangle(blankImageforVerticle, (x, y), (x + w, y + h), (255, 0, 0), 1)\n\n bw_for_bottome_box = blankImageforVerticle_2.copy()\n bw_for_bottome_box[bw_for_bottome_box == 0] = 0\n bw_for_bottome_box[bw_for_bottome_box == 255] = 1\n x_projection_for_bottom_box = np.sum(bw_for_bottome_box, axis=1) * widthImg / heightImg\n blankImageforX_for_bottom_box = np.zeros((heightImg, widthImg, 3), np.uint8)\n\n for row in range(heightImg):\n cv2.line(blankImageforX_for_bottom_box, (0, row),\n (int(x_projection_for_bottom_box[row]), row),\n (255, 255, 255), 1)\n\n blankImageforX_for_bottom_box= cv2.cvtColor(blankImageforX_for_bottom_box,cv2.COLOR_RGB2GRAY)\n contour = cv2.findContours(blankImageforX_for_bottom_box, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contour = contour[0] if len(contour) == 2 else contour[1]\n x, y, w, h = cv2.boundingRect(contour[0])\n blankImageforX_for_bottom_box=cv2.rectangle(img=blankImageforX_for_bottom_box, pt1=(0, heightImg), pt2=(int(w * 0.98), 0), color=(0, 0, 0), thickness=-1)\n\n cv2.imshow(\"test\", blankImageforVerticle_2)\n cv2.waitKey(100)\n contour= cv2.findContours(blankImageforX_for_bottom_box, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contour = contour[0] if len(contour) == 2 else contour[1]#############################################################################################\n\n if(len(contour)>0):\n x, y, w, h = cv2.boundingRect(contour[0])\n if(x>(widthImg*widthImg*3)/(heightImg*10)):\n\n cv2.rectangle(img=v_projection_C, pt1=(0, heightImg),\n pt2=(widthImg, int(heightImg-(h*5/4))), color=(0, 0, 0), thickness=-1)\n cv2.rectangle(img=blankImageforVerticle_2, pt1=(0, heightImg),\n pt2=(widthImg, int(heightImg-(h*5/4))), color=(0, 0, 0), thickness=-1)\n\n\n\n cv2.imshow(\"test2\", blankImageforVerticle_2)\n kernel = np.ones((15, 15))\n v_projection_C = cv2.dilate(v_projection_C, kernel, iterations=1) # APPLY EROSION\n v_projection_C = cv2.erode(v_projection_C, kernel, iterations=1) # APPLY EROSION\n _, v_projection_C = cv2.threshold(cv2.cvtColor(v_projection_C, cv2.COLOR_RGB2GRAY), 0, 255,\n cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # v_projection_C = cv2.cvtColor(v_projection_C, cv2.COLOR_RGB2GRAY)\n contours_v = cv2.findContours(v_projection_C, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours_v = contours_v[0] if len(contours_v) == 2 else contours_v[1]\n\n cv2.imshow(\"ttttttt\", v_projection_C)\n\n for cntr in contours_v:\n x, y, w, h = cv2.boundingRect(cntr)\n\n v_projection_C=cv2.rectangle(v_projection_C, (x-(5 if x>5 else 0), y+20), (x + w+(5 if x+w+5 1:\n if (rects[0][0] < img.shape[1]/15):\n leftConer = [rects[0][0] , rects[0][1],\n rects[0][2] , 0]\n if (widthImg - rects[len(rects) - 1][0] - rects[len(rects) - 1][2] < img.shape[1]/15):\n # rightConer =rects_v[len(rects_v)-1] **imgOrginal.imgThresholdBW.shape[1]/widthImg\n rightConer = [rects[len(rects) - 1][0] , rects[len(rects) - 1][1],\n rects[len(rects) - 1][2] , 0]\n\n\n blankImage = cv2.resize(v_projection_C, (widthImg, heightImg)) # RESIZE IMAGE\n\n cv2.imshow(\"blankImage_11\", blankImage)\n backgraond_line = lines_V\n num_of_effective_col = len(rects)\n if (leftConer[2] > 0):\n num_of_effective_col = num_of_effective_col - 1\n if (rightConer[2] > 0):\n num_of_effective_col = num_of_effective_col - 1\n cv2.waitKey(10)\n\n return 
[leftConer,rightConer],[upperConer,bottomConer],maxWidth,rects,rects_h,backgraond_line,num_of_effective_col #rects=collems rects_h=vertical separethins\n\n\ndef remove_images(img,imgOrginal,tresh):\n cv2.waitKey(10)\n\n kernalSize=int( img.shape[1] / widthImg/20)\n if ( kernalSize%2==0):kernalSize=kernalSize+1\n CropedImg1 = cv2.GaussianBlur(img,(kernalSize , kernalSize), 0) # gaussian\n #kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n kernel = np.ones((5, 5))#10\n # grayC=cv2.morphologyEx(gray,cv2.MORPH_CLOSE,kernel)\n imgDial = cv2.dilate(CropedImg1, kernel, iterations=4) # APPLY DILATION\n #cv2.imshow(\"dilate\", imgDial)\n kernel = np.ones((5, 5))\n imgThreshold = cv2.erode(imgDial, kernel, iterations=1) # APPLY EROSION\n\n\n imgThreshold = cv2.GaussianBlur(imgDial,(kernalSize , kernalSize), 0) # gaussian\n\n kernel = np.array([[-1, -1, -1],\n [-1, 9, -1],\n [-1, -1, -1]])\n sharpened = cv2.filter2D(imgThreshold, -1, kernel) # applying the sharpening kernel to the input image & displaying it.\n # sharpened = cv2.Canny(sharpened, 50, 150, apertureSize=3)\n\n ret3, bw1 = cv2.threshold(sharpened, tresh , 255, cv2.THRESH_BINARY_INV)\n bw1 = cv2.dilate(bw1, kernel, iterations=2) # APPLY EROSION\n\n contours = cv2.findContours(bw1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n sImg = cv2.resize(imgOrginal, (int(imgThreshold.shape[1] * widthImg / imgOrginal.shape[1]),\n int(imgThreshold.shape[0] * heightImg / imgOrginal.shape[0])))\n\n mood = utlis.getMostCommonPixel(sImg)\n img_with_wight_boxes = img.copy()\n img_with_mood_boxes = img.copy()\n img_with_black_boxes = img.copy()\n img_withowt_image = img.copy()\n for cntr in contours:\n x, y, w, h = cv2.boundingRect(cntr)\n epsilon = 0.01 * cv2.arcLength(cntr, True)\n approx = cv2.approxPolyDP(cntr, epsilon, True)\n hull = cv2.convexHull(cntr)\n\n if (((x == 0) & (y == 0)) | ((x == 0) & (y + h == img.shape[0])) | ((x+w == img.shape[1]) &(y == 0)) | (\n ((x+w) == img.shape[1]) & ((y+h) == img.shape[0]))) | (h * w < imgOrginal.shape[0] * imgOrginal.shape[1] * 0.0009)|((w/h>8 and w>50) | (h/w>8 and h>50) ):\n\n continue\n #img_with_mood_boxes=cv2.rectangle(img_with_mood_boxes, (x, y), (x + w, y + h), (mood[0], mood[0], mood[0]), -1)\n img_with_mood_boxes=cv2.rectangle(img_with_mood_boxes, (x-8 if x>=8 else 0, y-8 if y>=8 else 0), (x+w+8 if x+w+8<=img_with_mood_boxes.shape[1] else img_with_mood_boxes.shape[1], y + h+8 if y + h+8<=img_with_mood_boxes.shape[0] else img_with_mood_boxes.shape[0]), (mood[0], mood[0], mood[0]), -1)\n\n img_withowt_image = cv2.drawContours(img_withowt_image, [hull], -1, (mood[0], mood[0], mood[0]), thickness=-1)\n img_withowt_image = cv2.drawContours(img_withowt_image, [hull], -1, (mood[0], mood[0], mood[0]), thickness=10)\n\n img_with_black_boxes=cv2.rectangle(img_with_black_boxes, (x, y), (x + w, y + h), (0,0, 0), -1)\n img_with_wight_boxes=cv2.rectangle(img_with_wight_boxes, (x, y), (x + w, y + h), (255, 255, 255), -1)\n #\n # img_withowt_image=cv2.drawContours(img_withowt_image, [cntr], -1, (mood[0], mood[0], mood[0]), -1)\n # img_withowt_image=cv2.drawContours(img_withowt_image, [cntr], -1, (mood[0], mood[0], mood[0]), 10)\n\n cropedImg2 = cv2.resize(img_with_mood_boxes, (widthImg, heightImg))\n cv2.imshow(\"img2\", cropedImg2)\n cropedImg3 = cv2.resize(img_withowt_image, (widthImg, heightImg))\n cv2.imshow(\"img3\", cropedImg3)\n return img_withowt_image,img_with_mood_boxes,img_with_wight_boxes,img_with_black_boxes,contours\n\n\n#Fainalized 
vertical_separater\ndef vertical_separater(img,imgOrginal):\n\n imgCopy = img.copy()\n imgCopy2 = img.copy()\n sobelX = cv2.Sobel(img, cv2.CV_16S, 1, 0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)\n # Gradient-Y\n # grad_y = cv.Scharr(gray,ddepth,0,1)\n sobelY = cv2.Sobel(img, cv2.CV_16S, 0, 1, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)\n\n\n sobelX = np.uint8(np.absolute(sobelX))\n\n sobelY = np.uint8(np.absolute(sobelY))\n\n # sobelX1 = cv2.cvtColor(sobelX, cv2.COLOR_BGR2GRAY)\n # sobelY1 = cv2.cvtColor(sobelY, cv2.COLOR_BGR2GRAY)\n\n ret3, sobelX1 = cv2.threshold(sobelX, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) # for vertical projection\n ret3, sobelY1 = cv2.threshold(sobelY, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) # for vertical hough line ditection\n\n imgForVerticalLine_Inver_Canny = cv2.Canny(sobelY1, 10, 50, apertureSize=3)\n\n kernel = np.ones((1, 5))\n\n imgDialsobel = cv2.erode(imgForVerticalLine_Inver_Canny, kernel, iterations=2) # APPLY DILATION\n kernel = np.ones((2, 5))\n imgDialsobel_D = cv2.dilate(imgDialsobel, kernel, iterations=1) # APPLY DILATION\n\n\n lines_H = cv2.HoughLinesP(imgDialsobel_D, 1, np.pi / 2, 10, minLineLength=int(img.shape[0] * .04), maxLineGap=int(img.shape[0] * .009))\n\n kernel = np.ones((int(img.shape[0] / 550), int(img.shape[1] / 650)))\n sobelX1 = cv2.dilate(sobelX1, kernel, iterations=2) # APPLY DILATION\n\n if (lines_H is not None):\n for line in lines_H:\n x1, y1, x2, y2 = line[0]\n cv2.line(sobelX1, (x1, y1), (x2, y2), (0, 0, 0), int(heightImg / 350))\n\n sobelX1 = cv2.resize(sobelX1, (widthImg, heightImg)) # RESIZE IMAGE\n _, sobelX1 = cv2.threshold(sobelX1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n\n sobelX1[sobelX1 == 0] = 1\n sobelX1[sobelX1 == 255] = 0\n blankImageforHorizontal = np.zeros((heightImg, widthImg, 3), np.uint8)\n horizontal_projection = np.sum(sobelX1, axis=1) * widthImg / heightImg\n print(horizontal_projection)\n for row in range(heightImg):\n cv2.line(blankImageforHorizontal, (0, row), (int(horizontal_projection[row]), row), (255, 255, 255), 1)\n\n cv2.rectangle(img=blankImageforHorizontal, pt1=(0, heightImg), pt2=(int((widthImg * widthImg * 0.98) / heightImg), 0), color=(0, 0, 0), thickness=-1)\n blankImageforHorizontal = cv2.cvtColor(blankImageforHorizontal, cv2.COLOR_BGR2GRAY)\n kernel = np.array([[1, 1], [1, 1], [1, 1]], dtype=np.uint8)\n blankImageforHorizontal = cv2.dilate(blankImageforHorizontal, kernel, iterations=2) # APPLY DILATION\n cv2.line(blankImageforHorizontal, (0, 0), (widthImg, 0), (0, 0, 0), 2)\n cv2.line(blankImageforHorizontal, (0, heightImg), (widthImg, heightImg), (0, 0, 0), 2)\n ret3, imgThresholdBWInvert_h = cv2.threshold(blankImageforHorizontal, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n\n\n\n contours_h = cv2.findContours(blankImageforHorizontal, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)\n\n contours_h = contours_h[0] if len(contours_h) == 2 else contours_h[1]\n\n rects = []\n\n # Just initialize bounding rects and set all bools to false\n for cnt in contours_h:\n rects.append(cv2.boundingRect(cnt))\n rects.sort(key=getXFromRecty)\n clusters_h = {}\n i = 0\n\n upperConer = {}\n bottomConer = {}\n imageArray = []\n a, b = 0, 0\n x1, y1 = 0, 0\n x, y, w, h = 0, 0, widthImg, heightImg\n\n\n for cntr in rects:\n x2, y2, w2, h2 = cntr[0], cntr[1], cntr[2], cntr[3]\n clusters_h[i] = [x2, y2, w2, h2]\n if (abs(y2) < 10):\n upperConer = [x2, y2, w2, h2]\n a = 1\n else:\n if (y2 + h2 > heightImg - 20):\n bottomConer = [x2, y2, w2, h2]\n b = 1\n 
else:\n\n if (h2 + y2 > heightImg * 0.25):\n x, y, w, h = x2, y2, w2, h2\n cropedImg = img[int(y1 * img.shape[0] / heightImg):int((y2 + (h2 / 2)) * img.shape[0] / heightImg),\n 0:int(widthImg * img.shape[1] / widthImg)]\n # cropedImg2 = cv2.resize(cropedImg, (int(cropedImg.shape[1]*widthImg/img.shape[1]),int(cropedImg.shape[0]*heightImg/img.shape[0]) ))\n\n x1 = x2\n y1 = int(y2 + h2 / 2)\n\n imageArray.append(cropedImg)\n\n # cv2.waitKey(10)\n i = i + 1\n if (heightImg - y1 > heightImg * 0.1):\n cropedImg = imgCopy[int(y1 * img.shape[0] / heightImg):img.shape[0], 0:img.shape[1]]\n\n imageArray.append(cropedImg)\n if (len(imageArray) == 0):\n imageArray.append(img)\n\n return imageArray\n\n\ndef blur_Sobel(img): # DONT USE ADEPTIVE THRESH\n imgCopy1 = img.copy()\n imgCopy2 = img.copy()\n kernel = np.ones((1, 5))\n\n sobelX = cv2.Sobel(img, cv2.CV_64F, 1, 0)\n sobelY = cv2.Sobel(img, cv2.CV_64F, 0, 1)\n\n sobelX = np.uint8(np.absolute(sobelX))\n sobelY = np.uint8(np.absolute(sobelY))\n\n sobelCombined = cv2.bitwise_or(sobelX, sobelY)\n # sobelCombined = cv2.cvtColor(sobelCombined, cv2.COLOR_BGR2GRAY)\n\n ret3, imgThresholdBW = cv2.threshold(sobelCombined, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n bw = imgThresholdBW.copy()\n\n return bw, sobelCombined\ndef pixeldensity(img,imgOrginal,thresh):\n cv2.waitKey(10)\n\n col_details=drowLines(img,imgOrginal,thresh)\n\n imgThresholdBW = blur_Sobel(img)[0]\n ret3, imgThresholdBWInvert = cv2.threshold(imgThresholdBW, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n height, width = imgThresholdBW.shape\n\n # kernel = np.ones((5, 1))\n # image_moprphology_verti = cv2.morphologyEx(imgThresholdBWInvert, cv2.MORPH_RECT, kernel)\n kernel = np.ones((2, 5))\n image_moprphology_hori = cv2.morphologyEx(imgThresholdBWInvert, cv2.MORPH_ERODE, kernel)\n\n # cropdResize_imgThresholdBWInvert_h = cv2.resize(image_moprphology_hori, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"cropdResize_imgThresholdBWInvert_h_h\", cropdResize_imgThresholdBWInvert_h)\n\n # cv2.imshow(\"image_moprphology_verti\", image_moprphology_verti)\n # cv2.imshow(\"image_moprphology_hori\", image_moprphology_hori)\n\n # image_moprphology_verti[image_moprphology_verti == 0] = 0\n # image_moprphology_verti[image_moprphology_verti == 255] = 1\n # # cv2.imshow(\"imgThresholdBW\", blur_Sobel(img)[0])\n # vertical_projection = np.sum(image_moprphology_verti, axis=0) * width / heightImg\n\n image_moprphology_hori[image_moprphology_hori == 0] = 0\n image_moprphology_hori[image_moprphology_hori == 255] = 1\n # cv2.imshow(\"imgThresholdBW\", blur_Sobel(img)[0])\n horizontal_projection = np.sum(image_moprphology_hori, axis=1) * width / height\n\n\n # print('width : ', width)\n # print('height : ', height)\n blankImageforHorizontal = np.zeros((height, width, 3), np.uint8)\n # blankImageforVerticle = np.zeros((height, width, 3), np.uint8)\n\n # for col in range(0, width):\n # cv2.line(blankImageforVerticle, (col, height), (col, height - int(vertical_projection[col])),\n # (255, 255, 255), 1)\n for row in range(height):\n cv2.line(blankImageforHorizontal, (0, row),\n (int(horizontal_projection[row]), row),\n (255, 255, 255), 1)\n # cropdResize_imgThresholdBWInvert_h = cv2.resize(blankImageforHorizontal, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"cropdResize_imgThresholdBWInvert_h\", cropdResize_imgThresholdBWInvert_h)\n\n # mean_vertical = int(np.mean(vertical_projection))\n # max_vertical = int(np.max(vertical_projection))\n max_horizontal = int(np.max(horizontal_projection))\n 
mean_horizontal = int(np.mean(horizontal_projection))\n # # print(\"M=\",mean)\n # imgHistrogram = blankImage.copy() # COPY IMAGE FOR DISPLAY PURPOSES\n # cv2.imshow(\"imgHistrogram\", imgHistrogram)\n # cv2.rectangle(img=blankImageforVerticle, pt1=(0, height), pt2=(width, height - mean + 100), color=(0, 0, 0), thickness=-1 )\n cv2.rectangle(img=blankImageforHorizontal, pt1=(0, height),\n pt2=(int((max_horizontal * 3 + mean_horizontal) / 4), 0), color=(0, 0, 0), thickness=-1)\n # cv2.rectangle(img=blankImageforVerticle, pt1=(0, height),\n # pt2=(width, int((mean_vertical + max_vertical * 2) / 3) - 50), color=(0, 0, 0), thickness=-1)\n\n # cv2.imshow(\"blankImageforVerticle\", blankImageforVerticle)\n # cv2.imshow(\"blankImageforHorizontal\", blankImageforHorizontal)\n # # cv2.rectangle(blankImage, , ,, -1)\n\n # blankImageforVerticle = cv2.cvtColor(blankImageforVerticle, cv2.COLOR_BGR2GRAY)\n # ret3, imgThresholdBWInvert_v = cv2.threshold(blankImageforVerticle, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n # kernel = np.ones((5, 5))\n # imgThresholdBWInvert_v = cv2.morphologyEx(imgThresholdBWInvert_v, cv2.MORPH_ERODE, kernel)\n # imgThreshold = cv2.Canny(imgThresholdBWInvert_v, thres[0], thres[1]) # APPLY CANNY BLUR\n # cv2.imshow(\"imgThreshold\", imgThresholdBWInvert_v)\n # ret3, imgThresholdBWInvert_v = cv2.threshold(imgThresholdBWInvert_v, 0, 255,\n # cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n # contours_v = cv2.findContours(imgThresholdBWInvert_v, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n # contours_v = contours_v[0] if len(contours_v) == 2 else contours_v[1]\n # rects_v=[]\n # sum_col_width=0\n # for cntr in contours_v:\n # sum_col_width=sum_col_width+cv2.boundingRect(cntr)[2]\n # rects_v.append(cv2.boundingRect(cntr))\n #\n # sum_col_width=sum_col_width*imgOrginal.shape[1]/ widthImg\n # rects_v.sort(key=getXFromRectx)\n # leftConer=0,0,0,0\n # rightConer=[int(width*(imgOrginal.shape[1]/widthImg)),0,0,0]\n # if len(rects_v)>1:\n # if (rects_v[0][0] < 20):\n # leftConer = [int(rects_v[0][0]*imgOrginal.shape[1]/widthImg),0,int(rects_v[0][2]*imgOrginal.shape[1]/widthImg),0]\n # if(width-rects_v[len(rects_v)-1][0]-rects_v[len(rects_v)-1][2]<20 ):\n # #rightConer =rects_v[len(rects_v)-1] **imgOrginal.imgThresholdBW.shape[1]/widthImg\n # rightConer = [int(rects_v[len(rects_v)-1][0] * imgOrginal.shape[1] / widthImg), 0,\n # int(rects_v[len(rects_v)-1][2] * imgOrginal.shape[1] / widthImg), 0]\n leftConer=col_details[0][0]\n rightConer=col_details[0][1]\n upperConer=col_details[1][0]\n bottomConer=col_details[1][1]\n sum_col_width=0\n num_of_effective_col=len(col_details[3])\n imgCopy=img.copy()\n for col in col_details[3]: # column separation lines\n x, y, w, h = col\n sum_col_width = sum_col_width + w\n cv2.line(imgCopy, (int(x+(w/2)), 0),(int(x+(w/2)), imgCopy.shape[0]),(255, 255, 255), 5)\n\n\n\n if(leftConer[2]>0):\n num_of_effective_col=num_of_effective_col-1\n if(rightConer[2]>0):\n num_of_effective_col=num_of_effective_col-1\n\n median_col_width=(sum_col_width-leftConer[2]-rightConer[2])/(1 if num_of_effective_col==0 else num_of_effective_col)\n # print(\"median\",median_col_width)\n # print(\"l:\", leftConer)\n # print(\"R:\", rightConer)\n # print(\"clusters:\", col_details[3])\n\n # blankImageforHorizontal = cv2.cvtColor(blankImageforHorizontal, cv2.COLOR_BGR2GRAY)\n # ret3, imgThresholdBWInvert_h = cv2.threshold(blankImageforHorizontal, 0, 255,\n # cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n # kernel = np.ones((5, 5))\n # imgThresholdBWInvert_h = 
cv2.morphologyEx(imgThresholdBWInvert_h, cv2.MORPH_ERODE, kernel)\n # imgThreshold_h = cv2.Canny(imgThresholdBWInvert_h, thres[0], thres[1]) # APPLY CANNY BLUR\n # # cv2.imshow(\"imgThreshold_h\", imgThresholdBWInvert_h)\n # ret3, imgThresholdBWInvert_h = cv2.threshold(imgThresholdBWInvert_h, 0, 255,\n # cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n #\n # contours_h = cv2.findContours(imgThresholdBWInvert_h, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n # contours_h = contours_h[0] if len(contours_h) == 2 else contours_h[1]\n # rects_h=[]\n # for cntr in contours_h:\n # rects_h.append(cv2.boundingRect(cntr))\n # rects_h.sort(key=getXFromRecty)\n # upperConer = 0, 0, 0, 0\n # bottomConer = [0,int(height * (imgOrginal.shape[0] / height)), 0, 0]\n # if len(rects_h) > 1:\n # if (rects_h[0][1] < 20):\n # upperConer = [0,int(rects_h[0][1] * imgOrginal.shape[0] / height),\n # 0,int(rects_h[0][3] * imgOrginal.shape[0] / height),]\n # if (height - rects_h[len(rects_h) - 1][1] - rects_h[len(rects_h) - 1][3] < 20):\n # # rightConer =rects_v[len(rects_v)-1] **imgOrginal.imgThresholdBW.shape[1]/widthImg\n # bottomConer = [0,int(rects_h[len(rects_h) - 1][1] * imgOrginal.shape[0] / height),\n # 0,int(rects_h[len(rects_h) - 1][3] * imgOrginal.shape[0] / height) ]\n\n\n # print(\"U:\", upperConer)\n # print(\"B:\", bottomConer)\n rmv = remove_images(img,imgOrginal,thresh)\n imgWithoutPic=rmv[1].copy()\n cropedImg = imgWithoutPic[upperConer[1]+upperConer[3]-5:bottomConer[1], leftConer[0]+leftConer[2]:rightConer[0]]\n\n\n kernel = np.ones((2, 2))\n gradiant = cv2.morphologyEx(cropedImg, cv2.MORPH_GRADIENT, kernel)\n\n\n # imgAdaptiveThre = cv2.adaptiveThreshold(gradiant, 255, 1, 1, 7, 2)\n ret3, otsu = cv2.threshold(gradiant, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n ret3, otsu2 = cv2.threshold(cropedImg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # karnalSize=1\n # if(int((otsu.shape[1]^4)/(widthImg^4)/2)>1):\n # karnalSize=int((otsu.shape[1]^3)/(widthImg^3)/2)\n # kernel = np.ones((karnalSize,karnalSize))\n\n # otsu_After_D = cv2.dilate(otsu, kernel, iterations=3) # APPLY DILATION\n\n cropdResize = cv2.resize(otsu, (widthImg, heightImg)) # RESIZE IMAGE\n cv2.imshow(\"22\", cropdResize)\n\n contours = cv2.findContours(otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n\n rects = []\n blankImage = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n total_area=0\n for cntr in contours:\n\n x, y, w, h = cv2.boundingRect(cntr)\n total_area=total_area+ cv2.contourArea(cntr)\n #img2 = cv2.rectangle(img2, (x, y), (x + w, y + h), (255, 255, 255), -1)\n print(\"tt:\",total_area/(otsu.shape[0]*otsu.shape[1]))\n\n contours = cv2.findContours(otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n rects=[]\n for cntr in contours:\n rects.append(cv2.boundingRect(cntr))\n rects.sort(key=getXFromRectx)\n\n\n\n sum_of_vertical_gap=0\n sum_of_effective_conters=0\n median_col_width= (leftConer[2]+rightConer[2])/2 if median_col_width<1 else median_col_width\n for i in range(0, len(rects)):\n x1,y1,w1,h1=rects[i]\n for j in range(i+1, len(rects)):\n x2, y2, w2, h2 = rects[j]\n if (((y1 <= y2) and (y2 <= y1 + h1)) or ((y1 <= y2 + h2) and (y2 + h2 <= y1 + h1)) or (y2 <= y1 and (y1 + h1 <= y2 + h2))) and (abs(h1-h1)0) and ((x2-(x1+w1))8) & (w>img.shape[1]/50)):\n\n\n blankImage_x = cv2.line(blankImage_x, (x, y), (x + w, y), (255, 255, 255), 5)\n blankImage_x = cv2.line(blankImage_x, (x, y + h), (x + w, y + h), 
(255, 255, 255), 5)\n # imgCopy3 = cv2.line(imgCopy3, (x, y), (x + w, y), (255, 255, 255), 5)\n # imgCopy3 = cv2.line(imgCopy3, (x, y + h), (x + w, y + h), (255, 255, 255), 5)\n\n elif (((h/w)>8) & (h>img.shape[0]/50)):\n\n blankImage_y = cv2.line(blankImage_y, (x, y), (x , y+h), (255, 255, 255), 5)\n blankImage_y = cv2.line(blankImage_y, (x+w, y ), (x + w, y + h), (255, 255, 255), 5)\n forgrount_line_y = cv2.line(forgrount_line_y, (int(x+w/2), y ), (int(x+w/2), y + h), (255, 255, 255), int(forgrount_line_y.shape[0] / 500))\n # blankImageResizeB = cv2.resize(blankImage_y, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"blankImage_y\", blankImageResizeB)\n img_for_line_length=forgrount_line_y.copy()\n kernel = np.ones((2, 20))\n img_for_line_lengthDial = cv2.dilate(img_for_line_length, kernel, iterations=1) # APPLY DILATION\n img_for_line_lengthErod = cv2.erode(img_for_line_lengthDial, kernel, iterations=1) # APPLY DILATION\n # img_for_line_lengthErodgray = cv2.cvtColor(img_for_line_lengthErod, cv2.COLOR_GRAY2RGB)\n ret3, img_for_line_lengthErodgray = cv2.threshold(cv2.cvtColor(img_for_line_lengthErod,cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n\n contours_linesForSumOfLen = cv2.findContours(img_for_line_lengthErodgray, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours_linesForSumOfLen = contours_linesForSumOfLen[0] if len(contours_linesForSumOfLen) == 2 else contours_linesForSumOfLen[1]\n\n for cntr in contours_linesForSumOfLen:\n x, y, w, h = cv2.boundingRect(cntr)\n length_of_real_lines = length_of_real_lines + h\n\n # forgrount_line_y=blankImage_y.copy()\n # blankImageResizeB = cv2.resize(forgrount_line_y, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"blankImage_y\", blankImageResizeB)\n lines_V=col_details[5]\n if (lines_V is not None):\n for line in lines_V:\n x1, y1, x2, y2 = line[0]\n cv2.line(forgrount_line_y, (x1, y1), (x2, y2), (255, 255, 255), int(forgrount_line_y.shape[0] / 500))#acurateed line set\n all_lines_y=forgrount_line_y.copy()\n # all_lines_y_b = cv2.resize(forgrount_line_y, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"all_lines_y_b\", all_lines_y_b)\n kernel = np.ones((2, 20))\n all_lines_y_imgDial = cv2.dilate(all_lines_y, kernel, iterations=1) # APPLY DILATION\n all_lines_y_imgErod = cv2.erode(all_lines_y_imgDial, kernel, iterations=1) # APPLY DILATION\n # all_lines_y_imgDialR = cv2.resize(all_lines_y_imgErod, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"all_lines_y_imgDialR\", all_lines_y_imgDialR)\n blankImage_x = cv2.rectangle(blankImage_x, (0, 0), (leftConer[0]+leftConer[2], blankImage_x.shape[0]), (0, 0, 0), -1)\n blankImage_x = cv2.rectangle(blankImage_x, (rightConer[0], 0), (rightConer[0]+rightConer[2], blankImage_x.shape[0]), (0, 0, 0), -1)\n blankImage_x = cv2.rectangle(blankImage_x, (0, 0), (blankImage_x.shape[1], upperConer[1]+upperConer[3]), (0, 0, 0), -1)\n blankImage_x = cv2.rectangle(blankImage_x, (0,bottomConer[1]), (blankImage_x.shape[1], bottomConer[1]+bottomConer[3]), (0, 0, 0), -1)\n\n blankImage_y = cv2.rectangle(blankImage_y, (0, 0), (leftConer[0]+leftConer[2], blankImage_y.shape[0]), (0, 0, 0), -1)\n blankImage_y = cv2.rectangle(blankImage_y, (rightConer[0], 0), (rightConer[0]+rightConer[2], blankImage_y.shape[0]), (0, 0, 0), -1)\n blankImage_y = cv2.rectangle(blankImage_y, (0, 0), (blankImage_y.shape[1], upperConer[1]+upperConer[3]), (0, 0, 0), -1)\n blankImage_y = cv2.rectangle(blankImage_y, (0,bottomConer[1]), (blankImage_y.shape[1], bottomConer[1]+bottomConer[3]), (0, 0, 0), 
-1)\n\n blankImageResizeA = cv2.resize(blankImage_y, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"A\", blankImageResizeA)\n\n kernel = np.ones((2, 50))\n imgDialx = cv2.dilate(blankImage_x, kernel, iterations=2) # APPLY DILATION\n imgDialx = cv2.erode(imgDialx, kernel, iterations=3) # APPLY DILATION\n kernel = np.ones((10, 10))\n imgDialx = cv2.dilate(imgDialx, kernel, iterations=1) # APPLY DILATION\n imgDialx = cv2.erode(imgDialx, kernel, iterations=1) # APPLY DILATION\n ret3, otsu = cv2.threshold(cv2.cvtColor(imgDialx,cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n contours = cv2.findContours(otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n otsu=cv2.cvtColor(otsu,cv2.COLOR_GRAY2RGB)\n\n for cntr in contours:\n\n x, y, w, h = cv2.boundingRect(cntr)\n if (True):\n\n x_line.append([x, y, w, h,False])\n imgCopy=cv2.line(imgCopy, (x, int(y+(h/2))), (x+w, int(y+(h/2))), (0, 255 ,0), 30)#((x, ), (w, ))#horizontal real all lines\n\n kernel = np.ones((50, 2))\n imgDialy = cv2.dilate(blankImage_y, kernel, iterations=2) # APPLY DILATION\n imgDialy = cv2.erode(imgDialy, kernel, iterations=2) # APPLY DILATION\n kernel = np.ones((20, 20))\n imgDialy = cv2.dilate(imgDialy, kernel, iterations=1) # APPLY DILATION\n imgDialy = cv2.erode(imgDialy, kernel, iterations=1) # APPLY DILATION\n ret3, otsu_y = cv2.threshold(cv2.cvtColor(imgDialy, cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n contours_y = cv2.findContours(otsu_y, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n contours_y = contours_y[0] if len(contours_y) == 2 else contours_y[1]\n\n\n for cntr in contours_y:\n\n x, y, w, h = cv2.boundingRect(cntr)\n if (True):\n\n # y_line.append([x, y, w, h,False])\n # blankImage_y = cv2.line(imgCopy3, (int(x+(w/2)), y), (int(x+(w/2)), y+h), (0, 255 ,0),5)#((x, ), (w, ))\n imgCopy = cv2.line(imgCopy, (int(x+(w/2)), y), (int(x+(w/2)), y+h), (0, 255 ,0),5)#vertical real all lines\n blankImageResizeyyy = cv2.resize(blankImage_y, (widthImg, heightImg)) # RESIZE IMAGE\n # cv2.imshow(\"blankImageResizeyyy\", blankImageResizeyyy)\n # y_line.sort(key=getXFromRectx)\n\n\n\n #################################################################################\n x_line.sort(key=getXFromRecty)\n i = 0\n j = 0\n maxWidth = 0\n maxWidthIndex=0\n for line in x_line:\n x1, y1, w1, h1, s1 = line\n\n\n if imgCopy.shape[0]/4 >y1+h1/2:\n if(maxWidthimgCopy.shape[1]*3/5 ) and (abs( upperBoder[0][1] - maxWidthIndex)>img.shape[0]/9) else 100\n if abs(upperConer[1] + upperConer[3] - y1 - h1 / 2) < dy :\n continue\n x_lineCopy.append([x1, y1, w1, h1, s1])\n\n x_line=x_lineCopy.copy()\n isUpChage=False\n changingFactor=0\n if(maxWidth>imgCopy.shape[1]*3/5 ):\n\n x_line=[]\n k=0\n changingFactor=abs( upperBoder[0][1] - maxWidthIndex)\n upperBoder[0][1] = maxWidthIndex-100\n upperBoder[1][1] = maxWidthIndex-100\n isUpChage=True\n for line in x_lineCopy:\n x1, y1, w1, h1, s1 = line\n\n if(k img.shape[0] / 9 else 100\n\n if(( length_of_real_lines / col_details[6]) < img.shape[0] * 3 / 4):\n dy = 150 if changingFactor > img.shape[0] / 9 else 200\n\n\n if(int(y1+h1/2)-maxWidthIndeximg.shape[0]/9:\n dy = 250\n\n for cntr in contours:\n\n x, y, w, h = cntr\n\n if (((h / w) > 8) & (h > img.shape[0] / 50)):\n lines_2.append([x, y, x + w, y + h])\n # imgtest = cv2.line(imgtest, (x, y), (x, y+h), (255, 0, 255), 10)\n\n if((y<=(upperBoder[0][1]+(dy if isUpChage else 120))) and ((upperBoder[0][1]+(dy if isUpChage else 120))<(y+h)) 
)|((y>=(upperBoder[0][1]+(dy if isUpChage else 120))) and ((upperBoder[0][1]+(dy if isUpChage else 120))>(y+h)) ):\n y_lines_cross_ub.append(cntr)\n lines_V = col_details[5]\n lines_=[]\n if (length_of_real_lines/ col_details[6])= (upperBoder[0][1] + (250 if isUpChage else 150))) and (((upperBoder[0][1]+(250 if isUpChage else 150)) > y2))):\n y_lines_cross_ub.append([x1,y1,x2-x1,y2-y1])\n\n y_lines_cross_ub_collems=[]\n for cntr in y_lines_cross_ub:\n x, y, w, h = cntr\n minGap=imgtest.shape[1]\n near_col=cntr\n for col in collems:\n x2, y2, w2, h2 = col\n if abs(x2+(w2/2)-(x+(w/2)))imgCopy.shape[1]/100):\n for col in collems:\n x2, y2, w2, h2 = col\n\n if (int(x2)0 and x2-x1x1+w1) :#or( x1-x2>0 and x1-x2(x_2-x_1)):\n extended_lines[j][4] = False\n else:\n extended_lines[i][4] = False\n if((x1x_1)&(x_2>x2)):\n if ((x2 - x1) > (x_2 - x_1)):\n extended_lines[j][4] = False\n else:\n extended_lines[i][4] = False\n z=0\n # for i in range(0, len(y_lines_cross_ub_collems) - 1):\n # x1, y1, w1, h1 = y_lines_cross_ub_collems[i]\n # x2, y2, w2, h2 = y_lines_cross_ub_collems[i + 1]\n # # imgCopy = cv2.line(imgCopy, (x1, y1), (x1, 500), (255, 0, 255), 10) # ((x, ), (w, ))\n # # imgCopy = cv2.line(imgCopy, (int(x1 + w1 / 2), 500), (int(x2 + w2 / 2) - int(x1 + w1 / 2), 500), (255, 0, 255),\n # # 10) # ((x, ), (w, ))\n #\n # extended_lines.append([int(x1 + w1 / 2), 10, int(x2 + w2 / 2) - int(x1 + w1 / 2), 5, False])\n x=100\n for i in range(0, len(y_lines_cross_ub_collems)-1):\n x1, y1, w1, h1 = y_lines_cross_ub_collems[i]\n x2, y2, w2, h2 = y_lines_cross_ub_collems[i+1]\n # imgCopy = cv2.line(imgCopy, (x1, y1), (x1,250), (x, x, 0), 10) # ((x, ), (w, ))\n # imgCopy = cv2.line(imgCopy, (int(x1+w1/2)+50, 500), (int(x2+w2/2)-50,500), (x, 0, x), 10) # ((x, ), (w, ))\n x=x+50\n #\n extended_lines.append([int(x1+w1/2), upperBoder[0][1]+ (80 if isUpChage else 20),(int(x2+w2/2)), upperBoder[0][1]+ (80 if isUpChage else 20), True])\n extended_lines.sort(key=getXFromRecty)\n for i in range( len(extended_lines)-1,-1,-1):\n x1, y1, x2, y2, s = extended_lines[i]\n\n if (s):\n l_bottom = (x1, imgCopy.shape[0])\n r_bottom = (x2, imgCopy.shape[0])\n for j in range(i+1, len(extended_lines)):\n x_1, y_1, x_2, y_2, s_s = extended_lines[j]\n if (s_s):\n if(x1==x_1)|((x_1 self.max_upload_size:\n raise forms.ValidationError(\n _('Please upload file up to {}. Your file size is {}.').format(\n filesizeformat(self.max_upload_size),\n filesizeformat(file._size)\n )\n )\n else:\n raise forms.ValidationError(_('File type not supported.'))\n\n return data\n\n\nclass ContentTypeRestrictedImageField(ContentTypeRestrictedFileField, ImageField):\n # just mixing these two together\n pass\n\n\nclass MonthsField(models.PositiveSmallIntegerField):\n def __init__(self, *args, **kwargs):\n kwargs.update(dict(\n default=1,\n validators=[MaxValueValidator(12), MinValueValidator(1)],\n choices=zip(range(1, 13), range(1, 13)),\n ))\n super().__init__(*args, **kwargs)\n","sub_path":"src/utils/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"412484770","text":"\"\"\"\nUnit testing module for the topshape module.\n\"\"\"\nimport unittest\nimport time\ntry:\n from unittest import mock, TestCase\nexcept ImportError: # python 2\n import mock\n from unittest import TestCase\nfrom urwid import SimpleListWalker, AttrMap, Columns, ExitMainLoop, Filler, \\\n Frame, Text, Edit\nfrom topshape import BodyBox, TopShape, TopShapeError, CacheThread, Header\n\n\nclass TestBodyBox(TestCase):\n \"\"\"Unit tests for BodyBox class.\"\"\"\n def setUp(self):\n self.body = BodyBox([{'label': 'column1'}], None)\n\n def test_init(self):\n \"\"\"Test BodyBox.__init__().\"\"\"\n # columns should not be empty\n with self.assertRaises(TopShapeError):\n BodyBox([], None)\n\n self.assertIsInstance(self.body, BodyBox)\n self.assertEqual(10, self.body.default_column_size)\n self.assertEqual('center', self.body.default_column_alignment)\n self.assertEqual('desc', self.body.default_column_order)\n self.assertEqual([{'label': 'column1',\n 'size': 10,\n 'alignment': 'center',\n 'order': 'desc'}],\n self.body.columns)\n self.assertIsNone(self.body.func)\n\n def test__sort_key(self):\n \"\"\"Test BodyBox._sort_key().\"\"\"\n self.assertEqual('value1', self.body._sort_key(('value1',)))\n self.assertEqual(1, self.body._sort_key(('1',)))\n self.assertEqual(1.0, self.body._sort_key(('1.0',)))\n\n def test_sorting_column(self):\n \"\"\"Test BodyBox.sorting_column.\"\"\"\n self.assertEqual('column1', self.body.sorting_column)\n with self.assertRaises(TopShapeError):\n self.body.sorting_column = 'column2'\n\n self.body.columns = ({'label': 'column1'}, {'label': 'column2'})\n self.body.sorting_column = 'column2'\n self.assertEqual('column2', self.body.sorting_column)\n\n def test_column_names(self):\n \"\"\"Test BodyBox.column_names.\"\"\"\n self.assertEqual(['column1'], self.body.column_names)\n\n def test_columns(self):\n \"\"\"Test BodyBox.columns.\"\"\"\n expected = [{'label': 'column1', 'size': 10, 'alignment': 'center',\n 'order': 'desc'}]\n self.assertEqual(expected, self.body.columns)\n\n with self.assertRaises(TopShapeError):\n self.body.columns = [{'size': 10}]\n\n def test_update(self):\n \"\"\"Test BodyBox.update()\"\"\"\n def test_func():\n yield ('foo',)\n\n self.body.func = test_func\n\n self.assertEqual(SimpleListWalker([]), self.body.body)\n\n self.body.update()\n self.assertIsInstance(self.body.body.contents[0], AttrMap)\n self.assertIsInstance(self.body.body.contents[0].original_widget,\n Columns)\n self.assertIsInstance(self.body.body.contents[1], Columns)\n self.assertEqual('column1',\n self.body.body.contents[0].original_widget.contents[0]\n [0].original_widget.text)\n\n self.assertEqual('foo', self.body.body.contents[1][0].text)\n\n def test_move_sort(self):\n \"\"\"Test BodyBox.move_sort_right() and BodyBox.move_sort_left()\"\"\"\n def func():\n yield 'foo'\n self.body.func = func\n\n self.assertEqual('column1', self.body.sorting_column)\n\n self.body.columns = ({'label': 'column1'}, {'label': 'column2'})\n self.body.move_sort_right()\n self.assertEqual('column2', self.body.sorting_column)\n\n self.body.move_sort_right()\n self.assertEqual('column2', self.body.sorting_column)\n\n self.body.move_sort_left()\n self.assertEqual('column1', self.body.sorting_column)\n\n self.body.move_sort_left()\n self.assertEqual('column1', self.body.sorting_column)\n\n def test__filter_matches(self):\n \"\"\"Test BodyBox._filter_matches().\"\"\"\n self.body.filter_regex = '^regex$'\n\n row = ()\n self.assertFalse(self.body._filter_matches(row))\n\n row = 
((''),)\n self.assertFalse(self.body._filter_matches(row))\n\n row = (('regex'),)\n self.assertTrue(self.body._filter_matches(row))\n\n\nclass TestTopShape(TestCase):\n \"\"\"Unit tests for TopShape class.\"\"\"\n def setUp(self):\n self.body_func = mock.Mock()\n self.header_func = mock.Mock()\n self.app = TopShape.create_app(({'label': 'column1'},),\n self.body_func,\n self.header_func)\n\n def test_handle_help(self):\n \"\"\"Test TopShape.handle('h').\"\"\"\n self.app.enter_help = mock.Mock()\n\n self.app._handle_key('h')\n self.app.enter_help.assert_called_with()\n\n def test_handle_help_quit_in_help(self):\n \"\"\"\n Test TopShape.handle('q') and TopShape.handle('esc') while\n help output is displayed.\n \"\"\"\n self.app.on_help = mock.Mock()\n self.app.exit_help = mock.Mock()\n\n self.app.on_help.return_value = True\n self.app._handle_key('q')\n self.app.exit_help.assert_called_with()\n\n self.app.on_help.reset_mock()\n self.app.exit_help.reset_mock()\n\n self.app.on_help.return_value = True\n self.app._handle_key('esc')\n self.app.exit_help.assert_called_with()\n\n def test_handle_help_quit_not_in_help(self):\n \"\"\"\n Test TopShape.handle('q') and TopShape.handle('esc') while\n help output is not displayed.\n \"\"\"\n self.app.on_help = mock.Mock()\n self.app.exit_help = mock.Mock()\n self.app.key_map = {}\n\n self.app.on_help.return_value = False\n self.app._handle_key('q')\n self.app.exit_help.assert_not_called()\n\n self.app.on_help.reset_mock()\n self.app.exit_help.reset_mock()\n\n self.app.on_help.return_value = False\n self.app._handle_key('esc')\n self.app.exit_help.assert_not_called()\n\n def test__handle_key_with_input(self):\n \"\"\"\n Test TopShape.handle('f') where pressing 'f' will cause an input\n request from the user.\n \"\"\"\n foo = mock.Mock()\n self.app.key_map = {'f': (foo, 'foo')}\n\n self.assertIsNone(self.app._handle_key('f'))\n foo.assert_not_called()\n\n def test__handle_key_custom_key(self):\n \"\"\"Test TopShape._handle_key('f') without an input request\"\"\"\n foo = mock.Mock()\n self.app.key_map = {'f': foo}\n\n self.assertIsNone(self.app._handle_key('f'))\n foo.assert_called_with(self.app)\n\n def test_create_app_minimal(self):\n \"\"\"Minimal TopShape.create_app()\"\"\"\n self.assertIsInstance(self.app, TopShape)\n self.assertEqual('', self.app.help_text)\n self.assertEqual(2, self.app.refresh_rate)\n self.assertIsInstance(self.app.key_map, dict)\n self.assertEqual(['q'], list(self.app.key_map.keys()))\n\n def test_create_app_with_footer(self):\n \"\"\"Test TopShape.create_app() with footer function\"\"\"\n body_func = mock.Mock()\n header_func = mock.Mock()\n footer_func = mock.Mock()\n app = TopShape.create_app(({'label': 'column1'},),\n body_func,\n header_func,\n footer_func)\n self.assertIsInstance(app, TopShape)\n\n def test_exit(self):\n \"\"\"Test TopShape.exit()\"\"\"\n with self.assertRaises(ExitMainLoop):\n TopShape.exit()\n\n def test_update(self):\n \"\"\"Test TopShape.update()\"\"\"\n self.app.frame = mock.Mock()\n self.app.frame.header = mock.Mock()\n self.app.frame.header.contents = ((mock.Mock(),),)\n self.app.frame.body = mock.Mock()\n self.app.frame.footer = mock.Mock()\n self.app.set_alarm_in = mock.Mock()\n\n self.assertIsNone(self.app.update())\n\n def test_enter_help(self):\n \"\"\"Test TopShape.enter_help()\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = False\n self.app.draw_screen = mock.Mock()\n\n self.assertIsNone(self.app.enter_help())\n self.app.on_help.assert_called_with()\n 
self.assertIsInstance(self.app.widget, Filler)\n\n def test_enter_help_in_help_already(self):\n \"\"\"Test TopShape.enter_help(), already displaying help\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = True\n self.app.draw_screen = mock.Mock()\n\n self.assertIsNone(self.app.enter_help())\n self.app.on_help.assert_called_with()\n self.assertIsInstance(self.app.widget, Frame)\n\n def test_exit_help(self):\n \"\"\"Test TopShape.exit_help()\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = True\n self.app.draw_screen = mock.Mock()\n\n self.assertIsNone(self.app.exit_help())\n self.app.on_help.assert_called_with()\n self.assertIsNone(self.app.widget)\n\n def test_exit_help_not_in_help_already(self):\n \"\"\"Test TopShape.exit_help(), not displaying help already\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = False\n self.app.draw_screen = mock.Mock()\n\n self.assertIsNone(self.app.exit_help())\n self.app.on_help.assert_called_with()\n self.assertIsInstance(self.app.widget, Frame)\n\n def test_on_help(self):\n \"\"\"Test TopShape.on_help()\"\"\"\n self.app.draw_screen = mock.Mock()\n\n self.assertFalse(self.app.on_help())\n\n self.app.exit_help()\n self.assertFalse(self.app.on_help())\n\n self.app.enter_help()\n self.assertTrue(self.app.on_help())\n\n self.app.enter_help()\n self.assertTrue(self.app.on_help())\n\n self.app.exit_help()\n self.assertFalse(self.app.on_help())\n\n def test_move_sort_right(self):\n \"\"\"Test TopShape.move_sort_right()\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = False\n self.app.widget = mock.Mock()\n self.app.widget.body = mock.Mock()\n\n self.assertIsNone(self.app.move_sort_right())\n self.app.widget.body.move_sort_right.assert_called_with()\n\n def test_move_sort_right_on_help(self):\n \"\"\"Test TopShape.move_sort_right() while on help screen\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = True\n self.app.widget = mock.Mock()\n self.app.widget.body = mock.Mock()\n\n self.assertIsNone(self.app.move_sort_right())\n self.app.widget.body.move_sort_right.assert_not_called()\n\n def test_move_sort_left(self):\n \"\"\"Test TopShape.move_sort_left()\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = False\n self.app.widget = mock.Mock()\n self.app.widget.body = mock.Mock()\n\n self.assertIsNone(self.app.move_sort_left())\n self.app.widget.body.move_sort_left.assert_called_with()\n\n def test_move_sort_left_on_help(self):\n \"\"\"Test TopShape.move_sort_right() while on help screen\"\"\"\n self.app.on_help = mock.Mock()\n self.app.on_help.return_value = True\n self.app.widget = mock.Mock()\n self.app.widget.body = mock.Mock()\n\n self.assertIsNone(self.app.move_sort_left())\n self.app.widget.body.move_sort_left.assert_not_called()\n\n def test_run(self):\n \"\"\"Test TopShape.run().\"\"\"\n self.app._cache_thread = mock.Mock()\n self.app._cache_thread.body = []\n self.app.update = mock.Mock(side_effect=Exception())\n\n with self.assertRaises(Exception):\n self.app.run()\n\n self.app._cache_thread.start.assert_called_with()\n\n\nclass TestCacheThread(TestCase):\n \"\"\"Unit tests for CacheThread class.\"\"\"\n def setUp(self):\n self.header_func = mock.Mock()\n self.header_func.return_value = 'header'\n self.footer_func = mock.Mock()\n self.footer_func.return_value = 'footer'\n self.body_func = mock.Mock()\n self.body_func.return_value = []\n\n self.thread = CacheThread(self.header_func,\n self.body_func,\n self.footer_func,\n 
0.1)\n\n def test_start(self):\n \"\"\"Test CacheThread.start().\"\"\"\n self.thread.daemon = True\n self.thread.start()\n while not self.thread.ran_once:\n time.sleep(0.1)\n\n self.assertEqual('header', self.thread.header)\n self.assertEqual('footer', self.thread.footer)\n self.assertEqual([], self.thread.body)\n self.assertEqual(0.1, self.thread.refresh_rate)\n self.body_func.assert_called_with()\n self.header_func.assert_called_with()\n self.footer_func.assert_called_with()\n\n def test_start_no_footer(self):\n \"\"\"Test CacheThread.start() with no footer.\"\"\"\n header_func = mock.Mock()\n header_func.return_value = 'header'\n body_func = mock.Mock()\n body_func.return_value = []\n\n thread = CacheThread(header_func, body_func, None, 0.1)\n thread.daemon = True\n thread.start()\n while not thread.ran_once:\n time.sleep(0.1)\n self.assertTrue(thread.is_alive())\n\n\nclass TestHeader(TestCase):\n \"\"\"Unit tests for Header class.\"\"\"\n def setUp(self):\n header_text = Text('')\n self.header = Header((('pack', header_text), ('pack', Text(''))))\n self.header.app = mock.Mock()\n\n def test_keypress_not_enter(self):\n \"\"\"Test Header.keypress() when key is not enter\"\"\"\n self.assertEqual('i', (self.header.keypress((None, None), 'i')))\n\n def test_keypress_enter(self):\n \"\"\"Test Header.keypress() when key is enter\"\"\"\n self.header.contents[1] = (Edit(''), ('pack', None))\n self.assertIsNone(self.header.keypress((None, None), 'enter'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"topshape/tests/test_topshape.py","file_name":"test_topshape.py","file_ext":"py","file_size_in_byte":13884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
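The unit tests in the record above pin down topshape's public surface: `TopShape.create_app(columns, body_func, header_func[, footer_func])`, generator-style body functions that yield one tuple per row, a default `'q'` key bound to exit, and `run()` to start the loop. Here is a minimal end-to-end sketch based only on that tested API; the column labels and the rows yielded by `body()` are invented for illustration.

```python
# Minimal topshape app, based on the API exercised by the tests above.
from topshape import TopShape


def body():
    # Each yielded tuple is one table row, in column order
    yield ("1234", "python")
    yield ("5678", "bash")


def header():
    # Rendered above the column headers on every refresh
    return "example-top - 2 processes"


# Column dicts need at least 'label'; size/alignment/order are defaulted
columns = ({"label": "pid"}, {"label": "command"})
app = TopShape.create_app(columns, body, header)
app.run()  # enters the urwid main loop; 'q' quits by default
```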
+{"seq_id":"474117252","text":"import numpy as np\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import NearestNeighbors\n\n\nnp.random.seed(124)\n# target dataframe\ndf = pd.read_csv(\"D:/DataScience/R/Profile/Kaggle_MoA/train_targets_scored.csv\")\n# rename index by sig_id\nids = list(df.iloc[:,0])\ndf.index = ids\ndf = df.drop([\"sig_id\"], axis=1)\n\n## check for imbalance label\nmax(df.sum())\n## 832 out of 23814 highly imbalanced\n\n\n# rmove samples with no target\nY = df.drop(df.sum(axis=1)[df.sum(axis=1) <1].index.values, axis=0)\n\n\n## get index names to filter Features\n\nidNames = list(Y.index.values)\n\n\n\n# features\nFeatures = pd.read_csv(\"D:/DataScience/R/Profile/Kaggle_MoA/train_features.csv\")\n\n# rename index by sig_id\nids = list(Features.iloc[:,0])\nFeatures.index = ids\n\n\n# remove non-informative features\nFeatures = Features.drop([\"sig_id\",\"cp_type\"], axis = 1)\n\n# convert categoricals to binary\nFeatures = pd.get_dummies(Features, columns=[\"cp_time\", \"cp_dose\"])\n\n## select samples based on filtered targets dataframe\n\nX = Features.loc[idNames,:]\n\n\n## convert dataframe to array\n\narrX = np.array(X)\narrY = np.array(Y)\n\n\n## split for training by cross validation\ncv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)\nfor train_ix, test_ix in cv.split(arrX):\n X_train, X_test = arrX[train_ix], arrX[test_ix]\n Y_train, Y_test = arrY[train_ix], arrY[test_ix]\n\n\n### define the first model\nmodel = Sequential()\nmodel.add(Dense(1000, input_dim = 877, kernel_initializer= \"he_uniform\", activation=\"relu\"))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation=\"relu\"))\nmodel.add(Dropout(0.1))\nmodel.add(Dense(206, activation=\"sigmoid\"))\n\nmodel.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\nhist = model.fit(X_train, Y_train, epochs=10, validation_data=(X_test, Y_test), batch_size=32)\n\n## loss: 0.0035 - accuracy: 0.9989 - val_loss: 0.0378 - val_accuracy: 0.9952\n\nplt.plot(hist.history[\"loss\"])\nplt.plot(hist.history[\"val_loss\"])\nplt.title(\"Model loss\")\nplt.xlabel(\"Epoch\")\nplt.legend([\"Train\", \"val\"], loc=\"upper left\")\nplt.show()\n\n\n\n## visualize accuracy\nplt.plot(hist.history[\"accuracy\"])\nplt.plot(hist.history[\"val_accuracy\"])\nplt.title(\"Model Accuracy\")\nplt.xlabel(\"Epoch\")\nplt.legend([\"Train\", \"val\"], loc=\"upper left\")\nplt.show()\n\n\n\n\n### evaluate on the whole set of data\nyhat = model.predict(arrX)\nyhat = yhat.round()\n\nacc = accuracy_score(arrY, yhat)\n\nprint('>%.3f' % acc)\n## 0.875\n\n\n\n\n############ handle imbalanced labels with MLSMOTE\ndef get_tail_label(df):\n \"\"\"\n get tail label columns of the target dataframe\n\n Parameters\n ----------\n df : pandas.DataFrame\n \n\n Returns\n -------\n tail_label: a list of column name of all tail label, minority\n\n \"\"\"\n columns = df.columns\n n = len(columns)\n irpl = np.zeros(n)\n for column in range(n):\n irpl[column] = df[columns[column]].value_counts()[1]\n irpl = max(irpl)/irpl\n mir = np.average(irpl)\n tail_label = []\n for i in range(n):\n if irpl[i] > mir:\n tail_label.append(columns[i])\n return tail_label\n\n\ndef get_index(df):\n \"\"\"\n get the index of all tail_label rows\n\n Parameters\n ----------\n df : pandas.DataFrame target df\n \n\n 
Returns\n -------\n index: a list of index number of all the tail labels\n\n \"\"\"\n tail_labels = get_tail_label(df)\n index = set()\n for tail_label in tail_labels:\n sub_index = set(df[df[tail_label]==1].index)\n index = index.union(sub_index)\n return list(index)\n\ndef get_minority_instance(X, y):\n \"\"\"\n get minority data frame containing all the tail labels\n\n Parameters\n ----------\n X : pandas.DataFrame\n Features dataframe.\n y : pandas.DataFrame\n target dataframe.\n\n Returns\n -------\n X_sub: pandas.DataFrame, features of minority.\n y_sub: pandas.DataFrame, target of minority\n\n \"\"\"\n index = get_index(y)\n X_sub = X[X.index.isin(index)].reset_index(drop=True)\n y_sub = y[y.index.isin(index)].reset_index(drop=True)\n return X_sub, y_sub\n\ndef nearest_neighbour(X):\n \"\"\"\n get index of 5 nearest neighbours of all instances\n\n Parameters\n ----------\n X : np.array\n \n\n Returns\n -------\n indices: list of list of index of 5 NN of each element in X\n\n \"\"\"\n \n nbs = NearestNeighbors(n_neighbors=5, metric=\"euclidean\",\n algorithm=\"kd_tree\").fit(X)\n euclidean, indices = nbs.kneighbors(X)\n return indices\n\ndef MLSMOTE(X,y, n_sample):\n \"\"\"\n get augmented data using MLSMOTE algorithm\n\n Parameters\n ----------\n X : pandas.DataFrame\n features dataframe.\n y : pandas.DataFrame\n target dataframe\n n_sample : number of samples to be augmented\n\n Returns\n -------\n new_X: pandas.DataFrame, aumented features\n target: pandas.DataFrame, augmented target\n\n \"\"\"\n indices2 = nearest_neighbour(X)\n n = len(indices2)\n new_X = np.zeros((n_sample, X.shape[1]))\n target = np.zeros((n_sample, y.shape[1]))\n for i in range(n_sample):\n reference = random.randint(0, n-1)\n neighbour = random.choice(indices2[reference, 1:])\n all_point = indices2[reference]\n nn_df = y[y.index.isin(all_point)]\n ser = nn_df.sum(axis = 0, skipna = True)\n target[i] = np.array([1 if val>2 else 0 for val in ser])\n ratio = random.random()\n gap = X.loc[reference,:] - X.loc[neighbour,:]\n new_X[i] = np.array(X.loc[reference,:] + ratio*gap)\n new_X = pd.DataFrame(new_X, columns = X.columns)\n target = pd.DataFrame(target, columns = y.columns)\n new_X = pd.concat([X, new_X], axis=0)\n target = pd.concat([y, target], axis=0)\n return new_X, target\n\n\n#### Getting minority instances\n\nX_sub, y_sub = get_minority_instance(X, Y)\n\n# simulate data from minority samples\nX_res, y_res = MLSMOTE(X_sub, y_sub, 50000)\n\n### concatante simulated data and the original data\n\ntrain_X = pd.concat([X, X_res], axis=0)\ntrain_Y = pd.concat([Y, y_res], axis=0)\n\n\n## convert to np.array\ntrain_X = np.array(train_X)\ntrain_Y = np.array(train_Y)\n\n\n\n# split data for trianing and test\n\nfor train_ix, test_ix in cv.split(train_X):\n X_train2, X_test2 = train_X[train_ix], train_X[test_ix]\n Y_train2, Y_test2 = train_Y[train_ix], train_Y[test_ix]\n\n\n\n\n###############\nmodel2 = Sequential()\nmodel2.add(Dense(1000, input_dim = 877, kernel_initializer= \"he_uniform\", activation=\"relu\"))\nmodel2.add(Dropout(0.2))\nmodel2.add(Dense(512, activation=\"relu\"))\nmodel2.add(Dropout(0.1))\nmodel2.add(Dense(206, activation=\"sigmoid\"))\n\nmodel2.compile(optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\nhist2 = model2.fit(X_train2, Y_train2, epochs=10,\n validation_data=(X_test2, Y_test2), batch_size=32)\n\n## loss: 0.0022 - accuracy: 0.9994 - val_loss: 0.0083 - val_accuracy: 0.9988\n\n## visualize 
loss\nplt.plot(hist2.history[\"loss\"])\nplt.plot(hist2.history[\"val_loss\"])\nplt.title(\"Model2 loss\")\nplt.xlabel(\"Epoch\")\nplt.legend([\"Train\", \"val\"], loc=\"upper left\")\nplt.show()\n\n\n\n## visualize accuracy\nplt.plot(hist2.history[\"accuracy\"])\nplt.plot(hist2.history[\"val_accuracy\"])\nplt.title(\"Model2 Accuracy\")\nplt.xlabel(\"Epoch\")\nplt.legend([\"Train\", \"val\"], loc=\"upper left\")\nplt.show()\n\n\n\n### evaluate on the whole set of data\nyhat2 = model2.predict(train_X)\nyhat2 = yhat2.round()\n\nacc2 = accuracy_score(train_Y, yhat2)\n\nprint('>%.3f' % acc2)\n## 0.930\n\n","sub_path":"MoA.py","file_name":"MoA.py","file_ext":"py","file_size_in_byte":7614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
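The core of the MLSMOTE routine above is one SMOTE-style step: pick a minority instance, pick one of its nearest neighbours, and synthesize a point along the line through them. A numpy-only sketch of that step with made-up vectors; note that the source computes `reference + ratio*(reference - neighbour)`, which extrapolates away from the neighbour, whereas classic SMOTE interpolates toward it:

```python
import numpy as np

rng = np.random.default_rng(0)

x_ref = np.array([1.0, 2.0, 3.0])   # minority-class feature vector
x_nn  = np.array([2.0, 1.0, 5.0])   # one of its k nearest neighbours

ratio = rng.random()                # uniform in [0, 1)

# As written in the MLSMOTE function above (extrapolation):
x_new_source = x_ref + ratio * (x_ref - x_nn)

# Classic SMOTE (interpolation toward the neighbour):
x_new_smote = x_ref + ratio * (x_nn - x_ref)

print(x_new_source, x_new_smote)
```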
+{"seq_id":"285416679","text":"'''\nCreated on 08/03/2015\n\n@author: lapaesleme\n'''\nfrom utils import load_data\nfrom utils import save_data\nfrom utils import get_path\n\nif __name__ == '__main__':\n filename = get_path('temp', 'trajectories2.000.json')\n dataset = load_data(filename)\n \n categories = {}\n datasets = {}\n trajectories = set()\n for t in dataset:\n trajectories.add(t['id'])\n for c in t['categories']:\n categories[c] = categories.get(c, 0) + 1\n for d in t['datasets']:\n datasets[d] = datasets.get(d, 0) + 1\n \n filename = get_path('temp', 'categories.json')\n save_data(categories, filename)\n filename = get_path('temp', 'datasets.json')\n save_data(datasets, filename)\n \n categories = sorted(categories.items(), key=lambda cat: cat[1], reverse=True)\n print ([c[0] for c in categories[:20]])\n print ([c for c in categories[:20]])\n \n print ('')\n datasets = sorted(datasets.items(), key=lambda dat: dat[1], reverse=True)\n print ([d[0] for d in datasets[:20]])\n print ([d for d in datasets[:20]])\n \n print ('')\n print (len(trajectories))\n \n \n","sub_path":"MyTwitter/src/utils/tasks/Table2.py","file_name":"Table2.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"337987246","text":"'''\nQuest step object\n'''\n\nclass QuestStep:\n\n def __init__(self, StepID, Description, CompletesQuest, StepType, KillCount, ExplorationID, NPCID, NextStep):\n self.StepID = StepID\n self.Description = Description\n self.CompletesQuest = CompletesQuest\n self.StepType = StepType\n self.KillCount = KillCount\n self.ExplorationID = ExplorationID\n self.NPCID = NPCID\n self.NextStep = NextStep","sub_path":"QuestStep.py","file_name":"QuestStep.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"251547019","text":"from __future__ import absolute_import\nimport recurrentshop\nfrom recurrentshop.cells import *\nfrom recurrentshop import LSTMCell, RecurrentSequential\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, TimeDistributed, Bidirectional, Input, Lambda, Activation\nfrom keras.layers import add, multiply, concatenate\nfrom keras import backend as K\n\n'''\nPapers:\n[1] Sequence to Sequence Learning with Neural Networks (http://arxiv.org/abs/1409.3215)\n[2] Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation (http://arxiv.org/abs/1406.1078)\n[3] Neural Machine Translation by Jointly Learning to Align and Translate (http://arxiv.org/abs/1409.0473)\n'''\n\n\ndef SimpleSeq2Seq(output_dim, output_length, hidden_dim=None, input_shape=None,\n batch_size=None, batch_input_shape=None, input_dim=None,\n input_length=None, depth=1, dropout=0.0, unroll=False,\n stateful=False):\n\n '''\n Simple model for sequence to sequence learning.\n The encoder encodes the input sequence to vector (called context vector)\n The decoder decodes the context vector in to a sequence of vectors.\n There is no one on one relation between the input and output sequence\n elements. The input sequence and output sequence may differ in length.\n Arguments:\n output_dim : Required output dimension.\n hidden_dim : The dimension of the internal representations of the model.\n output_length : Length of the required output sequence.\n depth : Used to create a deep Seq2seq model. For example, if depth = 3,\n there will be 3 LSTMs on the enoding side and 3 LSTMs on the\n decoding side. You can also specify depth as a tuple. For example,\n if depth = (4, 5), 4 LSTMs will be added to the encoding side and\n 5 LSTMs will be added to the decoding side.\n dropout : Dropout probability in between layers.\n '''\n\n if isinstance(depth, int):\n depth = (depth, depth)\n if batch_input_shape:\n shape = batch_input_shape\n elif input_shape:\n shape = (batch_size,) + input_shape\n elif input_dim:\n if input_length:\n shape = (batch_size,) + (input_length,) + (input_dim,)\n else:\n shape = (batch_size,) + (None,) + (input_dim,)\n else:\n # TODO Proper error message\n raise TypeError\n if hidden_dim is None:\n hidden_dim = output_dim\n encoder = RecurrentSequential(unroll=unroll, stateful=stateful)\n encoder.add(LSTMCell(hidden_dim, batch_input_shape=(shape[0], shape[-1])))\n\n for _ in range(1, depth[0]):\n encoder.add(Dropout(dropout))\n encoder.add(LSTMCell(hidden_dim))\n\n decoder = RecurrentSequential(unroll=unroll, stateful=stateful,\n decode=True, output_length=output_length)\n decoder.add(Dropout(dropout, batch_input_shape=(shape[0], hidden_dim)))\n\n if depth[1] == 1:\n decoder.add(LSTMCell(output_dim))\n else:\n decoder.add(LSTMCell(hidden_dim))\n for _ in range(depth[1] - 2):\n decoder.add(Dropout(dropout))\n decoder.add(LSTMCell(hidden_dim))\n decoder.add(Dropout(dropout))\n decoder.add(LSTMCell(output_dim))\n\n _input = Input(batch_shape=shape)\n x = encoder(_input)\n output = decoder(x)\n return Model(_input, output)\n\n\ndef AttentionSeq2Seq(output_dim, output_length, batch_input_shape=None,\n batch_size=None, input_shape=None, input_length=None,\n input_dim=None, hidden_dim=None, depth=1,\n bidirectional=True, unroll=False, stateful=False, dropout=0.0,):\n '''\n This is an attention Seq2seq model based on [3].\n Here, there is a soft allignment between the input and output sequence elements.\n A bidirection encoder is used by 
default. There is no hidden state transfer in this\n model.\n The math:\n Encoder:\n X = Input Sequence of length m.\n H = Bidirection_LSTM(X); Note that here the LSTM has return_sequences = True,\n so H is a sequence of vectors of length m.\n Decoder:\n y(i) = LSTM(s(i-1), y(i-1), v(i)); Where s is the hidden state of the LSTM (h and c)\n and v (called the context vector) is a weighted sum over H:\n v(i) = sigma(j = 0 to m-1) alpha(i, j) * H(j)\n The weight alpha[i, j] for each hj is computed as follows:\n energy = a(s(i-1), H(j))\n alpha = softmax(energy)\n Where a is a feed forward network.\n '''\n\n if isinstance(depth, int):\n depth = (depth, depth)\n if batch_input_shape:\n shape = batch_input_shape\n elif input_shape:\n shape = (batch_size,) + input_shape\n elif input_dim:\n if input_length:\n shape = (batch_size,) + (input_length,) + (input_dim,)\n else:\n shape = (batch_size,) + (None,) + (input_dim,)\n else:\n # TODO Proper error message\n raise TypeError\n if hidden_dim is None:\n hidden_dim = output_dim\n\n _input = Input(batch_shape=shape)\n _input._keras_history[0].supports_masking = True\n\n encoder = RecurrentSequential(unroll=unroll, stateful=stateful,\n return_sequences=True)\n encoder.add(LSTMCell(hidden_dim, batch_input_shape=(shape[0], shape[2])))\n\n for _ in range(1, depth[0]):\n encoder.add(Dropout(dropout))\n encoder.add(LSTMCell(hidden_dim))\n\n if bidirectional:\n encoder = Bidirectional(encoder, merge_mode='sum')\n encoder.forward_layer.build(shape)\n encoder.backward_layer.build(shape)\n # patch\n encoder.layer = encoder.forward_layer\n\n encoded = encoder(_input)\n decoder = RecurrentSequential(decode=True, output_length=output_length,\n unroll=unroll, stateful=stateful)\n decoder.add(Dropout(dropout, batch_input_shape=(shape[0], shape[1], hidden_dim)))\n if depth[1] == 1:\n decoder.add(AttentionDecoderCell(output_dim=output_dim, hidden_dim=hidden_dim))\n else:\n decoder.add(AttentionDecoderCell(output_dim=output_dim, hidden_dim=hidden_dim))\n for _ in range(depth[1] - 2):\n decoder.add(Dropout(dropout))\n decoder.add(LSTMDecoderCell(output_dim=hidden_dim, hidden_dim=hidden_dim))\n decoder.add(Dropout(dropout))\n decoder.add(LSTMDecoderCell(output_dim=output_dim, hidden_dim=hidden_dim))\n \n inputs = [_input]\n decoded = decoder(encoded)\n model = Model(inputs, decoded)\n return model\n\n\nclass LSTMDecoderCell(ExtendedRNNCell):\n \n def __init__(self, hidden_dim=None, **kwargs):\n if hidden_dim:\n self.hidden_dim = hidden_dim\n else:\n self.hidden_dim = self.output_dim\n super(LSTMDecoderCell, self).__init__(**kwargs)\n\n def build_model(self, input_shape):\n hidden_dim = self.hidden_dim\n output_dim = self.output_dim\n\n x = Input(batch_shape=input_shape)\n h_tm1 = Input(batch_shape=(input_shape[0], hidden_dim))\n c_tm1 = Input(batch_shape=(input_shape[0], hidden_dim))\n\n W1 = Dense(hidden_dim * 4,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=self.kernel_regularizer,\n use_bias=False)\n W2 = Dense(output_dim,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=self.kernel_regularizer,)\n U = Dense(hidden_dim * 4,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=self.kernel_regularizer,)\n\n z = add([W1(x), U(h_tm1)])\n\n z0, z1, z2, z3 = get_slices(z, 4)\n i = Activation(self.recurrent_activation)(z0)\n f = Activation(self.recurrent_activation)(z1)\n c = add([multiply([f, c_tm1]), multiply([i, Activation(self.activation)(z2)])])\n o = Activation(self.recurrent_activation)(z3)\n h = multiply([o, 
Activation(self.activation)(c)])\n y = Activation(self.activation)(W2(h))\n\n return Model([x, h_tm1, c_tm1], [y, h, c])\n\n\nclass AttentionDecoderCell(ExtendedRNNCell):\n\n def __init__(self, hidden_dim=None, **kwargs):\n if hidden_dim:\n self.hidden_dim = hidden_dim\n else:\n self.hidden_dim = self.output_dim\n self.input_ndim = 3\n super(AttentionDecoderCell, self).__init__(**kwargs)\n\n\n def build_model(self, input_shape):\n \n input_dim = input_shape[-1]\n output_dim = self.output_dim\n input_length = input_shape[1]\n hidden_dim = self.hidden_dim\n\n x = Input(batch_shape=input_shape)\n h_tm1 = Input(batch_shape=(input_shape[0], hidden_dim))\n c_tm1 = Input(batch_shape=(input_shape[0], hidden_dim))\n \n W1 = Dense(hidden_dim * 4,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=self.kernel_regularizer)\n W2 = Dense(output_dim,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=self.kernel_regularizer)\n W3 = Dense(1,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=self.kernel_regularizer)\n U = Dense(hidden_dim * 4,\n kernel_initializer=self.kernel_initializer,\n kernel_regularizer=self.kernel_regularizer)\n\n C = Lambda(lambda x: K.repeat(x, input_length), output_shape=(input_length, input_dim))(c_tm1)\n _xC = concatenate([x, C])\n _xC = Lambda(lambda x: K.reshape(x, (-1, input_dim + hidden_dim)), output_shape=(input_dim + hidden_dim,))(_xC)\n\n alpha = W3(_xC)\n alpha = Lambda(lambda x: K.reshape(x, (-1, input_length)), output_shape=(input_length,))(alpha)\n alpha = Activation('softmax')(alpha)\n\n _x = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=(1, 1)), output_shape=(input_dim,))([alpha, x])\n\n z = add([W1(_x), U(h_tm1)])\n\n z0, z1, z2, z3 = get_slices(z, 4)\n\n i = Activation(self.recurrent_activation)(z0)\n f = Activation(self.recurrent_activation)(z1)\n\n c = add([multiply([f, c_tm1]), multiply([i, Activation(self.activation)(z2)])])\n o = Activation(self.recurrent_activation)(z3)\n h = multiply([o, Activation(self.activation)(c)])\n y = Activation(self.activation)(W2(h))\n\n return Model([x, h_tm1, c_tm1], [y, h, c])\n","sub_path":"8_Seq2seq/script/model/.ipynb_checkpoints/seq2seq-checkpoint.py","file_name":"seq2seq-checkpoint.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
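The AttentionSeq2Seq docstring above describes the context vector as a softmax-weighted sum over the encoder outputs H. A numpy sketch of just that arithmetic, with random stand-ins for the learned energy network a(s, H):

```python
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())  # shift for numerical stability
    return e / e.sum()

m, d = 5, 8                   # input length, encoder state width
H = np.random.randn(m, d)     # encoder outputs, one row per time step
energy = np.random.randn(m)   # stand-in for a(s_{i-1}, H_j)

alpha = softmax(energy)       # attention weights over the m positions
v = alpha @ H                 # context vector: weighted sum of the rows of H

assert np.isclose(alpha.sum(), 1.0) and v.shape == (d,)
```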
+{"seq_id":"548721949","text":"def solution(A):\n cars_going_east = [i for i, x in enumerate(A) if x == 0]\n sums_per_split = [sum(A[i:]) for i in cars_going_east]\n total = sum(sums_per_split)\n return total\n\n\ndef prefix_sum(A):\n len_a = len(A)\n pref_sum = [0] * (len_a + 1)\n\n for i in range(1, len_a + 1):\n pref_sum[i] = pref_sum[i - 1] + A[i - 1]\n\n return pref_sum\n\n\ndef suffix_sum(A):\n len_a = len(A)\n suf_sum = [0] * (len_a + 1)\n for i in range(len(A) - 1, -1, -1):\n suf_sum[i] = A[i] + suf_sum[i + 1]\n return suf_sum\n\n\ndef solution2(A):\n # write your code in Python 3.6\n pref_sum_A = prefix_sum(A)\n total = pref_sum_A[-1]\n\n accum = 0\n for i in range(len(A)):\n if A[i] == 0:\n accum = accum + total - pref_sum_A[i + 1]\n if accum > 1000000: # condition i did not check\n return -1\n return accum\n","sub_path":"code/lesson_05_01.py","file_name":"lesson_05_01.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"490519239","text":"import math\nimport os\nimport time\nfrom functools import lru_cache\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pyperclip\nfrom tqdm import tqdm\n\n\ndef draf_graph(graph):\n pos = nx.spring_layout(graph, seed=225) # Seed for reproducible layout\n nx.draw(graph, pos, with_labels=True)\n plt.show()\n\n\ndef first_task(input_data):\n count = 0\n G = nx.DiGraph()\n for i in range(len(input_data)):\n value = input_data[i].strip(\"Valve \")\n start_valve, rest = value.split(\" has flow rate=\")\n flow_rate, rest = rest.split(\"; tunnels lead to valves \")\n flow_rate = int(flow_rate)\n end_valves = rest.split(\", \")\n\n start_valve_flow = start_valve + \"_flow\"\n\n for end_valve in end_valves:\n G.add_edge(start_valve, end_valve)\n G.add_edge(end_valve, start_valve)\n\n if flow_rate > 0:\n G.add_edge(start_valve, start_valve_flow)\n G.add_edge(start_valve_flow, end_valve, flow_rate=flow_rate)\n\n print(start_valve, flow_rate, end_valves)\n\n preassures = []\n global max_pressure_released\n max_pressure_released = 0\n\n @lru_cache()\n def visit_valve(valve, time_stamp, valves_opened, current_flow, preassure_released):\n global max_pressure_released\n neighbors = G.neighbors(valve)\n\n if time_stamp >= 30:\n if preassure_released > max_pressure_released:\n max_pressure_released = preassure_released\n print(f\"New max pressure found: {max_pressure_released}\")\n # preassures.append(preassure_released)\n return preassure_released\n\n preassures = []\n\n for neighbor in neighbors:\n try:\n flow_rate = G[valve][neighbor][\"flow_rate\"]\n if neighbor not in valves_opened:\n preassures.append(\n visit_valve(\n neighbor,\n time_stamp + 1,\n tuple(valves_opened + (neighbor,)),\n current_flow + flow_rate,\n preassure_released + current_flow,\n )\n )\n\n except KeyError:\n pass\n\n preassures.append(\n visit_valve(\n neighbor,\n time_stamp + 1,\n valves_opened,\n current_flow,\n preassure_released + current_flow,\n )\n )\n\n print(f\"Returning local max pressure: {max(preassures)}\")\n return max(preassures)\n\n start_valve = \"AA\"\n # visit_valve(\n # valve=start_valve,\n # time_stamp=0,\n # valves_opened=tuple(),\n # current_flow=0,\n # preassure_released=0,\n # )\n\n paths = nx.dfs_edges(G, start_valve, depth_limit=30)\n print(paths)\n for path in paths:\n print(path)\n\n # draf_graph(G)\n return count\n\n\ndef second_task(input_data):\n count = 0\n for i in range(len(input_data)):\n value = input_data[i]\n pass\n return None\n\n\ndef run_day():\n input_file = os.path.join(os.path.dirname(__file__), \"input.txt\")\n input_data = list(map(lambda line: line.strip(), open(input_file, \"r\")))\n\n t_start = time.time()\n first_answer = first_task(input_data)\n t_end = time.time()\n first_time = round(t_end - t_start, 2)\n if first_answer is not None:\n pyperclip.copy(str(first_answer))\n pyperclip.paste()\n\n print(\"#############################\")\n print(\"The answer to the 1st task is\")\n print(first_answer, f\"in {first_time} seconds\")\n\n t_start = time.time()\n second_answer = second_task(input_data)\n t_end = time.time()\n second_time = round(t_end - t_start, 3)\n if second_answer is not None:\n pyperclip.copy(str(second_answer))\n pyperclip.paste()\n\n print()\n print(\"The answer to the 2nd task is\")\n print(second_answer, f\"in {second_time} seconds\")\n print(\"#############################\")\n\n\nif __name__ == \"__main__\":\n 
run_day()\n","sub_path":"aoc_2022/src/2022_day_16/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"439128901","text":"import astropy.units as u\nimport astropy.constants as c\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.cosmology import WMAP9 as cosmo\nfrom . import general\nfrom .data import atnf as atnf\nfrom scipy import signal\nimport scipy.interpolate as interp\n\nclass Source():\n \"\"\"\n The base class for a gravitational wave source.\n \"\"\"\n name = \"Generic Source\"\n frequencies = np.logspace(-5, 5, 1000) * u.hertz\n M = 30 * u.solMass\n r = 300 * u.parsec\n \n def __init__(self, frequencies=None, M=None, r=None):\n if frequencies: self.frequencies = frequencies\n if r: self.r = r\n if M: self.M = M\n \n def raw_strain(self, frequencies=None):\n if not frequencies: frequencies = self.frequencies\n return ((1./self.r) * ((5*np.pi)/(24*c.c**3))**(0.5) * (c.G * self.chirp_mass())**(5./6) * (np.pi*frequencies)**(-7./6)).to(1/u.hertz)\n \n def psd(self, frequencies=None):\n \"\"\"\n The one-sided power spectral density\n \n Parameters\n ----------\n frequencies : ndarray\n An array of frequencies where the PSD should be calculated.\n \n Returns : ndarray\n An array of the PSDs at the given frequencies for this source.\n \"\"\"\n if not frequencies: frequencies = self.frequencies\n return 2 * (frequencies**0.5) * np.abs(self.raw_strain(frequencies))\n \n def srpsd(self, frequencies=None):\n if not frequencies: frequencies = self.frequencies\n return np.sqrt(self.psd(frequencies)) \n \n def characteristic_strain(self, frequencies=None):\n if not frequencies: frequencies = self.frequencies\n return np.sqrt(4 * frequencies**2 * np.abs(self.raw_strain(frequencies))**2)\n \n def energy_density(frequencies=None):\n if not frequencies: frequencies = self.frequencies\n return (2*pi**2)/3 * frequencies**3 * self.psd(frequencies)\n \n def plot(self, axis, label=None):\n if axis:\n if not label:\n label = self.name\n line = axis.loglog(self.frequencies, self.characteristic_strain(self.frequencies), label=label, lw=2)\n axis.set_xlabel('Frequency [Hz]')\n #axis.set_ylabel('Root Noise Power spectral density')\n axis.legend()\n return line\n \n def snr(self, detector):\n return general.snr(self, detector)\n\nclass Pulsar(Source):\n \"\"\"\n A gravitational-wave pulsar.\n \"\"\"\n name = \"Pulsar\"\n\n def __init__(self, psrj, Izz=1e-5 * 10**38 * u.kilogram * u.meter**2):\n \"\"\"\n Object representing a pulsar.\n \n Parameters\n ----------\n prsj : str\n The Julian (J) name of the pulsar.\n Izz : float\n The magnitude of the zz component of the moment of inertia tensor.\n\n \"\"\"\n self.Izz = Izz\n catalogue = atnf.get_atnf()\n rowdata = catalogue.loc['PSRJ', psrj]\n self.data = rowdata\n self.name = psrj\n\n def raw_strain(self, frequencies = None):\n \"\"\"Calculate the raw strain which the pulsar should produce. 
Note\n that unlike other sources this will be at a single frequency,\n since pulsars are not broadband emitters.\n\n Parameters\n ----------\n \n \"\"\"\n if not frequencies: frequencies = self.frequencies\n response = np.ones(len(frequencies)) * np.nan\n def find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return idx\n response[find_nearest(frequencies, 2*self.data['F0']*u.hertz)] = 1\n distance = self.data['DIST'] * 1000 * u.parsec\n f = 2*self.data['F0'] * u.hertz\n fdot = self.data['F1']\n fratio = fdot / f\n GoC = c.G / c.c**3\n rational = - (5.0/4.0) * GoC * self.Izz * fratio\n return response * (1/distance) * np.sqrt(rational)\n \n def plot(self, axis):\n if axis: \n axis.loglog(self.frequencies, self.characteristic_strain(self.frequencies), 'o', label=self.name,)\n axis.set_xlabel('Frequency [Hz]')\n #axis.set_ylabel('Root Noise Power spectral density')\n axis.legend()\n \nclass Type1ASupernova(Source):\n \"\"\"\n A Type-1A supernova source. Based on https://arxiv.org/abs/1511.02542.\n \"\"\"\n name = \"Type Ia SN\"\n r = 10 * 1000 * u.parsec\n \n def __init__(self, frequencies = None, r = None):\n if frequencies: self.frequencies = frequencies\n if r: self.r = r\n\n def characteristic_strain(self, frequencies = None):\n if not frequencies: frequencies = self.frequencies\n response = np.ones(len(frequencies)) * ((9e-21) * (1*u.parsec) / self.r)\n response[frequencies < 0.25 * u.hertz ] = np.nan\n response[frequencies > 1.5 * u.hertz ] = np.nan\n \n return response\n\nclass CoreCollapseSupernova(Source):\n \"\"\"\n A core-collapse supernova source. Based on Dimmelmeier.\n \"\"\"\n name = \"CCSN\"\n r = 10 * 1000 * u.parsec\n frequencies = np.logspace(2,3,1000) * u.hertz\n \n def __init__(self, frequencies = None, r = None):\n if frequencies: self.frequencies = frequencies\n if r: self.r = r\n\n def characteristic_strain(self, frequencies = None):\n if not frequencies: frequencies = self.frequencies\n return np.ones(len(frequencies)) * ((8.9e-21) * (1 * u.parsec) / self.r)\n\nclass Numerical(Source):\n \"\"\"\n Model a numerical relativity waveform.\n \"\"\"\n name = \"Numerical\"\n\n pass\n \nclass CBC(Source):\n \"\"\"\n A compact binary coallescence source\n \"\"\"\n name = \"CBC\"\n M = 30 * u.solMass\n r = 300 * u.parsec\n \n def __init__(self, frequencies=None, m1=None, m2=None, r=None):\n if frequencies: self.frequencies = frequencies\n if r: self.r = r\n if m1: self.m1 = m1\n if m2: self.m2 = m2\n self.M = self.chirp_mass()\n \n def fdot(self, frequencies=None, M=None):\n \"\"\"\n Calculate the first time derivative of the CBC's frequency.\n \n Parameters\n ---------\n frequencies : ndarray\n The frequencies at which the number of cycles need to be found.\n \n M : float\n The chirp mass of the CBC.\n \n Returns\n -------\n fdot : ndarray\n The df/dt of each frequency.\n \"\"\"\n if not frequencies: frequencies = 0.5*self.frequencies\n if not M: M = self.chirp_mass()\n return (((96*np.pi**(8./3)) / (5 * c.c**5)) * (c.G*M)**(5./3) * frequencies**(11./3))#.to(u.hertz**2)\n\n def ncycles(self, frequencies=None, M=None):\n \"\"\"\n Calculate the number of cycles that the CBC spends in each frequency bin.\n \n Parameters\n ---------\n frequencies : ndarray\n The frequencies at which the number of cycles need to be found.\n \n M : float\n The chirp mass of the CBC.\n \n Returns\n -------\n ncycles : ndarray\n The number of cycles in each frequency bin.\n \"\"\"\n if not frequencies: frequencies = 0.5*self.frequencies\n if not M: M = self.chirp_mass()\n return 
np.sqrt(frequencies**2/ self.fdot(frequencies, M))#.to(1)\n \n def characteristic_strain(self, frequencies=None):\n if not frequencies: frequencies = self.frequencies\n return np.sqrt(2*self.ncycles())*np.sqrt(4 * frequencies**2 * np.abs(self.raw_strain())**2)\n \n def chirp_mass(self):\n return ((self.m1*self.m2)**(3./5) / (self.m1 + self.m2)**(1./5)).to(u.kilogram)\n \n def fisco(self):\n return ((c.c**3) / (np.pi*c.G*(self.m1+self.m2)*6*6**0.5 )).to(u.hertz)\n \n def raw_strain(self, frequencies=None):\n if not frequencies: frequencies = self.frequencies\n h = ((1./self.r) * ((5*np.pi)/(24*c.c**3))**(0.5) * (c.G * self.M)**(5./6) * (np.pi*frequencies)**(-7./6)).to(1/u.hertz)\n h[frequencies>2*self.fisco()] = np.nan\n return h\n\n\n\nclass IMR(Source):\n \"\"\"\n An inspiral, merger, ringdown frequency spectrum.\n\n Modelled on IMRPhenomA, and does not include contributions from spin.\n \"\"\"\n\n def __init__(self, frequencies=None, m1=None, m2=None, r=None):\n if frequencies: self.frequencies = frequencies\n self.distance = r.to(u.meter)\n self.mass1 = m1.to(u.kilogram)\n self.mass2 = m2.to(u.kilogram)\n \n @property\n def eta(self):\n \"\"\"\n The symmetric mass ratio of the CBC system.\n \"\"\"\n eta = (self.mass1 * self.mass2) / (self.mass1 + self.mass2)**2\n return eta\n \n def fk(self, k):\n\n # The various transition frequencies.\n # Broadly\n # 0 is the merger,\n # 1 is the ringdown\n # 2 decay width\n # 3 cut-off frequency\n a = [2.9740e-1, 5.9411e-1, 5.0801e-1, 8.4845e-1]\n b = [4.4810e-2, 8.9794e-2, 7.7515e-2, 1.2848e-1]\n d = [9.5560e-2, 1.9111e-1, 2.2369e-2, 2.7299e-1]\n \n top = a[k] * self.eta**2 + b[k] * self.eta + d[k]\n bot = np.pi * (c.G*(self.mass1+self.mass2) / c.c**3)\n return top / bot\n \n @property\n def chirp_mass(self):\n return ((self.mass1*self.mass2)**(3./5) / (self.mass1 + self.mass2)**(1./5)).to(u.kilogram)\n \n def ncycles(self, frequencies=None, M=None):\n return None\n \n @property\n def w(self):\n first = (np.pi * self.fk(2)/2)\n second = (self.fk(0) / self.fk(1))**(2./3)\n\n return first * second\n\n def L(self, f):\n first = (1/(2*np.pi))\n second = (self.fk(2)/((f - self.fk(1))**2 + self.fk(2)**2/4.))\n\n return first * second\n\n def amplitude(self, f):\n first = np.sqrt(5./24)\n second = (c.G * self.chirp_mass / c.c**3)**(5./6) * (self.fk(0))**(-7./6)\n third = (np.pi**(2/3.) 
* (self.distance / c.c))\n\n tail = np.ones(len(f))*np.nan\n tail[f>> pip install minke\n which will give you access to any of the waveforms it supports.\n \"\"\"\n name = \"Minke Signal\"\n frequencies = np.linspace(0.1, 1000, 1000) * u.hertz\n #def ncycles(self, a):\n # return None\n def __init__(self, source, name=None, frequencies=None, **params):\n if frequencies: self.frequencies = frequencies\n if name: self.name = name\n if \"sample_rate\" in params.keys():\n self.sample_rate = params['sample_rate']\n del(params['sample_rate'])\n else:\n self.sample_rate = 4096\n self.waveform = source(**params)\n self.waveform.has_memory=True\n self.waveform.tail=True\n self.strain_of_t = self.waveform._make_strain(sample_rate=self.sample_rate)\n \n b,a = signal.butter(4, 10./(self.sample_rate), btype='high')\n self.strain_of_t[:,1] = signal.filtfilt(b,a, self.strain_of_t[:,1])\n self.strain_of_t[:,2] = signal.filtfilt(b,a, self.strain_of_t[:,2])\n \n def raw_strain(self, frequencies=None, fft_len=None):\n\n if not fft_len:\n fft_len = self.sample_rate\n if not frequencies: frequencies = self.frequencies\n\n delta_t = np.diff(self.strain_of_t[:,0])[0]\n strain_of_f = 1./np.sqrt(fft_len)*np.fft.fft(signal.windows.hanning(len(self.strain_of_t[:,1]))*self.strain_of_t[:,1], fft_len)\n freqs = np.fft.fftfreq(fft_len, delta_t)\n \n interpolator = interp.interp1d(freqs, np.sqrt((strain_of_f* strain_of_f.conj()).real), \"linear\")\n return interpolator(frequencies.value)\n","sub_path":"gravpy/sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":12123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
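The CBC class above computes a chirp mass and an innermost-stable-circular-orbit frequency with astropy units; pulled out on their own, those two formulas look like this (values quoted for an equal-mass 30+30 solar-mass binary):

```python
import astropy.units as u
import astropy.constants as c
import numpy as np

m1 = m2 = 30 * u.solMass
M_chirp = ((m1 * m2) ** (3. / 5) / (m1 + m2) ** (1. / 5)).to(u.kilogram)
f_isco = (c.c ** 3 / (np.pi * c.G * (m1 + m2) * 6 * 6 ** 0.5)).to(u.hertz)

print(M_chirp)  # ~5.2e31 kg (about 26 solar masses)
print(f_isco)   # ~73 Hz for a 60-solar-mass total binary
```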
+{"seq_id":"493300666","text":"import glob\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef create_dict_relative_norepeat_counters_onebyone(directory):\n thr=[]\n data_files=glob.glob(directory+'/*.dat')\n benchmark=''\n benchmarks=[]\n chunk_sizes=[]\n block_sizes={}\n mat_sizes={}\n nodes=[]\n \n for filename in data_files:\n (node, benchmark, th, runtime, chunk_size, block_size_row, block_size_col, mat_size) = filename.split('/')[-1].replace('.dat','').split('-') \n mat_size=mat_size.split(',')[0]\n if benchmark not in benchmarks:\n benchmarks.append(benchmark) \n mat_sizes[benchmark]=[]\n block_sizes[benchmark]=[]\n if int(mat_size) not in mat_sizes[benchmark]:\n mat_sizes[benchmark].append(int(mat_size))\n if int(th) not in thr:\n thr.append(int(th)) \n if block_size_row+'-'+block_size_col not in block_sizes[benchmark]:\n block_sizes[benchmark].append(block_size_row+'-'+block_size_col)\n if int(chunk_size) not in chunk_sizes:\n chunk_sizes.append(int(chunk_size))\n if node not in nodes:\n nodes.append(node)\n \n thr.sort()\n nodes.sort() \n chunk_sizes.sort()\n benchmarks.sort() \n repeats=5\n \n d={}\n for node in nodes:\n d[node]={}\n for benchmark in benchmarks: \n mat_sizes[benchmark].sort()\n block_sizes[benchmark].sort()\n d[node][benchmark]={}\n \n for th in thr:\n d[node][benchmark][th]={}\n for bs in block_sizes[benchmark]:\n d[node][benchmark][th][bs]={}\n for cs in chunk_sizes:\n d[node][benchmark][th][bs][cs]={}\n d[node][benchmark][th][bs][cs]['size']=mat_sizes[benchmark]\n d[node][benchmark][th][bs][cs]['mflops']=[0]*len(mat_sizes[benchmark])\n d[node][benchmark][th][bs][cs]['counters']=[0]*len(mat_sizes[benchmark])\n\n data_files.sort() \n for filename in data_files: \n f=open(filename, 'r')\n \n results=f.read()\n (node, benchmark, th, runtime, chunk_size, block_size_row, block_size_col, mat_size) = filename.split('/')[-1].replace('.dat','').split('-') \n th=int(th) \n cs=int(chunk_size) \n counters_avg={'idle_rate':[0]*th, 'average_time':[0]*th, 'cumulative_overhead_time':[0]*th, 'cumulative_count':[0]*th, 'average_overhead_time':[0]*th, 'papi_tca':[0]*th, 'papi_tcm':[0]*th}\n s=mat_sizes[benchmark].index(int(mat_size))\n\n bs=block_size_row+'-'+block_size_col\n mflops=float((results.split(' '+mat_size+' ')[1].split('\\n')[0]).strip())\n d[node][benchmark][th][bs][cs]['mflops'][s]=mflops\n s=mat_sizes[benchmark].index(int(mat_size))\n d[node][benchmark][th][bs][cs]['counters'][s]={}\n d[node][benchmark][th][bs][cs]['counters'][s]['ind']=[]\n d[node][benchmark][th][bs][cs]['counters'][s]['avg']={}\n \n reps=results.split('Done')[1:]\n for rep in reps[1:-1]:\n counters_ind={'idle_rate':[0]*th, 'average_time':[0]*th, 'cumulative_overhead_time':[0]*th, 'cumulative_count':[0]*th, 'average_overhead_time':[0]*th,'papi_tca':[0]*th, 'papi_tcm':[0]*th} \n\n rep_lines=rep.split('Initialization')[0].split('\\n') \n for r in rep_lines:\n if 'idle-rate' in r and 'pool' in r:\n idle_rate=float(r.strip().split(',')[-2])/100\n th_num=int(r.strip().split('thread#')[1].split('}')[0])\n counters_ind['idle_rate'][th_num]=idle_rate\n counters_avg['idle_rate'][th_num]+=idle_rate\n elif 'cumulative-overhead' in r and 'pool' in r:\n cumulative_overhead=float(r.strip().split(',')[-2])/1000\n th_num=int(r.strip().split('thread#')[1].split('}')[0])\n counters_ind['cumulative_overhead_time'][th_num]=cumulative_overhead\n counters_avg['cumulative_overhead_time'][th_num]+=cumulative_overhead\n elif 'average-overhead' in r and 'pool' in r:\n 
average_overhead=float(r.strip().split(',')[-2])/1000\n th_num=int(r.strip().split('thread#')[1].split('}')[0])\n counters_ind['average_overhead_time'][th_num]=average_overhead \n counters_avg['average_overhead_time'][th_num]+=average_overhead \n elif 'average,' in r and 'pool' in r:\n average_time=float(r.strip().split(',')[-2])/1000\n th_num=int(r.strip().split('thread#')[1].split('}')[0])\n counters_ind['average_time'][th_num]=average_time\n counters_avg['average_time'][th_num]+=average_time\n elif 'cumulative,' in r and 'pool' in r:\n cumulative=float(r.strip().split(',')[-1])\n th_num=int(r.strip().split('thread#')[1].split('}')[0])\n counters_ind['cumulative_count'][th_num]=cumulative\n counters_avg['cumulative_count'][th_num]+=cumulative\n elif 'PAPI_L2_TCA' in r :\n papi_tca=float(r.strip().split(',')[-1])\n th_num=int(r.strip().split('thread#')[1].split('}')[0])\n counters_ind['papi_tca'][th_num]=papi_tca\n counters_avg['papi_tca'][th_num]+=papi_tca\n elif 'PAPI_L2_TCM' in r :\n papi_tca=float(r.strip().split(',')[-1])\n th_num=int(r.strip().split('thread#')[1].split('}')[0])\n counters_ind['papi_tcm'][th_num]=papi_tca\n counters_avg['papi_tcm'][th_num]+=papi_tca\n\n \n d[node][benchmark][th][bs][cs]['counters'][s]['ind'].append(counters_ind)\n for counter in counters_avg.keys():\n counters_avg[counter]=[counters_avg[counter][thread]/repeats for thread in range(th)]\n d[node][benchmark][th][bs][cs]['counters'][s]['avg']=counters_avg\n\n return (d, chunk_sizes, block_sizes, thr, benchmarks, mat_sizes) \n\n\n\n\npapi_directory='/home/shahrzad/repos/Blazemark/data/matrix/08-07-2019/performance_counters/marvin/'\n(d_hpx, chunk_sizes, block_sizes, thr, benchmarks, mat_sizes)=create_dict_relative_norepeat_counters_onebyone(papi_directory) \n\n","sub_path":"python_scripts/performance_counters.py","file_name":"performance_counters.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
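Every counter value in the script above is recovered with the same two string operations: the worker-thread index comes from the text between 'thread#' and '}', and the numeric field comes by position after splitting on commas. A standalone sketch on a hypothetical HPX counter line (the field layout here is an assumption; real HPX output varies by counter):

```python
# Hypothetical counter line; the exact field layout is an assumption.
line = "/threads{locality#0/worker-thread#3}/idle-rate,1,1048,542,"

th_num = int(line.split("thread#")[1].split("}")[0])  # -> 3
value = float(line.strip().split(",")[-2])            # -> 542.0 (second-to-last field)
print(th_num, value)
```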
+{"seq_id":"33522665","text":"# -*- coding: UTF-8 -*-\nfrom pdoauth.models.User import User\nfrom pdoauth.models.Assurance import Assurance\nimport time\nfrom test.helpers.todeprecate.UserTesting import UserTesting\nfrom test.helpers.PDUnitTest import PDUnitTest, test\nfrom pdoauth.ReportedError import ReportedError\n\nclass UserInfoTest(PDUnitTest, UserTesting):\n\n def setUp(self):\n PDUnitTest.setUp(self)\n self.createLoggedInUser()\n\n @test\n def logged_in_user_can_get_its_info(self):\n resp = self.showUserByCurrentUser('me')\n self.assertEquals(resp.status_code, 200)\n data = self.fromJson(resp)\n self.assertTrue(data.has_key('userid'))\n\n @test\n def userid_returned_is_the_string_one(self):\n resp = self.showUserByCurrentUser('me')\n self.assertEquals(resp.status_code, 200)\n data = self.fromJson(resp)\n userid = data['userid']\n self.assertTrue(isinstance(userid,basestring))\n self.assertTrue('-' in userid)\n\n @test\n def user_info_contains_assurance(self):\n current_user = self.controller.getCurrentUser()\n myEmail = current_user.email\n now = time.time()\n Assurance.new(current_user, 'test', current_user, now)\n Assurance.new(current_user, 'test2', current_user, now)\n Assurance.new(current_user, 'test2', current_user, now)\n resp = self.showUserByCurrentUser('me')\n self.assertEquals(resp.status_code, 200)\n data = self.fromJson(resp)\n self.assertTrue(data.has_key('assurances'))\n assurances = data['assurances']\n assurance = assurances['test'][0]\n self.assertEqual(assurance['assurer'], myEmail)\n self.assertEqual(assurance['user'], myEmail)\n self.assertEqual(assurance['timestamp'], now)\n self.assertEqual(assurance['readable_time'], time.asctime(time.gmtime(now)))\n self.assertEqual(len(assurances['test2']),2)\n\n @test\n def user_info_contains_hash(self):\n current_user = self.controller.getCurrentUser()\n current_user.hash = self.createHash()\n current_user.save()\n resp = self.showUserByCurrentUser('me')\n self.assertEquals(resp.status_code, 200)\n data = self.fromJson(resp)\n self.assertEquals(data['hash'],current_user.hash)\n self.tearDownController()\n\n @test\n def users_with_assurer_assurance_can_get_email_and_digest_for_anyone(self):\n current_user = self.controller.getCurrentUser()\n Assurance.new(current_user, 'assurer', current_user)\n targetuser=self.createUserWithCredentials()\n Assurance.new(targetuser,'test',current_user)\n target = User.getByEmail(self.usercreation_email)\n resp = self.showUserByCurrentUser(target.userid)\n data = self.fromJson(resp)\n assurances = data['assurances']\n self.assertEquals(assurances['test'][0]['assurer'], current_user.email)\n \n @test\n def users_without_assurer_assurance_cannot_get_email_and_digest_for_anyone(self):\n current_user = self.controller.getCurrentUser()\n targetuser=self.createUserWithCredentials()\n Assurance.new(targetuser,'test',current_user)\n target = User.getByEmail(self.usercreation_email)\n with self.assertRaises(ReportedError) as e:\n self.showUserByCurrentUser(target.userid)\n self.assertTrue(e.exception.status,403)\n\n @test\n def users_with_assurer_assurance_can_get_user_by_email(self):\n current_user = self.controller.getCurrentUser()\n Assurance.new(current_user, 'assurer', current_user)\n self.setupRandom()\n self.createUserWithCredentials()\n target = User.getByEmail(self.usercreation_email)\n resp = self.controller.do_get_by_email(target.email)\n self.assertUserResponse(resp)\n\n @test\n def no_by_email_with_wrong_email(self):\n current_user = self.controller.getCurrentUser()\n 
Assurance.new(current_user, 'assurer', current_user)\n self.setupRandom()\n self.createUserWithCredentials()\n target = User.getByEmail(self.usercreation_email)\n with self.assertRaises(ReportedError) as e:\n self.controller.do_get_by_email('u'+target.email)\n self.assertTrue(e.exception.status,404)\n\n @test\n def users_without_assurer_assurance_cannot_get_user_by_email(self):\n user = self.createUserWithCredentials()\n self.assertTrue(user is not None)\n target = User.getByEmail(self.usercreation_email)\n with self.assertRaises(ReportedError) as e:\n self.controller.do_get_by_email(target.email)\n self.assertTrue(e.exception.status,403)\n\n @test\n def users_without_login_cannot_get_user_by_email(self):\n self.controller._testdata.current_user = None\n self.createUserWithCredentials()\n target = User.getByEmail(self.usercreation_email)\n with self.assertRaises(ReportedError) as e:\n self.controller.do_get_by_email(target.email)\n self.assertEquals(e.exception.status,403)\n","sub_path":"src/test/UserInfotest.py","file_name":"UserInfotest.py","file_ext":"py","file_size_in_byte":5015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"483884703","text":"import hashlib\nimport pymysql\nfrom django.shortcuts import render, redirect, reverse, HttpResponse\nfrom bysj import models\nfrom bysj.forms import RegForm, UserInfoForm\nfrom django.utils.safestring import mark_safe\nfrom django.conf import settings\n\n\n\ndef index(request):\n conn = pymysql.connect(\n host='127.0.0.source',\n port=3306,\n user='root',\n password='',\n database='mooc_course',\n charset='utf8'\n )\n\n cursor = conn.cursor(pymysql.cursors.DictCursor)\n sql = \"select * from computer;\"\n cursor.execute(sql) # res我们说是得到的行数,如果这个行数不为零,说明用户输入的用户名和密码存在,如果为0说名存在,你想想对不\n obj = mark_safe(cursor.fetchall())\n print(obj)\n conn.close()\n return render(request,'index.html',{\"obj\":obj})\n# 登陆\ndef login(request):\n if request.method == 'POST':\n user = request.POST.get('username')\n pwd = request.POST.get('password')\n\n md5 = hashlib.md5()\n md5.update(pwd.encode('utf-8'))\n pwd = md5.hexdigest()\n\n obj = models.UserInfo.objects.filter(username=user, password=pwd, is_active=True).first()\n if obj:\n # 登录成功 跳转到主页面\n # 保存当前用户的id\n request.session['pk'] = obj.pk\n # print(obj.pk)\n a = models.UserInfo.objects.filter(id=obj.pk).values('roles').first() # {'roles': 2}\n request.session['qx'] = a['roles']\n\n # 保存用户的权限\n permission_query = obj.roles.filter().values('permission__url',\n 'permission__title',\n 'permission__icon',\n 'permission__is_menu',\n ).distinct()\n # print(permission_query)\n # 权限列表\n permission_list = []\n # 菜单列表\n menu_list = []\n\n for i in permission_query:\n permission_list.append({'url':i['permission__url']})\n if i['permission__is_menu']:\n menu_list.append({'url':i['permission__url'],\n 'title':i['permission__title'],\n 'icon':i['permission__icon'],\n })\n\n request.session[settings.PERMISSION_SESSION_KEY] = list(permission_list) # json序列化\n request.session[settings.MENU_SESSION_KEY] = menu_list\n # print(menu_list)\n return redirect(reverse('index'))\n else:\n # 登录失败\n # return HttpResponse('账号或密码错误')\n return render(request, 'login.html', {'error': '用户名或密码错误'})\n return render(request, 'login.html')\n\n\n# 注册\ndef reg(request):\n # 判断请求方式\n if request.method == 'POST':\n form_obj = RegForm(request.POST)\n # print(request.POST)\n # print(\"?????????????????\", form_obj)\n # 对数据进行校验\n if form_obj.is_valid():\n print('111')\n # 数据正确 插入数据库\n print(form_obj.cleaned_data)\n # form_obj.cleaned_data.pop('re_password')\n # models.UserProfile.objects.create(**form_obj.cleaned_data)\n form_obj.save()\n return redirect(reverse('login'))\n else:\n print(form_obj.errors)\n\n else:\n form_obj = RegForm()\n\n return render(request, 'reg.html', {'form_obj': form_obj})\n\n\n# 注销\ndef logout(request):\n del request.session['pk']\n return redirect(reverse('login'))\n\n# 个人信息修改\ndef ge(request):\n ids = request.session['pk']\n # print(ids)\n\n obj = models.UserInfo.objects.filter(pk=ids).first()\n # print('----',obj)\n if request.method == \"POST\":\n form_obj = UserInfoForm(request.POST, instance=obj)\n print(request.POST.get('name'))\n if form_obj.is_valid():\n print('111')\n form_obj.save() # 保存修改\n # 跳转到展示页面\n return redirect(reverse('ge'))\n else:\n form_obj = UserInfoForm(instance=obj)\n return render(request, 'ge.html', {\"form_obj\":form_obj})\n\n\n\n\n# 用户管理\ndef userinfo_change(request):\n if request.method == \"POST\":\n ids = request.POST.getlist('ids')\n for i in ids:\n models.UserInfo.objects.filter(id=int(i)).delete()\n obj = models.UserInfo.objects.all()\n return render(request, 'userinfo.html',{'obj':obj})\n\n# def 
userinfo_edit(request, uid):\n","sub_path":"bysj/views/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
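login() above authenticates by hashing the submitted password with unsalted MD5 and comparing the digest to the stored one. The hashing step in isolation:

```python
import hashlib

pwd = "s3cret"
digest = hashlib.md5(pwd.encode("utf-8")).hexdigest()
print(digest)  # 32-character hex string, stored in place of the raw password
```

Unsalted MD5 is not safe for real password storage; in a Django project, `django.contrib.auth.hashers.make_password()` / `check_password()` (PBKDF2 by default) would be the usual choice.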
+{"seq_id":"577132126","text":"\nfrom django.contrib.auth.models import User\n\nfrom apps.ventas.models import ArticuloVenta\nfrom apps.articulos.models import Articulo\n\nfrom apps.lib.cajas.gestion import CajaFunctions\nfrom apps.lib.multi_cajas.gestion import CajaMultiFunctions\nfrom apps.lib.articulos.gestion_stock import ArticuloStock\n\nfrom .models import MultiVenta, TarjetaCredito, TipoCuota, DescuentoEfectivo\n\nfrom .serializer import MultiVentaSerializer, TipoCuotaSerializer, TarjetaCreditoSerializer, DescuentoEfectivoSerializer\n\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\n\nclass DescuentoEfectivoViewSet(viewsets.ModelViewSet):\n\n queryset = DescuentoEfectivo.objects.all()\n serializer_class = DescuentoEfectivoSerializer\n\n\nclass TarjetaCreditoViewSet(viewsets.ModelViewSet):\n\n queryset = TarjetaCredito.objects.all()\n serializer_class = TarjetaCreditoSerializer\n\n\nclass TipoCuotaViewSet(viewsets.ModelViewSet):\n\n queryset = TipoCuota.objects.all()\n serializer_class = TipoCuotaSerializer\n\n def get_queryset(self):\n\n queryset = TipoCuota.objects.all()\n tarjeta = self.request.query_params.get('tarjeta', None)\n\n if tarjeta is not None:\n queryset = TipoCuota.objects.filter(tarjeta_credito__id=tarjeta)\n return queryset\n\n\nclass MultiVentaViewSet(viewsets.ViewSet):\n\n queryset = MultiVenta.objects.all()\n serializer_class = MultiVentaSerializer\n\n ''' class ArticuloVenta(models.Model): cantidad articulo precio_venta '''\n '''\n {'token': 'xD6TKS52N5szLrQ4PWWHVTaq0wkBloQcYccUTHPc9LCMamPybBWn81Uuqucgg42M', 'forma_pago': 'Efectivo', 'porcentaje_aumento': 0, 'porcentaje_descuento': 0, 'precio_venta_total': 204}\n '''\n\n '''\n class MultiVenta: fecha, forma_pago, porcentaje_aumento, porcentaje_descuento\n total_credito, total_efectivo, total_debito, articulo_venta,\n precio_venta_total, usuario\n '''\n\n def create(self, request):\n print(request.data.get('articulos'))\n # recorrer articulos vendidos\n funciones_caja = CajaFunctions()\n funciones_multi_caja = CajaMultiFunctions()\n stock_funciones = ArticuloStock()\n articulo_venta_array = []\n\n for articulo in request.data.get('articulos'):\n articulo_object = Articulo.objects.get(pk=articulo.get('id'))\n print('*************************')\n print(articulo.get('descripcion'))\n articulo_venta_object = ArticuloVenta(articulo=articulo_object,\n cantidad=articulo.get('cantidad'),\n precio_venta=articulo.get('precio_venta')\n )\n articulo_venta_object.save()\n stock_funciones.restar_stock(id=articulo_object.id, cantidad=articulo.get('cantidad'))\n print('*************************')\n articulo_venta_array.append(articulo_venta_object)\n print('aca mueroooooo................')\n print(articulo_venta_array)\n print('????????????????????????????????')\n print(request.user.id)\n print('????????????????????????????????')\n usuario_object = User.objects.get(pk=request.user.id)\n print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n print(usuario_object)\n print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n if request.data.get('forma_pago') == 'Efectivo':\n multi_venta = MultiVenta(forma_pago=request.data.get('forma_pago'),\n porcentaje_aumento=request.data.get('porcentaje_aumento'),\n porcentaje_descuento=request.data.get('porcentaje_descuento'),\n total_efectivo=request.data.get('precio_venta_total'),\n precio_venta_total=request.data.get('precio_venta_total'),\n usuario=usuario_object\n )\n funciones_caja.sumar_venta_efectivo(precio_efectivo=request.data.get('precio_venta_total'))\n 
funciones_multi_caja.sumar_venta_efectivo(precio_efectivo=request.data.get('precio_venta_total'), id_user=request.user.id)\n multi_venta.save()\n for a in articulo_venta_array:\n multi_venta.articulo_venta.add(a)\n print(multi_venta)\n\n if request.data.get('forma_pago') == 'Debito':\n print('&&&&&&&&&&&&&&&&&&&&&')\n print('vengo por debito')\n print('&&&&&&&&&&&&&&&&&&&&&')\n multi_venta = MultiVenta(forma_pago=request.data.get('forma_pago'),\n porcentaje_aumento=request.data.get('porcentaje_aumento'),\n porcentaje_descuento=request.data.get('porcentaje_descuento'),\n total_debito=request.data.get('precio_venta_total'),\n precio_venta_total=request.data.get('precio_venta_total'),\n usuario=usuario_object\n )\n print(multi_venta)\n funciones_caja.sumar_venta_debito(precio_debito=request.data.get('precio_venta_total'))\n print('1111111111111111111111111111')\n funciones_multi_caja.sumar_venta_debito(precio_debito=request.data.get('precio_venta_total'), id_user=usuario_object.id)\n print('1111111111111111111111111111')\n multi_venta.save()\n \n print(multi_venta)\n \n for a in articulo_venta_array:\n multi_venta.articulo_venta.add(a)\n\n if request.data.get('forma_pago') == 'Credito':\n multi_venta = MultiVenta(forma_pago=request.data.get('forma_pago'),\n porcentaje_aumento=request.data.get('porcentaje_aumento'),\n porcentaje_descuento=request.data.get('porcentaje_descuento'),\n total_efectivo=request.data.get('precio_venta_total'),\n precio_venta_total=request.data.get('precio_venta_total'),\n usuario=usuario_object\n )\n funciones_caja.sumar_venta_credito(precio_credito=request.data.get('precio_venta_total'))\n funciones_multi_caja.sumar_venta_credito(precio_credito=request.data.get('precio_venta_total'), id_user=request.user.id)\n multi_venta.save()\n for a in articulo_venta_array:\n multi_venta.articulo_venta.add(a)\n\n # multi_venta = MultiVenta(forma_pago=self.request.data.get('forma_pago'),)\n return Response({'status': '200 ok', 'id': str(multi_venta.id)})\n","sub_path":"apps/multiventas/viewset.py","file_name":"viewset.py","file_ext":"py","file_size_in_byte":7016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"455155953","text":"import os\nimport pickle5 as pickle\nimport numpy as np\nimport pandas as pd\nfrom itemrep_inference import topk_distance\n\nimport uvicorn\nfrom fastapi import FastAPI\n\n# artifact_path = 'artifacts'\nartifact_path = '/Users/sparshagarwal/Desktop/work/Recofront/dash/test_app/recommender/model1/artifacts'\n\numap = pickle.load(open(os.path.join(artifact_path,\"usermap.p\"), \"rb\"))\nimap_inverse = pickle.load(open(os.path.join(artifact_path,\"itemmap_inv.p\"), \"rb\"))\ninteractions = pd.read_pickle(os.path.join(artifact_path,\"interactions.p\"))\n\ndef recommend(userid, topk=2):\n uid = umap[userid]\n _temp = interactions.iloc[uid]\n _temp = _temp[_temp!=0]\n _tempdf = pd.DataFrame(columns=['itemid','distance'])\n for row in _temp.iteritems():\n _temp1 = topk_distance(imap_inverse[row[0]])\n _temp2 = pd.DataFrame(list(_temp1.items()), columns=['itemid','distance'])\n _temp2['weight'] = row[1]\n _tempdf = _tempdf.append(_temp2)\n _tempdf = _tempdf[_tempdf['distance']!=0]\n _tempdf['distance']+=1\n _tempdf['score'] = np.sqrt(_tempdf['weight'])/np.log(_tempdf['distance'])\n _tempdf = _tempdf.set_index('itemid')\n _tempdf = _tempdf[['score']].groupby(['itemid']).mean()\n _tempdf = _tempdf.sort_values(by='score', ascending=False)\n return _tempdf.index.values[:topk]\n\napp = FastAPI()\n\n@app.get(\"/recommend/{userid}\")\nasync def read_item(userid: str):\n recs = recommend(userid).tolist()\n return recs\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)","sub_path":"recommender/model1/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"368520344","text":"# Standard Library\nimport datetime\nimport json\nimport pathlib\nimport uuid\n\n# Django\nfrom django.utils import timezone\n\n# Third Party Libraries\nimport pytest\nimport pytz\nimport requests\n\n# RadioFeed\nfrom radiofeed.episodes.factories import EpisodeFactory\n\n# Local\nfrom ..factories import CategoryFactory, PodcastFactory\nfrom ..rss_parser import RssParser, get_categories_dict\nfrom ..rss_parser.date_parser import parse_date\n\npytestmark = pytest.mark.django_db\n\n\n@pytest.fixture(scope=\"function\")\ndef clear_categories_cache():\n get_categories_dict.cache_clear()\n\n\nclass BaseMockResponse:\n def __init__(self, raises=False):\n self.raises = raises\n\n def raise_for_status(self):\n if self.raises:\n raise requests.exceptions.HTTPError()\n\n\nclass MockHeaderResponse(BaseMockResponse):\n def __init__(self):\n super().__init__()\n self.headers = {\n \"ETag\": uuid.uuid4().hex,\n \"Last-Modified\": \"Sun, 05 Jul 2020 19:21:33 GMT\",\n }\n\n\nclass MockResponse(BaseMockResponse):\n def __init__(self, mock_file=None, raises=False):\n super().__init__(raises)\n self.headers = {\n \"ETag\": uuid.uuid4().hex,\n \"Last-Modified\": \"Sun, 05 Jul 2020 19:21:33 GMT\",\n }\n\n if mock_file:\n self.content = open(\n pathlib.Path(__file__).parent / \"mocks\" / mock_file, \"rb\"\n ).read()\n self.raises = raises\n\n def json(self):\n return json.loads(self.content)\n\n\nclass TestParseDate:\n def test_parse_date_if_valid(self):\n dt = datetime.datetime(2020, 6, 19, 16, 58, 3, tzinfo=pytz.UTC)\n assert parse_date(\"Fri, 19 Jun 2020 16:58:03 +0000\") == dt\n\n def test_parse_date_if_no_tz(self):\n dt = datetime.datetime(2020, 6, 19, 16, 58, 3, tzinfo=pytz.UTC)\n assert parse_date(\"Fri, 19 Jun 2020 16:58:03\") == dt\n\n def test_parse_date_if_invalid(self):\n assert parse_date(\"Fri, 33 June 2020 16:58:03 +0000\") is None\n\n\nclass TestRssParser:\n def test_parse(self, mocker, clear_categories_cache):\n mocker.patch(\"requests.head\", return_value=MockHeaderResponse())\n mocker.patch(\"requests.get\", return_value=MockResponse(\"rss_mock.txt\"))\n [\n CategoryFactory(name=name)\n for name in (\n \"Philosophy\",\n \"Science\",\n \"Social Sciences\",\n \"Society & Culture\",\n \"Spirituality\",\n \"Religion & Spirituality\",\n )\n ]\n podcast = PodcastFactory(\n rss=\"https://mysteriousuniverse.org/feed/podcast/\",\n last_updated=None,\n pub_date=None,\n )\n RssParser.parse_from_podcast(podcast)\n podcast.refresh_from_db()\n\n assert podcast.last_updated\n assert podcast.pub_date\n\n assert podcast.title == \"Mysterious Universe\"\n assert podcast.etag\n assert podcast.authors\n assert podcast.extracted_text\n assert podcast.categories.count() == 6\n assert podcast.episode_set.count() == 20\n\n def test_parse_if_already_updated(self, mocker, clear_categories_cache):\n mocker.patch(\"requests.head\", return_value=MockHeaderResponse())\n mocker.patch(\"requests.get\", return_value=MockResponse(\"rss_mock.txt\"))\n\n podcast = PodcastFactory(\n rss=\"https://mysteriousuniverse.org/feed/podcast/\",\n last_updated=timezone.now(),\n pub_date=None,\n )\n\n RssParser.parse_from_podcast(podcast)\n podcast.refresh_from_db()\n\n assert podcast.pub_date is None\n assert podcast.title != \"Mysterious Universe\"\n assert podcast.episode_set.count() == 0\n\n def test_parse_existing_episodes(self, mocker, clear_categories_cache):\n mocker.patch(\"requests.head\", return_value=MockHeaderResponse())\n mocker.patch(\"requests.get\", 
return_value=MockResponse(\"rss_mock.txt\"))\n podcast = PodcastFactory(\n rss=\"https://mysteriousuniverse.org/feed/podcast/\",\n last_updated=None,\n pub_date=None,\n )\n\n EpisodeFactory(podcast=podcast, guid=\"https://mysteriousuniverse.org/?p=168097\")\n EpisodeFactory(podcast=podcast, guid=\"https://mysteriousuniverse.org/?p=167650\")\n EpisodeFactory(podcast=podcast, guid=\"https://mysteriousuniverse.org/?p=167326\")\n\n RssParser.parse_from_podcast(podcast)\n podcast.refresh_from_db()\n assert podcast.episode_set.count() == 20\n","sub_path":"radiofeed/podcasts/tests/test_rss_parser.py","file_name":"test_rss_parser.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"94020528","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Word\n\ncount = 5\ndef index(request):\n\tword_list = Word.objects.order_by('date')\n\t# for i in webpages:\n\t# \tfor top in topic:\n\t# \t\tprint(i.topic == top)\n\tmy_dict = {'word_list':word_list,\n\t\t\t\t'insert_me': \"Mirkan Kilic, came from1 views.py!\"\n\t\t\t\t}\n\treturn render(request, 'Dictionary/index.html', context=my_dict)\n\ndef explain(request):\n\tglobal count\n\tcount -= 1\n\tmy_dict = {\n\t'insert_me': \"Mirkan Kilic, came from1 views.py!\",\n\t\"count\": count,\n\t}\n\t# print(count)\n\tif count == 0:\n\t\tcount = 5\n\t\treturn HttpResponse(\"You explained fast and counted bro XD
Click to return back
(You see this because Count is %d )
\" % (count - count))\n\treturn render(request, 'Dictionary/explain.html', context=(my_dict))\n\ndef other(request):\n\treturn render(request, 'Dictionary/other.html')\n\ndef word_detail(request, pk):\n\tword = get_object_or_404(Word, pk=pk)\n\treturn render(request, 'Dictionary/word_detail.html', {'word':word})","sub_path":"Dictionary/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"102276312","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nBase management command that provides common functionality for the other commands in this app.\n\"\"\"\nimport codecs\nimport locale\nimport logging\nimport os\nimport requests\nimport sys\nfrom re import sub\nfrom email.utils import parsedate_tz, mktime_tz\nfrom datetime import datetime\nfrom django.utils import timezone\nfrom django.utils.termcolors import colorize\nfrom django.core.management.base import BaseCommand\nfrom calaccess_raw import get_data_directory\nfrom calaccess_raw.models import RawDataVersion\nlogger = logging.getLogger(__name__)\n\n\nclass CalAccessCommand(BaseCommand):\n \"\"\"\n Base management command that provides common functionality for the other commands in this app.\n \"\"\"\n url = 'https://campaignfinance.cdn.sos.ca.gov/dbwebexport.zip'\n\n def handle(self, *args, **options):\n \"\"\"\n The generic handler function.\n\n Any command subclassing this object should implement its own\n handle method, as is standard in Django, and run this method\n via a super call to inherit its functionality.\n \"\"\"\n self.set_global_options(options)\n\n def set_global_options(self, options):\n \"\"\"\n Set options to all commands.\n \"\"\"\n # Set global options\n self.verbosity = options.get(\"verbosity\")\n self.no_color = options.get(\"no_color\")\n\n # set up data directories\n self.data_dir = get_data_directory()\n self.tsv_dir = os.path.join(self.data_dir, 'tsv')\n self.csv_dir = os.path.join(self.data_dir, 'csv')\n\n os.path.exists(self.data_dir) or os.makedirs(self.data_dir)\n os.path.exists(self.tsv_dir) or os.makedirs(self.tsv_dir)\n os.path.exists(self.csv_dir) or os.makedirs(self.csv_dir)\n\n # set path where zip will be downloaded\n self.download_dir = os.path.join(self.data_dir, 'download')\n self.zip_path = os.path.join(\n self.download_dir,\n self.url.split('/')[-1]\n )\n\n # Start the clock\n self.start_datetime = datetime.now()\n\n def parse_imf_datetime_str(self, datetime_str):\n \"\"\"\n Parse a string containing a datetime value in Internet Message Format.\n\n See Section 7.1.1.1 of RFC 7231:\n https://tools.ietf.org/html/rfc7231.html#section-7.1.1.1\n\n Return a utc datetime object.\n \"\"\"\n datetime_tuple = parsedate_tz(datetime_str)\n timestamp = mktime_tz(datetime_tuple)\n datetime_obj = datetime.fromtimestamp(timestamp, timezone.utc)\n return datetime_obj\n\n def get_download_metadata(self):\n \"\"\"\n Returns a dict with metadata about the current CAL-ACCESS snapshot.\n \"\"\"\n response = requests.head(self.url)\n logger.debug(\n 'Response status {0.status_code} ({0.reason}) from HEAD request.'.format(response)\n )\n if not response.ok:\n response.raise_for_status()\n # content length is a string, need to convert\n try:\n # long int type is big enough for double the current size of the zip\n length = long(response.headers['content-length'])\n except NameError:\n # in py3, no long(), instead int will suffice\n length = int(response.headers['content-length'])\n return {\n # should prob not call int here, can this remain a string until writing to db?\n 'content-length': length,\n 'last-modified': response.headers['last-modified'],\n 'etag': response.headers['etag'],\n 'server': response.headers['server'],\n }\n\n def get_or_create_version(self, expected_size, release_datetime):\n \"\"\"\n Get or create a RawDataVersion.\n\n Create a new one only if:\n * expected_size is different from the latest version; or\n * release_datetime is five minutes later than 
latest_version's.\n\n Return a tuple of (object, created), where created is a boolean\n specifying whether an object was created.\n \"\"\"\n obj = None\n try:\n latest = RawDataVersion.objects.latest('release_datetime')\n except RawDataVersion.DoesNotExist:\n obj = RawDataVersion.objects.create(\n release_datetime=release_datetime,\n expected_size=expected_size\n )\n created = True\n else:\n diff = release_datetime - latest.release_datetime\n if latest.expected_size == expected_size and diff.total_seconds() < 300:\n obj = latest\n created = False\n else:\n obj = RawDataVersion.objects.create(\n release_datetime=release_datetime,\n expected_size=expected_size,\n )\n created = True\n return obj, created\n\n #\n # Logging methods\n #\n\n def header(self, string):\n \"\"\"\n Writes out a string to stdout formatted to look like a header.\n \"\"\"\n logger.debug(string)\n if not self.no_color:\n string = colorize(string, fg=\"cyan\", opts=(\"bold\",))\n self.stdout.write(string)\n\n def log(self, string):\n \"\"\"\n Writes out a string to stdout formatted to look like a standard line.\n \"\"\"\n logger.debug(string)\n if not self.no_color:\n string = colorize(\"%s\" % string, fg=\"white\")\n self.stdout.write(string)\n\n def success(self, string):\n \"\"\"\n Writes out a string to stdout formatted green to communicate success.\n \"\"\"\n logger.debug(string)\n if not self.no_color:\n string = colorize(string, fg=\"green\")\n self.stdout.write(string)\n\n def failure(self, string):\n \"\"\"\n Writes out a string to stdout formatted red to communicate failure.\n \"\"\"\n logger.debug(string)\n if not self.no_color:\n string = colorize(string, fg=\"red\")\n self.stdout.write(string)\n\n def duration(self):\n \"\"\"\n Calculates how long the command has been running and writes it to stdout.\n \"\"\"\n duration = datetime.now() - self.start_datetime\n self.stdout.write('Duration: {}'.format(str(duration)))\n logger.debug('Duration: {}'.format(str(duration)))\n\n def confirm_proceed(self, prompt):\n \"\"\"\n Prompts the user for yes/no confirmation to proceed.\n \"\"\"\n # Ensure stdout can handle Unicode data: http://bit.ly/1C3l4eV\n locale_encoding = locale.getpreferredencoding()\n old_stdout = sys.stdout\n sys.stdout = codecs.getwriter(locale_encoding)(sys.stdout)\n\n # Send the confirmation prompt out to the user\n user_input = input(prompt)\n\n confirm = None\n\n while confirm is None:\n if user_input.lower() in ['y', 'yes']:\n confirm = True\n elif user_input.lower() in ['n', 'no']:\n confirm = False\n else:\n user_input = input(\"Invalid input. Please type 'yes', 'no', 'y' or 'n':\\n\")\n\n # Set things back to the way they were before continuing.\n sys.stdout = old_stdout\n\n # Pass back what the user typed\n return confirm\n\n def __str__(self):\n return sub(r'(.+\\.)*', '', self.__class__.__module__)\n","sub_path":"calaccess_raw/management/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"163039386","text":"'''\r\nСтворіть масив А [1..8] за допомогою генератора випадкових чисел з\r\nелементами від -10 до 10 і виведіть його на екран. Підрахуйте кількість від’ємних\r\nелементів масиву.\r\nКотулич К.А. 122А\r\n'''\r\nfrom random import randint # вводимо рандом\r\nimport numpy as np # вводимо бібліотеку для масивів\r\nx = np.array([randint(-10,10) for i in range(8)]) # за допомогою генератора списків створюємо масив\r\na = list() # створюємо пустий список\r\nfor i in range(len(x)): # перебираємо кожен елемент масиву\r\n if x[i]<0: # умова для визначення елементів що менше нуля\r\n a.append(x[i]) # додаємо потрібні елементи в пустий список\r\nprint(f'Масив: {x}\\nКількість від’ємних елементів: {len(a)}') # виводимо довжину заповненного спискуєєє","sub_path":"kol6.py","file_name":"kol6.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"590048522","text":"from panda3d.core import NodePath\nfrom panda3d.core import Point3\nfrom panda3d.core import LineSegs\n\nfrom wecs import panda3d as wp3d\nfrom wecs import mechanics\nfrom wecs.aspects import Aspect\nfrom wecs.panda3d import aspects\nfrom mapedit.helpers import draw_grid\nfrom mapedit import mapedit\nfrom mapedit.cursor import cursor\n\nsystem_types = [\n wp3d.ManageGeometry,\n mechanics.DetermineTimestep,\n wp3d.UpdateCharacter,\n mapedit.cursor.Cursoring, # Horizontal movement with optional grid-snapping.\n wp3d.ExecuteMovement,\n wp3d.UpdateCameras,\n mapedit.mapeditor.UpdateMapEditor, # Handles Creator and Tilemap (to be split up later)\n]\n\n# empty scene with a grid.\ngridsize = 500 # Size of grid in cells\ncellsize = 2 # Size of cells in meters\naspects.empty_scene.add(\n base.ecs_world.create_entity(),\n overrides = {\n panda3d.Model: dict(node=draw_grid(gridsize, gridsize, cellsize)),\n }\n)\n\n\n# cursor entity.\ncursor_node = NodePath(\"cursor\")\ncursor_model = loader.loadModel(\"../../assets/cursor.bam\")\ncursor_model.set_scale(cellsize)\ncursor_model.reparent_to(cursor_node)\ncursor.add(\n base.ecs_world.create_entity(),\n overrides={\n panda3d.ThirdPersonCamera: dict(distance=15.0, focus_height=0),\n panda3d.TurntableCamera: dict(pitch=-90),\n panda3d.CursorMovement: dict(move_snap=cellsize),\n panda3d.Model: dict(node=cursor_node),\n panda3d.Position: dict(value=Point3(gridsize/2, gridsize/2, 0)),\n }\n)\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"179290351","text":"from google.appengine.ext import db\nimport os\nfrom google.appengine.api import users\nfrom google.appengine.ext.webapp import template\n\nclass Expense(db.Model):\n user = db.UserProperty()\n datetime = db.DateTimeProperty()\n amount = db.FloatProperty()\n category = db.CategoryProperty()\n comment = db.StringProperty()\n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n expense_query = Expense.all().order('-datetime')\n expenses = expense_query.fetch(100)\n\n if users.get_current_user():\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n\n template_values = {\n 'expenses': expenses,\n 'url': url,\n 'url_linktext': url_linktext,\n }\n\n path = os.path.join(os.path.dirname(__file__), 'index.html')\n self.response.out.write(template.render(path, template_values))\n\n\n","sub_path":"python/src/expense.py","file_name":"expense.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"416616419","text":"import pandas as pd\r\nimport plotly.express as px # (version 4.7.0)\r\nimport plotly.graph_objects as go\r\nimport pycountry\r\nimport numpy as np\r\nimport dash # (version 1.12.0) pip install dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport dash_bootstrap_components as dbc\r\nimport dash_table\r\nfrom app import app\r\n\r\ncard1 = dbc.Card(\r\n [\r\n dbc.CardImg(src=\"/assets/world.png\", top=True),\r\n dbc.CardBody(\r\n [\r\n html.H4(\"COVID-19 Worldwide\", className=\"card-title\"),\r\n html.P(\r\n \"The Worldwide visualisations page shows you different \"\r\n \"COVID 19 trends across the world.\",\r\n className=\"card-text\",\r\n ),\r\n dbc.Button(\"Worldwide visualisations\", color=\"primary\", href='/apps/dashboard'),\r\n ]\r\n ),\r\n ],\r\n\r\n)\r\ncard2 = dbc.Card(\r\n [\r\n dbc.CardImg(src=\"/assets/country.jpg\", top=True),\r\n dbc.CardBody(\r\n [\r\n html.H4(\"Country wise\", className=\"card-title\"),\r\n html.P(\r\n \"The country-wise visualisations show you COVID 19 \"\r\n \"trends as per your selected country.\",\r\n className=\"card-text\",\r\n ),\r\n dbc.Button(\"Countrywise visualisations\", color=\"primary\", href='/apps/contact_tracing_graphs'),\r\n ]\r\n ),\r\n ],\r\n\r\n)\r\ncard3 = dbc.Card(\r\n [\r\n dbc.CardImg(src=\"/assets/virus.jpg\", top=True),\r\n dbc.CardBody(\r\n [\r\n html.H4(\"SIR model\", className=\"card-title\"),\r\n html.P(\r\n \"The SEIR model allows you to visualize how COVID 19 \"\r\n \"progresses.\",\r\n className=\"card-text\",\r\n ),\r\n dbc.Button(\"SIR model\", color=\"primary\", href='/apps/sirmodel'),\r\n ]\r\n ),\r\n ],\r\n\r\n)\r\ndata = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'\r\ndata2 = \"https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv\"\r\ndf = pd.read_csv(data)\r\ntotal_cases = df.loc[df.location == 'World', ['total_cases']].max()\r\nnew_cases = df.loc[df.location == 'World', ['new_cases']].max()\r\ntotal_deaths = df.loc[df.location == 'World', ['total_deaths']].max()\r\nnew_deaths = df.loc[df.location == 'World', ['new_deaths']].max()\r\nlayout = html.Div([\r\n dbc.Container([\r\n dbc.Row([\r\n dbc.Col(html.H1(children='Welcome to our COVID 19 Dashboard!'), className=\"mb-2\")\r\n ]),\r\n dbc.Row(\r\n [dbc.Col(dbc.Alert(\"Total number of people affected by COVID:\", color=\"primary\"), width=4),\r\n dbc.Col(dbc.Alert(html.Div(\r\n id='placeholder', children=\"...\"\r\n ), color=\"danger\"), width=2)]), dcc.Interval(\r\n id='serverside-interval',\r\n interval=5000,\r\n n_intervals=1\r\n ),\r\n dbc.Row(\r\n [dbc.Col(dbc.Alert(\"Total number of deaths due to COVID:\", color=\"primary\"), width=4),\r\n dbc.Col(dbc.Alert(html.Div(\r\n id='placeholder2', children=\"...\"\r\n ), color=\"danger\"), width=2)]), dcc.Interval(\r\n id='serverside-interval2',\r\n interval=5000,\r\n n_intervals=1\r\n ),\r\n dbc.Row([\r\n dbc.Col(html.H4(children='Aim of this Dashboard'), className=\"mb-2\")\r\n ]),\r\n dbc.Row([\r\n dbc.Col(dbc.Card(dbc.CardBody('One of the most severe public health crises our world has faced, '\r\n 'the coronavirus disease, has not only posed a serious threat to human '\r\n 'life, but has significantly impacted our social and economic activity. 
In '\r\n 'order to survive such a pandemic, we have to effectively control the '\r\n 'infectious disease, this can be achieved through many interventions such '\r\n 'as quarantining and travelling restrictions. Contact tracing is one of the '\r\n 'most effective ways of identifying such a situation as the virus has an '\r\n 'incubation period which may allow the spread through asymptomatic '\r\n 'infection while remaining undetected. Despite this, there seems to be no '\r\n 'widespread integration of digital contact tracing strategies across the '\r\n 'world. We aim to equip the population with a better understanding of '\r\n 'epidemiology so that they may participate in reducing and preventing the '\r\n 'spread of the disease. We aim to achieve this with the help of '\r\n 'visualizations, information and simulations to help educate the general '\r\n 'public to the severity of COVID-19.')))\r\n ]),\r\n html.Br(), html.Br(),\r\n dbc.Row([dbc.Col(card1, width=4),\r\n dbc.Col(card2, width=4), dbc.Col(card3, width=4)] )\r\n\r\n ])])\r\n\r\n\r\n@app.callback(\r\n Output('placeholder', 'children'),\r\n Input('serverside-interval', 'n_intervals'),\r\n)\r\ndef update_cases(n_intervals):\r\n data1 = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'\r\n df1 = pd.read_csv(data1)\r\n total_cases1 = df1.loc[df1.location == 'World', ['total_cases']].max()\r\n return total_cases1\r\n\r\n\r\n@app.callback(\r\n Output('placeholder2', 'children'),\r\n Input('serverside-interval2', 'n_intervals'),\r\n)\r\ndef update_deaths(n_intervals):\r\n data3 = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'\r\n df2 = pd.read_csv(data3)\r\n total_deaths2 = df2.loc[df2.location == 'World', ['total_deaths']].max()\r\n return total_deaths2\r\n","sub_path":"apps/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":6241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"492111580","text":"#!/usr/bin/env python\n# vim:set et ts=4 sw=4 fileencoding=utf-8:\n# @Author: djluo\n\nfrom clouds.backend import MachinePool\n\n\nif __name__ == '__main__':\n import sys\n import logging\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n stream = logging.StreamHandler(sys.stdout)\n stream.setLevel(logging.DEBUG)\n logger.addHandler(stream)\n\n # 获取主机实例\n MP = MachinePool()\n print(MP.Pool)\n","sub_path":"FusionCli/clouds-new/old/examples/lists_machine.py","file_name":"lists_machine.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"139151323","text":"minion_id=[3,4,5,2,3,4,3,6,4,3,5,2,2,2,2]\n\n\ndef answer (data,n):\n minion_set=sorted(set(data))\n for i in minion_set:\n if (i<=n):\n newlist=filter(lambda a: a != i, data)\n data=newlist\n return (data)\n\nprint(answer(minion_id,4))\n","sub_path":"GoogleChallenge/minion.py","file_name":"minion.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"647175434","text":"from __future__ import print_function\nimport sys\n\nfrom __main__ import vtk, qt, ctk, slicer\nimport vtk.util.numpy_support\nfrom MultiVolumeImporter.Helper import Helper\n\n#\n# MultiVolumeImporter\n#\n\nclass MultiVolumeImporter:\n def __init__(self, parent):\n parent.title = \"MultiVolumeImporter\"\n parent.categories = [\"Work in Progress.MultiVolume Support\"]\n parent.contributors = [\"Andrey Fedorov (SPL, BWH)\",\\\n \"Jean-Christophe Fillion-Robin (Kitware)\", \\\n \"Julien Finet (Kitware)\", \\\n \"Steve Pieper (SPL, BWH)\",\\\n \"Ron Kikinis (SPL, BWH)\"]\n\n parent.index = 0\n parent.helpText = \"\"\"\n Support of MultiVolume import in Slicer4\n \"\"\"\n # MultiVolumeExplorer registers the MRML node type this module is using\n parent.dependencies = ['MultiVolumeExplorer']\n parent.acknowledgementText = \"\"\"\n Development of this module was supported in part by the following grants:\n P41EB015898, P41RR019703, R01CA111288 and U01CA151261.\n \"\"\"\n self.parent = parent\n\n#\n# qMultiVolumeImporterWidget\n#\n\nclass MultiVolumeImporterWidget:\n def __init__(self, parent = None):\n if not parent:\n self.parent = slicer.qMRMLWidget()\n self.parent.setLayout(qt.QVBoxLayout())\n self.parent.setMRMLScene(slicer.mrmlScene)\n else:\n self.parent = parent\n self.layout = self.parent.layout()\n if not parent:\n self.setup()\n self.parent.show()\n\n def setup(self):\n # Instantiate and connect widgets ...\n\n # Collapsible button\n dummyCollapsibleButton = ctk.ctkCollapsibleButton()\n dummyCollapsibleButton.text = \"Basic settings\"\n self.layout.addWidget(dummyCollapsibleButton)\n dummyFormLayout = qt.QFormLayout(dummyCollapsibleButton)\n\n # add input directory selector\n label = qt.QLabel('Input directory:')\n self.__fDialog = ctk.ctkDirectoryButton()\n self.__fDialog.caption = 'Input DICOM directory'\n dummyFormLayout.addRow(label, self.__fDialog)\n\n label = qt.QLabel('Output node:')\n self.__mvSelector = slicer.qMRMLNodeComboBox()\n self.__mvSelector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.__mvSelector.setMRMLScene(slicer.mrmlScene)\n self.__mvSelector.connect('mrmlSceneChanged(vtkMRMLScene*)', self.onMRMLSceneChanged)\n self.__mvSelector.addEnabled = 1\n dummyFormLayout.addRow(label, self.__mvSelector)\n\n label = qt.QLabel('Input data type:')\n self.__modeSelector = qt.QComboBox()\n\n # parameter tuples: long title (for the selector), dicom tag, units, short title\n self.__processingModes = []\n self.__processingModes.append(['DICOM 4D DCE MRI (GE)', '0018|1060', 'ms', 'DCE'])\n self.__processingModes.append(['DICOM variable TE MRI (GE)', '0018|0081', 'ms', 'vTE'])\n self.__processingModes.append(['DICOM variable FA MRI (GE)', '0018|1314', 'deg', 'vFA'])\n self.__processingModes.append(['DICOM variable TR MRI (GE)', '0018|0080', 'ms', 'vTR'])\n self.__processingModes.append(['User-defined DICOM', '??', '??', 'MultiVolumeDICOM'])\n self.__processingModes.append(['User-defined non-DICOM', 'N/A', '??', 'MultiVolume'])\n\n for p in self.__processingModes:\n print(\"Processing mode found: %s\" % p)\n self.__modeSelector.addItem(p[0])\n self.__modeSelector.currentIndex = 0\n\n self.__modeSelector.connect('currentIndexChanged(int)', self.onProcessingModeChanged)\n dummyFormLayout.addRow(label, self.__modeSelector)\n\n # Collapsible button\n dummyCollapsibleButton = ctk.ctkCollapsibleButton()\n dummyCollapsibleButton.text = \"Advanced settings\"\n dummyCollapsibleButton.collapsed = 1\n self.layout.addWidget(dummyCollapsibleButton)\n dummyFormLayout 
= qt.QFormLayout(dummyCollapsibleButton)\n self.__advancedFrame = dummyCollapsibleButton\n\n # label name and values\n label = qt.QLabel('DICOM tag:')\n label.toolTip = 'DICOM tag used to separate individual volumes in the series'\n self.__dicomTag = qt.QLineEdit()\n self.__dicomTag.text = \"\"\n dummyFormLayout.addRow(label, self.__dicomTag)\n\n label = qt.QLabel('Frame identifying units:')\n self.__veLabel = qt.QLineEdit()\n dummyFormLayout.addRow(label, self.__veLabel)\n\n label = qt.QLabel('Initial value:')\n self.__veInitial = qt.QDoubleSpinBox()\n self.__veInitial.value = 0\n dummyFormLayout.addRow(label, self.__veInitial)\n\n label = qt.QLabel('Step:')\n self.__veStep = qt.QDoubleSpinBox()\n self.__veStep.value = 1\n dummyFormLayout.addRow(label, self.__veStep)\n\n importButton = qt.QPushButton(\"Import\")\n importButton.toolTip = \"Import the contents of the DICOM directory as a MultiVolume\"\n self.layout.addWidget(importButton)\n importButton.connect('clicked(bool)', self.onImportButtonClicked)\n\n self.__status = qt.QLabel('Status: Idle')\n self.layout.addWidget(self.__status)\n\n # Add vertical spacer\n self.layout.addStretch(1)\n\n def enter(self):\n self.onProcessingModeChanged(self.__modeSelector.currentIndex)\n\n def onProcessingModeChanged(self, idx):\n nModes = len(self.__processingModes)\n mode = self.__processingModes[idx]\n self.__advancedFrame.collapsed = 0\n if idx < nModes-2:\n self.__advancedFrame.enabled = 0\n else:\n self.__advancedFrame.enabled = 1\n if idx == nModes-1:\n self.__dicomTag.enabled = 0\n else:\n self.__dicomTag.enabled = 1\n self.__dicomTag.text = mode[1]\n self.__veLabel.text = mode[2]\n self.__veInitial.value = 0\n self.__veStep.value = 1\n\n def onMRMLSceneChanged(self, mrmlScene):\n self.__mvSelector.setMRMLScene(slicer.mrmlScene)\n return\n\n def onImportButtonClicked(self):\n # check if the output container exists\n mvNode = self.__mvSelector.currentNode()\n if mvNode == None:\n self.__status.text = 'Status: Select output node!'\n return\n\n modeIdx = self.__modeSelector.currentIndex\n processingMode = self.__processingModes[modeIdx]\n\n # There are two options:\n # 1. DICOM series in a directory, with either predefined or custom parse tag\n # 2. 
Series of frames alpha-ordered, all in the input directory\n # Assume here that the last mode in the list is for parsing a list of\n # non-DICOM frames\n\n fileNames = [] # file names on disk\n frameList = [] # frames as MRMLScalarVolumeNode's\n frameFolder = \"\"\n volumeLabels = vtk.vtkDoubleArray()\n\n if modeIdx < len(self.__processingModes)-1:\n # DICOM series\n\n # get logic\n logic = slicer.modules.multivolumeexplorer.logic()\n\n # create a clean temporary directory\n tmpDir = slicer.app.settings().value('Modules/TemporaryDirectory')\n if not os.path.exists(tmpDir):\n os.mkdir(tmpDir)\n tmpDir = tmpDir+'/MultiVolumeImporter'\n if not os.path.exists(tmpDir):\n os.mkdir(tmpDir)\n else:\n # clean it up\n print(\"tmpDir: %s\" % tmpDir)\n oldFileNames = os.listdir(tmpDir)\n for f in oldFileNames:\n print(\"%s will be unlinked\" % f)\n os.unlink(tmpDir+'/'+f)\n\n nFrames = logic.ProcessDICOMSeries(self.__fDialog.directory, tmpDir, self.__dicomTag.text, volumeLabels)\n\n self.__status.text = 'Series processed OK, '+str(nFrames)+' volumes identified'\n\n print(\"Location of files: %s\" % tmpDir)\n for f in os.listdir(tmpDir):\n if not f.startswith('.'):\n fileNames.append(f)\n fileNames.sort()\n\n frameFolder = tmpDir\n\n else:\n # each frame is saved as a separate volume\n for f in os.listdir(self.__fDialog.directory):\n if not f.startswith('.'):\n fileNames.append(f)\n fileNames.sort()\n\n frameFolder = self.__fDialog.directory\n nFrames = len(fileNames)\n volumeLabels.SetNumberOfTuples(nFrames)\n volumeLabels.SetNumberOfComponents(1)\n volumeLabels.Allocate(nFrames)\n for i in range(len(fileNames)):\n frameId = self.__veInitial.value+self.__veStep.value*i\n volumeLabels.SetComponent(i, 0, frameId)\n\n # read the first frame to get the extent for DWI node\n fullName = frameFolder+'/'+fileNames[0]\n volumesLogic = slicer.modules.volumes.logic()\n frame = volumesLogic.AddArchetypeVolume(fullName, processingMode[3]+' Frame 0', 0)\n frameImage = frame.GetImageData()\n frameExtent = frameImage.GetExtent()\n frameSize = frameExtent[1]*frameExtent[3]*frameExtent[5]\n\n nFrames = len(fileNames)\n mvImage = vtk.vtkImageData()\n mvImage.SetExtent(frameExtent)\n mvImage.SetNumberOfScalarComponents(nFrames)\n\n mvImage.AllocateScalars()\n mvImageArray = vtk.util.numpy_support.vtk_to_numpy(mvImage.GetPointData().GetScalars())\n\n mat = vtk.vtkMatrix4x4()\n frame.GetRASToIJKMatrix(mat)\n mvNode.SetRASToIJKMatrix(mat)\n frame.GetIJKToRASMatrix(mat)\n mvNode.SetIJKToRASMatrix(mat)\n\n self.annihilateScalarNode(frame)\n\n for frameId in range(0,nFrames):\n fullName = frameFolder+'/'+fileNames[frameId]\n print(\"Processing frame %d: %s\" % (frameId, fullName))\n frame = volumesLogic.AddArchetypeVolume(fullName, 'Frame'+str(frameId), 0)\n frameImage = frame.GetImageData()\n frameImageArray = vtk.util.numpy_support.vtk_to_numpy(frameImage.GetPointData().GetScalars())\n mvImageArray.T[frameId] = frameImageArray\n self.annihilateScalarNode(frame)\n\n mvDisplayNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLMultiVolumeDisplayNode')\n mvDisplayNode.SetScene(slicer.mrmlScene)\n slicer.mrmlScene.AddNode(mvDisplayNode)\n mvDisplayNode.SetReferenceCount(mvDisplayNode.GetReferenceCount()-1)\n mvDisplayNode.SetDefaultColorMap()\n\n mvNode.SetAndObserveDisplayNodeID(mvDisplayNode.GetID())\n mvNode.SetAndObserveImageData(mvImage)\n mvNode.SetNumberOfFrames(nFrames)\n slicer.mrmlScene.AddNode(mvNode)\n mvNode.SetReferenceCount(mvNode.GetReferenceCount()-1)\n\n mvNode.SetLabelArray(volumeLabels)\n 
mvNode.SetLabelName(self.__veLabel.text)\n print(\"MultiVolume node setup complete !\")\n\n Helper.SetBgFgVolumes(mvNode.GetID(),None)\n\n # leave no trace of the temporary nodes\n def annihilateScalarNode(self, node):\n dn = node.GetDisplayNode()\n sn = node.GetStorageNode()\n node.SetAndObserveDisplayNodeID(None)\n node.SetAndObserveStorageNodeID(None)\n slicer.mrmlScene.RemoveNode(dn)\n slicer.mrmlScene.RemoveNode(sn)\n slicer.mrmlScene.RemoveNode(node)\n\n","sub_path":"MultiVolumeImporter.py","file_name":"MultiVolumeImporter.py","file_ext":"py","file_size_in_byte":10243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"8732327","text":"import requests\nimport json\nimport csv\n\n################ LOGIN ######################\n# This uses the DevNet Sandbox - Replace with appropriate credentials\n# Use env file for more security :)\nurl = \"https://sandboxdnac2.cisco.com/dna/system/api/v1/auth/token\"\n\nuser = \"devnetuser\"\npw = \"Cisco123!\"\n\nresponse = requests.post(url, auth=(user, pw)).json()\ntoken = response[\"Token\"]\n\n############ GET Site HEALTH STATS ################\n\nurl = \"https://sandboxdnac2.cisco.com/dna/intent/api/v1/site-health\"\n\nquerystring = {\"timestamp\": \"\"}\n\nheaders = {\n \"x-auth-token\": token,\n \"Accept\": \"*/*\",\n}\n\nresponse = requests.get(url, headers=headers, params=querystring).json()\n\n\nsites = response[\"response\"]\n\n# Create a new CSV to export the site data to\nwith open(\"Site-Clients.csv\", \"w\", newline=\"\") as file:\n writer = csv.writer(file)\n writer.writerow(\n [\"Site\", \"Wireless Clients\", \"Wired Clients\", \"Total Number of Clients\"]\n )\n\n for site in sites:\n # Parse clients per site\n # Print to cmdline\n # Export to a csv as well.\n print(f\"Site: {site['siteName']}\")\n print(f\"Wireless Clients: {site['numberOfWirelessClients']}\")\n print(f\"Wireless Clients: {site['numberOfWiredClients']}\")\n print(f\"Wireless Clients: {site['numberOfClients']}\")\n print(\"-----------------------------------\")\n writer.writerow(\n [\n site[\"siteName\"],\n site[\"numberOfWirelessClients\"],\n site[\"numberOfWiredClients\"],\n site[\"numberOfClients\"],\n ]\n )\n","sub_path":"DNA-Site-Health.py","file_name":"DNA-Site-Health.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"221887993","text":"import collections\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport unicodedata\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n\n\ndef load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\n\ndef convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n return [vocab[item] for item in items]\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n\n\nclass FullTokenizer:\n \"\"\"Runs end-to-end tokenization.\"\"\"\n\n def __init__(self, vocab_file, do_lower_case=True):\n self.vocab = load_vocab(vocab_file)\n self.inv_vocab = {v: k for k, v in self.vocab.items()}\n self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n\n def tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n return split_tokens\n\n def convert_tokens_to_ids(self, tokens):\n return convert_by_vocab(self.vocab, tokens)\n\n def convert_ids_to_tokens(self, ids):\n return convert_by_vocab(self.inv_vocab, ids)\n\n\nclass BasicTokenizer:\n \"\"\"Runs basic tokenization (punctuation splitting, lower casing, etc.).\"\"\"\n\n def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n @staticmethod\n def _run_strip_accents(text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n @staticmethod\n def _run_split_on_punc(text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n @staticmethod\n def _clean_text(text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n 
cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n\nclass WordpieceTokenizer:\n \"\"\"Runs WordPiece tokenization.\"\"\"\n\n def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=200):\n self.vocab = vocab\n self.unk_token = unk_token\n self.max_input_chars_per_word = max_input_chars_per_word\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens\n\n\ndef _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False\n\n\ndef _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if 33 <= cp <= 47 or 58 <= cp <= 64 or 91 <= cp <= 96 or 123 <= cp <= 126:\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False\n\n\nclass InputExample:\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text):\n self.guid = guid # Unique id for the example\n self.text = text # string. 
The untokenized text of the sequence.\n\n\ndef convert_single_example(tokenizer, example, max_seq_length=256):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n tokens_a = tokenizer.tokenize(example.text)\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0: (max_seq_length - 2)]\n\n tokens = [\"[CLS]\"]\n segment_ids = [0]\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n return input_ids, input_mask, segment_ids\n\n\ndef convert_examples_to_features(tokenizer, examples, max_seq_length=256):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n input_ids, input_masks, segment_ids = [], [], []\n for example in examples:\n input_id, input_mask, segment_id = convert_single_example(tokenizer, example, max_seq_length)\n input_ids.append(input_id)\n input_masks.append(input_mask)\n segment_ids.append(segment_id)\n return np.array(input_ids), np.array(input_masks), np.array(segment_ids)\n\n\ndef convert_text_to_examples(texts):\n \"\"\"Create input_examples\"\"\"\n return [InputExample(guid=None, text=\" \".join(text)) for text in texts]\n\n\ndef get_bert_features(texts, bert_path, max_seq_length, do_lower_case=True):\n # Instantiate tokenizer\n vocab_file = os.path.join(bert_path, 'vocab.txt')\n tokenizer = FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n # Convert data to InputExample format\n train_examples = convert_text_to_examples(texts)\n\n # Convert to features\n input_ids, input_masks, segment_ids = convert_examples_to_features(tokenizer, train_examples, max_seq_length)\n return input_ids, input_masks, segment_ids\n","sub_path":"src/bert/tokenization.py","file_name":"tokenization.py","file_ext":"py","file_size_in_byte":10007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"525428685","text":"import os\nimport requests\nimport sys\nimport shutil\n\nurl = 'http://0.0.0.0:8000/'\n\ndef request_post(file_name):\n\n with open(file_name, 'rb') as img:\n name_img= os.path.basename(file_name)\n files= {'file': (name_img,img,'multipart/form-data',{'Expires': '0'}) }\n \n with requests.Session() as s:\n r = s.post(url,files=files)\n print(r.status_code)\n\ndef main(): \n\tfor i in range(1):\n request_get('')\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"tfinal/request2.py","file_name":"request2.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"192946694","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport cv2\nimport numpy as np\nimport glob\n\ndef video(name):\n\tnumber = 1\n\timg_array = []\n\t#b = sorted(glob.glob('*.png'))\n\tb = sorted(glob.glob('*.jpg'), key=os.path.getmtime) #已排序\n\tfor filename in b:\n\t #print(filename)\n\t img = cv2.imread(filename)\n\t height, width, layers = img.shape\n\t size = (width,height)\n\t img_array.append(img)\n\t \n\tout = cv2.VideoWriter(name + '.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 30, size)\n\t \n\tfor i in range(len(img_array)):\n\t # print(\"len=\",len(img_array))\n\t # print(\"name=\",img_array[i])\n\t out.write(img_array[i])\n\tout.release()\n\tprint(\"Done!Done!\")\n\nvideo(\"Driving\")\n","sub_path":"frames轉video.py","file_name":"frames轉video.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"27599006","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, TensorDataset\n# import torchvision.datasets as dsets\n# import torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport pickle\nimport numpy as np\nimport random\n\nBATCH_SIZE = 32\nLEARNING_RATE = 0.001\nEPOCH = 400\n\n# transform = transforms.Compose([\n# transforms.RandomSizedCrop(224),\n# transforms.RandomHorizontalFlip(),\n# transforms.ToTensor(),\n# transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ],\n# std = [ 0.229, 0.224, 0.225 ]),\n# ])\n\n\n\n# with open('./ck+.pkl', 'rb') as f:\n# data = pickle.load(f)\n# print('load done') \n\n# index = [i for i in range(927)]\n# random.shuffle(index)\n# trainindex = index[:835]\n# testindex = index[835:]\n\n# # dic = {'angry':0, 'disgust':1, 'fear':2, 'happy':3, 'neutral':4, 'sad':5, 'surprise':6}\n# dic = {'angry':0, 'disgusted':1, 'fearful':2, 'happy':3, 'sadness':4, 'surprised':5}\n# for i in range(len(data[1])):\n# data[1][i] = dic[data[1][i]]\n# testlabel = np.array(data[1])[testindex]\n# trainlabel = np.array(data[1])[trainindex]\n# testdata = np.array(data[3])[testindex]\n# traindata = np.array(data[3])[trainindex]\n\n# trainData = TensorDataset(torch.Tensor(traindata), torch.LongTensor(trainlabel))\n# testData = TensorDataset(torch.Tensor(testdata), torch.LongTensor(testlabel))\n# print('to tensor done')\n\n# trainLoader = DataLoader(dataset=trainData, batch_size=BATCH_SIZE, shuffle=False)\n# testLoader = DataLoader(dataset=testData, batch_size=1, shuffle=False)\n# print('dataloader done')\n\nwith open('./raf_train.pkl', 'rb') as f:\n train_data = pickle.load(f)\nprint('load train done')\n\n# with open('./raf_test.pkl', 'rb') as f:\n# test_data = pickle.load(f)\n# print('load test done')\n\n\ntrainData = TensorDataset(torch.Tensor(train_data[2]), torch.LongTensor(train_data[0]))\n# testData = TensorDataset(torch.Tensor(test_data[3]), torch.LongTensor(test_data[1]))\nprint('to tensor done')\n\ntrainLoader = DataLoader(dataset=trainData, batch_size=BATCH_SIZE, shuffle=True)\n# testLoader = DataLoader(dataset=testData, batch_size=1, shuffle=False)\nprint('dataloader done')\n\nclass VGG16(nn.Module):\n def __init__(self):\n super(VGG16, self).__init__()\n self.layer1 = nn.Sequential(\n\n # 1-1 conv layer\n nn.Conv2d(3, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n\n # 1-2 conv layer\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n\n # 1 Pooling layer\n nn.MaxPool2d(kernel_size=2, stride=2))\n\n self.layer2 = nn.Sequential(\n\n # 2-1 conv layer\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n\n # 2-2 conv layer\n nn.Conv2d(128, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n\n # 2 Pooling lyaer\n nn.MaxPool2d(kernel_size=2, stride=2))\n \n self.layer3 = nn.Sequential(\n\n # 3-1 conv layer\n nn.Conv2d(128, 256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n\n # 3-2 conv layer\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n\n # 3 Pooling layer\n # nn.MaxPool2d(kernel_size=2, stride=2))\n nn.AvgPool2d(kernel_size=24, stride=24))\n \n\n self.layer6 = nn.Sequential(\n\n # 6 Fully connected layer\n nn.Linear(256, 256),\n # nn.Dropout(),\n nn.ReLU())\n\n\n self.layer7 = nn.Sequential(\n\n # 7 Fully connected layer\n nn.Linear(256*11, 1024),\n nn.Dropout(),\n nn.ReLU())\n\n self.layer8 = nn.Sequential(\n\n # 8 output layer\n nn.Linear(1024, 
7),\n nn.Softmax())\n\n def forward(self, x):\n # x.shape ==(-1,10,96,96,3)\n out = []\n for k in range(11):\n #print(k)\n f = x[:,k,:,:,:]\n f = f.permute(0,3,1,2) \n #(-1,3,96,96)\n f = self.layer1(f) \n # (-1, 64, 48, 48)\n f = self.layer2(f) \n # (-1, 128, 24, 24)\n f = self.layer3(f) \n # (-1, 256, 1, 1)\n f = f.view(f.size(0),-1)\n # (-1, 256)\n f = self.layer6(f)\n # (-1, 256)\n out.append(f)\n out = torch.cat(out,axis=1) \n # (-1, 256*10)\n out = self.layer7(out)\n # (-1, 1024)\n out = self.layer8(out)\n # (-1, 7)\n return out\n \nimport torch.optim as optim\n\n\n \n\n\nvgg16 = VGG16()\n\n# checkpoint = torch.load('./RAFDB/vgg_dropout_epoch329.pkl') \n# vgg16.load_state_dict(checkpoint['net'], strict=False)\n\nvgg16.cuda()\n \n\n# Loss and Optimizer\ncost = nn.CrossEntropyLoss()\noptimizer = optim.SGD(vgg16.parameters(), lr=LEARNING_RATE, momentum=0.9, weight_decay=5e-4)\n# optimizer.load_state_dict(checkpoint['opt'])\n# optimizer = torch.optim.Adam(vgg16.parameters(), lr=LEARNING_RATE)\n# Train the model\nvgg16.train()\n# loss_list = []\n# acc_list = []\nfor epoch in range(0, EPOCH):\n if epoch > 350:\n for param_group in optimizer.param_groups:\n param_group['lr'] = 1e-4\n# if epoch > 260:\n# for param_group in optimizer.param_groups:\n# param_group['lr'] = 1e-5\n \n correct = 0\n total = 0\n \n for i, (images, labels) in enumerate(trainLoader):\n images = Variable(images).cuda()\n labels = Variable(labels).cuda()\n out = vgg16.forward(images)\n #print(out.shape,labels.shape)\n loss = cost(out, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n _, predicted = torch.max(out.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n \n# for i in range(400):\n# index = np.random.randint(0, 12198, size=BATCH_SIZE)\n# # images = trainregion[index]\n# images = np.array(train_data[3]).reshape(-1, 10, 96, 96, 3)[index]\n# images = Variable(torch.Tensor(images)).cuda()\n# labels = np.array(train_data[1])[index]\n# labels = Variable(torch.LongTensor(labels)).cuda()\n \n# out = vgg16.forward(images)\n# # print(out.shape,labels.shape)\n# loss = cost(out, labels)\n# optimizer.zero_grad()\n# loss.backward()\n# optimizer.step()\n# _, predicted = torch.max(out.data, 1)\n# total += labels.size(0)\n# correct += (predicted == labels).sum()\n \n \n# loss_list.append(loss.data)\n# acc_list.append(correct / total)\n print('epoch: %d' % epoch, loss.data)\n print('Test Accuracy of the model on the train images: %d %%' % (100 * correct / total))\n if (epoch+1) % 10 == 0 :\n saved_dict = {\n 'net': vgg16.state_dict(),\n 'opt': optimizer.state_dict()\n }\n torch.save(saved_dict, './RAFDB/vgg_dropout_allepoch{}.pkl'.format(epoch))\n # torch.save(vgg16.state_dict(), './RAFDB/vgg_dropout_epoch{}.pkl'.format(epoch))\n \n# Test the model\n# vgg16.eval()\n# correct = 0\n# total = 0\n\n# for images, labels in testLoader:\n# images = Variable(images).cuda()\n# outputs = vgg16(images)\n# _, predicted = torch.max(outputs.data, 1)\n# total += labels.size(0)\n# correct += (predicted.cpu() == labels).sum()\n\n# print('Test Accuracy of the model on the test images: %d %%' % (100 * correct / total))\n\n# Save the Trained Model\n","sub_path":"fuben.py","file_name":"fuben.py","file_ext":"py","file_size_in_byte":7730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"307391328","text":"import inspect\nfrom typing import Any\nimport types\n\nfrom rx.concurrency import current_thread_scheduler\n\nfrom .observable import ObservableBase\nfrom .autodetachobserver import AutoDetachObserver\nfrom .anonymousobserver import AnonymousObserver\nfrom .disposable import Disposable\nfrom . import abc\n\n\ndef subscribe(source: ObservableBase, observer: abc.Observer = None,\n scheduler: abc.Scheduler = None):\n \"\"\"Subscribe an observer to the observable sequence.\n\n Examples:\n 1 - source.subscribe()\n 2 - source.subscribe(observer)\n\n Keyword arguments:\n observer -- [Optional] The object that is to receive\n notifications. You may subscribe using an observer or\n callbacks, not both.\n\n Return disposable object representing an observer's subscription\n to the observable sequence.\n \"\"\"\n\n observer = observer or AnonymousObserver()\n assert isinstance(observer, abc.Observer) or isinstance(observer, types.GeneratorType)\n\n if isinstance(observer, types.GeneratorType):\n if inspect.getgeneratorstate(observer) == inspect.GEN_CREATED:\n observer.on_next(None)\n\n auto_detach_observer = AutoDetachObserver(observer)\n\n def fix_subscriber(subscriber):\n \"\"\"Fixes subscriber to make sure it returns a Disposable instead\n of None or a dispose function\"\"\"\n if not hasattr(subscriber, \"dispose\"):\n subscriber = Disposable.create(subscriber)\n\n return subscriber\n\n def set_disposable(_: abc.Scheduler = None, __: Any = None):\n try:\n subscriber = source._subscribe_core(auto_detach_observer, scheduler)\n except Exception as ex: # By design. pylint: disable=W0703\n if not auto_detach_observer.fail(ex):\n raise\n else:\n auto_detach_observer.subscription = fix_subscriber(subscriber)\n\n # Subscribe needs to set up the trampoline before for subscribing.\n # Actually, the first call to Subscribe creates the trampoline so\n # that it may assign its disposable before any observer executes\n # OnNext over the CurrentThreadScheduler. This enables single-\n # threaded cancellation\n # https://social.msdn.microsoft.com/Forums/en-US/eb82f593-9684-4e27-\n # 97b9-8b8886da5c33/whats-the-rationale-behind-how-currentthreadsche\n # dulerschedulerequired-behaves?forum=rx\n if current_thread_scheduler.schedule_required():\n current_thread_scheduler.schedule(set_disposable)\n else:\n set_disposable()\n\n # Hide the identity of the auto detach observer\n return Disposable.create(auto_detach_observer.dispose)\n","sub_path":"rx/core/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"203930687","text":"from environ import DATABASE_URL\nimport psycopg2\nfrom logger import logger\n\n\nclass HashDatabase:\n def __init__(self):\n self.conn = psycopg2.connect(DATABASE_URL, sslmode=\"require\")\n self.cur = self.conn.cursor()\n\n self.logger = logger\n self.logger.disabled = False\n self.logger.info(\"HashDB initilaized\")\n\n def create_table(self, name, values):\n # values = postid TEXT, dhash TEXT\n sql = f\"\"\"CREATE TABLE IF NOT EXISTS {name} ({values});\"\"\"\n self.cur.execute(sql)\n self.conn.commit()\n\n def insert_data(self, postid, dhash, ahash, phash):\n try:\n self.cur.execute(\n \"INSERT INTO Hashes (postid, dhash, ahash, phash) VALUES (%s, %s, %s, %s);\",\n (postid, dhash, ahash, phash),\n )\n self.logger.info(f\"saved into the db: {postid}\")\n except psycopg2.errors.UniqueViolation:\n self.logger.warning(f\"same post skipping: {postid}\")\n finally:\n self.conn.commit()\n\n def query(self, base_post_id):\n self.cur.execute(\"SELECT postid, ahash, phash, dhash FROM Hashes WHERE postid != %s;\", (base_post_id,))\n for row in self.cur:\n yield row\n\n def fetch_all(self, table_name):\n sql = f\"SELECT * FROM {table_name};\"\n self.cur.execute(sql)\n return self.cur.fetchall()\n\n def update_before_and_after(self, before=None, after=None):\n ba_l = []\n if before is not None:\n ba_l.append(f\"before = '{before}'\")\n if after is not None:\n ba_l.append(f\"after = '{after}'\")\n sql = f\"UPDATE beforeafter SET {','.join(ba_l)}\"\n self.cur.execute(sql)\n self.conn.commit()\n\n def reset_before_and_after(self):\n self.update_before_and_after(\"None\", \"None\")\n\n def initialize_before_and_after(self):\n self.cur.execute(\n \"INSERT INTO beforeafter (before, after) VALUES ('None', 'None');\"\n )\n self.conn.commit()\n\n def fetch_before_and_after(self):\n self.cur.execute(\"SELECT * FROM beforeafter;\")\n res = self.cur.fetchone()\n return res\n\n def delete_table(self, table_name):\n sql = f\"DROP TABLE {table_name}\"\n self.cur.execute(sql)\n self.conn.commit()\n\n def custom_execute(self, sql):\n self.cur.execute(sql)\n self.conn.commit()\n","sub_path":"src/HashDatabase/HashDatabase.py","file_name":"HashDatabase.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"17735889","text":"import math\nimport os\nimport sys\nfrom textdoc import TextDoc\nimport metric\n\ndef get_idf_map(textdocs):\n m = {}\n N = float(len(textdocs))\n for doc in textdocs:\n for word in doc.get_frequencies():\n if word in m:\n m[word] += 1\n else:\n m[word] = 1\n idf_map = {}\n # Using log base 2 for now\n for word in m:\n idf_map[word] = 1 + math.log(N / m[word], 2)\n return idf_map\n\ndef get_ittf_map(textdocs):\n m = {}\n for doc in textdocs:\n frequencies = doc.get_frequencies()\n for word in frequencies:\n if word in m:\n m[word] += frequencies[word]\n else:\n m[word] = frequencies[word]\n total_words = 0.0\n for word in m:\n total_words += m[word]\n ittf_map = {}\n for word in m:\n ittf_map[word] = 1 + math.log(total_words / m[word], 2)\n return ittf_map\n\ndef initialize(directory_name):\n textdocs = []\n for filename in os.listdir(directory_name):\n if filename == \".DS_Store\":\n continue\n textdocs.append(TextDoc(os.path.join(directory_name, filename)))\n idf_map = get_idf_map(textdocs)\n ittf_map = get_ittf_map(textdocs)\n def idf_weight_fn(word):\n return idf_map[word]\n def ittf_weight_fn(word):\n return ittf_map[word]\n # Add metrics that require global knowledge of documents\n\n # Add the TF-IDF (Term Frequency Inverse Document Frequency) metric\n metric.metrics['TF-IDF'] = metric.Metric(metric.mult_fn, metric.unit_fn, metric.divide_by_magnitudes_fn, idf_weight_fn)\n # Add the sublinear TF-IDF metric\n metric.metrics['Sublinear TF-IDF'] = metric.Metric(metric.log_mult_fn, metric.unit_fn, metric.divide_by_log_magnitudes_fn, idf_weight_fn)\n # Add the TF-ITTF (Term Frequency Inverse Total Term Frequency) metric\n metric.metrics['TF-ITTF'] = metric.Metric(metric.mult_fn, metric.unit_fn, metric.divide_by_magnitudes_fn, ittf_weight_fn)\n\n return textdocs, metric\n\ndef get_graph(textdocs, metric, metric_name):\n ids = []\n nodes = {}\n id_map = {}\n id_counter = 1\n for doc in textdocs:\n num_words = len(doc.get_frequencies().keys())\n ids.append(id_counter)\n nodes[id_counter] = (doc.get_title(), num_words)\n id_map[doc] = id_counter\n id_counter += 1\n\n edges = {}\n if metric_name in metric.metrics:\n m = metric.metrics[metric_name]\n elif metric_name in metric.asymmetric_metrics:\n m = metric.asymmetric_metrics[metric_name]\n else:\n return\n for i in xrange(len(textdocs)):\n for j in xrange(i+1, len(textdocs)):\n doc1 = textdocs[i]\n doc2 = textdocs[j]\n edges[(id_map[doc1], id_map[doc2])] = m.distance(doc1, doc2)\n edges[(id_map[doc2], id_map[doc1])] = m.distance(doc2, doc1)\n\n return {\n 'ids': ids,\n 'nodes': nodes,\n 'edges': edges\n }\n","sub_path":"visualization/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"548754146","text":"from typing import Mapping, Callable, Union, Any, Sequence, MutableMapping\nfrom os import path\nfrom random import randint\n\n\ndef GetUserAgent() -> str:\n data: Sequence[str] = [\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246\",\n \"Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36\",\n \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1\"\n ]\n agent: str = data[randint(0, len(data)-1)]\n return agent\n\n\nGetXPath: Callable[[int], str] = lambda i: f\"/html/body/div[2]/div/div[3]/div[1]/div/div[1]/div[1]/table[2]/tbody/tr/td[{i}]\"\n\n\nclass Types:\n DictableRate = Mapping[str, Union[str, float]]\n File = Mapping[str, Union[Sequence[DictableRate], float]]\n URLResponse = Union[bytes, Mapping[str, Any]]\n Number = Union[int, float]\n\n\nclass Constants:\n url: str = \"https://www.x-rates.com/table/?from=EUR&amount=1\"\n header: MutableMapping[str, str] = {\n \"User-Agent\": GetUserAgent(),\n \"Accept\": \"*/*\"\n }\n dataPath: str = str(path.join(path.dirname(__file__), '..', 'data'))\n logPath: str = str(path.join(path.dirname(__file__), '..', 'log'))\n xpathName: str = GetXPath(1)\n xpathFrom: str = GetXPath(2)\n xpathTo: str = GetXPath(3)\n source: str = \"x-rates.com\"\n name: str = \"name\"\n currencyCode: str = \"currency_code\"\n fromEuro: str = \"from_euro\"\n data: str = \"data\"\n timestamp: str = \"timestamp\"\n toEuro: str = \"to_euro\"\n defaultFrom: str = \"eur\"\n defaultFromName: str = \"euro\"\n defaultTo: str = \"usd\"\n defaultAmount: float = 1.0\n defaultDict: Mapping[str, None] = {\"data\": None}\n logFileName: str = \"pyrates.log\"\n dataFileName: str = \"data.json\"\n rateStringLength: int = 31\n nameStringLength: int = 9\n cacheLimitInSeconds: int = 1800\n currencies: Mapping[str, str] = {\n \"ARS\": \"Argentine Peso\",\n \"AUD\": \"Australian Dollar\",\n \"BHD\": \"Bahraini Dinar\",\n \"BWP\": \"Botswana Pula\",\n \"BRL\": \"Brazilian Real\",\n \"BND\": \"Bruneian Dollar\",\n \"BGN\": \"Bulgarian Lev\",\n \"CAD\": \"Canadian Dollar\",\n \"CLP\": \"Chilean Peso\",\n \"CNY\": \"Chinese Yuan Renminbi\",\n \"COP\": \"Colombian Peso\",\n \"HRK\": \"Croatian Kuna\",\n \"CZK\": \"Czech Koruna\",\n \"DKK\": \"Danish Krone\",\n \"HKD\": \"Hong Kong Dollar\",\n \"HUF\": \"Hungarian Forint\",\n \"ISK\": \"Icelandic Krona\",\n \"INR\": \"Indian Rupee\",\n \"IDR\": \"Indonesian Rupiah\",\n \"IRR\": \"Iranian Rial\",\n \"ILS\": \"Israeli Shekel\",\n \"JPY\": \"Japanese Yen\",\n \"KZT\": \"Kazakhstani Tenge\",\n \"KRW\": \"South Korean Won\",\n \"KWD\": \"Kuwaiti Dinar\",\n \"LYD\": \"Libyan Dinar\",\n \"MYR\": \"Malaysian Ringgit\",\n \"MUR\": \"Mauritian Rupee\",\n \"MXN\": \"Mexican Peso\",\n \"NPR\": \"Nepalese Rupee\",\n \"NZD\": \"New Zealand Dollar\",\n \"NOK\": \"Norwegian Krone\",\n \"OMR\": \"Omani Rial\",\n \"PKR\": \"Pakistani Rupee\",\n \"PHP\": \"Philippine Peso\",\n \"PLN\": \"Polish Zloty\",\n \"QAR\": \"Qatari Riyal\",\n \"RON\": \"Romanian New Leu\",\n \"RUB\": \"Russian Ruble\",\n \"SAR\": \"Saudi Arabian Riyal\",\n \"SGD\": \"Singapore Dollar\",\n \"ZAR\": \"South African Rand\",\n \"LKR\": \"Sri Lankan Rupee\",\n 
\"SEK\": \"Swedish Krona\",\n \"CHF\": \"Swiss Franc\",\n \"TWD\": \"Taiwan New Dollar\",\n \"THB\": \"Thai Baht\",\n \"TTD\": \"Trinidadian Dollar\",\n \"TRY\": \"Turkish Lira\",\n \"AED\": \"Emirati Dirham\",\n \"GBP\": \"British Pound\",\n \"USD\": \"US Dollar\",\n \"VEF\": \"Venezuelan Bolivar\" \n }\n\n\nclass GUI:\n title: str = \"PyRatesGUI 0.2\"\n source: str = \"https://github.com/lindeneg/pyrates-cl\"\n defaultUtilityContent: str = \"PyRatesGUI - 0.2\\n\\nMake a Conversion..\"\n defaultMainView: str = \"default\"\n supportedRatesView: str = \"supportedRates\"\n height: int = 800\n width: int = 800\n conversionHeight: int = 150\n conversionWidth: int = 645\n labelWidth: int = 20\n xOffset: int = 10\n yOffset: int = 5\n utilityXOffset: int = 300\n utilityYOffset: float = height-(height * 0.9)+20\n utilityContentHeight: int = 9\n utilityContentWidth: int = 44\n componentTableHeight: int = 32\n backgroundColor: str = \"#000000\"\n textColor: str = \"#ffffff\"\n buttonColor: str = \"#666666\"\n buttonWidth: int = 16\n buttonYPadding: int = 20\n amountInputLabelSpacing: int = 4\n fromInputLabelSpacing: int = 15\n toInputLabelSpacing: int = 21\n resizeable: bool = False","sub_path":"pyrates/util/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"246821291","text":"from getkey import getkey, keys\nimport sys, os\nfrom copy import deepcopy\nfrom glob import glob\nimport random\nfrom random import choice\nimport pygame\nimport time\nimport menu \nimport field\nimport player\nimport enemy\nimport aicontrollers\n\n'''Создание окна'''\nwindow=pygame.display.set_mode((1024,768))\nwindow_image=pygame.image.load(('game_res/images/menu.jpg'))\nscreen = pygame.Surface((500,275))\n\n'''Название игры'''\npygame.display.set_caption('MAZE')\n\n\"\"\"Шрифты\"\"\"\npygame.font.init()\nsfont=pygame.font.SysFont('DejaVu Serif',24, True)\n\n'''Sprites for game'''\nE=pygame.image.load('game_res/images/portal.png')\nE.set_colorkey((255,255,255))\nsprites = { '!': pygame.image.load('game_res/images/briks.jpg'), \n\t\t\t'#': pygame.image.load('game_res/images/field.png'),\n\t\t\t'E': E,\n\t\t\t'S': pygame.image.load('game_res/images/field.png')}\n\nplayer_sprite = pygame.image.load('game_res/images/hero.jpg')\nplayer_sprite.set_colorkey((255,255,255))\nenemy_sprite = pygame.image.load('game_res/images/enemy.png')\nenemy_sprite.set_colorkey((255,255,255))\n\n\"\"\"Создание меню\"\"\"\ndef process_menu():\n\tbutton_start = menu.MenuButton(\"START\")\n\tbutton_exit = menu.MenuButton(\"EXIT\")\n\n\tmymenu = menu.Menu(window,window_image,[button_start,button_exit])\n\tselected_button = mymenu.get_item()\n\n\tif selected_button == button_exit:\n\t\tsys.exit(0)\n\nprocess_menu()\n\ndef mark_player(m,player):\n\tm.sets(player.s,player.x,player.y)\n\ndef get_screen_block_size(sx,sy):\n\treturn screen.get_size()[1]//sx, screen.get_size()[0]//sy\n\ndef draw_cart(cart,screen, player, enemys):\n\tsx, sy = get_screen_block_size(cart.size_x, cart.size_y)\n\tfor i, col in enumerate(cart.current_state):\n\t\tfor j, e in enumerate(col):\n\t\t\tx = j*sx\n\t\t\ty = i*sy\n\t\t\tscreen.blit(pygame.transform.scale(sprites[e],(sx,sy)),(x,y))\n\n\tscreen.blit(pygame.transform.scale(player_sprite,(sx,sy)), (player.screen_xc-sx//2, player.screen_yc-sy//2))\n\n\tfor enemy in enemys:\n\t\tscreen.blit(pygame.transform.scale(enemy_sprite,(sx,sy)),(enemy.screen_xc-sx//2, enemy.screen_yc-sy//2))\n\n\nl = glob(\"./game_res/levels/level*.txt\")\n\nlevel = 0\nm = None\nstart_x,start_y = None,None\nend_x,end_y = None, None\nplayer1 = None\nenemys = []\nAIs=[]\npygame.init()\n\ndef next_level(levelname):\n\tglobal m, start_x,start_y,end_x,end_y,player1,enemys,AIs\n\tm = field.Field(levelname) \n\tstart_x,start_y = m.get_starting_position()\n\tend_x,end_y = m.get_ending_position()\n\n\tplayer1=player.Player(\"@\",start_x,start_y, get_screen_block_size(m.size_x, m.size_y))\n\tenemys=[enemy.Enemy(\"%\",8,15, get_screen_block_size(m.size_x, m.size_y)),\n\t\t\tenemy.Enemy(\"%\",8,5, get_screen_block_size(m.size_x, m.size_y))]\n\n\tAIs = []\n\t[AIs.append(aicontrollers.SimpleAI(player1,enemy, m)) for enemy in enemys]\n\n'''Подготовка к запуску игры'''\nrunning = True\nnext_level(l[level])\nwindow.blit(window_image,(0,0))\ncount=1\n'''Игровой цикл'''\nwhile running:\n\t'''Отрисовка экрана'''\n\tm.clear()\n\twindow.blit(sfont.render('Уровень:'+ str(count),1,(210,120,10)),(10,5))\n\twindow.blit(screen,(300,200))\n\t'''Обработчик событий'''\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trunning = False\n\tif pygame.key.get_pressed()[pygame.K_ESCAPE]:\n\t\tprocess_menu()\n\n\t'''Передвижение игрока'''\n\tif not player1.in_transition():\n\t\tkey_pressed = pygame.key.get_pressed()\n\t\tif pygame.key.get_pressed()[pygame.K_RIGHT]:\n\t\t\tplayer1.move(\"right\", 
m.can_move_to)\n\t\tif pygame.key.get_pressed()[pygame.K_UP]:\n\t\t\tplayer1.move(\"up\", m.can_move_to)\n\t\tif pygame.key.get_pressed()[pygame.K_DOWN]:\n\t\t\tplayer1.move(\"down\", m.can_move_to)\n\t\tif pygame.key.get_pressed()[pygame.K_LEFT]:\n\t\t\tplayer1.move(\"left\", m.can_move_to)\n\t\n\tplayer1.update_screen_coords()\n\n\t'''Передвижение препятствия'''\n\tfor i in AIs:\n\t\tif not i.enemy.in_transition():\n\t\t\ti.step()\n\t\ti.enemy.update_screen_coords()\n\n\t#if player1.x == enemy.x and player1.y == enemy.y:\n\t#\tnext_level(l[level])\n\n\tif end_x == player1.x and end_y == player1.y:\n\t\tlevel+=1\n\t\tnext_level(l[level])\n\t\tcount+=1\n\n\tdraw_cart(m,screen,player1, enemys)\n\t\n\n\tpygame.display.flip()\n\ttime.sleep(0.001)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"30386049","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom user.models import User, Department, Company\nfrom django.utils import timezone\nfrom administrator.models import UnregisteredUser\n\n\nclass ViewsTestCase(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.company = Company.objects.create(name=\"Abc\")\n cls.department = Department.objects.create(name=\"abc\", company=cls.company)\n cls.test_user = User.objects.create(password=\"\", email=\"abc@mail.ru\", role=3, is_superuser=True,\n first_name=\"abc\", last_name=\"abc\", is_staff=1, is_active=1,\n date_joined=timezone.now(), post=\"user\", department=cls.department)\n\n @classmethod\n def tearDownClass(cls):\n for elem in [cls.test_user, cls.department, cls.company]:\n elem.delete()\n\n def test_1_check_access_index_view(self):\n self.client.force_login(self.test_user)\n resp = self.client.get(\"/\")\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.url, reverse(\"user-page\"))\n\n def test_2_check_access_index_view(self):\n self.test_user.role = 2\n self.test_user.save()\n self.client.force_login(self.test_user)\n resp = self.client.get(\"/\")\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.url, reverse(\"director-page\"))\n\n def test_3_check_access_index_view(self):\n self.test_user.role = 1\n self.test_user.save()\n self.client.force_login(self.test_user)\n resp = self.client.get(\"/\")\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.url, reverse(\"administrator-page\"))\n\n def test_4_check_access_index_view(self):\n resp = self.client.get(\"/\")\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.url, \"/accounts/login/?next=/\")\n","sub_path":"app/main/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"303872665","text":"import os\n\nimport scruffy\nfrom scruffy import *\n\n\ndef test_directory():\n d = Directory('tests/env1')\n p = '/tmp/scruffy_test'\n assert os.path.exists(d.path)\n try:\n os.removedirs(p)\n except:\n pass\n with Directory(p, cleanup=True) as d:\n assert os.path.exists(p)\n assert d.exists\n assert d.path_to('x') == os.path.join(p, 'x')\n assert not os.path.exists(p)\n\ndef test_plugin_directory():\n scruffy.plugin.PluginRegistry.plugins = []\n assert len(PluginManager().plugins) == 0\n d = PluginDirectory('tests/env1/plugins')\n d.load()\n assert len(PluginManager().plugins) == 2\n\ndef test_package_directory():\n d = PackageDirectory()\n assert d._base == os.path.join(os.getcwd(), 'tests')\n d = PackageDirectory(package='scruffy')\n assert d._base == os.path.join(os.getcwd(), 'scruffy')\n d = PackageDirectory('xxx', package='scruffy')\n assert d._base == os.path.join(os.getcwd(), 'scruffy')\n assert d.path == os.path.join(os.getcwd(), 'scruffy/xxx')\n\ndef test_nested_package_plugin():\n d = PluginDirectory('env1/plugins', parent=PackageDirectory())\n assert d.path == os.path.join(os.getcwd(), 'tests/env1/plugins')\n scruffy.plugin.PluginRegistry.plugins = []\n assert len(PluginManager().plugins) == 0\n d.load()\n assert len(PluginManager().plugins) == 2\n\ndef test_directory_config():\n d = Directory('tests/env1', config=ConfigFile('json_config'))\n d.prepare()\n assert type(d.config) == ConfigFile\n assert d.config.setting1 == 667\n\ndef test_directory_file():\n d = Directory('tests/env1', thing=File('raw_file'))\n d.prepare()\n assert type(d.thing) == File\n assert d.thing.content.strip() == 'raw_file value'\n\ndef test_directory_file_with():\n with Directory('tests/env1', thing=File('raw_file')) as d:\n assert type(d.thing) == File\n assert d.thing.content.strip() == 'raw_file value'\n","sub_path":"tests/directory_tests.py","file_name":"directory_tests.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"565153664","text":"import csv\nimport locale\nimport pandas as pd\n\nencoding = locale.getpreferredencoding()\ndf = pd.read_json('input/train_line.json', encoding='utf-8')\ndf.to_csv(\n 'output/train_line.csv',\n index=False,\n quoting=csv.QUOTE_NONNUMERIC,\n columns=['time', 'destination', 'platform', 'type'],\n header=['時刻', '目的地', 'プラットフォーム', '種別'],\n encoding=encoding\n )\n","sub_path":"practice/python/basic/021_030/023_read_json_and_write_csv.py","file_name":"023_read_json_and_write_csv.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"645225985","text":"#!/usr/bin/env python\n\nimport collections\nimport os.path as osp\n\nimport numpy as np\nimport cv2\nimport torch\nfrom torch.utils import data\nfrom random import shuffle\nimport os.path\nimport os\nimport copy\nimport math\n\nclass RelativeTileDataLoader(data.Dataset):\n\n def __init__(self, img_root, image_list, crop_shape, mirror = True, split = 'train'):\n self.img_root = img_root\n self.split = split\n self.image_list = [line.rstrip('\\n') for line in open(image_list)]\n\n self.mirror = mirror\n self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n self.std_bgr = 255*np.array([0.229, 0.224, 0.225])\n self.crop_shape = crop_shape\n\n\n self.files = collections.defaultdict(list)\n for f in self.image_list:\n self.files[self.split].append({'img': img_root+f, 'lbl': 0})\n \n def __len__(self):\n return len(self.files[self.split])\n\n def __getitem__(self, index):\n \"\"\" get the image\"\"\"\n image_file_name = self.img_root + self.image_list[index]\n \n image = None\n if os.path.isfile(image_file_name):\n image = cv2.imread(image_file_name)\n else:\n print('ERROR: couldn\\'t find image -> ', image_file_name)\n \n if self.mirror:\n flip = torch.LongTensor(1).random_(0, 2)[0]*2-1\n image = image[:, ::flip, :]\n\n \n \"\"\" Divide image into 3x3\"\"\"\n tile_shape = (image.shape[0]+2)/3, (image.shape[1]+2)/3 # add 2 to consider non multiples of 3\n center_tile_topleft_corner= tile_shape\n \n \"\"\" get random crop location from center tile \"\"\"\n center_crop_topleft_corner = (int(np.random.randint(0,tile_shape[0] - self.crop_shape[0]-1) + center_tile_topleft_corner[0]) \\\n , int(np.random.randint(0,tile_shape[1] - self.crop_shape[1]-1) + center_tile_topleft_corner[1]))\n \n \"\"\" choose random tile location out of the 8 neighbouring \"\"\"\n possible_tile_locs = [(0,0),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1),(2,2)]\n# chosen_tile_idx = np.random.randint(0,len(possible_tile_locs))\n chosen_tile_idx = torch.LongTensor(1).random_(len(possible_tile_locs))[0]\n chosen_tile_loc = possible_tile_locs[chosen_tile_idx]\n \n \"\"\" Get a random crop location out of this tile \"\"\"\n random_tile_topleft_corner = (chosen_tile_loc[0] * tile_shape[0]) , (chosen_tile_loc[1] * tile_shape[1])\n random_crop_topleft_corner = (int(np.random.randint(0,tile_shape[0] - self.crop_shape[0]-1) + random_tile_topleft_corner[0]) \\\n , int(np.random.randint(0,tile_shape[1] - self.crop_shape[1]-1) + random_tile_topleft_corner[1]) )\n \n\n \n \"\"\" Get the actual crops \"\"\"\n center_crop = image[center_crop_topleft_corner[0]:center_crop_topleft_corner[0]+self.crop_shape[0] , \\\n center_crop_topleft_corner[1]:center_crop_topleft_corner[1]+self.crop_shape[1], \\\n :]\n\n random_crop = image[random_crop_topleft_corner[0]:random_crop_topleft_corner[0]+self.crop_shape[0] , \\\n random_crop_topleft_corner[1]:random_crop_topleft_corner[1]+self.crop_shape[1], \\\n :]\n\n\n return self.transform_image(center_crop),self.transform_image(random_crop),chosen_tile_idx,chosen_tile_loc #torch.from_numpy(chosen_tile_idx),chosen_tile_loc\n\n\n def transform_image(self, image):\n image = image.astype(np.float64)\n image -= self.mean_bgr\n image = image.transpose(2, 0, 1)\n image = torch.from_numpy(image.copy()).float()\n\n return image\n\n\n","sub_path":"Beyond_Supervised/relative_utils.py","file_name":"relative_utils.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"251918434","text":"#!/usr/bin/env python3\n\n# Strang : Breaking the % operator since 1994.\n\n# Treat strings like numbers which are read left to right. Instead of a digit going 0-9, it goes from codepoint 32 to infinity.\n# Far left is the starting index (as per usual with strings), as opposed to numbers having the far right be the start.\n# For example:\n# Index : 0123456789\n# Value : strang str\n\nclass Strang(str):\n\n def verify_stringy(questionable):\n if type(questionable) is not str:\n try:\n str(questionable)\n return True\n except:\n raise TypeError('Cannot convert {} to string.'.format(questionable))\n return True\n\n\n def __init__(self, given):\n if Strang.verify_stringy(given):\n string = str(given)\n else:\n string = given\n self.s = string\n\n\n # The operation that sparked the idea.\n # aka : What if I made \"'%s' % (string)\" more confusing?\n # Casts each character in a string to an int, mods the lsv by the corresponding rsv\n # Casts the result back to characters.\n # Then it's all joined into a new (and slightly less useful) string.\n def __mod__(self, other):\n if Strang.verify_stringy(other):\n other = str(other)\n else:\n raise TypeError('Cannot convert {} to a string.'.format(repr(other)))\n\n A = self.s\n B = other\n\n ## Pad both strings to the max length of either.\n pad_to = max( len(A), len(B) )\n pad = lambda string:'{: <{pad_to}}'.format(string, pad_to=pad_to)\n A, B = pad(A), pad(B)\n\n # -32 at start, and +32 at exit.\n # Aligns to ascii 32 (space) and beyond. kinda hard to print control codes.\n aints = tuple( ord(char)-32 for char in A )\n bints = tuple( ord(char)-32 for char in B )\n\n # Manual toggle to watch each step.\n if False:\n changed(self.s, A, pad_to)\n changed(other, B, pad_to)\n stepwise(aints, bints)\n # All lines below 100 characters will be ~~REDACTED~~.\n return ''.join( chr(pair[0]) if pair[1] <= 0 else chr((pair[0] % pair[1]) + 32 ) for pair in zip(aints, bints) )\n\n #def __radd__(self, other):\n # pass\n\n # Wait... is it possible to have a negative string?\n # What does that even mean?\n def __add__(self, other):\n if Strang.verify_stringy(other):\n other = str(other)\n else:\n raise TypeError('Cannot convert {} to a string.'.format(repr(other)))\n\n A = self.s\n B = other\n\n ## Pad both strings to the max length of either.\n pad_to = max( len(A), len(B) )\n pad = lambda string:'{: <{pad_to}}'.format(string, pad_to=pad_to)\n A, B = pad(A), pad(B)\n\n # -32 at start, and +32 at exit.\n # Aligns to ascii 32 (space) and beyond. 
kinda hard to print control codes.\n        aints = tuple( ord(char)-32 for char in A )\n        bints = tuple( ord(char)-32 for char in B )\n\n        # The nice thing about using unicode as the domain/range is you literally can't rollover.\n        # No carry required!\n        return ''.join( chr(p[0] + p[1] + 32) for p in zip(aints, bints) )\n\n\n    def __repr__(self):\n        return repr(self.s)\n\n# Strang wants to be a big boy like str\nstra = Strang\n\ndef stepwise(aints, bints):\n    pairs = tuple(pair for pair in zip(aints, bints))\n    mod_pairs = tuple( pair[0] if pair[1] <=0 else pair[0] % pair[1] for pair in pairs )\n    mod_pairs_chars = tuple( '' if (v+32)<0 else chr(v+32) for v in mod_pairs)\n    for p, m, c in zip(pairs, mod_pairs, mod_pairs_chars):\n        print('{}\\n{}\\n\"{}\"'.format(p,m,c))\ndef changed(pre, post, pad_to):\n    print('{:<{pad_to}} ({}) -> {} ({})'.format(pre, len(pre), post, len(post), pad_to=pad_to))\n\n\n\ndef main():\n    strang = Strang('abc')\n    print('\"abc\" % \"defdef\"')\n    print('\"{}\"'.format( strang % 'defdef') )\n\n    print('')\n    print('\"defdef\" % \"abc\"')\n    print('\"{}\"'.format( Strang('defdef') % strang) )\n\n    print('')\n    print('\"abc\" % \"aaa\"')\n    print('\"{}\"'.format( strang % 'aaa') )\n\n    print('')\n    print('\"aaa\" % \"abc\"')\n    print('\"{}\"'.format( Strang('aaa')%strang) )\n\n    print('')\n    print('\"abc\" + \"def\"')\n    print('\"{}\"'.format( strang + 'def'))\n\n    strange_string = Strang('e')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"strang.py","file_name":"strang.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"250854336","text":"import math\n\ndef convert(direction):\n if direction == \"s\":\n return 270\n elif direction == \"n\":\n return 90\n elif direction == \"ne\":\n return 30\n elif direction == \"nw\":\n return 150\n elif direction == \"sw\":\n return 210\n else:\n return 330\n\nclass Vector:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n def __repr__(self):\n return \"({0}, {1})\".format(self.x, self.y)\n def magnitude(self):\n return math.sqrt((self.x*self.x) + (self.y*self.y))\n\nfile = open(\"InputFiles/Day11.dat\")\ndirections = file.readline().split(\",\")\n\ndirections = [\"ne\", \"se\",\"ne\", \"se\",\"ne\", \"se\",\"ne\", \"se\",\"ne\", \"se\",\"ne\", \"se\",\"ne\", \"se\",\"ne\", \"se\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\", \"ne\"]\n\nvec = Vector(0, 0)\n\nfor direction in directions:\n angle = convert(direction)\n vec.x += math.cos(math.radians(angle))\n vec.y += math.sin(math.radians(angle))\n\nsideLength = math.fabs(vec.x/math.sqrt(3))\nlongestSide = sideLength*2\ntotal = longestSide + (math.fabs(vec.y) - sideLength)\n\nprint(max(round(total), round(longestSide)))\n","sub_path":"2017/Day 11/Day11_Part1.py","file_name":"Day11_Part1.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"102796605","text":"class Solution:\n def maxDistToClosest(self, seats: List[int]) -> int:\n str_arr = [str(a) for a in seats]\n string_vals = \"\".join(str_arr)\n new_arr = string_vals.split(\"1\")\n max_val = 0\n max_ind = len(new_arr)-1\n \n for i,v in enumerate(new_arr):\n if v == \"\":\n continue\n else:\n # group of zeros\n if i in [0,max_ind]:\n candidate_val = len(v)\n max_val = int(max(max_val,candidate_val))\n else:\n # group of zeros in middle segments\n max_val = int(max(max_val,(len(v)+1)/2))\n\n return max_val\n \n s\n# a better solution using the same concept:\n\nclass Solution(object):\n def maxDistToClosest(self, seats):\n ans = 0\n for seat, group in itertools.groupby(seats):\n if not seat:\n K = len(list(group))\n ans = max(ans, (K+1)/2)\n\n return max(ans, seats.index(1), seats[::-1].index(1))","sub_path":"849. Maximize Distance to Closest Person/.ipynb_checkpoints/solution-checkpoint.py","file_name":"solution-checkpoint.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"425882249","text":"# coding = utf-8\r\n'''Данный скрипт побуквенно изменяет сообщение. Получается что-то вроде ввода сообщения по буквам (я не знаю как это объяснить, просто попробуйте ;c)'''\r\nimport requests, time\r\ndef call(method, options={}, **kwargs):\r\n '''Фукнция вызова api ВК.'''\r\n options['access_token'] = token \r\n options['v'] = '5.73'\r\n options.update(kwargs)\r\n resp = requests.get('https://api.vk.com/method/'+method, params=options).json()\r\n if 'error' in resp:\r\n print('VKERROR: {error_code}: {error_msg}'.format(**resp['error']))\r\n return resp\r\n\r\ndef send_message(peer_id, message):\r\n '''Функция отправки сообщений.'''\r\n options = {\r\n 'message' : message,\r\n 'peer_id' : peer_id,\r\n }\r\n message_id = call('messages.send', options)['response']\r\n print('Сообщение {} отправлено'.format(message))\r\n print('Работаем с сообщением с ID {}'.format(message_id))\r\n return message_id\r\n\r\ndef main(peer_id, message):\r\n '''Изменяет сообщение побуквенно и выводит его\r\n peer_id - id беседы\r\n message - любое сообщение'''\r\n lastmessage = message[0]\r\n message_id = send_message(peer_id, lastmessage)\r\n for i in message[1:]:\r\n lastmessage += i\r\n options = {\r\n 'peer_id': peer_id,\r\n 'message': lastmessage,\r\n 'message_id': message_id\r\n }\r\n print('Добавлена буква {}'.format(i))\r\n time.sleep(1)\r\n call('messages.edit', options)\r\n\r\nif __name__ == '__main__':\r\n token = input(\"Введите токен: \")\r\n peer_id = input(\"Введите ID чата: \")\r\n message = input(\"Введите сообщение: \")\r\n main(peer_id, message)","sub_path":"message_edit.py","file_name":"message_edit.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"378130978","text":"import torch\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImagePath\nimport random\nimport config\nimport albumentations as A\nimport cv2\nimport glob\n\nclass ImageTransform():\n def __init__(self, image_array):\n self.image_array = image_array\n\n def normalize(self, image):\n # normalise image with 0 mean, 1 std\n return (image - np.mean(image)) / (np.std(image)).astype(np.float32)\n \n def normalize_xy(self, image):\n image = (image - np.mean(image, axis = 0, keepdims = True))/np.std(image, axis = 0, keepdims = True)\n image = (image - np.mean(image, axis = 1, keepdims = True))/np.std(image, axis = 1, keepdims = True)\n return image\n \n def normalize_ft(self, image, tstacks = 6, fstacks = 1, p=0.5):\n if np.random.uniform(0, 1) <= p: \n final_shape = image.shape\n chnl_shape = (final_shape[0]//tstacks, final_shape[1]//fstacks) #will be approx to note.\n f = chnl_shape[1]\n t = chnl_shape[0]\n trans_image_array = np.copy(image)\n for t_ in range(tstacks):\n for f_ in range(fstacks):\n trans_image_array[t_*t:(t_+1)*t, f_*f:(f_+1)*f] = self.normalize_xy(trans_image_array[t_*t:(t_+1)*t, f_*f:(f_+1)*f])\n return trans_image_array\n else:\n return image\n \n def minmax_norm(self, image):\n # min-max to bring image in range 0,1. albumentations requires it.\n return ((image - np.min(image))/(np.max(image) - np.min(image)))\n \n def flip(self,image, p=0.5):\n# transform = A.Compose([\n# # A.OneOf([\n# # A.RandomBrightnessContrast(brightness_limit = [-0.3,0.2], contrast_limit = [-0.3,0.2], p =0.75),\n# # A.Sharpen(alpha = [0.1,0.4], lightness = [0.6, 1], p=0.75),\n# # ]),\n# A.HorizontalFlip(p=1),\n# # A.ShiftScaleRotate(shift_limit_x=(-0.08, 0.08), scale_limit=0, rotate_limit=0,\n# # p=1)\n# ])\n \n# trans_image_array = transform(image = self.minmax_norm(np.copy(self.image_array)))['image']\n if np.random.uniform(0, 1) <= p: \n trans_image_array = np.fliplr(image)\n return trans_image_array\n else:\n return image\n\n def swap_channels(self, image, p = 0.3):\n if np.random.uniform(0, 1) <= p:\n # init_shape = (t, f)\n final_shape = image.shape\n chnl_shape = (final_shape[0]//6, final_shape[1]//1) #will be approx to note.\n\n\n chnls = {'pos_chnls': [0,2,4], 'neg_chnls': [1,3,5]}\n chnls['pos_chnls'].remove(random.choice(chnls['pos_chnls']))\n chnls['neg_chnls'].remove(random.choice(chnls['neg_chnls']))\n swap_op = random.choice(['pos_chnls', 'neg_chnls', 'both_swap'])\n\n f = chnl_shape[1]\n t = chnl_shape[0]\n\n # image_patches = [self.image_array[c:(c+1)*t, : f]], c = 0, 1, 2 ,3, 4, 5\n trans_image_array = np.copy(image)\n if swap_op == 'pos_chnls' or swap_op == 'both_swap':\n c1 = chnls['pos_chnls'][0]\n c2 = chnls['pos_chnls'][1]\n# print(f'swapping{c1}{c2}')\n trans_image_array[c1*t:(c1+1)*t, : f] = image[c2*t:(c2+1)*t, : f]\n trans_image_array[c2*t:(c2+1)*t, : f] = image[c1*t:(c1+1)*t, : f]\n\n if swap_op == 'neg_chnls' or swap_op == 'both_swap':\n c1 = chnls['neg_chnls'][0]\n c2 = chnls['neg_chnls'][1]\n# print(f'swapping{c1}{c2}')\n trans_image_array[c1*t:(c1+1)*t, : f] = image[c2*t:(c2+1)*t, : f]\n trans_image_array[c2*t:(c2+1)*t, : f] = image[c1*t:(c1+1)*t, : f] \n\n return trans_image_array\n else:\n return image.astype(np.float32)\n\n def drop_channels(self, image, p = 0.3,):\n if np.random.uniform(0, 1) <= p:\n # init_shape = (t, f)\n final_shape = image.shape\n chnl_shape = (final_shape[0]//6, final_shape[1]//1) #will be approx to note.\n\n chnls = {'pos_chnls': [0,2,4], 'neg_chnls': [1,3,5]}\n chnls_to_remove = 
random.sample(chnls['neg_chnls'], random.choice([1,2]))\n\n f = chnl_shape[1]\n t = chnl_shape[0]\n\n # image_patches = [self.image_array[c:(c+1)*t, : f]], c = 0, 1, 2 ,3, 4, 5\n trans_image_array = np.copy(image)\n for c in chnls_to_remove:\n trans_image_array[c*t:(c+1)*t, : f] = 0.25*image[c*t:(c+1)*t, : f]\n\n return trans_image_array\n else:\n return image.astype(np.float32)\n \n def add_needle(self, chls_to_add_needle, needle_img, needle_mask):\n fimg = np.copy(self.image_array)\n final_shape = fimg.shape\n chnl_shape = (final_shape[0]//6, final_shape[1]//1) #will be approx to note.\n f = chnl_shape[1]\n t = chnl_shape[0]\n \n for chl in chls_to_add_needle:\n chnl_needle_mask = needle_mask[chl*t:(chl+1)*t, : f]\n fimg[chl*t:(chl+1)*t, : f][chnl_needle_mask] = (np.random.uniform(0.5, 0.7)*needle_img[chl*t:(chl+1)*t, : f][chnl_needle_mask] + fimg[chl*t:(chl+1)*t, : f][chnl_needle_mask])\n return self.normalize(fimg).astype(np.float32)\n\n def apply_ext_needle(self):\n ftarget_type = np.random.choice([0, 1], p = [0.35, 0.65])\n needle_type = np.random.choice([1, 2, 5], p = [0.33, 0.33, 0.34])\n \n # needle_target_encoding = {\n# 0'brightpixel':[1, 0, 0, 0, 0, 0, 0],\n# 1'narrowband': [0, 1, 0, 0, 0, 0, 0],\n# 2'narrowbanddrd': [0, 0, 1, 0, 0, 0, 0],\n# 3'noise': [0, 0, 0, 1, 0, 0, 0], \n# 4'squarepulsednarrowband': [0, 0, 0, 0, 1, 0, 0],\n# 5'squiggle': [0, 0, 0, 0, 0, 1, 0],\n# 6'squigglesquarepulsednarrowband': [0, 0, 0, 0, 0, 0, 1]\n# }\n \n needle_mask_path = random.choice(glob.glob(f'{config.NEEDLE_PATH}mask_*_{needle_type}.npy'))\n needle_path = needle_mask_path.replace('mask_', '')\n \n# print(needle_path, needle_mask_path)\n needle_img = np.load(needle_path)\n needle_mask = np.load(needle_mask_path)\n# print(needle_img.shape, needle_mask.shape)\n if ftarget_type == 1:\n chls_to_add_needle = random.sample([0, 2, 4], random.choice([1, 2, 3]))\n trans_image_array = self.add_needle(chls_to_add_needle, needle_img, needle_mask)\n else:\n# needle_img = np.amax(needle_img) - needle_img\n chls_to_add_needle = random.sample([1, 3, 5], random.choice([1, 2, 3]))\n trans_image_array = self.add_needle(chls_to_add_needle, needle_img, needle_mask)\n return trans_image_array, ftarget_type \n\nclass SetiDataset:\n def __init__(self, image_paths, targets = None, ids = None, resize=None, augmentations = False):\n self.image_paths = image_paths\n self.targets = targets\n self.ids = ids\n self.resize = resize\n self.augmentations = augmentations\n\n def __len__(self):\n return len(self.image_paths)\n \n def __getitem__(self, item):\n # image = Image.open(self.image_paths[item])\n image = np.load(self.image_paths[item])\n \n id = self.ids[item]\n \n if config.ORIG_IMAGE:\n# converting 6 channels to 1 for original image, inverting off channels\n# image = np.vstack(image)\n image = image[0].astype(np.float32)\n \n if self.targets is not None:\n target = self.targets[item]\n\n if self.resize is not None:\n image = image.resize(self.resize[1], self.resize[0], resample = Image.BILINEAR)\n\n \n \n imt = ImageTransform(image)\n image = imt.normalize(image)\n \n if config.APPLY_NEEDLE:\n if target == 0 and np.random.uniform(0,1) <=0.55:\n image, target = imt.apply_ext_needle()\n \n if self.augmentations:\n image = imt.flip(image = image, p = 0.5)\n image = imt.swap_channels(image = image, p = 0.65)\n image = imt.drop_channels(image = image, p = 0.25)\n# print(target) \n# print('1ds', np.mean(image), np.std(image))\n# image = imt.normalize(cv2.resize(image, dsize=(256, 256), interpolation=cv2.INTER_AREA))\n# 
image1 = np.copy(image)\n#         image1 = imt.normalize(image1)\n#         print(f'o im {image1.mean()},{image1.std()}')\n\n        # working copy for the inverted view; without this assignment the block below raises NameError\n        image0 = np.copy(image)\n        if config.INVERT_OFF_CHANNELS:\n            #inverting off channels\n            chnl_shape = (config.IMAGE_SIZE[1]//6, config.IMAGE_SIZE[0]//1) #will be approx to note.(time,freq)\n            f = chnl_shape[1]\n            t = chnl_shape[0]\n            # image_patches = [self.image_array[c:(c+1)*t, : f]], c = 0, 1, 2 ,3, 4, 5\n            chnls_to_invert = [1, 3, 5]\n            for c in chnls_to_invert:\n                image0[c*t:(c+1)*t, : f] = np.amax(image0[c*t:(c+1)*t, : f]) - image0[c*t:(c+1)*t, : f]\n#                 image0[c*t:(c+1)*t, : f] = imt.normalize(image0[c*t:(c+1)*t, : f], )\n            image0 = imt.normalize(image0, )\n#             print(f'i im {image0.mean()},{image0.std()}')\n\n        # per-channel normalized copy; also needed by the invert block below\n        image2 = imt.normalize_ft(image, p=1)\n        if config.INVERT_OFF_CHANNELS:\n            #inverting off channels\n            chnl_shape = (config.IMAGE_SIZE[1]//6, config.IMAGE_SIZE[0]//1) #will be approx to note.(time,freq)\n            f = chnl_shape[1]\n            t = chnl_shape[0]\n            # image_patches = [self.image_array[c:(c+1)*t, : f]], c = 0, 1, 2 ,3, 4, 5\n            chnls_to_invert = [1, 3, 5]\n            for c in chnls_to_invert:\n                image2[c*t:(c+1)*t, : f] = np.amax(image2[c*t:(c+1)*t, : f]) - image2[c*t:(c+1)*t, : f]\n            image2 = imt.normalize(image2, )\n\n\n#         image3ch = np.zeros((3, image.shape[0], image.shape[1]))\n#         image3ch[0] = image0.reshape(1,image0.shape[0],image0.shape[1])\n#         image3ch[1] = image1.reshape(1,image1.shape[0],image1.shape[1])\n#         image3ch[2] = image2.reshape(1,image2.shape[0],image2.shape[1])\n\n        image = image.reshape(1,image.shape[0],image.shape[1])\n\n        #pytorch expects channelHeightWidth instead of HeightWidthChannel\n        # image = np.transpose(image, (2, 0, 1)).astype(np.float32)\n\n        if self.targets is not None:\n            return{'images': torch.tensor(image, dtype = torch.float), \n                   'targets': torch.tensor(target, dtype = torch.long),\n                   'ids': torch.tensor(id, dtype = torch.int32)}\n        else:\n            return{'images': torch.tensor(image, dtype = torch.float),\n                   'ids': torch.tensor(id, dtype = torch.int32)}\n\n# i = SetiDataset([f'{config.DATA_PATH}train/1/1a0a41c753e1.npy'], targets = [1], ids =[0], resize=None, augmentations = None)[0]\n\n# i = SetiDataset([f'/content/drive/MyDrive/SETI/resized_images/256256/train/1a0a41c753e1.npy'], targets = [1], ids =[0], resize=None, augmentations = None)[0]\n# print(i)","sub_path":"vae/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":11191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"647984726","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport tempfile\nimport copy\nimport subprocess\nimport time\nimport sys\nimport shutil\nimport socket\nimport re\n\nfrom .utils import find_executable, get_free_port\n\nMONGOD_BIN = 'mongod'\nDEFAULT_ARGS = [\n # don't flood stdout, we're not reading it\n \"--quiet\",\n # disable unused.\n \"--nounixsocket\",\n # use a smaller default file size\n \"--smallfiles\",\n # journaling on by default in 2.0 and makes it to slow\n # for tests, can causes failures in jenkins\n \"--nojournal\",\n]\nSTARTUP_TIME = 0.4\nSTART_CHECK_ATTEMPTS = 200\n\n\nclass MongoBox(object):\n\n def __init__(self, mongod_bin=None, port=None,\n log_path=None, db_path=None, scripting=False,\n prealloc=False, auth=False):\n\n self.mongod_bin = mongod_bin or find_executable(MONGOD_BIN)\n assert self.mongod_bin, 'Could not find \"{}\" in system PATH. Make sure you have MongoDB installed.'.format(MONGOD_BIN)\n\n self.port = port or get_free_port()\n self.log_path = log_path or os.devnull\n self.scripting = scripting\n self.prealloc = prealloc\n self.db_path = db_path\n self.auth = auth\n\n if self.db_path:\n if os.path.exists(self.db_path) and os.path.isfile(self.db_path):\n raise AssertionError('DB path should be a directory, but it is a file.')\n\n self.process = None\n\n def start(self):\n '''Start MongoDB.\n\n Returns `True` if instance has been started or\n `False` if it could not start.\n '''\n if self.db_path:\n if not os.path.exists(self.db_path):\n os.mkdir(self.db_path)\n self._db_path_is_temporary = False\n else:\n self.db_path = tempfile.mkdtemp()\n self._db_path_is_temporary = True\n\n args = copy.copy(DEFAULT_ARGS)\n args.insert(0, self.mongod_bin)\n\n args.extend(['--dbpath', self.db_path])\n args.extend(['--port', str(self.port)])\n args.extend(['--logpath', self.log_path])\n\n if self.auth:\n args.append(\"--auth\")\n\n if not self.scripting:\n args.append(\"--noscripting\")\n\n if not self.prealloc:\n args.append(\"--noprealloc\")\n\n if not self.get_version().startswith('3.'):\n args.append(\"--nohttpinterface\")\n\n self.process = subprocess.Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n\n return self._wait_till_started()\n\n def stop(self):\n if not self.process:\n return\n\n # Not sure if there should be more checks for\n # other platforms.\n if sys.platform == 'darwin':\n self.process.kill()\n else:\n os.kill(self.process.pid, 9)\n self.process.wait()\n\n\n if self._db_path_is_temporary:\n shutil.rmtree(self.db_path)\n self.db_path = None\n\n self.process = None\n\n def running(self):\n return self.process is not None\n\n def client(self):\n import pymongo\n try:\n return pymongo.MongoClient(port=self.port) # version >=2.4\n except AttributeError:\n return pymongo.Connection(port=self.port)\n\n def _wait_till_started(self):\n attempts = 0\n while self.process.poll() is None and attempts < START_CHECK_ATTEMPTS:\n attempts += 1\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n try:\n s.connect(('localhost', int(self.port)))\n return True\n except (IOError, socket.error):\n time.sleep(0.25)\n finally:\n s.close()\n\n self.stop()\n return False\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, *args, **kwargs):\n self.stop()\n\n def get_version(self):\n args = [\n self.mongod_bin,\n '--version'\n ]\n p = self.process = subprocess.Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n stdout, stderr = p.communicate()\n version = None\n for line in 
stdout.decode('utf-8', 'replace').split('\n'):\n            g = re.match(r'db version v([\d\.]+)', line)\n            if g:\n                version = g.group(1)\n                break\n        if not version:\n            raise Exception('cannot parse version')\n        return version\n","sub_path":"mongobox/mongobox.py","file_name":"mongobox.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"442393287","text":"from tkinter import *\nimport calendar\ndef showCal():\n\tgui = Tk()\n\tgui.config(background=\"white\")\n\tgui.title(\"CALENDAR\")\n\tgui.geometry(\"570x600\")\n\tfetch_year = int(year_field.get())\n\tcal_content = calendar.calendar(fetch_year)\n\tcal_year = Label(gui, text=cal_content, font=\"Consolas 10 bold\")\n\tcal_year.grid(row=2, column=1, padx=20)\n\tgui.mainloop()\nif __name__ == \"__main__\":\n\tgui = Tk()\n\tgui.config(background=\"white\")\n\tgui.title(\"CALENDAR\")\n\tgui.geometry(\"250x140\")\n\tcal = Label(gui, text=\"CALENDAR\", bg=\"blue\", font=(\"italic\", 28, 'bold'))\n\tyear = Label(gui, text=\"Enter any Year\", bg=\"light green\")\n\tyear_field = Entry(gui)\n\tShow = Button(gui, text=\"Show Calendar\", fg=\"Pink\", bg=\"Green\", command=showCal)\n\tExit = Button(gui, text=\"Exit\", fg=\"Pink\", bg=\"Green\", command=exit)\n\tcal.grid(row=1, column=1)\n\tyear.grid(row=2, column=1)\n\tyear_field.grid(row=3, column=1)\n\tShow.grid(row=4, column=1)\n\tExit.grid(row=6, column=1)\n\tgui.mainloop()\n \n","sub_path":"calndr.py","file_name":"calndr.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"575580452","text":"from __future__ import print_function\n\n\nclass subclassfloat(float):\n def __repr__(self):\n return \"{0:0.2f}\".format(self)\n \n\n#Return True or False if object is a list\ndef islist(obj):\n return hasattr(obj, \"__iter__\") and not isinstance(obj, (str,bytes))\n\ndef try_cast_float(argument, failure, *exceptions):\n try:\n return float(argument)\n except exceptions or ValueError:\n return failure() if callable(failure) else failure\n \ndef try_except(success, failure, *exceptions):\n try:\n return success() if callable(success) else success\n except exceptions or Exception:\n return failure() if callable(failure) else failure\n\n\n#Deep copy a list and change type when possible\ndef deepcopy(obj, element_type = float):\n newlist = []\n for element in obj:\n if islist(element):\n newlist.append( deepcopy(element, element_type) )\n else:\n newlist.append(try_except(element_type(element), element, ValueError))\n return newlist\n\n#Function prints list with custom format\ndef pprint(obj):\n if islist(obj):\n newlist = []\n newlist = deepcopy(obj, subclassfloat)\n print(newlist)\n else:\n print(\"{0:0.2f}\".format(obj) if isinstance(obj, (int,float)) else obj) \n\n\n\n#function that flattens nested lists.\ndef flatten(list_object):\n for element in list_object:\n #check if element is another list\n if islist(element):\n #recursively check if next layer is a list\n for sub_element in flatten(element):\n yield sub_element\n else:\n #termination condition; element is not a list, return element\n yield element\n\n#simulates digital electronics encoder\ndef encode(output_range, integer):\n if integer <= output_range:\n return list(1 if index == integer-1 else 0 for index in range(output_range))\n else:\n print(\"Error: Data encoding size error\")\n return list([0 for element in range(output_range)])\n\n\ndef decode(list_obj):\n output = sum([index+1 for index in range(len(list_obj)) if list_obj[index]==1])\n return output\n\n\n\ndef winner_takes_all(list_obj):\n maximum = max(list_obj)\n if (list_obj).count(maximum) > 1:\n #Return empty list if more than 1 max\n return list([0 for element in list_obj])\n else:\n return list([int(element/maximum) for element in list_obj])\n\n\n \n","sub_path":"assign03/code/neural_misc.py","file_name":"neural_misc.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"231312062","text":"#!/home/user/anaconda3/bin/python\nimport warnings\nimport itertools\nimport pandas as pd\n# import numpy as np\n# import statsmodels.api as sm\n# import statsmodels\nfrom influxdb import InfluxDBClient, DataFrameClient\nimport json\nimport requests\nimport time\nfrom ServerClass import Channel_object\n\n\nall_copies_of_object = []\n\n\ndef read_node_config_file(node_name):\n\n with open('node.cnfg.json') as f:\n json_file = json.load(f)\n\n return json_file[node_name]\n\n\ndef iniatialize():\n\n with open('ch-ad.cnfg.json') as f:\n json_file = json.load(f)\n\n object_names = json_file.keys()\n print(\"all object names: \", object_names)\n for obj_name in object_names:\n channel_names = json_file[obj_name].keys()\n for ch_name in channel_names:\n\n attr_of_node = read_node_config_file(\"influxdb\")\n\n SRC_ip_addr = attr_of_node[\"host\"]\n SRC_port = attr_of_node[\"port\"]\n SRC_username = attr_of_node[\"username\"]\n SRC_userpass = attr_of_node[\"userpass\"]\n\n SRC_source_type = json_file[obj_name][ch_name][\"input\"][\"source\"]\n SRC_measurement = json_file[obj_name][ch_name][\"input\"][\"measurement\"]\n SRC_db_name = json_file[obj_name][ch_name][\"input\"]['database']\n\n model_name = json_file[obj_name][ch_name][\"input\"]['model_name']\n model_dir = json_file[obj_name][ch_name][\"input\"][\"model_dir\"]\n rate = json_file[obj_name][ch_name][\"input\"][\"rate\"]\n\n OUT_node = json_file[obj_name][ch_name][\"output\"][\"node\"]\n OUT_database = json_file[obj_name][ch_name][\"output\"][\"database\"]\n OUT_measurement = json_file[obj_name][ch_name][\"output\"][\"measurement\"]\n\n\n\n all_copies_of_object.append(Channel_object(ch_name, SRC_ip_addr, SRC_port, SRC_username, SRC_userpass, SRC_source_type, SRC_db_name, SRC_measurement, model_name, model_dir, rate, OUT_node, OUT_database, OUT_measurement))\n\n\nif __name__ == \"__main__\":\n\n iniatialize()\n\n while True:\n\n for object in all_copies_of_object:\n\n object.get_raw_data_from_source()\n\n object.send_raw_data_to_ml()\n\n object.put_preprocessed_data_to_db()\n\n\n time.sleep(20)\n\n\n\n\n\n\n\n\n\n","sub_path":"16october/preprocessed_data_to_ml.py","file_name":"preprocessed_data_to_ml.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"166738833","text":"\nimport time, math\n\ntry:\n from processors.robot_processor import RobotProcessor\nexcept:\n import sys, os\n\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n from processors.robot_processor import RobotProcessor\n\ncurrent_aliens = None\nalien_update_counter = 0\ncurrent_distances = None\ncurrent_orientation = None\nlast_sensor_reading_timestamp = None\nlast_drive_params = (0,0)\nprocessor = None\nLEFT = 0\nRIGHT = 1\nTARGET_WALL_FOLLOWING_DISTANCE = 20\nNUM_PREDICT_STEPS_AHEAD = 28\nMAX_DIFFERENCE_PER_CYCLE_TO_STEER = 0.21\nSPEED_FORWARD = 25\nDISTANCE_TO_WALL_THRESHOLD = 30\nMIN_SENSOR_READING_THRESHOLD = 16\n\nclass RobotLostExpection(Exception):\n pass\n\ndef alien_update(aliens):\n global current_aliens, alien_update_counter\n if aliens is not None and 'aliens' in aliens:\n current_aliens = aliens['aliens']\n alien_update_counter += 1\n #print(\"alien_update_counter\",alien_update_counter,aliens['aliens'])\n\ndistance_update_count = 0\n\ndef distance_update(distances):\n global current_distances,last_sensor_reading_timestamp,distance_update_count\n if distances is not None and 'readings' in distances:\n distance_update_count += 1\n current_distances = distances['readings']\n last_sensor_reading_timestamp = time.time()\n #if distance_update_count % 50 == 0: print(\"t\", last_sensor_reading_timestamp, \"sensor distance\", current_distances)\n\ndef orientation_update(orientation):\n global current_orientation\n current_orientation = orientation['angle']\n #print(\"t\", time.time(), \"current_orientation\", current_orientation)\n\n\ndef find_first_alien_target():\n while current_aliens is None or len(current_aliens) == 0:\n time.sleep(0.05)\n central_alien = sorted(current_aliens,key=lambda r:math.fabs(r['xAngle']))[0]\n print(\"first alien found:\",central_alien)\n return central_alien\n\n\ndef little_kick(period):\n drive_robot(55, 55)\n time.sleep(period)\n\ndef wait_until_next_sensor_reading():\n current_sensor_timestamp = last_sensor_reading_timestamp\n while current_sensor_timestamp+0.0001>=last_sensor_reading_timestamp:\n time.sleep(0.001)\n\ndef wait_until_next_alien_reading(skip_frame=False):\n current_counter = alien_update_counter\n increment = 2 if skip_frame else 1\n while current_counter + increment > alien_update_counter:\n time.sleep(0.001)\n\ndef keep_driving_n_sensor_cycles(n):\n actual_drive_cycles = 0\n ultrasonic_low_distance_counter = 0\n for i in range(n):\n if current_distances is not None and 'C' in current_distances and current_distances['C'] < DISTANCE_TO_WALL_THRESHOLD:\n print(\"ultrasonic distance is too low. 
stopping\")\n drive_robot(0, 0)\n ultrasonic_low_distance_counter += 1\n if ultrasonic_low_distance_counter >= 5:\n return actual_drive_cycles, True\n wait_until_next_sensor_reading()\n continue\n drive_robot(SPEED_FORWARD,SPEED_FORWARD)\n ultrasonic_low_distance_counter = 0\n wait_until_next_sensor_reading()\n actual_drive_cycles+=1\n return actual_drive_cycles, False\n\ndef drive_robot(speed_left, speed_right):\n global last_drive_params\n last_drive_params = (speed_left,speed_right)\n processor.drive(speed_left,speed_right)\n\ndef check_if_robot_is_lost(throw=True):\n if (current_distances['R'] < MIN_SENSOR_READING_THRESHOLD and current_distances['L'] < MIN_SENSOR_READING_THRESHOLD):\n wait_until_next_sensor_reading()\n if (current_distances['R'] < MIN_SENSOR_READING_THRESHOLD and current_distances['L'] < MIN_SENSOR_READING_THRESHOLD):\n drive_robot(0,0)\n if throw: raise RobotLostExpection()\n\n\n\ndef drive_to_wall_ahead(follow_wall=None):\n little_kick(0.1)\n if(follow_wall is None):\n drive_robot(SPEED_FORWARD, SPEED_FORWARD)\n keep_driving_n_sensor_cycles(1000)\n return\n #initial measurement of direction\n last_difference = current_distances['R' if follow_wall == RIGHT else 'L'] - TARGET_WALL_FOLLOWING_DISTANCE\n drive_robot(SPEED_FORWARD, SPEED_FORWARD)\n wait_until_next_sensor_reading()\n cycles_driven, _ = keep_driving_n_sensor_cycles(int(NUM_PREDICT_STEPS_AHEAD/2))\n while True:\n check_if_robot_is_lost()\n current_difference = current_distances['R' if follow_wall==RIGHT else 'L']-TARGET_WALL_FOLLOWING_DISTANCE\n #detect sudden disappearence of the wall\n if current_difference > last_difference + 15:\n #looks suspicious\n for i in range(5):\n wait_until_next_sensor_reading()\n cycles_driven +=1\n current_difference = current_distances['R' if follow_wall == RIGHT else 'L'] - TARGET_WALL_FOLLOWING_DISTANCE\n if current_difference > last_difference + 15: #suspicion confirmed, side wall disappeared, drive to wall ahead\n keep_driving_n_sensor_cycles(1000)\n return\n\n difference_derivative_per_cycle = (current_difference-last_difference)/cycles_driven if cycles_driven>10 else 0\n print(\"current_difference\", current_difference, \"difference_derivative_per_cycle \", difference_derivative_per_cycle, \"R\",current_distances['R'], \"L\",current_distances['L'] )\n few_steps_ahead_prediction = current_difference + NUM_PREDICT_STEPS_AHEAD * difference_derivative_per_cycle\n if math.fabs(few_steps_ahead_prediction)>7:\n #correct the course\n if current_difference/few_steps_ahead_prediction > 0: #same sign of current difference and prediction - not enough to recover in 4 steps, need to steer more\n steer_factor = few_steps_ahead_prediction/math.fabs(few_steps_ahead_prediction)\n else: #different sign, overshoot, need to steer reverse\n steer_factor = -few_steps_ahead_prediction/math.fabs(few_steps_ahead_prediction)\n sign_factor = steer_factor if follow_wall == RIGHT else -steer_factor\n if math.fabs(difference_derivative_per_cycle) < MAX_DIFFERENCE_PER_CYCLE_TO_STEER and difference_derivative_per_cycle/few_steps_ahead_prediction>0:\n #if difference_derivative_per_cycle is not that big or we are looking to reduce it with steer in opposite direction\n drive_robot(sign_factor*35, sign_factor*-35)\n print(\"t\", time.time(), \"steerting driving\", sign_factor*35, sign_factor*-35, \"for\",math.fabs(few_steps_ahead_prediction)*0.007)\n time.sleep(math.fabs(few_steps_ahead_prediction)*0.005)\n #go forward and calc next derivative from sensors\n print(\"t\", time.time(), \"finished steerting 
driving\")\n drive_robot(SPEED_FORWARD, SPEED_FORWARD)\n wait_until_next_sensor_reading()\n current_difference = current_distances['R' if follow_wall==RIGHT else 'L']-TARGET_WALL_FOLLOWING_DISTANCE\n last_difference = current_difference\n cycles_driven, wall_ahead = keep_driving_n_sensor_cycles(NUM_PREDICT_STEPS_AHEAD)\n if wall_ahead:\n break\n\n\n\ndef turn(direction, angle=90, stop=False):\n start_orientation = current_orientation\n print(\"start_orientation\",start_orientation)\n if direction==RIGHT:\n drive_robot(65, -65)\n else:\n drive_robot(-65, 65)\n while True:\n check_if_robot_is_lost()\n if direction==RIGHT:\n if current_orientation - start_orientation < -angle+30:\n drive_robot(40, -40)\n if current_orientation - start_orientation < -angle+5:\n break\n if direction==LEFT:\n if current_orientation - start_orientation > angle-60:\n drive_robot(-40, 40)\n if current_orientation - start_orientation > angle-5:\n break\n time.sleep(0.01)\n if stop: drive_robot(0, 0)\n print(\"stop_orientation\",current_orientation)\n\n # cycles_turning = 0\n # while True:\n # current_side_distance = current_distances['L' if direction==RIGHT else 'R']\n # current_front_distance = current_distances['C']\n # side_diff = current_side_distance-TARGET_WALL_FOLLOWING_DISTANCE\n # if(current_front_distance>50):\n # keep_driving_n_sensor_cycles(int(cycles_turning/2.0))\n # drive_robot(0, 0)\n # break\n # if direction==RIGHT:\n # drive_robot(50, -50)\n # else:\n # drive_robot(-50, 50)\n # wait_until_next_sensor_reading()\n # cycles_turning +=1\n\ndef find_next_alien_target(prev_alien=None):\n while current_aliens is None or len(current_aliens) == 0 or (prev_alien is not None and len(current_aliens)==1 and current_aliens[0]['id']<= prev_alien['id']):\n time.sleep(0.05)\n central_alien = sorted(current_aliens,key=lambda r:math.fabs(r['xAngle']))[0]\n print(\"first alien found:\",central_alien)\n return central_alien\n\ndef follow_alien(alien):\n ultrasonic_low_distance_counter = 0\n last_turn_alien_counter = -2\n while True:\n #print(\"following alien\", alien['id'])\n aliens_by_id = [a for a in current_aliens if a['id']==alien['id']]\n if aliens_by_id is None or len(aliens_by_id)<1:\n print(\"missed alien. stopping\", alien)\n processor.drive(0,0)\n return\n if (aliens_by_id[0]['xAngle']<6 and aliens_by_id[0]['xAngle']>-6):\n #print(\"straight angle, driving straight\", aliens_by_id[0]['xAngle'])\n processor.drive(SPEED_FORWARD,SPEED_FORWARD)\n elif last_turn_alien_counter+4=6:\n turn(direction=RIGHT, angle=aliens_by_id[0]['xAngle'],stop=True)\n last_turn_alien_counter = alien_update_counter\n elif aliens_by_id[0]['xAngle']<=-6:\n turn(direction=LEFT, angle=-aliens_by_id[0]['xAngle'],stop=True)\n last_turn_alien_counter = alien_update_counter\n #print(\"updated alien details\",aliens_by_id[0])\n #if current_distances is not None and 'C' in current_distances:\n #print(\"sensor distance\",current_distances['C'])\n if aliens_by_id[0]['distance'] < 20:\n print(\"distance is too close. stopping\", aliens_by_id[0]['distance'])\n processor.drive(0,0)\n return\n if current_distances is not None and 'C' in current_distances and current_distances['C'] < DISTANCE_TO_WALL_THRESHOLD:\n print(\"ultrasonic distance is too low. 
stopping\")\n processor.drive(0,0)\n ultrasonic_low_distance_counter+=1\n if ultrasonic_low_distance_counter >=5:\n return\n wait_until_next_sensor_reading()\n continue\n else:\n ultrasonic_low_distance_counter=0\n\n time.sleep(0.005)\n\ndef determine_turn_direction():\n last_result = -1\n result_in_a_row =0\n print(\"determine_turn_direction. R:\", current_distances['R'], \"L\", current_distances['L'])\n while True:\n if current_distances['R'] > 55 and current_distances['L']>55:\n if last_result == 1:\n if result_in_a_row == 5:\n return RIGHT, LEFT, True\n result_in_a_row+=1\n else:\n last_result = 1\n result_in_a_row = 1\n elif current_distances['R'] < current_distances['L']:\n if last_result == 2:\n if result_in_a_row == 5:\n return LEFT, RIGHT, False\n result_in_a_row+=1\n else:\n last_result = 2\n result_in_a_row = 1\n else:\n if last_result == 3:\n if result_in_a_row == 5:\n return RIGHT, LEFT, False\n result_in_a_row+=1\n else:\n last_result = 3\n result_in_a_row = 1\n wait_until_next_sensor_reading()\n\ndef main():\n try:\n global processor\n processor = RobotProcessor()\n processor.initialise()\n processor.set_alien_update_handler(alien_update)\n processor.set_distance_update_handler(distance_update)\n processor.set_orientation_update_handler(orientation_update)\n processor.set_camera_mode(0)\n while current_distances is None or current_orientation is None:\n print(\"Waiting for sensor data\")\n time.sleep(0.5)\n input(\"Ready to go. Press Enter to start\")\n while True:\n try:\n alien = find_next_alien_target()\n print(\"alien\", alien)\n follow_alien(alien)\n turn_direction, follow_wall, is_last_turn = determine_turn_direction()\n print(\"turn_direction\",turn_direction, \"follow_wall\",follow_wall, \"is_last_turn\",is_last_turn)\n turn(direction=turn_direction, stop=True)\n wait_until_next_alien_reading(skip_frame=True)\n wait_until_next_alien_reading()\n for i in range(5):\n wait_until_next_sensor_reading()\n if is_last_turn:\n little_kick(0.6)\n break\n except RobotLostExpection:\n print(\"Robot lost\")\n while check_if_robot_is_lost(throw=False):\n time.sleep(0.1)\n print(\"Robot recovery\")\n\n # little_kick(0.4)\n # follow_wall = RIGHT\n # # while True:\n # while True:\n # try:\n # drive_to_wall_ahead(follow_wall=follow_wall)\n # turn_direction, follow_wall, is_last_turn = determine_turn_direction()\n # print(\"turn_direction\",turn_direction, \"follow_wall\",follow_wall, \"is_last_turn\",is_last_turn)\n # turn(direction=turn_direction)\n # except RobotLostExpection:\n # print(\"Robot lost\")\n # while check_if_robot_is_lost(throw=False):\n # time.sleep(0.1)\n # print(\"Robot recovery\")\n # if is_last_turn:\n # little_kick(0.6)\n # break\n # while check_if_robot_is_lost(throw=False):\n # time.sleep(0.1)\n processor.close()\n time.sleep(0.5)\n except KeyboardInterrupt:\n processor.close()\n\nif __name__ == '__main__':\n main()\n # loop = asyncio.get_event_loop()\n # loop.run_until_complete(main())\n # loop.close()","sub_path":"server/challenges/maze_alien.py","file_name":"maze_alien.py","file_ext":"py","file_size_in_byte":14421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"19834229","text":"# The maximum sum subarray problem consists in finding the maximum sum of a contiguous subsequence in an array or\n# list of integers:\n#\n# max_sequence([-2, 1, -3, 4, -1, 2, 1, -5, 4]) # should be 6: [4, -1, 2, 1] Easy case is when the list is made up of\n# only positive numbers and the maximum sum is the sum of the whole array. If the list is made up of only negative\n# numbers, return 0 instead.\n#\n# Empty list is considered to have zero greatest sum. Note that the empty list or array is also a valid\n# sublist/subarray.\n\n# def max_sequence(arr):\n# curent, max = 0, 0\n# for i in arr:\n# curent += i\n# if curent < 0:\n# curent = 0\n# if curent > max:\n# max = curent\n# return max\n\n\ndef max_sequence(arr):\n maximum = 0\n for i in range(len(arr)):\n for j in range(i, len(arr) + 1):\n current = sum(arr[i:j])\n if current > maximum:\n maximum = current\n return maximum\n\n\na = max_sequence([-2, 1, -3, 4, -1, 2, 1, -5, 4])\nprint(a)","sub_path":"5_Maximum_subarray_sum.py","file_name":"5_Maximum_subarray_sum.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"577462392","text":"from getnumIPs import cuenta\nfrom getvecinos import datos\nfrom gethostname import hostname\nfrom getshow import showtodo\ntitulo={0:\"Hostname\",1:\"Local Interface\",2:\"Neighbor\",3:\"Neighbor Interface\"}\n\nshowtodo()\nc = cuenta()\nveci, loc, vec, nrolin = datos()\nhost = hostname()\nl=[]\nr=[]\ntit=[]\nfor i in range(len(titulo)):\n\ttit.append(len(titulo[i]))\n\tprint(\"+\"+\"-\"*(len(titulo[i])+2),end=\"\")\nprint(\"+\")\n\nfor k in range(4):\n\tprint(\"| \"+titulo[k],end=\" \")\nprint(\"|\")\n\n\nfor i in range(len(titulo)):\n\tprint(\"+\"+\"-\"*(len(titulo[i])+2),end=\"\")\nprint(\"+\")\n\nfor i in range(c):\n\tfor j in range(int(nrolin[str(i)])):\n\t\tval =[len(host[\"host\"+str(i)]),len(loc[\"loc\"+str(i)+str(j)]),len(veci[\"veci\"+str(i)+str(j)]),len(vec[\"vec\"+str(i)+str(j)])] \n\t\tfor k in range(len(tit)):\n\t\t\ta=tit[k]+2-val[k]\n\t\t\tif a%2==0:\n\t\t\t\tl.append(a/2)\n\t\t\t\tr.append(a/2)\n\t\t\telse:\n\t\t\t\tl.append((a-1)/2+1)\n\t\t\t\tr.append((a-1)/2)\n\t\t\t\t\n\t\tprint(\"|\"+\" \"*int(l[0])+host[\"host\"+str(i)]+\" \"*(int(r[0]))+\"|\"+\" \"*int(l[1])+loc[\"loc\"+str(i)+str(j)]+\" \"*(int(r[1]))+\"|\"+\" \"*int(l[2])+veci[\"veci\"+str(i)+str(j)]+\" \"*(int(r[2]))+\"|\"+\" \"*int(l[3])+vec[\"vec\"+str(i)+str(j)]+\" \"*(int(r[3]))+\"|\")\n\nfor i in range(len(titulo)):\n\tprint(\"+\"+\"-\"*(len(titulo[i])+2),end=\"\")\nprint(\"+\")\n\n#print(\"+\"+\"-\"*(len(titulo[0])+2)+\"+\"+\"-\"*(len(titulo[1])+2)+\"+\"+\"-\"*(len(titulo[2])+2)+\"+\"+\"-\"*(len(titulo[3])+2)+\"+\")\n","sub_path":"salida.py","file_name":"salida.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"220127991","text":"\"\"\"\nDerek Gloudemans - August 4, 2020\nThis file contains a simple script to train a retinanet object detector on the UA Detrac\ndetection dataset.\n- Pytorch framework\n- Resnet-50 Backbone\n- Manual file separation of training and validation data\n- Automatic periodic checkpointing\n\"\"\"\n\n### Imports\n\nimport os ,sys\nimport numpy as np\nimport random \nimport cv2\nrandom.seed = 0\nimport torch\nfrom torch.utils import data\nfrom torch import optim\nimport collections\n\n# add relevant packages and directories to path\ndetector_path = os.path.join(os.getcwd(),\"models\",\"pytorch_retinanet_detector\")\nsys.path.insert(0,detector_path)\ndetrac_util_path = os.path.join(os.getcwd(),\"util_detrac\")\nsys.path.insert(0,detrac_util_path)\n\n#from _detectors.pytorch_retinanet.retinanet import model, csv_eval \nfrom models.pytorch_retinanet_detector.retinanet import model\nfrom util_detrac.detrac_detection_dataset import Detection_Dataset,collate\n\n\n# surpress XML warnings (for UA detrac data)\nimport warnings\nwarnings.filterwarnings(action='once')\n\ndef plot_detections(dataset,retinanet):\n \"\"\"\n Plots detections output\n \"\"\"\n retinanet.training = False\n retinanet.eval()\n\n idx = np.random.randint(0,len(dataset))\n\n im,label,meta = dataset[idx]\n\n im = im.to(device).unsqueeze(0).float()\n #im = im[:,:,:224,:224]\n\n\n with torch.no_grad():\n\n scores,labels, boxes = retinanet(im)\n\n if len(boxes) > 0:\n keep = [] \n for i in range(len(scores)):\n if scores[i] > 0.5:\n keep.append(i)\n boxes = boxes[keep,:]\n\n im = dataset.denorm(im[0])\n cv_im = np.array(im.cpu()) \n cv_im = np.clip(cv_im, 0, 1)\n\n # Convert RGB to BGR \n cv_im = cv_im[::-1, :, :] \n\n im = cv_im.transpose((1,2,0))\n\n for box in boxes:\n box = box.int()\n im = cv2.rectangle(im,(box[0],box[1]),(box[2],box[3]),(0.7,0.3,0.2),1)\n cv2.imshow(\"Frame\",im)\n cv2.waitKey(2000)\n\n retinanet.train()\n retinanet.training = True\n retinanet.module.freeze_bn()\n\n\nif __name__ == \"__main__\":\n\n # define parameters here\n depth = 50\n num_classes = 13\n patience = 0\n max_epochs = 50\n start_epoch = 0\n checkpoint_file = None\n\n # Paths to data here\n label_dir = \"/home/worklab/Desktop/detrac/DETRAC-Train-Annotations-XML-v3\"\n train_partition = \"/home/worklab/Desktop/detrac/DETRAC-train-data\"\n val_partition = \"/home/worklab/Desktop/detrac/DETRAC-val-data\"\n\n\n\n ###########################################################################\n\n\n # Create the model\n if depth == 18:\n retinanet = model.resnet18(num_classes=num_classes, pretrained=True)\n elif depth == 34:\n retinanet = model.resnet34(num_classes=num_classes, pretrained=True)\n elif depth == 50:\n retinanet = model.resnet50(num_classes=num_classes, pretrained=True)\n elif depth == 101:\n retinanet = model.resnet101(num_classes=num_classes, pretrained=True)\n elif depth == 152:\n retinanet = model.resnet152(num_classes=num_classes, pretrained=True)\n else:\n raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')\n\n\n # create dataloaders\n try:\n train_data\n except:\n # get dataloaders\n train_data = Detection_Dataset(train_partition,label_dir)\n val_data = Detection_Dataset(val_partition,label_dir)\n #train_data = LocMulti_Dataset(train_partition,label_dir)\n #val_data = LocMulti_Dataset(val_partition,label_dir)\n params = {'batch_size' : 8,\n 'shuffle' : True,\n 'num_workers': 0,\n 'drop_last' : True,\n 'collate_fn' : collate\n }\n trainloader = 
data.DataLoader(train_data,**params)\n testloader = data.DataLoader(val_data,**params)\n\n \n\n # CUDA\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n if use_cuda:\n if torch.cuda.device_count() > 1:\n retinanet = torch.nn.DataParallel(retinanet,device_ids = [0,1])\n retinanet = retinanet.to(device)\n else:\n retinanet = retinanet.to(device)\n\n\n # load checkpoint if necessary\n try:\n if checkpoint_file is not None:\n retinanet.load_state_dict(torch.load(checkpoint_file).state_dict())\n except:\n retinanet.load_state_dict(torch.load(checkpoint_file)[\"model_state_dict\"])\n\n # training mode\n retinanet.training = True\n retinanet.train()\n retinanet.module.freeze_bn()\n\n optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, verbose=True, mode = \"min\")\n loss_hist = collections.deque(maxlen=500)\n most_recent_mAP = 0\n\n print('Num training images: {}'.format(len(train_data)))\n\n\n # main training loop \n for epoch_num in range(start_epoch,max_epochs):\n\n\n print(\"Starting epoch {}\".format(epoch_num))\n retinanet.train()\n retinanet.module.freeze_bn()\n epoch_loss = []\n\n\n for iter_num, (im,label,ignore) in enumerate(trainloader):\n \n retinanet.train()\n retinanet.training = True\n retinanet.module.freeze_bn() \n \n try:\n optimizer.zero_grad()\n if torch.cuda.is_available():\n classification_loss, regression_loss = retinanet([im.to(device).float(), label.to(device).float()])\n else:\n classification_loss, regression_loss = retinanet([im.float(),label.float()])\n\n classification_loss = classification_loss.mean()\n regression_loss = regression_loss.mean()\n\n loss = classification_loss + regression_loss\n\n if bool(loss == 0):\n continue\n\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)\n\n optimizer.step()\n\n loss_hist.append(float(loss))\n\n epoch_loss.append(float(loss))\n\n if iter_num % 10 == 0:\n print(\n 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(\n epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)))\n if iter_num % 100 == 0:\n plot_detections(val_data, retinanet)\n\n del classification_loss\n del regression_loss\n except Exception as e:\n print(e)\n continue\n\n print(\"Epoch {} training complete\".format(epoch_num))\n \n\n scheduler.step(np.mean(epoch_loss))\n torch.cuda.empty_cache()\n \n #save checkpoint every epoch\n PATH = \"detrac_retinanet_34_{}.pt\".format(epoch_num)\n torch.save(retinanet.state_dict(),PATH)\n","sub_path":"train_detector.py","file_name":"train_detector.py","file_ext":"py","file_size_in_byte":6986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"339412468","text":"# -*- python -*-\n\n# This software was produced by NIST, an agency of the U.S. government,\n# and by statute is not subject to copyright in the United States.\n# Recipients of this software assume all responsibilities associated\n# with its operation, modification and maintenance. However, to\n# facilitate maintenance we ask that before distributing modified\n# versions of this software, you first contact the authors at\n# oof_manager@nist.gov. \n\nfrom generics import *\n\ndef skeletonBdySensitizationCheck0():\n return sensitizationCheck(\n {'New' : 1,\n 'Modify' : 0,\n 'Rename' : 0,\n 'Delete' : 0\n },\n base='OOF2:Skeleton Boundaries Page:Pane:Boundaries')\n\ndef skeletonBdySensitizationCheck1():\n return sensitizationCheck(\n {'New' : 1,\n 'Modify' : 1,\n 'Rename' : 1,\n 'Delete' : 1\n },\n base='OOF2:Skeleton Boundaries Page:Pane:Boundaries')\n \n\ndef skeletonBdySizeCheck(skeleton, bdyname, size):\n from ooflib.common.IO import whoville\n sc = whoville.getClass('Skeleton')[skeleton]\n bdy = sc.getBoundary(bdyname)\n return bdy.current_size() == size\n","sub_path":"TEST/GUI/000350_tutorial_nonrect/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"296919769","text":"def conv(s):\n if s == 0: \n return \"\"\n \n elif s <= 19:\n return (\"uno\", \"due\", \"tre\", \"quattro\", \"cinque\", \n \"sei\", \"sette\", \"otto\", \"nove\", \"dieci\", \n \"undici\", \"dodici\", \"tredici\", \n \"quattordici\", \"quindici\", \"sedici\", \n \"diciassette\", \"diciotto\", \"diciannove\")[s-1]\n \n elif s <= 99:\n decine = (\"venti\", \"trenta\", \"quaranta\",\n \"cinquanta\", \"sessanta\", \n \"settanta\", \"ottanta\", \"novanta\")\n letter = decine[int(s/10)-2]\n l = s%10\n if l == 1 or l == 8:\n letter = letter[:-1]\n return letter + conv(s%10)\n \n elif s <= 199:\n return \"cento\" + conv(s%100)\n \n elif s <= 999:\n k = s%100\n k = int(k/10)\n letter = \"cent\"\n if k != 8:\n letter = letter + \"o\"\n return conv( int(s/100)) + \\\n letter + \\\n conv(s%100)\n \n elif s<= 1999 :\n return \"mille\" + conv(s%1000)\n \n elif s<= 999999:\n return conv(int(s/1000)) + \\\n \"mila\" + \\\n conv(s%1000)\n \n elif s <= 1999999:\n return \"unmilione\" + conv(s%1000000)\n \n elif s <= 999999999:\n return conv(int(s/1000000))+ \\\n \"milioni\" + \\\n conv(s%1000000)\n elif s <= 1999999999:\n return \"unmiliardo\" + conv(s%1000000000)\n\n elif s <= 999999999999:\n return conv(int(s/1000000000))+ \\\n \"miliardi\" + \\\n conv(s%1000000000) \n\n elif s <= 1999999999999:\n return \"unbiliardo\" + conv(s%1000000000000)\n\n else:\n return conv(int(s/1000000000000)) + \\\n \"biliardi\" + \\\n conv(s%1000000000000)\n","sub_path":"students/1740669/homework01/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"43878103","text":"import random\r\nimport math\r\nimport numpy as np\r\nfrom numpy import linalg\r\n\r\ndef noiseR(p):\r\n r = random.uniform(0,1)\r\n if p > 0.0 and p < 1.0:\r\n if r >= p:\r\n return 0\r\n else:\r\n return 1\r\n return -1\r\n\r\ndef sign(x):\r\n return np.sign(x)\r\n\r\ndef gen(N):\r\n xlist = []\r\n ylist = []\r\n for i in range(N):\r\n x1 = random.uniform(-1,1)\r\n x2 = random.uniform(-1,1)\r\n flip = noiseR(0.1)\r\n ans = sign(x1*x1 + x2*x2 - 0.6)\r\n if flip == 1:\r\n # flip\r\n xlist.append([1,x1,x2])\r\n ylist.append(int(0-ans))\r\n elif flip == 0:\r\n # don't flip\r\n xlist.append([1,x1,x2])\r\n ylist.append(int(ans))\r\n else:\r\n print('noise error')\r\n return (np.array(xlist), np.array(ylist))\r\n\r\n\r\ndef Ein(w,x,y):\r\n n = len(x)\r\n total = 0\r\n for i in range(n):\r\n tempY = sign(np.dot(w,x[i]))\r\n item = 0\r\n if tempY != y[i]:\r\n item = 1\r\n else:\r\n item = 0\r\n total = total + item\r\n ans = float(total)/float(n)\r\n return ans\r\n \r\ndef pseudoinv(x):\r\n xT = np.transpose(x)\r\n xInv = linalg.pinv( np.dot(xT,x) )\r\n return np.dot(xInv, xT)\r\n\r\ndef findW(x,y):\r\n px = pseudoinv(x)\r\n return np.dot(px,y)\r\n\r\ndef main():\r\n N = 1000\r\n Round = 1000\r\n sumEin = 0\r\n for i in range(Round):\r\n data1,data2 = gen(N)\r\n wlin = findW(data1,data2)\r\n #print('Wlin: ')\r\n #print(wlin)\r\n\r\n ans = Ein(wlin,data1,data2)\r\n sumEin = sumEin + ans\r\n #print('ans = {}'.format(ans))\r\n aver = sumEin / Round\r\n print('average {} times of Ein is {}'.format(Round,aver))\r\n\r\nif __name__=='__main__':\r\n main()\r\n","sub_path":"ML/hw3/q16.py","file_name":"q16.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"447012716","text":"import tkinter as tk\r\nimport json\r\nimport threading\r\n\r\n#Used for example\r\nimport time\r\n\r\nclass Application(tk.Frame):\r\n\tdef __init__(self, master=None):\r\n\t\tsuper().__init__(master)\r\n\t\tself.master = master\r\n\t\tself.config()\r\n\t\tself.grid()\r\n\t\tself.map_functions()\r\n\t\tself.generate_objects(self)\r\n\r\n\tdef read_json_file(self, file):\r\n\t\twith open(file, \"r\") as file:\r\n\t\t\treturn json.load(file)\r\n\r\n\tdef map_functions(self):\r\n\t\tself.function_mapper = {\r\n\t\t\t\"self.button_press\": self.button_press\r\n\t\t}\r\n\r\n\tdef generate_objects(self, frame):\r\n\t\ttype_id_dict = {\r\n\t\t\t1: tk.Entry,\r\n\t\t\t2: tk.Button,\r\n\t\t\t3: tk.Label\r\n\t\t}\r\n\r\n\t\tself.object_dict = dict()\r\n\r\n\t\tfor object in self.read_json_file(\"template.json\")[\"objects\"]:\r\n\t\t\tself.object_dict[object['name']] = type_id_dict[object['type']](frame)\r\n\r\n\t\t\tsettings = object['config']\r\n\t\t\tfor item in settings:\r\n\t\t\t\tif item == \"command\":\r\n\t\t\t\t\tself.object_dict[object['name']][\"command\"] = self.function_mapper[settings[item]]\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.object_dict[object['name']][item] = settings[item]\r\n\r\n\t\t\tself.object_dict[object['name']].grid(object[\"grid\"])\r\n\r\n\tdef button_press(self):\r\n\t\tdef button_program():\r\n\t\t\tprint(\"sleep for 2 seconds\")\r\n\t\t\ttime.sleep(2)\r\n\t\t\tself.object_dict[\"button\"][\"state\"] = \"normal\"\r\n\r\n\t\tself.object_dict[\"button\"][\"state\"] = tk.DISABLED\r\n\t\tthreading.Thread(target=button_program).start()\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Template\")\r\napp = Application(master=root)\r\napp.mainloop()\r\n\r\nif __name__ == '__main__':\r\n\tpass\r\n","sub_path":"app_template.py","file_name":"app_template.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"129633145","text":"from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n\ndef classifier(x_train , y_train , x_test , y_test):\n\ty_train = y_train.astype(int)\n\ty_test = y_test.astype(int)\n\tx_train = x_train[:6000]\n\ty_train = y_train[:6000]\n\tx_test = x_test[:1600]\n\ty_test = y_test[:1600]\n\tmodel = DecisionTreeClassifier(criterion='entropy',max_depth=5)\n\tclf = AdaBoostClassifier(base_estimator = model, n_estimators = 20)\n\tclf.fit(x_train , y_train)\n\treturn clf.score(x_test , y_test)\n","sub_path":"hw3/boosting/Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"315227322","text":"import pygame\nimport random\nimport time\n\nWIDTH, HEIGHT = 1100, 750\nWIN = pygame.display.set_mode((WIDTH, HEIGHT)) \npygame.display.set_caption(\"Bubble Sorter!\") \n\nBLACK = (33, 33, 33)\nBLUE = (41, 45, 62) \nGRAY = (76, 76, 76)\nWHITE = (255, 255, 255)\nBROWN = (149, 134, 124)\nFPS = 60\nlines = []\nW = 5\n\ndef create_array_of_lines(width, height):\n w = width//W\n for i in range(0,w):\n length = random.randrange(10,height)\n lines.append(length)\n\ndef sort_array_of_lines(height, lines): \n length = len(lines) - 1 \n for i in range(0, length):\n for j in range(0, length - i):\n a = lines[j]\n b = lines[j+1]\n\n if a > b:\n temp = a\n lines[j] = b \n lines[j+1] = temp\n WIN.fill(BROWN)\n draw_rectangles(lines, HEIGHT)\n pygame.display.update()\n\ndef draw_rectangles(lines, height):\n posx = 1\n state = 1\n for length in lines:\n if state % 2 == 0 :\n pygame.draw.rect(WIN, WHITE, (posx, height-length, W, length))\n else:\n pygame.draw.rect(WIN, BLACK, (posx, height-length, W, length))\n\n state += 1\n posx += W\n\ndef draw_window():\n WIN.fill(GRAY)\n sort_array_of_lines(HEIGHT, lines)\n\ndef main(): \n clock = pygame.time.Clock()\n run = True \n while run: \n clock.tick(FPS)\n for event in pygame.event.get(): \n if event.type == pygame.QUIT: \n run = False\n\n draw_window() \n\n pygame.quit() \n\n\ncreate_array_of_lines(WIDTH, HEIGHT) \nif __name__ == \"__main__\": \n main() \n","sub_path":"bubble-sorter.py","file_name":"bubble-sorter.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"83461851","text":"#!/usr/bin/env python\n\n'''\n DAVID LETTIER\n (C) 2015.\n\n http://www.lettier.com/\n\n Slackotron\n'''\n\nimport time\nimport peewee\nimport database.database_manager\n\n\nclass Base(peewee.Model):\n class Meta:\n database = database.database_manager.DatabaseManager.database\n\n def __str__(self):\n return '%s()' % (self.__class__.__name__)\n\n @classmethod\n def database(cls):\n return cls._meta.database\n\n\nclass Channel(Base):\n slack_name = peewee.CharField()\n slack_id = peewee.CharField(unique=True)\n is_direct = peewee.BooleanField(default=False)\n is_secure = peewee.BooleanField(default=True)\n\n def __str__(self):\n return '%s(%s %s %s %s)' % (\n self.__class__.__name__,\n self.slack_name,\n self.slack_id,\n self.is_direct,\n self.is_secure\n )\n\n def users(self):\n users = User.select().join(\n ChannelUserRelationship\n ).where(\n ChannelUserRelationship.channel == self\n )\n return users\n\n def direct_channel_user_name(self):\n if not self.is_direct:\n return ''\n if self.slack_name == 'USLACKBOT':\n return self.slack_name\n try:\n user = User.get(\n User.slack_id == self.slack_name\n )\n return user.slack_name\n except:\n return ''\n\n\nclass User(Base):\n slack_name = peewee.CharField()\n slack_id = peewee.CharField(unique=True)\n is_slackbot = peewee.BooleanField(default=False)\n\n def __str__(self):\n return '%s(%s %s %s)' % (\n self.__class__.__name__,\n self.slack_name,\n self.slack_id,\n self.is_slackbot\n )\n\n def channels(self):\n channels = Channel.select().join(\n ChannelUserRelationship\n ).where(\n ChannelUserRelationship.user == self\n )\n return channels\n\n\nclass Message(Base):\n text = peewee.CharField()\n slack_timestamp = peewee.CharField()\n channel = peewee.ForeignKeyField(Channel, related_name='messages')\n user = peewee.ForeignKeyField(User, related_name='messages')\n is_deleted = peewee.BooleanField(default=False)\n\n def __str__(self):\n return '%s(%s %s %s %s %s)' % (\n self.__class__.__name__,\n self.text,\n self.slack_timestamp,\n self.channel,\n self.user,\n self.is_deleted\n )\n\n\nclass Response(Base):\n text = peewee.CharField()\n generated_at = peewee.CharField()\n from_plugin = peewee.CharField(null=True)\n in_response_to = peewee.ForeignKeyField(\n Message,\n related_name='response',\n null=True\n )\n to_channel = peewee.ForeignKeyField(\n Channel,\n related_name='responses'\n )\n to_user = peewee.ForeignKeyField(User, related_name='responses')\n is_approved = peewee.BooleanField(default=False)\n is_sent = peewee.BooleanField(default=False)\n is_deleted = peewee.BooleanField(default=False)\n slack_timestamp = peewee.CharField(default='')\n\n def __str__(self):\n return '%s(%s %s %s %s %s %s %s %s %s %s)' % (\n self.__class__.__name__,\n self.text,\n self.generated_at,\n self.from_plugin,\n self.in_response_to,\n self.to_channel,\n self.to_user,\n self.is_approved,\n self.is_sent,\n self.is_deleted,\n self.slack_timestamp\n )\n\n def save(self, *args, **kwargs):\n if self.generated_at.__class__.__name__ == 'NoneType':\n self.generated_at = str('%.7f' % time.time())\n return super(Response, self).save(*args, **kwargs)\n\n\nclass ChannelUserRelationship(Base):\n '''\n Many-to-Many channel >-< user intermediary model.\n '''\n\n class Meta:\n indexes = ((('channel', 'user'), True),)\n channel = peewee.ForeignKeyField(Channel)\n user = peewee.ForeignKeyField(User)\n\n def __str__(self):\n return '%s(%s %s)' % (\n self.__class__.__name__,\n str(self.channel),\n str(self.user)\n 
)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"413386714","text":"#coding:utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nimport unittest,time,re\n\nclass Baidu(unittest.TestCase):\n def setUp(self):\n self.browser = webdriver.Chrome()\n self.browser.maximize_window()\n self.browser.implicitly_wait(29)\n self.base_url = 'https://www.baidu.com/?tn=93380420_hao_pg'\n self.verificationErrors =[]\n self.accept_next_alert = True\n #百度搜索功能的测试用例\n def testbaidu_serach(self):\n '''百度搜索'''\n browser = self.browser\n browser.get(self.base_url)\n browser.find_element_by_id('kw').send_keys('慢慢的我们都变成了自己讨厌的那种人')\n time.sleep(2)\n browser.find_element_by_id('su').click()\n time.sleep(5)\n #百度搜索设置的测试用例\n def testbaidu_set(self):\n '''百度设置'''\n browser = self.browser\n browser.get(self.base_url)\n browser.find_element_by_xpath('//*[@id=\"u1\"]/a[8]').click()\n browser.find_element_by_link_text('搜索设置').click()\n time.sleep(3)\n s = Select(browser.find_element_by_xpath('//*[@id=\"nr\"]'))\n s.select_by_value('50')\n time.sleep(2)\n browser.find_element_by_xpath('//*[@id=\"gxszButton\"]/a[1]').click()\n alert = browser.switch_to.alert\n print('弹框提示的内容是:%s' % alert.text )\n alert.accept()\n #释放\n def tearDown(self):\n self.browser.close()\n self.assertEqual([],self.verificationErrors)\nif __name__ == '__main__':\n unittest.main()\n\n#baidu.py 文件编写了两条用例\n","sub_path":"baidu.py","file_name":"baidu.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"117007661","text":"from django import forms\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom simple_history.models import HistoricalRecords\n\n# Create your models here.\n\nYEAR_CHOICES = (\n (1,'First'),\n (2,'Second'), \n (3,'Third'), \n (4,'Fourth'),\n (5,'Fifth'),\n)\n\nclass Participant(models.Model):\n id = models.AutoField(primary_key=True)\n firstName = models.CharField(max_length=50, blank= False)\n lastName = models.CharField(max_length=50, blank=False)\n mobileNo = models.DecimalField(\n max_digits=13,\n decimal_places=0,\n blank=False)\n emailID = models.EmailField(blank=False)\n year = models.IntegerField(blank=False, choices=YEAR_CHOICES)\n college = models.CharField(max_length=255, blank=False)\n\n history = HistoricalRecords()\n \n def __team__(self):\n team_str = ''\n for i in self.team.all():\n team_str += (i.__unicode__()+', ')\n return team_str\n\n def __unicode__(self):\n return str(self.id) + '-' + self.firstName + ' ' + self.lastName\n\nclass ParticipantForm(forms.ModelForm):\n class Meta:\n model = Participant\n fields= '__all__'\n labels = {\n 'firstName': _('First Name'),\n 'lastName': _('Last Name'),\n 'mobileNo': _('Mobile Number'),\n 'emailID': _('EMail ID'),\n 'year': _('Year of Study'),\n 'college': _('College'),\n }\n widgets = {\n 'firstName': forms.TextInput(attrs={'required':'True', 'placeholder':'First Name','size':'50'}),\n 'lastName': forms.TextInput(attrs={'required':'True', 'placeholder':'Last Name','size':'50'}),\n 'mobileNo': forms.TextInput(attrs={'required':'True', 'pattern':'[7-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]', 'title':'Enter 10 digit valid mobile number','placeholder':'10 digit Mobile Number','maxlength':'10', 'size':'50'}),\n 'emailID': forms.EmailInput(attrs={'required':'True', 'placeholder':'email@domain.com','size':'50'}),\n 'college': forms.TextInput(attrs={'required':'True', 'placeholder':'College','size':'50'}),\n }\n\nclass FindForm(forms.Form):\n participant_mobile = forms.IntegerField(\n required = True,\n label='',\n widget = forms.TextInput(attrs={'required':'True', 'placeholder':'Enter your 10 digit Mobile Number','maxlength':'10', 'size':'50'}))","sub_path":"participant/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"106996070","text":"#CSCI 1133 Section 19 Lab 006, Homework Problem A, Isaiah Herr\n\ndef main():\n R = int(input(\"Please enter a value for red here: \"))\n G = int(input(\"Please enter a value for green here: \"))\n B = int(input(\"Please enter a value for blue here: \"))\n\n print(\"Red component: \", R)\n print(\"Green component: \", G)\n print(\"Blue component: \", B)\n print(RGB_to_CYMK(R, G, B))\n\n \n\ndef RGB_to_CYMK(R, G, B):\n\n Red = R/255\n Green = G/255\n Blue = B/255\n\n K = (1 - max(Red,Green,Blue))\n C = ((1-Red - K)/(KK))\n M = ((1-Green-K)/(KK))\n Y = ((1-Blue-K)/(KK))\n\n\n \n \n\n K2 = round(K2*100)\n C2 = round(C2*100)\n M2 = round(M2*100)\n Y2 = round(Y2*100)\n\n\n CYMK_list = [C2, M2, Y2, K2]\n CYMK = ' '.join(map(str,CYMK_list))\n return CYMK\n \n\nmain()\n\n","sub_path":"repo-herrx080/homeworks/HW3_A.py","file_name":"HW3_A.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"462738366","text":"import json\n\nimport grpc\nimport time\nfrom goods_service.proto import goods_pb2, goods_pb2_grpc\nfrom goods_service.model.models import *\nfrom google.protobuf import empty_pb2\nfrom loguru import logger\n\n\nclass GoodsServices(goods_pb2_grpc.GoodsServicer):\n def category_model_to_dic(self, category: Category) -> dict:\n res = {\n \"id\": category.id,\n \"name\": category.name,\n \"level\": category.level,\n \"parent\": category.parent_category_id,\n \"is_tab\": category.is_tab\n }\n return res\n\n def convert_model_to_message(self, goods: BaseModel) -> goods_pb2.GoodsInfoResponse:\n info_rsp = goods_pb2.GoodsInfoResponse()\n info_rsp.id = goods.id\n info_rsp.categoryId = goods.category_id\n info_rsp.name = goods.name\n info_rsp.goodsSn = goods.goods_sn\n info_rsp.clickNum = goods.click_num\n info_rsp.soldNum = goods.sold_num\n info_rsp.favNum = goods.fav_num\n info_rsp.marketPrice = goods.market_price\n info_rsp.shopPrice = goods.shop_price\n info_rsp.goodsBrief = goods.goods_brief\n info_rsp.shipFree = goods.ship_free\n info_rsp.goodsFrontImage = goods.goods_front_image\n info_rsp.isNew = goods.is_new\n info_rsp.descImages.extend(goods.desc_images)\n info_rsp.images.extend(goods.desc_images)\n info_rsp.isHot = goods.is_hot\n info_rsp.onSale = goods.on_sale\n\n info_rsp.category.id = goods.category.id\n info_rsp.category.name = goods.category.name\n\n info_rsp.brand.id = goods.brand.id\n info_rsp.brand.name = goods.brand.name\n info_rsp.brand.logo = goods.brand.logo\n\n return info_rsp\n\n @logger.catch\n def GoodsList(self, request: goods_pb2.GoodsFilterRequest, context) -> goods_pb2.GoodsListResponse:\n rsp = goods_pb2.GoodsListResponse()\n goods: BaseModel = Goods.select()\n if request.keyWords:\n goods = goods.where(Goods.name.contains(request.keyWords))\n if request.isHot:\n goods = goods.filter(Goods.is_hot == True)\n if request.isNew:\n goods = goods.filter(Goods.is_new == True)\n if request.priceMin:\n goods = goods.filter(Goods.shop_price >= request.priceMin)\n if request.priceMax:\n goods = goods.filter(Goods.shop_price <= request.priceMax)\n if request.brand:\n goods = goods.filter(Goods.brand_id == request.brand)\n if request.topCategory:\n try:\n ids = []\n category = Category.get(Category.id == request.topCategory)\n level = category.level\n if level == 1:\n c2 = Category.alias()\n categorys = Category.select().where(Category.parent_category_id.in_(\n c2.select(c2.id).where(c2.parent_category_id == request.topCategory)\n ))\n for category in categorys:\n ids.append(category.id)\n elif level == 2:\n categorys = Category.select().where(Category.parent_category_id == request.topCategory)\n for category in categorys:\n ids.append(category.id)\n elif level == 3:\n ids.append(request.topCategory)\n goods = goods.where(Goods.category_id.in_(ids))\n except Exception as e:\n pass\n start, per_page_nums = 0, 10\n if request.pagePerNums:\n per_page_nums = request.pagePerNums\n if request.pages:\n start = per_page_nums * (request.pages - 1)\n rsp.total = goods.count()\n goods = goods.limit(per_page_nums).offset(start)\n for good in goods:\n rsp.data.append(self.convert_model_to_message(good))\n return rsp\n\n @logger.catch\n def GetGoodsDetail(self, request: goods_pb2.GoodInfoRequest, context):\n try:\n goods = Goods.get(Goods.id == request.id)\n goods.click_num += 1\n goods.save()\n return self.convert_model_to_message(goods)\n except:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Goods Does not exist\")\n return 
goods_pb2.GoodsInfoResponse()\n\n @logger.catch\n def BatchGetGoods(self, request: goods_pb2.BatchGoodsIdInfo, context) -> goods_pb2.GoodsListResponse:\n rsp = goods_pb2.GoodsListResponse()\n ids = list(request.id)\n goods = Goods.select().where(Goods.id.in_(ids))\n rsp.total = goods.count()\n for good in goods:\n rsp.data.append(self.convert_model_to_message(good))\n return rsp\n\n @logger.catch\n def CreateGoods(self, request: goods_pb2.CreateGoodsInfo, context) -> goods_pb2.GoodsInfoResponse:\n try:\n category = Category.get(Category.id == request.categoryId)\n except DoesNotExist as e:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Category does not exist\")\n return goods_pb2.GoodsInfoResponse()\n try:\n brand = Brands.get(Brands.id == request.brandId)\n except DoesNotExist as e:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Brand does not exist\")\n return goods_pb2.GoodsInfoResponse()\n goods = Goods()\n goods.brand = brand\n goods.category = category\n goods.name = request.name\n goods.goods_sn = request.goodsSn\n goods.market_price = request.marketPrice\n goods.shop_price = request.shopPrice\n goods.goods_brief = request.goodsBrief\n goods.ship_free = request.shipFree\n goods.images = list(request.images)\n goods.desc_images = list(request.descImages)\n goods.goods_front_image = request.goodsFrontImage\n goods.is_new = request.isNew\n goods.is_hot = request.isHot\n goods.on_sale = request.onSale\n goods.save()\n return self.convert_model_to_message(goods)\n\n @logger.catch\n def DeleteGoods(self, request: goods_pb2.DeleteGoodsInfo, context):\n try:\n goods: Goods = Goods.get(Goods.id == request.id)\n goods.delete_instance()\n return empty_pb2.Empty()\n except DoesNotExist as e:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Good does not exist\")\n return empty_pb2.Empty()\n except Exception as e:\n context.set_code(grpc.StatusCode.INTERNAL)\n context.set_details(str(e))\n return empty_pb2.Empty()\n\n @logger.catch\n def UpdateGoods(self, request: goods_pb2.CreateGoodsInfo, context):\n if not request.id:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Empty Goods id\")\n return empty_pb2.Empty()\n category, brand = None, None\n if request.categoryId:\n try:\n category = Category.get(Category.id == request.categoryId)\n except DoesNotExist as e:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Category does not exist\")\n return goods_pb2.GoodsInfoResponse()\n if request.brandId:\n try:\n brand = Brands.get(Brands.id == request.brandId)\n except DoesNotExist as e:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Brand does not exist\")\n return goods_pb2.GoodsInfoResponse()\n\n try:\n goods = Goods.get(Goods.id == request.id)\n except DoesNotExist as e:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details(\"Goods does not exist\")\n return goods_pb2.GoodsInfoResponse()\n\n if brand:\n goods.brand = brand\n if category:\n goods.category = category\n goods.name = request.name\n goods.goods_sn = request.goodsSn\n goods.market_price = request.marketPrice\n goods.shop_price = request.shopPrice\n goods.goods_brief = request.goodsBrief\n goods.ship_free = request.shipFree\n goods.images = list(request.images)\n goods.desc_images = list(request.descImages)\n goods.goods_front_image = request.goodsFrontImage\n goods.is_new = request.isNew\n goods.is_hot = request.isHot\n goods.on_sale = request.onSale\n\n goods.save()\n return 
self.convert_model_to_message(goods)\n\n    @logger.catch\n    def GetAllCategorysList(self, request, context):\n        rsp = goods_pb2.CategoryListResponse()\n        categories = Category.select()\n        rsp.total = categories.count()\n        level1, level2, level3 = [], [], []\n        for category in categories:\n            category_rsp = goods_pb2.CategoryInfoResponse()\n            category_rsp.id = category.id\n            category_rsp.name = category.name\n            if category.parent_category_id:\n                category_rsp.parentCategory = category.parent_category_id\n            category_rsp.level = category.level\n            category_rsp.isTab = category.is_tab\n            rsp.data.append(category_rsp)\n            if category.level == 1:\n                level1.append(self.category_model_to_dic(category))\n            elif category.level == 2:\n                level2.append(self.category_model_to_dic(category))\n            elif category.level == 3:\n                level3.append(self.category_model_to_dic(category))\n\n        for data3 in level3:\n            for data2 in level2:\n                if data3[\"parent\"] == data2[\"id\"]:\n                    if \"sub_category\" not in data2:\n                        data2[\"sub_category\"] = [data3]\n                    else:\n                        data2[\"sub_category\"].append(data3)\n        for data2 in level2:\n            for data1 in level1:\n                if data2[\"parent\"] == data1[\"id\"]:\n                    if \"sub_category\" not in data1:\n                        data1[\"sub_category\"] = [data2]\n                    else:\n                        data1[\"sub_category\"].append(data2)\n        rsp.jsonData = json.dumps(level1)\n        return rsp\n\n    def GetSubCategory(self, request, context):\n        category_list_rsp = goods_pb2.SubCategoryListResponse()\n\n        try:\n            category_info = Category.get(Category.id == request.id)\n            category_list_rsp.info.id = category_info.id\n            category_list_rsp.info.name = category_info.name\n            category_list_rsp.info.level = category_info.level\n            category_list_rsp.info.isTab = category_info.is_tab\n            if category_info.parent_category:\n                category_list_rsp.info.parentCategory = category_info.parent_category_id\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Category does not exist')\n            return goods_pb2.SubCategoryListResponse()\n\n        categorys = Category.select().where(Category.parent_category == request.id)\n        category_list_rsp.total = categorys.count()\n        for category in categorys:\n            category_rsp = goods_pb2.CategoryInfoResponse()\n            category_rsp.id = category.id\n            category_rsp.name = category.name\n            if category_info.parent_category:\n                category_rsp.parentCategory = category_info.parent_category_id\n            category_rsp.level = category.level\n            category_rsp.isTab = category.is_tab\n\n            category_list_rsp.subCategorys.append(category_rsp)\n\n        return category_list_rsp\n\n    def CreateCategory(self, request, context):\n        try:\n            category = Category()\n            category.name = request.name\n            if request.level != 1:\n                category.parent_category = request.parentCategory\n            category.level = request.level\n            category.is_tab = request.isTab\n            category.save()\n\n            category_rsp = goods_pb2.CategoryInfoResponse()\n            category_rsp.id = category.id\n            category_rsp.name = category.name\n            if category.parent_category:\n                category_rsp.parentCategory = category.parent_category.id\n            category_rsp.level = category.level\n            category_rsp.isTab = category.is_tab\n        except Exception as e:\n            context.set_code(grpc.StatusCode.INTERNAL)\n            context.set_details(str(e))\n            return goods_pb2.CategoryInfoResponse()\n\n        return category_rsp\n\n    def DeleteCategory(self, request, context):\n        try:\n            category = Category.get(request.id)\n            category.delete_instance()\n\n            # TODO: also delete the goods under the corresponding category\n            return empty_pb2.Empty()\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Does not exist')\n            return empty_pb2.Empty()\n\n    def UpdateCategory(self, request, context):\n        try:\n            category = Category.get(request.id)\n            if request.name:\n                category.name = request.name\n            if request.parentCategory:\n                category.parent_category = request.parentCategory\n            if request.level:\n                category.level = request.level\n            if request.isTab:\n                category.is_tab = request.isTab\n            category.save()\n\n            return empty_pb2.Empty()\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Does not exist')\n            return empty_pb2.Empty()\n\n    @logger.catch\n    def BannerList(self, request: empty_pb2.Empty, context):\n        # fetch the banner list\n        rsp = goods_pb2.BannerListResponse()\n        banners = Banner.select()\n\n        rsp.total = banners.count()\n        for banner in banners:\n            banner_rsp = goods_pb2.BannerResponse()\n\n            banner_rsp.id = banner.id\n            banner_rsp.image = banner.image\n            banner_rsp.index = banner.index\n            banner_rsp.url = banner.url\n\n            rsp.data.append(banner_rsp)\n\n        return rsp\n\n    @logger.catch\n    def CreateBanner(self, request: goods_pb2.BannerRequest, context):\n        banner = Banner()\n\n        banner.image = request.image\n        banner.index = request.index\n        banner.url = request.url\n        banner.save()\n\n        banner_rsp = goods_pb2.BannerResponse()\n        banner_rsp.id = banner.id\n        banner_rsp.image = banner.image\n        banner_rsp.url = banner.url\n\n        return banner_rsp\n\n    @logger.catch\n    def DeleteBanner(self, request: goods_pb2.BannerRequest, context):\n        try:\n            banner = Banner.get(request.id)\n            banner.delete_instance()\n\n            return empty_pb2.Empty()\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Banner does not exist')\n            return empty_pb2.Empty()\n\n    @logger.catch\n    def UpdateBanner(self, request: goods_pb2.BannerRequest, context):\n        try:\n            banner = Banner.get(request.id)\n            if request.image:\n                banner.image = request.image\n            if request.index:\n                banner.index = request.index\n            if request.url:\n                banner.url = request.url\n\n            banner.save()\n\n            return empty_pb2.Empty()\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Banner does not exist')\n            return empty_pb2.Empty()\n\n    # brand-related endpoints\n\n    @logger.catch\n    def BrandList(self, request: empty_pb2.Empty, context):\n        # fetch the brand list\n        start, per_page = 0, 10\n        if request.pagePerNums:\n            per_page = request.pagePerNums\n        if request.pages:\n            start = per_page * (request.pages-1)\n        rsp = goods_pb2.BrandListResponse()\n        brands = Brands.select()\n\n        rsp.total = brands.count()\n        brands = brands.limit(per_page).offset(start)\n        for brand in brands:\n            brand_rsp = goods_pb2.BrandInfoResponse()\n\n            brand_rsp.id = brand.id\n            brand_rsp.name = brand.name\n            brand_rsp.logo = brand.logo\n\n            rsp.data.append(brand_rsp)\n\n        return rsp\n\n    @logger.catch\n    def CreateBrand(self, request: goods_pb2.BrandRequest, context):\n        brands = Brands.select().where(Brands.name == request.name)\n        if brands:\n            context.set_code(grpc.StatusCode.ALREADY_EXISTS)\n            context.set_details('Brand Already Exists')\n            return goods_pb2.BrandInfoResponse()\n\n        brand = Brands()\n\n        brand.name = request.name\n        brand.logo = request.logo\n\n        brand.save()\n\n        rsp = goods_pb2.BrandInfoResponse()\n        rsp.id = brand.id\n        rsp.name = brand.name\n        rsp.logo = brand.logo\n\n        return rsp\n\n    @logger.catch\n    def DeleteBrand(self, request: goods_pb2.BrandRequest, context):\n        try:\n            brand = Brands.get(request.id)\n            brand.delete_instance()\n\n            return empty_pb2.Empty()\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Record does not exist')\n            return empty_pb2.Empty()\n\n    @logger.catch\n    def UpdateBrand(self, request: goods_pb2.BrandRequest, context):\n        try:\n            brand = Brands.get(request.id)\n            if request.name:\n                brand.name = request.name\n            if request.logo:\n                brand.logo = request.logo\n\n            brand.save()\n\n            return empty_pb2.Empty()\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Record does not exist')\n            return empty_pb2.Empty()\n\n    @logger.catch\n    def CategoryBrandList(self, request: empty_pb2.Empty, context):\n        # fetch the category-brand list\n        rsp = goods_pb2.CategoryBrandListResponse()\n        category_brands = GoodsCategoryBrand.select()\n\n        # pagination\n        start = 0\n        per_page_nums = 10\n        if request.pagePerNums:\n            per_page_nums = request.pagePerNums\n        if request.pages:\n            start = per_page_nums * (request.pages - 1)\n\n        category_brands = category_brands.limit(per_page_nums).offset(start)\n\n        rsp.total = category_brands.count()\n        for category_brand in category_brands:\n            category_brand_rsp = goods_pb2.CategoryBrandResponse()\n\n            category_brand_rsp.id = category_brand.id\n            category_brand_rsp.brand.id = category_brand.brand.id\n            category_brand_rsp.brand.name = category_brand.brand.name\n            category_brand_rsp.brand.logo = category_brand.brand.logo\n\n            category_brand_rsp.category.id = category_brand.category.id\n            category_brand_rsp.category.name = category_brand.category.name\n            category_brand_rsp.category.parentCategory = category_brand.category.parent_category_id\n            category_brand_rsp.category.level = category_brand.category.level\n            category_brand_rsp.category.isTab = category_brand.category.is_tab\n\n            rsp.data.append(category_brand_rsp)\n        return rsp\n\n    @logger.catch\n    def GetCategoryBrandList(self, request, context):\n        # fetch all brands under a given category\n        rsp = goods_pb2.BrandListResponse()\n        try:\n            category = Category.get(Category.id == request.id)\n            category_brands = GoodsCategoryBrand.select().where(GoodsCategoryBrand.category == category)\n            rsp.total = category_brands.count()\n            for category_brand in category_brands:\n                brand_rsp = goods_pb2.BrandInfoResponse()\n                brand_rsp.id = category_brand.brand.id\n                brand_rsp.name = category_brand.brand.name\n                brand_rsp.logo = category_brand.brand.logo\n\n                rsp.data.append(brand_rsp)\n        except DoesNotExist as e:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Record does not exist')\n            return rsp\n\n        return rsp\n\n    @logger.catch\n    def CreateCategoryBrand(self, request: goods_pb2.CategoryBrandRequest, context):\n        category_brand = GoodsCategoryBrand()\n\n        try:\n            brand = Brands.get(request.brandId)\n            category_brand.brand = brand\n            category = Category.get(request.categoryId)\n            category_brand.category = category\n            category_brand.save()\n\n            rsp = goods_pb2.CategoryBrandResponse()\n            rsp.id = category_brand.id  # an alternative approach: return only the new record's id\n\n            return rsp\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Record does not exist')\n            return goods_pb2.CategoryBrandResponse()\n        except Exception as e:\n            context.set_code(grpc.StatusCode.INTERNAL)\n            context.set_details('Internal Server Error')\n            return goods_pb2.CategoryBrandResponse()\n\n    @logger.catch\n    def DeleteCategoryBrand(self, request: goods_pb2.CategoryBrandRequest, context):\n        try:\n            category_brand = GoodsCategoryBrand.get(request.id)\n            category_brand.delete_instance()\n\n            return empty_pb2.Empty()\n        except DoesNotExist:\n            context.set_code(grpc.StatusCode.NOT_FOUND)\n            context.set_details('Record does not exist')\n            return empty_pb2.Empty()\n\n    @logger.catch\n    def UpdateCategoryBrand(self, request: goods_pb2.CategoryBrandRequest, context):\n        try:\n            category_brand = 
GoodsCategoryBrand.get(request.id)\n brand = Brands.get(request.brandId)\n category_brand.brand = brand\n category = Category.get(request.categoryId)\n category_brand.category = category\n category_brand.save()\n\n return empty_pb2.Empty()\n except DoesNotExist:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details('Record does not exist')\n return empty_pb2.Empty()\n except Exception as e:\n context.set_code(grpc.StatusCode.INTERNAL)\n context.set_details('Internal server error')\n return empty_pb2.Empty()\n\n\n\n\n","sub_path":"goods_service/handler/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":22474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"75702129","text":"import urllib.request\nimport sys\nfrom threading import Thread\nimport pdb\nimport datetime as datetime\nimport os\nimport time\n\ndef download(params, part):\n \n output_file = params['file_name']+'_'+str(part)+'.pdn'\n print('Part :{output_file} Lunch'.format(output_file=output_file))\n\n debut = params['buffer_size']*part\n fin = (params['buffer_size']*(part+1))-1\n bytes_range = 'bytes={debut}-{fin}'\n bytes_range = bytes_range.format(debut=debut, fin=fin)\n\n #pdb.set_trace()\n req = urllib.request.Request(params['url'])\n req.method = 'GET'\n req.add_header('Range', bytes_range) \n #pdb.set_trace()\n page = urllib.request.urlopen(req)\n buffer = page.read(params['buffer_size'])\n #pdb.set_trace()\n with open(output_file, 'wb') as output:\n while len(buffer):\n output.write(buffer)\n buffer = page.read(0)\n\n page.close()\n fin = datetime.datetime.now()\n \n print('Part :{output_file} Done'.format(output_file=output_file))\n\n\ndef requete(url):\n req = urllib.request.Request(url)\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2)'\\\n ' AppleWebKit/537.36 (KHTML, like Gecko) '\\\n 'Chrome/41.0.2272.101 Safari/537.36'\n req.add_header('User-Agent', user_agent)\n\n with urllib.request.urlopen(req) as page:\n headers = dict(page.getheaders())\n if 'Accept-Ranges' in headers.keys():\n if headers['Accept-Ranges'] == 'none':\n print('ERROR : Can\\‘t preform a range request')\n sys.exit()\n else:\n print('ERROR : Accept-Ranges header not found')\n sys.exit()\n\n file_size = int(headers['Content-Length'])\n buffer_size = round(file_size/5)\n file_name = urllib.request.unquote(url.split('/')[len(url.split('/'))-1])\n file_ext = '.'+file_name.split('.')[len(file_name.split('.'))-1]\n\n return {'url':url,\n 'file_name':file_name,\n 'file_ext':file_ext,\n 'file_size':file_size, \n 'buffer_size': buffer_size,\n 'headers':headers}\n\ndef assemblage(params):\n #test de la fonction avec une extension connu\n #il faut recuperer le nom du fichier pour\n #idee module re\n with open(params['file_name'],'wb') as output:\n for e in list(range(0, 5)):\n with open(params['file_name']+'_'+str(e)+'.pdn','rb') as input_file:\n buffer = input_file.read(params['buffer_size'])\n while len(buffer):\n output.write(buffer)\n buffer = input_file.read(params['buffer_size'])\n print('Merging : '+ params['file_name']+ 'done')\n\ndef cleaning(params):\n for e in list(range(0, 5)):\n os.remove(params['file_name']+'_'+str(e)+'.pdn')\n print('Cleaning Done')\n\ndef compteur(file_name, buffer_size):\n\n def generator(file_name, buffer_size):\n while(int(os.path.getsize(file_name)) 0:\n print(words_before.get(), end = ' ')\n print(display, end = ' ')\n print('...', end = ' ')\n print('\\n')\n print('tf-idf score=', sorted_scores[i][1])\n print('\\n')\n print('============================================')","sub_path":"TechMania/IR Project/Search/ranked_retreival.py","file_name":"ranked_retreival.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"447010293","text":"from django.contrib import admin\nfrom django.urls import path\nfrom students import views\n\nurlpatterns = [\n path(\"\",views.home, name='home'),\n path(\"home\",views.home,name='home'),\n path(\"about\",views.about, name='about'),\n path(\"contact\",views.contact, name=\"contact\"),\n path(\"subjects\",views.subjects, name=\"subjects\"),\n path(\"teachers\",views.teachers, name=\"teachers\"),\n path(\"assignments\",views.assignments, name=\"assignments\"),\n path(\"mathassignments\",views.mathassignments, name=\"mathassignments\"),\n path(\"physicsassignments\",views.physicsassignments, name=\"physicsassignments\"),\n\n]","sub_path":"students/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"304691240","text":"from __future__ import annotations\nimport json\nfrom pysinonimos.sinonimos import Search, historic\n\n\nclass Token:\n def __init__(self, token_type, token_value):\n self.type = token_type\n self.value = token_value\n\n\nclass Tokenizer:\n def __init__(self):\n self.origin = \"\"\n self.position = 0\n self.tokens = []\n self.actual = None\n\n def tokenize(self, src):\n self.origin = src\n results = []\n for word in self.origin.split():\n if word.lower() == \"and\":\n results.append(Token(\"AND\", word))\n elif word.lower() == \"or\":\n results.append(Token(\"OR\", word))\n else:\n results.append(Token(\"TERM\", word))\n # for result in results:\n # print(result.type, result.value)\n results.append(Token(\"EOF\", \"\"))\n self.tokens = results\n return self.tokens\n\n def nextToken(self):\n if self.position + 1 <= len(self.tokens):\n self.actual = self.tokens[self.position]\n self.position += 1\n\n\nclass Node:\n def evaluate(self, index):\n return set()\n\n\nclass Term(Node):\n def __init__(self, term):\n super().__init__()\n self.term = term\n\n def evaluate(self, index):\n try:\n return set(index[self.term])\n except:\n return set()\n\n\nclass Operation(Node):\n def __init__(self, nodes: list[Node]):\n super().__init__()\n self.nodes = nodes\n\n def combine(self, result, new_results):\n return set()\n\n def evaluate(self, index):\n result = self.nodes[0].evaluate(index)\n for node in self.nodes[1:]:\n result = self.combine(result, node.evaluate(index))\n return result\n\n\nclass OpAnd(Operation):\n def __init__(self, nodes):\n super().__init__(nodes)\n\n def combine(self, result, new_results):\n return result & new_results\n\n\nclass OpOr(Operation):\n def __init__(self, nodes):\n super().__init__(nodes)\n\n def combine(self, result, new_results):\n return result | new_results\n\n\ndef build_query(query):\n node_type = query[0]\n if node_type == \"term\":\n # [\"term\", \"abelha\"]\n return Term(query[1])\n else:\n # [\"and\", [\"term\", \"abelha\"], [\"term\", \"rainha\"]]\n arg_list = []\n for arg in query[1:]:\n arg_node = build_query(arg)\n arg_list.append(arg_node)\n if node_type == \"and\":\n return OpAnd(arg_list)\n elif node_type == \"or\":\n return OpOr(arg_list)\n else:\n raise KeyError(f\"Operação {node_type} desconhecida.\")\n\n\ndef synonymTrees(terms):\n trees = []\n for term in terms:\n if term not in [\"or\", \"and\"]:\n termos = []\n synArg = Search(term)\n synArg = synArg.synonyms()\n if synArg != 404:\n synArg.append(term)\n for syn in synArg:\n termos.append(Term(syn))\n tree = OpOr(termos)\n else:\n tree = Term(term)\n trees.append(tree)\n return trees\n\n\ndef parseTerm(tk, idx, sTree):\n tk.nextToken()\n if tk.actual.type == \"TERM\":\n return idx + 1, sTree[idx]\n\n\ndef parseAnd(tk, idx, sTree):\n idx, firstChild = parseTerm(tk, idx, sTree)\n output = firstChild\n if tk.actual.type == \"EOF\":\n return output\n tk.nextToken()\n while tk.actual.type == \"AND\":\n idx, secondChild = parseTerm(tk, idx, sTree)\n output = OpAnd([output, secondChild])\n tk.nextToken()\n return idx, output\n\n\ndef parseOr(tk, idx, sTree):\n idx, firstChild = parseAnd(tk, idx, sTree)\n output = firstChild\n while tk.actual.type == \"OR\":\n idx, secondChild = parseAnd(tk, idx, sTree)\n output = OpOr([output, secondChild])\n return output\n\n\ndef parse_raw_query(raw_query: str):\n query = raw_query.split()\n sTree = synonymTrees(query)\n if len(query) == 1:\n return sTree[0]\n elif len(query) > 1 and len(query) % 2 != 0:\n if query[1].lower() == \"or\" or 
query[1].lower() == \"and\":\n tk = Tokenizer()\n tk.tokenize(raw_query)\n resultado = parseOr(tk, 0, sTree)\n tk.nextToken()\n if tk.actual.type == \"EOF\":\n return resultado\n else:\n raise Exception(\"Erro no parser\")\n else:\n raise Exception(\"As queries devem ser ligadas por 'and' ou 'or'\")\n\n raise Exception(\"Problema na query\")\n\n\ndef parse_json_query(json_query: str):\n q = json.loads(json_query)\n print(q)\n query = build_query(q)\n return query\n","sub_path":"se/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"265916487","text":"from keras.layers import Input, Dense, \\\n Dropout, Reshape, BatchNormalization, LeakyReLU, Conv2DTranspose, Activation, Conv2D, Flatten, UpSampling2D\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import Adam, RMSprop\nimport keras\nimport keras.backend as K\nfrom keras.datasets import mnist\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport random\nimport cv2\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())\n\nfilelist = []\nfor rawdata in os.listdir(\"data/pokemon/\"):\n if rawdata.find(\"png\") > -1:\n filelist.append(rawdata)\n\ntest_list = set()\nwhile len(test_list) < 500:\n rand = random.choice(filelist)\n test_list.add(rand)\n\nfilelist = set(filelist)\ntrain_data = filelist - test_list\n\n\ndef generate_data(data, list):\n for file in list:\n img = cv2.imread('data/pokemon/' + file)\n data.append(img)\n return data\n\n\ndata = []\ndata = np.array(generate_data(data, train_data))\n\nprint(data.shape)\ntest_data = []\ntest_data = np.array(generate_data(test_data, test_list))\n\n\n# test_data = test_data.astype('float32')\n# data = data.astype('float32')\n\n\ndef discriminator():\n net = Sequential()\n input_shape = (256, 256, 3)\n dropout_prob = 0.4\n\n net.add(Conv2D(64, 5, strides=2, input_shape=input_shape, padding='same'))\n net.add(LeakyReLU())\n\n net.add(Conv2D(128, 5, strides=2, padding='same'))\n net.add(LeakyReLU())\n net.add(Dropout(dropout_prob))\n\n net.add(Conv2D(256, 5, strides=2, padding='same'))\n net.add(LeakyReLU())\n net.add(Dropout(dropout_prob))\n\n net.add(Conv2D(512, 5, strides=2, padding='same'))\n net.add(LeakyReLU())\n net.add(Dropout(dropout_prob))\n\n net.add(Flatten())\n net.add(Dense(1))\n net.add(Activation('sigmoid'))\n\n return net\n\n\ndef generator():\n net = Sequential()\n dropout_prob = 0.4\n\n net.add(Dense(8 * 8 * 256, input_dim=100))\n net.add(BatchNormalization(momentum=0.9))\n net.add(Activation('relu'))\n net.add(Reshape((32, 32, 256)))\n net.add(Dropout(dropout_prob))\n\n net.add(UpSampling2D())\n net.add(Conv2D(128, 5, padding='same'))\n net.add(BatchNormalization(momentum=0.9))\n net.add(Activation('relu'))\n\n net.add(UpSampling2D())\n net.add(Conv2D(128, 5, padding='same'))\n net.add(BatchNormalization(momentum=0.9))\n net.add(Activation('relu'))\n\n net.add(UpSampling2D())\n net.add(Conv2D(64, 5, padding='same'))\n net.add(BatchNormalization(momentum=0.9))\n net.add(Activation('relu'))\n\n net.add(Conv2D(32, 5, padding='same'))\n net.add(BatchNormalization(momentum=0.9))\n net.add(Activation('relu'))\n\n net.add(Conv2D(3, 5, padding='same'))\n net.add(Activation('sigmoid'))\n\n return net\n\nnet_discriminator = discriminator()\n\noptim_discriminator = RMSprop(lr=0.0002, clipvalue=1.0, decay=6e-8)\nmodel_discriminator = Sequential()\nmodel_discriminator.add(net_discriminator)\nmodel_discriminator.compile(loss='binary_crossentropy', optimizer=optim_discriminator, metrics=['accuracy'])\n\nnet_generator = generator()\n\noptim_adversarial = Adam(lr=0.0001, clipvalue=1.0, decay=3e-8)\nmodel_adversarial = Sequential()\nmodel_adversarial.add(net_generator)\n\n# Disable layers in discriminator\nfor layer in net_discriminator.layers:\n layer.trainable = False\n\nmodel_adversarial.add(net_discriminator)\nmodel_adversarial.compile(loss='binary_crossentropy', optimizer=optim_adversarial, metrics=['accuracy'])\n\n\ndef show_images(generated_images):\n n_images = 5\n\n for i in 
range(n_images):\n plt.subplot(1, 5, i+1)\n plt.imshow(generated_images[i])\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n plt.show()\n\n\n\nepochs = 0\n\n\nfor i in range(0, epochs):\n\n # Select a random set of training images from the new dataset\n batch_size = 16\n images_train = np.array([random.choice(data) for i in range(batch_size)])\n\n # Generate a random noise vector\n noise = np.random.uniform(-1.0, 1.0, size=[batch_size, 100])\n\n # Use the generator to create fake images from the noise vector\n images_fake = net_generator.predict(noise)\n\n # Create a dataset with fake and real images\n print(images_fake.shape)\n print(images_train.shape)\n\n x = np.concatenate((images_train, images_fake))\n y = np.ones([2 * batch_size, 1])\n y[batch_size:, :] = 0\n\n # Train discriminator for one batch\n d_stats = model_discriminator.train_on_batch(x, y)\n\n # Train the generator\n # The input of th adversarial model is a list of noise vectors. The generator is 'good' if the discriminator classifies\n # all the generated images as real. Therefore, the desired output is a list of all ones.\n y = np.ones([batch_size, 1])\n noise = np.random.uniform(-1.0, 1.0, size=[batch_size, 100])\n a_stats = model_adversarial.train_on_batch(noise, y)\n\n#print images\nshow_images(images_train)\nshow_images(images_fake)","sub_path":"GAN2.py","file_name":"GAN2.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"54893680","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nA basic script inspired by a fortran code written by Dr. Michael Roach\r\nthat computes gravity terrain corrections for a given gravity data set \r\nusing digital elevation data derived from a Arc/Info ASCII grid file. \r\nTerrain corrections are computed using the 'mass-line' approximation \r\nfor determining the gravitational attraction of a prism whose mass is \r\nconcentrated along an infinitely thin line in the centre of the prism \r\nat a distant observation point, in this case a gravity station.\r\n\r\nThe approximation breaks down at short distances from the gravity \r\nstation, leading to the introduction of significant errors to the \r\ncorrection. This is overcome by ensuring prisms are computed at least\r\nthree prism sides, or in this case DEM grid cell widths, from the \r\ngravity station. For more on the method see St John and Green (1967)\r\nhere:\r\nhttps://onlinelibrary.wiley.com/doi/abs/10.1111/j.1365-2478.1967.tb01778.x\r\n\r\nDuffet (2016) found little difference between this approach and that\r\nof the commercial code RasterTC™ when computing terrain corrections\r\nin Tasmania:\r\nhttps://pdfs.semanticscholar.org/148d/28283714686ad4d8315cca6ff7a53657ace3.pdf\r\n\r\nNote: This code is single threaded and memory intensive. Multi-threaded\r\n functionality for faster processing is a work in progres. It is \r\n recommended that systems have free memory equivalent to approx\r\n 8 times the size of the ASCII DEM grid and operate at high core \r\n clocks.\r\n\"\"\"\r\n\r\n# import necessary libraries\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# link to gravity and topography file\r\ngfn = r\"F:\\path\\to\\example_GRAV_bouguer_mga55.xyz\"\r\ntfn = r\"F:\\path\\to\\example_DEM_100m_srtm_mga55.asc\"\r\n\r\n# set some key parameters\r\nradmax = 2000 # maximum radius for TC calculation (m) - large prisms inside this radii\r\nradmin = 500 # minimum radius for TC calculation (m) - small prisms inside this radii\r\ndensity = 2670 # density for TC calculation (kg/m3)\r\n\r\n# set cmap for data plotting\r\ndem_cmap = 'gray'\r\ngrav_cmap = 'viridis'\r\ntcorr_cmap = 'plasma'\r\n\r\n# calculate the constant for the mass line calculation\r\ncc = 6.6743e-11 * density * 100000.0 \r\n\r\n# import gravity data into 2d array\r\ntmp = np.genfromtxt(gfn) # read txt file into temp array\r\ng = np.zeros((len(tmp),5))\r\ng[:,:-2] = tmp\r\n\r\n# read DEM grid to get grid parameters from header\r\nwith open(tfn,'r') as grd:\r\n lines = grd.readlines()\r\n nx = int(lines[0].strip().split()[1])\r\n ny = int(lines[1].strip().split()[1])\r\n xllcorner = float(lines[2].strip().split()[1])\r\n yllcorner = float(lines[3].strip().split()[1])\r\n dx = float(lines[4].strip().split()[1])\r\n no_data = float(lines[5].strip().split()[1])\r\n grd.close()\r\n\r\n# make x amd y arrays from these parameters\r\nx = np.arange(xllcorner,xllcorner+nx*dx,dx,dtype=float)\r\ny = np.arange(yllcorner,yllcorner+ny*dx,dx,dtype=float)\r\n\r\n# read elevation data into array, set no-data value to nan, flip along axis 0 and transpose\r\nprint ('\\n--------- Reading %s into array ---------'%(os.path.basename(tfn)))\r\nZ = np.genfromtxt(tfn,skip_header=6)\r\nZ[Z==no_data] = np.nan\r\nZ = np.flip(Z,axis=0).T\r\n\r\n# subset the DEM array so it is just bigger than radmax from all gravity stations\r\nxmin, xmax = min(g[:,0])-radmax*1.1, max(g[:,0])+radmax*1.1\r\nymin, ymax = min(g[:,1])-radmax*1.1, max(g[:,1])+radmax*1.1\r\nx_subset_idx 
= np.argwhere((x > xmin) & (x < xmax))\r\ny_subset_idx = np.argwhere((y > ymin) & (y < ymax))\r\n\r\n# subset the x and y arrays \r\nx = np.take(x,x_subset_idx)\r\ny = np.take(y,y_subset_idx)\r\n\r\n# subset the Z array\r\nZ = Z[min(x_subset_idx)[0]:max(x_subset_idx)[0],:] # subset x-dimension\r\nZ = Z[:,min(y_subset_idx)[0]:max(y_subset_idx)[0]] # subset y-dimension\r\n\r\n# plot the DEM matrices as colormesh and overlay with gravity data points\r\nprint ('Commence plotting routine for DEM and Gravity datasets...')\r\nX, Y = np.meshgrid(x,y)\r\nfig, ax = plt.subplots()\r\ndem = ax.pcolormesh(X,Y,Z.T,cmap=dem_cmap)\r\ndem_cb = plt.colorbar(dem,shrink=0.75,pad=0.1)\r\ndem_cb.set_label(label='Elevation (m)',size=9)\r\ngrav = ax.scatter(g[:,0],g[:,1],c=g[:,2],s=30,cmap=grav_cmap)\r\ngrav_cb = plt.colorbar(grav,shrink=0.75,pad=0.05)\r\ngrav_cb.set_label(label='Bouguer Anomaly (mGal)',size=9)\r\nplt.title('Uncorrected Gravity on DEM grid')\r\nplt.tight_layout()\r\nplt.show()\r\nplt.close()\r\n\r\n# define a function to apply the terrain correction\r\ndef terrain_correction(g,Z,x,y,density,radmax,radmin,dx):\r\n \"\"\"This function loops through a gravity data array and computes\r\n the gravity terrain correction for each station using a digital\r\n elevation model array\"\"\"\r\n # start looping through gravity stations in g array\r\n print ('Start looping through gravity stations...')\r\n for i in range(0,len(g)):\r\n stn_x, stn_y = g[i,0], g[i,1] # get station locations\r\n\r\n # find station x, y and z indices and values in DEM array\r\n x_idx = np.argmin(np.abs(x - stn_x))\r\n y_idx = np.argmin(np.abs(y - stn_y))\r\n stn_x, stn_y, stn_z = x[x_idx], y[y_idx], Z[x_idx,y_idx]\r\n \r\n # create empty variable for station specific t_corr summation\r\n tcorr = 0\r\n\r\n # find minimum and maximum index values for radmax in x and y direction\r\n minrx_ix, maxrx_ix = int(x_idx - radmax/dx), int(x_idx + radmax/dx)\r\n minrx_iy, maxrx_iy = int(y_idx - radmax/dx), int(y_idx + radmax/dx)\r\n\r\n # loop through terrain cells within radmax indices\r\n for j in range(minrx_ix, maxrx_ix):\r\n for k in range(minrx_iy,maxrx_iy):\r\n # compute coarse prisms within radmin and radmax\r\n if abs(j-x_idx)*dx > radmin and abs(k-y_idx)*dx > radmin:\r\n r1 = np.sqrt((stn_x-x[j])**2 + (stn_y-y[k])**2)\r\n r2 = np.sqrt((stn_x-x[j])**2 + (stn_y-y[k])**2 + (stn_z-Z[j,k])**2)\r\n t = cc * dx * dx * (1/r1 - 1/r2)\r\n tcorr = tcorr + t \r\n # compute fine prisms within radmin and 3*dx (elif so cells already\r\n # handled as coarse prisms are not double counted)\r\n elif abs(j-x_idx) > 3 and abs(k-y_idx) > 3:\r\n # NW quadrant prism\r\n prism_x, prism_y = x[j] - dx/4, y[k] + dx/4\r\n prism_z = (Z[j,k] + Z[j-1,k] + Z[j-1,k+1] + Z[j,k+1]) / 4 # mean of surrounding DEM points\r\n r1 = np.sqrt((stn_x-prism_x)**2 + (stn_y-prism_y)**2)\r\n r2 = np.sqrt((stn_x-prism_x)**2 + (stn_y-prism_y)**2 + (stn_z-prism_z)**2)\r\n t = cc * (dx/2) * (dx/2) * (1/r1 - 1/r2)\r\n tcorr = tcorr + t \r\n # NE quadrant prism\r\n prism_x, prism_y = x[j] + dx/4, y[k] + dx/4\r\n prism_z = (Z[j,k] + Z[j,k+1] + Z[j+1,k+1] + Z[j+1,k]) / 4 # mean of surrounding DEM points\r\n r1 = np.sqrt((stn_x-prism_x)**2 + (stn_y-prism_y)**2)\r\n r2 = np.sqrt((stn_x-prism_x)**2 + (stn_y-prism_y)**2 + (stn_z-prism_z)**2)\r\n t = cc * (dx/2) * (dx/2) * (1/r1 - 1/r2)\r\n tcorr = tcorr + t \r\n # SE quadrant prism\r\n prism_x, prism_y = x[j] + dx/4, y[k] - dx/4\r\n prism_z = (Z[j,k] + Z[j+1,k] + Z[j+1,k-1] + Z[j,k-1]) / 4 # mean of surrounding DEM points\r\n r1 = np.sqrt((stn_x-prism_x)**2 + (stn_y-prism_y)**2)\r\n r2 = np.sqrt((stn_x-prism_x)**2 + 
(stn_y-prism_y)**2 + (stn_z-prism_z)**2)\r\n t = cc * (dx/2) * (dx/2) * (1/r1 - 1/r2)\r\n tcorr = tcorr + t \r\n # SW quadrant prism\r\n prism_x, prism_y = x[j] - dx/4, y[k] - dx/4\r\n prism_z = (Z[j,k] + Z[j,k-1] + Z[j-1,k-1] + Z[j-1,k]) / 4 # mean of surrounding DEM points\r\n r1 = np.sqrt((stn_x-prism_x)**2 + (stn_y-prism_y)**2)\r\n r2 = np.sqrt((stn_x-prism_x)**2 + (stn_y-prism_y)**2 + (stn_z-prism_z)**2)\r\n t = cc * (dx/2) * (dx/2) * (1/r1 - 1/r2)\r\n tcorr = tcorr + t \r\n \r\n # print some details to console for this station\r\n print ('Terrain correction for station %s of %s ---> %s mGal'%(i,len(g),np.round(tcorr[0],4)))\r\n \r\n # add tcorr and g + tcorr to empty columns in array\r\n g[i,3] = tcorr\r\n g[i,4] = g[i,2] + tcorr\r\n\r\n # return the gravity array\r\n return g\r\n\r\n# apply the terrain correction\r\ng = terrain_correction(g,Z,x,y,density,radmax,radmin,dx)\r\n\r\n# plot the data\r\nfig, ax = plt.subplots(1,3,figsize=(18,9))\r\nfig.suptitle('Mass-Line TC with density %skgm$^{-3}$ and %sm DEM resolution'%(density,round(dx,2)),fontsize=16)\r\nfor i in range(0,3):\r\n ax[i].pcolormesh(X,Y,Z.T,cmap=dem_cmap)\r\n ax[i].set_aspect(1)\r\n if i == 1:\r\n sc = ax[i].scatter(g[:,0],g[:,1],c=g[:,i+2],s=60,cmap=tcorr_cmap)\r\n else:\r\n sc = ax[i].scatter(g[:,0],g[:,1],c=g[:,i+2],s=60,cmap=grav_cmap,vmin=min(g[:,2]),vmax=max(g[:,4]))\r\n ax[i].set_xlabel('Easting (m)')\r\n cbar = fig.colorbar(sc,ax=ax[i],shrink=0.6,orientation='horizontal',pad=0.1)\r\n cbar.set_label('mGal')\r\nax[0].set_title('Uncorrected Bouguer')\r\nax[1].set_title('Terrain Correction')\r\nax[2].set_title('Corrected Bouguer')\r\nax[0].set_ylabel('Northing (m)')\r\nplt.tight_layout()\r\nplt.show()\r\nplt.close()\r\n\r\n# make output filename\r\noutfn = gfn.replace('.xyz','_tc%s.csv'%(density))\r\n\r\n# write gravity array to output file\r\nhd = 'x,y,g,t_corr%s,g_t_corr%s'%(density,density)\r\nnp.savetxt(outfn,g,delimiter=',',header=hd,comments='') # write gravity array\r\n","sub_path":"massline_tcorr.py","file_name":"massline_tcorr.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
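The docstring of `massline_tcorr.py` above describes the St John and Green mass-line term that the nested prism loops apply, with G, density and the m/s² to mGal conversion folded into the script's `cc` constant. As a minimal standalone sketch of that single-prism formula (the helper name and test values below are mine, not from the script):

```python
# Minimal sketch of the mass-line attraction of one prism, assuming the
# same mGal scaling the script bakes into cc = G * density * 1e5.
import numpy as np

G = 6.6743e-11  # gravitational constant (m3 kg-1 s-2)

def mass_line_tc(stn, prism, area, density):
    """Attraction (mGal) at station `stn` of a prism of plan area `area` (m2)
    whose mass is collapsed onto a vertical line through `prism`.
    Both points are (x, y, z) tuples in metres."""
    r1 = np.hypot(stn[0] - prism[0], stn[1] - prism[1])        # horizontal distance
    r2 = np.sqrt(r1**2 + (stn[2] - prism[2])**2)               # slant distance
    return G * density * 1e5 * area * (1.0 / r1 - 1.0 / r2)   # 1e5: m/s2 -> mGal

# e.g. a 100 m x 100 m cell with 50 m of relief, 600 m from the station:
# mass_line_tc((0, 0, 100), (600, 0, 150), 100 * 100, 2670) -> ~0.001 mGal
```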
+{"seq_id":"47682493","text":"import sys\nfrom datetime import datetime\nimport time\n\n# from sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.preprocessing import StandardScaler\nimport redis\nfrom sklearn.externals import joblib\n\nNORMAL = 0\nFAULT = 1\n\n# Later, move to configure...\nstddumpfile = '/opt/dma/var/sklearn-dump/std.cmp'\nscdumpfile = '/opt/dma/var/sklearn-dump/svm.cmp'\n\n\ndef read_redis(stime, etime, stat):\n conn = redis.StrictRedis(host='localhost', port=6379)\n rawlist = conn.zrangebyscore('collectd/localhost/memory/memory-used',\n stime, etime)\n datalist = [s.split(\":\") for s in rawlist]\n dlist = [[float(d[1]), float(d[1])] for d in datalist]\n llist = [stat for d in datalist]\n print(\"DEBUG read: \", dlist)\n print(\"DEBUG read: \", llist)\n return (dlist, llist)\n\n\ndef learn(dlist, llist):\n std_scl = StandardScaler()\n std_scl.fit(dlist)\n dlist_std = std_scl.transform(dlist)\n clf = svm.SVC()\n clf.fit(dlist_std, llist)\n print(\"DEBUG learn: \", dlist)\n print(\"DEBUG learn: \", dlist_std)\n print(\"DEBUG learn: \", llist)\n joblib.dump(std_scl, stddumpfile, compress=True)\n joblib.dump(clf, scdumpfile, compress=True)\n return\n\n\ndef date2sec(datestr):\n date = datetime.strptime(datestr, \"%Y-%m-%d %H:%M:%S\")\n sec = int(time.mktime(date.timetuple()))\n # print sec\n return sec\n\n\nargvs = sys.argv\nargc = len(argvs)\n\n# print argvs\n# print argc\n# print\nif (argc != 5):\n print ('Usage: python learn.py '\n ' ')\n quit()\n\nnormalinput = read_redis(date2sec(argvs[1]), date2sec(argvs[2]), NORMAL)\nfaultinput = read_redis(date2sec(argvs[3]), date2sec(argvs[4]), FAULT)\nlearn(normalinput[0] + faultinput[0], normalinput[1] + faultinput[1])\n","sub_path":"demo/memory-svm/analysis/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"98565691","text":"\nimport Queue\nimport random\nimport threading\nfrom time import sleep\n\n\n_sentinel = object()\n_running = True\n\ndef producer(q):\n while _running:\n q.put(random.randrange(1, 100))\n sleep(1)\n q.put(_sentinel)\n\ndef consumer(q):\n while True:\n data = q.get()\n if data == _sentinel:\n break\n print(data)\n\nq = Queue.Queue()\nt_producer = threading.Thread(target=producer, args=(q,))\nt_consumer = threading.Thread(target=consumer, args=(q,))\nt_producer.start()\nt_consumer.start()\n \nsleep(10)\n_running = False\nt_producer.join()\nt_consumer.join()","sub_path":"cookbook/concurrency/communicating_between_threads.py","file_name":"communicating_between_threads.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"200683969","text":"import requests\nimport sys\nimport json\n\nURL = 'http://zipcloud.ibsnet.co.jp/api/search?zipcode={}'\n\n\ndef zip_api(zip_code):\n r = requests.get(URL.format(zip_code))\n return r.json()\n\n\ndef main():\n zip_code = sys.argv[1]\n zip_json = zip_api(zip_code)\n print(json.dumps(zip_json, ensure_ascii=False))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/zip_code_app/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"526850708","text":"#!/usr/bin/env python3 windows下忽略\n# -*- coding: utf-8 -*-\n\n' 文档注释 '\n\n__author__ = 'runoobkui'\n\n'''\n第1行和第2行是标准注释,第1行注释可以让这个hello.py文件直接在Unix/Linux/Mac上运行,第2行注释表示.py文件本身使用标准UTF-8编码;\n第4行是一个字符串,表示模块的文档注释,任何模块代码的第一个字符串都被视为模块的文档注释;\n第6行使用__author__变量把作者写进去,这样当你公开源代码后别人就可以瞻仰你的大名;\n以上就是Python模块的标准文件模板,当然也可以全部删掉不写,但是,按标准办事肯定没错。\n'''\n\n\ndef application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n return ['Hello, web!
'.encode('utf-8')]\n# return [b'Hello, web!
'] 错误写法\n\n'''\ndef application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n body = 'Hello, %s!
' % (environ['PATH_INFO'][1:] or 'web')\n return [body.encode('utf-8')]\n'''\n","sub_path":"Learn/Python/Python_learn/9_Web/hello_wsgi.py","file_name":"hello_wsgi.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
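The `hello_wsgi.py` entry above defines a WSGI `application` callable but nothing to serve it. A minimal way to run it, using only the standard library (a sketch; it assumes the file is importable as `hello_wsgi`):

```python
# Serve the WSGI application above on localhost:8000 (standard library only).
from wsgiref.simple_server import make_server
from hello_wsgi import application  # assumes the module name above

httpd = make_server('', 8000, application)
print('Serving HTTP on port 8000...')
httpd.serve_forever()
```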
+{"seq_id":"369873453","text":"import random\nimport math\n\n# local stuff\nfrom vector import *\nfrom ray import ray\nfrom hitable import hit_boundary\nfrom hitable import hit_record\nfrom hitable import hitable\nfrom material import material\n\ndef random_point_in_unit_sphere():\n '''\n brute-force approach to get a random point on a unit-sphere: i.e. keep\n looking till you find a point\n '''\n unit_vector = vector_3d([1, 1, 1])\n\n while True:\n p = 2 * vector_3d([random.random(), random.random(), random.random()]) - unit_vector\n if p.squared_length() < 1.0:\n break\n return p\n\n\ndef reflect_a_ray(R, N):\n '''\n return a ray that got reflected at a given normal\n '''\n reflected_ray = R - 2 * dot_product(R, N) * N\n return reflected_ray\n\ndef refract_a_ray(ir_direction, out_normal, ni_over_nr):\n '''\n returns True/False depending on whether a ray of light got refracted or\n not.\n '''\n ir_unit_vec = create_unit_vector(ir_direction)\n dt = dot_product(ir_unit_vec, out_normal)\n discriminant = 1.0 - (ni_over_nr * ni_over_nr * (1 - dt * dt))\n\n if discriminant > 0 :\n refracted_ray = ni_over_nr * (ir_unit_vec - out_normal * dt) - out_normal * math.sqrt(discriminant)\n return True, refracted_ray\n\n return False, None\n\n\n#\n# a surface made out of diffuse material i.e. which scatters incident rays.\n#\nclass diffuse_material(material):\n '''\n a diffuse / lambertian material\n '''\n def __init__(self, albedo = vector_3d()):\n self.albedo_ = albedo\n return\n\n def __repr__(self):\n return ('diffuse_material(' +\n 'albedo_ = ' + repr(self.albedo_) +\n ')'\n )\n\n def scatter_ray(self, incident_ray, hit_rec, attenuate, scatterd_ray):\n target = hit_rec.p_ + hit_rec.normal_ + random_point_in_unit_sphere()\n new_ray = ray(hit_rec.p_, target - hit_rec.p_)\n\n # copy the values back\n attenuate.values_ = self.albedo_.values_\n scatterd_ray.origin_ = new_ray.origin_\n scatterd_ray.direction_ = new_ray.direction_\n\n return True\n\n#\n# a metallic surface\n#\nclass metallic_material(material):\n '''\n a metallic surface with some 'fuzz' during reflection.\n '''\n def __init__(self, albedo = vector_3d(), fuzz = 1.0):\n self.albedo_ = albedo\n if fuzz < 1.0:\n self.fuzz_ = fuzz\n else:\n self.fuzz_ = 1.0\n return\n\n def __repr__(self):\n return ('metallic_material(' +\n 'albedo_ = ' + repr(self.albedo_) + ', ' +\n 'fuzz_ = ' + repr(self.fuzz_) +\n ')'\n )\n\n def scatter_ray(self, incident_ray, hit_rec, attenuate, scatterd_ray):\n reflected_ray = reflect_a_ray(create_unit_vector(incident_ray.direction()), hit_rec.normal_)\n new_ray = ray(hit_rec.p_, reflected_ray + self.fuzz_ * random_point_in_unit_sphere())\n\n # copy the values back\n attenuate.values_ = self.albedo_.values_\n scatterd_ray.origin_ = new_ray.origin_\n scatterd_ray.direction_ = new_ray.direction_\n\n # is it scattered ?\n if dot_product(scatterd_ray.direction(), hit_rec.normal_) > 0:\n return True\n\n return False\n\n\n#\n# dielectric material\n#\nclass dielectric_material(material):\n '''\n clear material e.g. glass, water, diamond etc are dielectrics. when a ray\n of light hits them, it gets splitted into reflected and refracted ray.\n\n refracted ray in the media is governed by snell's law\n n_i . sin(ϴ_i) = n_r . 
sin(ϴ_r)\n where 'n' is refractive index of the medium, and 'ϴ' is angle of\n incidence/refraction in that medium.\n '''\n def __init__(self, refractive_index):\n self.ref_idx_ = refractive_index\n return\n\n def __repr__(self):\n return ('dielectric_material(' +\n 'ref_idx_ = ' + repr(self.ref_idx_) +\n ')'\n )\n\n def schlick_approx(self, cos_theta):\n '''\n for low accuracy applications, fresnel factor's contribution to specular\n reflection can be ignored (because we don't care about the effect of\n light polarization).\n\n we use schlick's approximation instead, which is much cheaper\n computationally. this is given by\n r_0 = ((n_1 - n_2)/(n_1 + n_2))^2\n r_ϴ = r_0 + (1 - r_0) * (1 - cos(ϴ))^5\n where:\n ϴ = angle between incident light and normal (at the point of incidence)\n n_1, n_2 = refractive index of two media. since one of the interfaces is air,\n n_1 == 1.\n '''\n r_0 = (1 - self.ref_idx_)/(1 + self.ref_idx_)\n r_0 = r_0 * r_0\n\n return ((r_0 + (1 - r_0) * math.pow((1 - cos_theta), 5)))\n\n def scatter_ray(self, incident_ray, hit_rec, attenuate, scatterd_ray):\n no_attenuation = vector_3d([1.0, 1.0, 1.0])\n ir_direction = incident_ray.direction()\n reflected_ray = reflect_a_ray(ir_direction, hit_rec.normal_)\n\n if dot_product(ir_direction, hit_rec.normal_) > 0:\n out_normal = -hit_rec.normal_\n ni_over_nr = self.ref_idx_\n cos_theta = (self.ref_idx_ * dot_product(ir_direction, hit_rec.normal_))/(ir_direction.length())\n else:\n out_normal = hit_rec.normal_\n ni_over_nr = (1.0 / self.ref_idx_)\n cos_theta = -dot_product(ir_direction, hit_rec.normal_)/(ir_direction.length())\n # end: if dot_product(....) > 0\n\n # is the ray reflected ?\n ray_is_refracted, refracted_ray = refract_a_ray(ir_direction, out_normal, ni_over_nr)\n reflect_chance = 1.0\n\n if ray_is_refracted == True:\n reflect_chance = self.schlick_approx(cos_theta)\n\n if random.random() < reflect_chance:\n new_ray = ray(hit_rec.p_, reflected_ray)\n else:\n new_ray = ray(hit_rec.p_, refracted_ray)\n\n # copy the values back\n attenuate.values_ = no_attenuation.values_\n scatterd_ray.origin_ = new_ray.origin_\n scatterd_ray.direction_ = new_ray.direction_\n\n return True\n\n","sub_path":"all_materials.py","file_name":"all_materials.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
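The `dielectric_material` docstring above derives Schlick's approximation r_ϴ = r_0 + (1 - r_0)(1 - cos ϴ)^5 with r_0 = ((n_1 - n_2)/(n_1 + n_2))². A quick numerical sanity check for glass (n = 1.5), using a standalone copy of the formula (the numbers are illustrative, not from the file):

```python
# Worked check of the Schlick reflectance used by dielectric_material.
def schlick(cos_theta, ref_idx):
    r0 = ((1 - ref_idx) / (1 + ref_idx)) ** 2
    return r0 + (1 - r0) * (1 - cos_theta) ** 5

print(schlick(1.0, 1.5))  # normal incidence: r0 = 0.04, i.e. ~4% reflected
print(schlick(0.0, 1.5))  # grazing incidence: 1.0, i.e. fully reflective
```

This matches the physical expectation that dielectrics become mirror-like at grazing angles, which is why `scatter_ray` rolls `random.random() < reflect_chance` to choose between the reflected and refracted ray.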
+{"seq_id":"232689734","text":"'''\r\nShmulik Edelman\r\nshmulike@post.bgu.ac.il\r\n'''\r\nimport numpy as np\r\nfrom math import pi, sqrt\r\nimport scipy.interpolate as sc # interp1d, CubicSpline, splprep, splev\r\nfrom datetime import datetime\r\nfrom timeit import default_timer as timer\r\n\r\n# add angle limit to head move\r\n# publish joint positions(12+1) and angles (12*2+1)\r\n\r\ndeadzone_ = 0.12\r\n\r\nclass Robot:\r\n def __init__(self, link_N = 12, link_L = 160, thetaYstep=.01, thetaZstep=.01, forwardStep=2, backStep= 10, rider_max=1000):\r\n self.link_N = link_N\r\n self.link_L = link_L\r\n self.thetaYstep = thetaYstep\r\n self.thetaZstep = thetaZstep\r\n self.forwardStep = forwardStep\r\n self.backSteps = backStep\r\n self.epsilon = 0.01\r\n self.iter_max = 30\r\n self.rider_max = rider_max\r\n\r\n\r\n self.v0 = np.array([[0], [0], [0], [1]])\r\n self.v_end = np.array([[self.link_L], [0], [0], [1]])\r\n self.A_head = np.eye(4)\r\n self.head_axis = []\r\n\r\n self.vec_len = 1000\r\n self.vx = np.array([[self.vec_len], [0], [0], [1]])\r\n self.vy = np.array([[0], [self.vec_len], [0], [1]])\r\n self.vz = np.array([[0], [0], [self.vec_len], [1]])\r\n\r\n # create initial path\r\n self.x_start = -(self.link_N-1)*self.link_L\r\n x = np.array([np.linspace(self.x_start, 0, 100)])\r\n y = z = np.zeros((1, x.shape[1]))\r\n self.path = np.vstack((x, y, z))\r\n # self.path = np.fliplr(self.path)\r\n\r\n self.joint_cmd = []\r\n self.joint_pos_recon = []\r\n self.update_head_axis()\r\n\r\n\r\n # Update joint poses\r\n self.joint_pos = np.arange(self.x_start, self.link_L+1, self.link_L)\r\n self.split_curve()\r\n self.calc_joint_angles()\r\n # self.recontract_joints_pos()\r\n\r\n # def turnHead(self, thetaY=0, thetaZ=0):\r\n # thetaY *= self.thetaYstep\r\n # thetaZ *= self.thetaZstep\r\n # # print(\"move\\t{:.2f}:{:.2f}\".format(thetaY, thetaZ))\r\n # self.A_head = np.dot(self.A_head, self.RyRzRd(thetaY, thetaZ, 0))\r\n # self.update_head_axis()\r\n\r\n\r\n def move_head(self, thetaY=0, thetaZ=0, forward=0):\r\n # Scale the joysticl movement by scale factor: thetaYstep, thetaZstep\r\n thetaY *= self.thetaYstep\r\n thetaZ *= self.thetaZstep\r\n\r\n # print(self.joint_pos[0][0])\r\n if (forward > 0 and self.joint_pos[0][0] <= 0):\r\n # if (forward > 0):\r\n forward *= self.forwardStep\r\n else:\r\n forward = 0\r\n\r\n # Update head position relative to world-system\r\n self.A_head = np.dot(self.A_head, self.RzRyRd(z=thetaZ, y=thetaY, d=forward))\r\n\r\n head_origin = self.update_head_axis()\r\n\r\n if (forward>0):\r\n # self.path = np.hstack((head_origin, self.path))\r\n self.path = np.hstack((self.path, head_origin))\r\n # time_start_1 = timer()\r\n # self.split_curve_3()\r\n # time_start_2 = timer()\r\n # self.calc_joint_angles()\r\n # print(self.joint_cmd)\r\n # time_start_3 = timer()\r\n # self.recontract_joints_pos()\r\n # time_end = timer()\r\n # print(\"Time: curve {:3.3f}\\tangles {:3.3f}\\trecontract {:3.3f}\".format((time_start_2-time_start_1)*10e3, (time_start_3-time_start_2)*10e3, (time_end-time_start_3)*10e3))\r\n # print(self.joint_pos_recon[0, :])\r\n\r\n # Continuous msg publish\r\n self.split_curve_3()\r\n self.calc_joint_angles()\r\n\r\n\r\n def update_head_axis(self):\r\n head_X_size = 200\r\n head_YZ_size = 100\r\n origin = np.dot(self.A_head, np.array([[0], [0], [0], [1]]))\r\n x_p = np.dot(self.A_head, np.array([[head_X_size], [0], [0], [1]]))\r\n y_p = np.dot(self.A_head, np.array([[0], [head_YZ_size], [0], [1]]))\r\n z_p = np.dot(self.A_head, np.array([[0], [0], 
[head_YZ_size], [1]]))\r\n\r\n headXAxis = np.hstack((origin, x_p))\r\n headYAxis = np.hstack((origin, y_p))\r\n headZAxis = np.hstack((origin, z_p))\r\n\r\n self.head_axis = np.stack((headXAxis, headYAxis, headZAxis))[0:3, :]\r\n return origin[0:3, :]\r\n\r\n\r\n def print_head(self):\r\n p_head = np.dot(self.A_head, self.v0)\r\n # print(\"Robot head position: {}\".format(p_head))\r\n\r\n\r\n # def RyRzRd(self, y, z, d):\r\n # # y = np.deg2rad(y)\r\n # # z = np.deg2rad(z)\r\n # R = np.array([[np.cos(y)*np.cos(z), -np.cos(y)*np.sin(z), np.sin(y), d*np.cos(y)*np.cos(z)],\r\n # [np.sin(z), np.cos(z), 0, d*np.sin(z)],\r\n # [-np.sin(y)*np.cos(z), np.sin(y)*np.sin(z), np.cos(y), -d*np.sin(y)*np.cos(z)],\r\n # [0, 0, 0, 1]])\r\n # return R\r\n\r\n def RzRyRd(self, z, y, d):\r\n sz = np.sin(z)\r\n cz = np.cos(z)\r\n sy = np.sin(y)\r\n cy = np.cos(y)\r\n R = np.array([[cy * cz, -sz, sy * cz, d * cy * cz],\r\n [cy * sz, cz, sy * sz, d * cy * sz],\r\n [-sy, 0, cy, -d * sy],\r\n [0, 0, 0, 1]])\r\n return R\r\n\r\n def split_curve(self):\r\n # Bisection method\r\n # reverse the path matrix to be end o start\r\n path = np.fliplr(self.path)\r\n\r\n\r\n self.joint_pos = path[:, 0].reshape(3, 1)\r\n tck, u = sc.splprep(path, k=2, s=0)\r\n\r\n b = c = 1\r\n c_pre = 0\r\n a = 0\r\n error_avg = 0\r\n prev_pos = self.joint_pos\r\n iter_count_avrg = 0\r\n for i in range(self.link_N-1):\r\n iter_count = 0\r\n # b = 1\r\n error = self.epsilon + 1\r\n while np.abs(error) > self.epsilon and iter_count < self.iter_max:\r\n iter_count += 1\r\n c = (a+b)/2\r\n # temp_pos = np.asarray([sc.splev(c, tck)]).T\r\n temp_pos = sc.splev(c, tck)\r\n\r\n # error = self.link_L - self.norm(temp_pos - prev_pos)\r\n error = self.link_L - self.norm2(temp_pos, prev_pos)\r\n if error > 0:\r\n a = c\r\n else:\r\n b = c\r\n a = c\r\n b = c + 4*(c - c_pre)\r\n c_pre = c\r\n\r\n temp_pos = np.asarray([temp_pos]).T\r\n self.joint_pos = np.hstack((self.joint_pos, temp_pos))\r\n prev_pos = temp_pos\r\n error_avg += np.abs(error)\r\n\r\n iter_count_avrg += iter_count\r\n # print(\"{}, \".format(iter_count), end='') # Debug: Print number iterations for each joint position\r\n iter_count_avrg /= self.link_N-1\r\n error_avg /= self.link_N - 1\r\n # print(\"avrg: {:.2f}\\tAverage error: {:.7f}\".format(iter_count_avrg, error_avg)) # Debug: Print average numberof iterations and average link length error\r\n\r\n # Add the end link position to the plot\r\n self.joint_pos = np.fliplr(self.joint_pos)\r\n end_effctor_pos = np.dot(self.A_head, self.v_end)[0:3, :]\r\n self.joint_pos = np.hstack((self.joint_pos, end_effctor_pos))\r\n # link_len = np.diff(self.joint_pos, axis=1)\r\n # link_len = np.linalg.norm(link_len, axis=0)\r\n # print(\"Debug: split_curve: link mean length: {:.4f}\\tAverage iterations: {:.1f}.\".format(link_len.mean(), iter_count/self.link_N))\r\n\r\n # print( (timer()-time1)*10e3)\r\n\r\n def split_curve_2(self):\r\n self.joint_pos = self.path[:, 0].reshape(3, 1)\r\n tck, u = sc.splprep(self.path, k=2, s=0)\r\n\r\n # b = 1\r\n a = 0\r\n # error_avg = 0\r\n prev_pos = self.joint_pos\r\n for i in range(self.link_N - 1):\r\n iter_count = 0\r\n b = 1\r\n error = self.epsilon + 1\r\n while np.abs(error) > self.epsilon and iter_count < self.iter_max:\r\n iter_count += 1\r\n c = (a + b) / 2\r\n # temp_pos = np.asarray([sc.splev(c, tck)]).T\r\n temp_pos = sc.splev(c, tck)\r\n\r\n # error = self.link_L - self.norm(temp_pos - prev_pos)\r\n error = self.link_L - self.norm2(temp_pos, prev_pos)\r\n if error > 0:\r\n a = c\r\n else:\r\n b = 
c\r\n a = c\r\n\r\n temp_pos = np.asarray([temp_pos]).T\r\n self.joint_pos = np.hstack((self.joint_pos, temp_pos))\r\n prev_pos = temp_pos\r\n # error_avg += np.abs(error)\r\n # print(\"Average error: {:.7f}\".format(error_avg/(self.link_N-1)))\r\n\r\n # Add the end link position to the plot\r\n self.joint_pos = np.fliplr(self.joint_pos)\r\n end_effctor_pos = np.dot(self.A_head, self.v_end)[0:3, :]\r\n self.joint_pos = np.hstack((self.joint_pos, end_effctor_pos))\r\n # link_len = np.diff(self.joint_pos, axis=1)\r\n # link_len = np.linalg.norm(link_len, axis=0)\r\n # print(\"Debug: split_curve: link mean length: {:.4f}\\tAverage iterations: {:.1f}.\".format(link_len.mean(), iter_count/self.link_N))\r\n\r\n # print( (timer()-time1)*10e3)\r\n\r\n def split_curve_3(self):\r\n # Secant method\r\n # reverse the path matrix to be end o start\r\n path = np.fliplr(self.path)\r\n\r\n self.joint_pos = path[:, 0].reshape(3, 1)\r\n tck, u = sc.splprep(path, k=2, s=0)\r\n\r\n x0 = x2 = x1_pre = 0\r\n x1 = 1\r\n error_avg = 0\r\n prev_pos = self.joint_pos\r\n iter_count_avrg = 0\r\n for i in range(self.link_N-1):\r\n\r\n iter_count = 0\r\n error = self.epsilon + 1\r\n while np.abs(error) > self.epsilon and iter_count < self.iter_max:\r\n iter_count += 1\r\n f_x0 = self.link_L - self.norm2(sc.splev(x0, tck), prev_pos)\r\n f_x1 = self.link_L - self.norm2(sc.splev(x1, tck), prev_pos)\r\n x2 = x1 - f_x1 * (x1 - x0) / (f_x1 - f_x0)\r\n x0, x1 = x1, x2\r\n\r\n temp_pos = sc.splev(x2, tck)\r\n error = self.link_L - self.norm2(temp_pos, prev_pos)\r\n x0 = x2\r\n x1 = x2 + 3*(x2-x1_pre)\r\n x1_pre = x2\r\n\r\n temp_pos = np.asarray([temp_pos]).T\r\n self.joint_pos = np.hstack((self.joint_pos, temp_pos))\r\n prev_pos = temp_pos\r\n error_avg += np.abs(error)\r\n\r\n iter_count_avrg += iter_count\r\n # print(\"{}, \".format(iter_count), end='') # Debug: Print number iterations for each joint position\r\n iter_count_avrg /= self.link_N-1\r\n error_avg /= self.link_N - 1\r\n # print(datetime.now(), \" \", end='')\r\n print(\"avrg: {:.2f}\\tAverage error: {:.7f}\".format(iter_count_avrg, error_avg)) # Debug: Print average numberof iterations and average link length error\r\n\r\n # Add the end link position to the plot\r\n self.joint_pos = np.fliplr(self.joint_pos)\r\n end_effctor_pos = np.dot(self.A_head, self.v_end)[0:3, :]\r\n self.joint_pos = np.hstack((self.joint_pos, end_effctor_pos))\r\n # link_len = np.diff(self.joint_pos, axis=1)\r\n # link_len = np.linalg.norm(link_len, axis=0)\r\n # print(\"Debug: split_curve: link mean length: {:.4f}\\tAverage iterations: {:.1f}.\".format(link_len.mean(), iter_count/self.link_N))\r\n\r\n # print( (timer()-time1)*10e3)\r\n\r\n def calc_joint_angles(self):\r\n # self.joint_cmd = self.joint_pos[0, 0] ##- self.x_start\r\n self.joint_ang = []\r\n R = self.RzRyRd(y=0, z=0, d=self.joint_pos[0, 0])\r\n # vec_len = 1000\r\n # vx = np.array([[vec_len], [0], [0], [1]])\r\n # vy = np.array([[0], [vec_len], [0], [1]])\r\n # vz = np.array([[0], [0], [vec_len], [1]])\r\n\r\n for i in range(self.link_N):\r\n # origin = np.dot(R, self.v0)[0:3]\r\n origin = R[0:3, -1].reshape(3, 1)\r\n # x_hat = (np.dot(R, self.vx)[0:3]-origin) / self.vec_len\r\n x_hat = (np.dot(R, self.vx)[0:3]-origin) / 1000\r\n y_hat = (np.dot(R, self.vy)[0:3]-origin) / 1000\r\n z_hat = (np.dot(R, self.vz)[0:3]-origin) / 1000\r\n\r\n next_joint_pose = self.joint_pos[:, i+1].reshape((3, 1))\r\n new_vec = next_joint_pose - origin\r\n\r\n\r\n x_val = np.vdot(new_vec, x_hat)\r\n y_val = np.vdot(new_vec, y_hat)\r\n z_val = 
np.vdot(new_vec, z_hat)\r\n\r\n thetaZ = np.arctan2(y_val, x_val)\r\n thetaY = -np.arctan2(z_val, sqrt(x_val**2 + y_val**2))\r\n\r\n R = np.dot(R, self.RzRyRd(z=thetaZ, y=thetaY, d=self.link_L))\r\n self.joint_ang = np.append(self.joint_ang, [thetaZ, thetaY])\r\n\r\n self.joint_cmd = np.append(self.joint_pos[0, 0], self.joint_ang)\r\n # print(\"Angles: Y {:.2f}\\tZ {:.2f}\".format(np.rad2deg(thetaY), np.rad2deg(thetaZ)))\r\n\r\n def calc_head_angles(self):\r\n print(\"r\")\r\n # Recalculate head movement only\r\n\r\n def recontract_joints_pos(self):\r\n R = self.RzRyRd(z=0, y=0, d=self.joint_cmd[0])\r\n # recon_joints = np.dot(R, self.v0)\r\n recon_joints = R[:, -1].reshape(4, 1)\r\n\r\n for i in range(1, self.link_N):\r\n # joint_cmd_ind_Y = i*2-1\r\n # joint_cmd_ind_Z = i*2\r\n R = np.dot(R, self.RzRyRd(z=self.joint_cmd[i*2-1], y=self.joint_cmd[i*2], d=self.link_L))\r\n # new_joint_pos = np.dot(R, self.v0)\r\n new_joint_pos = R[:, -1].reshape(4, 1)\r\n recon_joints = np.hstack((recon_joints, new_joint_pos))\r\n\r\n # calculate the difference between the planed joint pose\r\n recon_joints = recon_joints[0:3, :]\r\n self.joint_pos_recon = recon_joints[0:3, :]\r\n\r\n # debug\r\n # d = recon_joints - self.joint_pos[:, :-1]\r\n # d = np.linalg.norm(d, axis=0)\r\n # print(\"Min-Average-Max: {:.6f}-{:.6f}-{:.6f}\".format(d.min(), d.mean(), d.max()))\r\n\r\n def norm(self, x):\r\n return sqrt(x[0] ** 2 + x[1] ** 2 + x[2] ** 2)\r\n\r\n def norm2(self, x1, x2):\r\n return sqrt((x1[0]-x2[0]) ** 2 + (x1[1]-x2[1]) ** 2 + (x1[2]-x2[2]) ** 2)\r\n\r\n def vdot(self, x1, x2):\r\n res = x1[0]*x2[0] + x1[1]*x2[1] + x1[2]*x2[2]\r\n return res[0]\r\n\r\n\r\n","sub_path":"src/Robot.py","file_name":"Robot.py","file_ext":"py","file_size_in_byte":14068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
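At its core, `split_curve_3` in the `Robot.py` entry above is a secant-method root find on f(u) = L - |spline(u) - prev|, walking joint positions of equal chord length L along a fitted spline. A stripped-down sketch of that one step (the helper name and defaults are mine):

```python
# Secant-method search for the next joint at chord length L along a spline,
# mirroring the iteration inside split_curve_3 (sketch, not the class method).
import numpy as np
import scipy.interpolate as sc

def next_joint(tck, prev, L, u0=0.0, u1=1.0, tol=1e-2, it_max=30):
    f = lambda u: L - np.linalg.norm(np.asarray(sc.splev(u, tck)) - prev)
    u2 = u1
    for _ in range(it_max):
        f0, f1 = f(u0), f(u1)
        if f1 == f0:                      # guard against a flat secant
            break
        u2 = u1 - f1 * (u1 - u0) / (f1 - f0)  # secant update
        u0, u1 = u1, u2
        if abs(f(u2)) < tol:
            break
    return u2, np.asarray(sc.splev(u2, tck))
```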
+{"seq_id":"567584058","text":"import pygame\n\n#초기화\npygame.init()\n\n#화면 크기 설정\nscreen_width = 480\nscreen_height = 640\nscreen = pygame.display.set_mode((screen_width,screen_height))\n\n#배경 설정\nbackground = pygame.image.load(\"background.png\")\n\n#제목 설정\npygame.display.set_caption(\"test pygame\")\n\nclock = pygame.time.Clock()\n\n#캐릭터 속도 설정\ncharacter_speed = 0.5\n\npoo = pygame.image.load(\"poo.png\")\npoo_size = poo.get_rect().size\npoo_width = poo_size[0]\npoo_height = poo_size[1]\npoo_xpos = screen_width/2 - poo_width/2\npoo_ypos = screen_height/2 - poo_height/2\n\n#캐릭터 생성\ncharacter = pygame.image.load(\"character.png\")\ncharacter_size = character.get_rect().size #이미지 사이즈 구하기\ncharacter_width = character_size[0] #캐릭터 가로 \ncharacter_height = character_size[1] #캐릭터 세로\ncharacter_xpos = screen_width/2-character_width/2\ncharacter_ypos = screen_height - character_height\n\ngame_font = pygame.font.Font(None,40)\n\ntotal_time = 10\nstart_ticks = pygame.time.get_ticks()\n\n\nto_x = 0\nto_y = 0\n#종료 버튼 눌렀을 때 꺼지기\nrunning = True\nwhile running:\n dt = clock.tick(60)\n print(\"fps:\",str(clock.get_fps()))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n to_x -= character_speed\n if event.key == pygame.K_RIGHT:\n to_x += character_speed\n if event.key == pygame.K_UP:\n to_y -= character_speed\n if event.key == pygame.K_DOWN:\n to_y += character_speed\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n to_x = 0\n elif event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n to_y = 0\n character_xpos += to_x * dt\n character_ypos += to_y * dt\n if character_xpos < 0:\n character_xpos = 0\n elif character_xpos > screen_width - character_width:\n character_xpos = screen_width - character_width\n if character_ypos < 0:\n character_ypos = 0\n elif character_ypos > screen_height - character_height:\n character_ypos = screen_height - character_height\n \n character_rect = character.get_rect()\n character_rect.left = character_xpos\n character_rect.top = character_ypos\n\n poo_rect = character.get_rect()\n poo_rect.left = poo_xpos\n poo_rect.top = poo_ypos\n\n if character_rect.colliderect(poo_rect):\n print(\"You die\")\n running = False\n \n elapsed_time = (pygame.time.get_ticks() - start_ticks)/1000 #경과시간 밀리세컨드 이므로 100으로 나눠 표시\n timer = game_font.render(str(int(elapsed_time)),True,(255,255,255))\n\n \n screen.blit(background,(0,0)) #\n screen.blit(character,(character_xpos,character_ypos))\n screen.blit(poo,(poo_xpos,poo_ypos))\n screen.blit(timer,(10,10))\n pygame.display.update() #화면 새로고침\n\n#pygame 종료\npygame.quit()\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"120324692","text":"import sys\nsys.stdin = open(\"4880.txt\", \"r\")\n\ndef battle(card_no1, card_no2):\n if data[card_no1]==data[card_no2]:#카드번호가 작으면 승\n if card_no1 < card_no2:\n return card_no1\n return card_no2\n if data[card_no1]==1: #가위\n if data[card_no2]==2:#바위\n return card_no2\n else:\n return card_no1\n if data[card_no1]==2: #바위\n if data[card_no2]==3:#보\n return card_no2\n else:\n return card_no1\n if data[card_no1]==3: #보\n if data[card_no2]==1:#가위\n return card_no2\n else:\n return card_no1\n\ndef mydiv(start, end): #순서번호 입력\n #종료\n if start == end: #한장으로 나눠진 경우\n return start\n p = (start+end)//2 #반 나누는 기준 번호\n card_no1 = mydiv(start,p) #한장까지 분할. 카드의 번호 반환\n card_no2 = mydiv(p+1,end) #한장까지 분할. 카드의 번호 반환\n winner_card_no = battle(card_no1, card_no2)\n return winner_card_no #승자카드번호 반환\n\nTC = int(input()) #테스트 횟수\n\nfor tc in range(1, TC+1):\n n = int(input()) #게임참여숫자 갯수\n data = list( map(int,input().split()) ) # 게임참여숫자목록\n winner = mydiv(0,n-1)\n print(\"#%d %d\" % (tc, winner+1))","sub_path":"intermediate/day_05/토너먼트_정답.py","file_name":"토너먼트_정답.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"504034998","text":"import numpy as np\nfrom collections import namedtuple\n\nimport os\n\nimport random\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nGAMMA = 0.99 # 時間割引率\nNUM_EPISODES = 100000 # 最大試行回数\nBATCH_SIZE = 32\nCAPACITY = 100000 # メモリの容量\nNUM_DATA = 100000 # データの量\n\nNUM_ACTIONS = 64\nNUM_STATES = 65\n\n\nTransition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))\n\n\nclass ReplayMemory:\n '''経験を保存するメモリクラスの定義'''\n\n def __init__(self, CAPACITY):\n self.capacity = CAPACITY # メモリの最大長さ\n self.memory = [] # 経験を保存する変数\n self. index = 0 # 保存するindexを示す変数\n\n def push(self, state, action, state_next, reward):\n '''transition = (state, action, next_state, reward)をメモリに保存する'''\n\n if len(self.memory) < self.capacity:\n self.memory.append(None) # メモリが満タンでないときはたす\n\n self.memory[self.index] = Transition(state, action, state_next, reward)\n self.index = (self.index + 1) % self.capacity # 保存するindexを一つずらす\n\n def sample(self, batch_size):\n '''batch_size分だけランダムに保存内容を取り出す'''\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n '''関数lenに対して、現在の変数memoryの長さを返す'''\n return len(self.memory)\n\nclass Net(nn.Module):\n\n def __init__(self, n_in, n_mid, n_out):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(n_in, n_mid)\n self.fc2 = nn.Linear(n_mid, n_mid)\n self.fc3 = nn.Linear(n_mid, n_out)\n\n def forward(self, x):\n h1 = F.relu(self.fc1(x))\n h2 = F.relu(self.fc2(h1))\n output = self.fc3(h2)\n return output\n\nclass Brain:\n def __init__(self, num_states, num_actions):\n self.num_actions = num_actions # 行動(盤面のどこに置くかの64を出力)\n\n # 経験を記憶するメモリオブジェクト\n self.memory = ReplayMemory(CAPACITY)\n\n # NNを構築\n n_in, n_mid, n_out = num_states, 128, num_actions\n self.main_q_network = Net(n_in, n_mid, n_out)\n self.target_q_network = Net(n_in, n_mid, n_out)\n\n # 最適化手法の設定\n self.optimizer = optim.Adam(self.main_q_network.parameters(), lr=0.0001)\n\n def replay(self, episode):\n '''Experience Replayでネットワークの結合パラメータを学習'''\n\n # 1.メモリサイズの確認\n next_state = self.memory.memory[0].next_state\n if len(self.memory) < BATCH_SIZE :\n return\n\n # 2.ミニバッチの作成\n self.batch, self.state_batch, self.action_batch, self.reward_batch, self.non_final_next_states = self.make_minibatch()\n\n # 3.教師信号Q(s_t, a_t)を算出\n self.expected_state_action_values = self.get_expected_state_action_values()\n\n # 4.結合パラメータの更新\n self.update_main_q_network(episode)\n\n def decide_action(self, state, episode):\n '''行動を決定する'''\n if episode < 30000:\n epsilon = 0.5\n else:\n epsilon = 0.5*(1/((episode-30000)+1))\n\n if epsilon <= np.random.uniform(0, 1):\n self.main_q_network.eval() # 推論モード\n\n with torch.no_grad():\n # ネットワークの出力の最大値のindexを取り出す\n # .view(1,1): [torch.LongTensor of size 1] → size 1x1\n action = self.main_q_network(state).max(1)[1].view(1, 1)\n else:\n # putable position の行動をランダムに返す\n putable_pos, = np.where(state[0] == 2) # stateの先頭に手番が入っているので-1して盤面のindexにする\n putable_pos += -1\n action = torch.LongTensor([[random.choice(list(putable_pos))]])\n\n return action\n\n def make_minibatch(self):\n '''2.ミニバッチの作成'''\n\n # メモリからミニバッチ分のデータを取り出す(全てNoneのときは\n transitions = self.memory.sample(BATCH_SIZE)\n\n # (state, action, state_next, reward) xBATCH → (state xBATCH, action xBATCH,..)\n batch = Transition(*zip(*transitions))\n\n # それぞれについて(例えばstateについて)\n # [torch.FloatTensor of size 1x65] xBATCH → torch.FloatTensor of size BATCH_SIZEx65\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = 
torch.cat(batch.reward)\n self.next_state_batch = torch.cat([s if s is not None else torch.Tensor([[np.nan for i in range(NUM_STATES)]]) for s in batch.next_state])\n non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])\n\n return batch, state_batch, action_batch, reward_batch, non_final_next_states\n\n def get_expected_state_action_values(self):\n '''3.教師信号となるQ(s_t, a_t)値を求める'''\n\n # 3.1 ネットワークを推論モードに\n self.main_q_network.eval()\n self.target_q_network.eval()\n\n # 3.2 ネットワークが出力したQ(s_t, a_t)を求める\n # 実行したアクションa_tに対応するQ値をgatherで引っ張り出す。\n print(self.state_batch[:100], self.state_batch.shape)\n print(self.action_batch)\n self.state_action_values = self.main_q_network(self.state_batch)\n print(self.state_action_values)\n self.state_action_values = self.main_q_network(self.state_batch).gather(1, self.action_batch)\n\n # 3.3 max{(Q(s_t+1, a)}を求める\n # next_stateがあるかをチェックするインデックスマスク\n non_final_mask = torch.ByteTensor(tuple(map(lambda s:s is not None, self.batch.next_state)))\n # まずは全部0にしておく\n next_state_values = torch.zeros(BATCH_SIZE)\n a_m = torch.zeros(BATCH_SIZE).type(torch.LongTensor)\n\n # 次の状態での最大Q値の行動a_mをmain_Q_netから求める\n a_m[non_final_mask] = self.main_q_network(self.non_final_next_states).detach().max(1)[1]\n # 次の状態があるものだけにフィルター\n a_m_non_final_next_states = a_m[non_final_mask].view(-1, 1)\n\n # 次の状態があるindexの行動a_mのQ値をtarget_Q_netから算出\n next_state_values[non_final_mask] = self.target_q_network(\n self.non_final_next_states).gather(1, a_m_non_final_next_states).detach().squeeze()\n\n # 3.4 教師となるQ(s_t, a_t)値を、Q学習の式から求める\n\n # ここで、現在の状態と次の状態の手番が同じか異なるかで場合分けが生じる\n index_diff_value, = np.where(self.state_batch[non_final_mask,0] != self.next_state_batch[non_final_mask,0])\n divided_frag = torch.ones(BATCH_SIZE)\n divided_frag[index_diff_value] = -1 * divided_frag[index_diff_value]\n\n expected_state_action_values = (self.reward_batch + GAMMA * next_state_values) * divided_frag\n\n return expected_state_action_values\n\n def update_main_q_network(self, episode):\n '''4. 
結合パラメータの更新'''\n\n # ネットワークを訓練モードに\n self.main_q_network.train()\n\n # 損失関数の計算\n loss = F.smooth_l1_loss(self.state_action_values, self.expected_state_action_values.unsqueeze(1))\n if episode % 1000 == 0:\n print('loss : {}'.format(loss.item()))\n\n # パラメータの更新\n self.optimizer.zero_grad() # 勾配をリセット\n loss.backward()\n self.optimizer.step()\n\n def update_target_q_network(self):\n '''target Q netの同期'''\n self.target_q_network.load_state_dict(self.main_q_network.state_dict())\n\n\nclass Agent:\n def __init__(self, num_states, num_actions):\n '''課題の状態と行動の数を設定する'''\n self.brain = Brain(num_states, num_actions)\n\n def update_q_function(self, episode):\n '''Q関数を更新する'''\n self.brain.replay(episode)\n\n def get_action(self, state, episode):\n '''行動を決定する'''\n action = self.brain.decide_action(state, episode)\n return action\n\n def memorize(self, state, action, state_next, reward):\n '''memoryオブジェクトにstate, action, state_next, rewardの内容を保存する'''\n self.brain.memory.push(state, action, state_next, reward)\n\n def update_target_q_function(self):\n ''' Target Q-NetworkをMain Q-Networkと同期'''\n self.brain.update_target_q_network()\n\nclass Environment:\n\n def __init__(self):\n num_states = NUM_STATES\n num_actions = NUM_ACTIONS\n self.agent = Agent(num_states, num_actions)\n\n def run(self):\n\n for i in range(NUM_DATA):\n\n state, player = reset() # 環境の初期化\n\n state = np.array(state)\n state = np.append(np.array([player]), state)\n\n state = torch.from_numpy(state).type(torch.FloatTensor) # numpy → torch.FloatTensor\n state = torch.unsqueeze(state, 0) # size65 →nsize 1x65\n\n while(1):\n\n # 行動を求める\n putable_pos, = np.where(state[0] == 2)\n putable_pos -= 1 # 先頭に手番が入っているので修正\n action = random.choice(putable_pos)\n\n state_raw = list(np.array(state[0][1:]))\n state_next, player, win_los_frag = step(state_raw, action, player)\n\n # 勝ったとき\n if win_los_frag == 1:\n reward = torch.FloatTensor([1.0])\n\n # 負けたとき\n elif win_los_frag == -1:\n reward = torch.FloatTensor([-1.0])\n\n # 引き分けのとき\n elif win_los_frag == 2:\n reward = torch.FloatTensor([0.0])\n\n else:\n reward = torch.FloatTensor([0.0])\n state_next = np.array(state_next)\n state_next = np.append(np.array([player]), state_next)\n state_next = torch.from_numpy(state_next).type(torch.FloatTensor)\n state_next = torch.unsqueeze(state_next, 0)\n\n # actionの型を変更\n action = torch.LongTensor([[action]])\n\n # メモリに追加\n self.agent.memorize(state, action, state_next, reward)\n\n # 状態の更新\n state = state_next\n\n if win_los_frag != 0:\n if i % 1000 == 0:\n print('random game done!')\n break\n\n for episode in range(NUM_EPISODES):\n\n # オセロの最大手回数\n for j in range(64):\n\n # Experience ReplayでQ関数を更新する\n self.agent.update_q_function(episode)\n\n # 2回に一回target_netを更新する\n if (episode % 2 == 0):\n self.agent.update_target_q_function()\n \n \n #resultsディレクトリを作成\n result_dir = 'results'\n if not os.path.exists(result_dir):\n os.mkdir(result_dir)\n # 終了時モデル保存\n torch.save(self.agent.brain.main_q_network.state_dict(), '/content/results/model3.pt')","sub_path":"deep_q_network/model2_ddqn.py","file_name":"model2_ddqn.py","file_ext":"py","file_size_in_byte":11551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
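The `get_expected_state_action_values` method in the entry above implements the Double DQN rule: the main network chooses the next action, the target network evaluates it. Condensed into a toy self-contained sketch (the linear networks and tensors here are stand-ins, not the Othello model):

```python
# Double-DQN target, the rule the replay code above implements:
#   a_m = argmax_a Q_main(s', a),   y = r + GAMMA * Q_target(s', a_m)
import torch
import torch.nn as nn

GAMMA = 0.99
main_q = nn.Linear(4, 2)    # stand-in Q-networks over a 4-dim state
target_q = nn.Linear(4, 2)

s_next = torch.randn(8, 4)  # batch of next states
r = torch.zeros(8)          # batch of rewards

a_m = main_q(s_next).detach().max(1)[1].unsqueeze(1)          # main net picks the action
q_next = target_q(s_next).gather(1, a_m).detach().squeeze()   # target net evaluates it
y = r + GAMMA * q_next                                        # TD target
```

Decoupling action selection from action evaluation is what reduces the overestimation bias of plain DQN; the entry's sign flip via `divided_frag` is its game-specific extension for alternating turns.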
+{"seq_id":"32691402","text":"import module.common as common\r\n\r\nfrom PyQt4 import QtCore, QtGui\r\n\r\nimport os, tempfile, csv, string, xlsxwriter\r\nimport sqlite3\r\nfrom datetime import datetime\r\nfrom collections import namedtuple\r\n\r\nfrom widget.process_search_results.ui.searches.ui_initial_review_trending import Ui_initial_review_trending\r\n\r\nclass InitialReviewTrending(QtGui.QWidget, Ui_initial_review_trending):\r\n def __init__(self, search, parent = None):\r\n super(InitialReviewTrending, self).__init__(parent)\r\n\r\n # Set up the user interface from Qt Designer.\r\n self.setupUi(self)\r\n\r\n self.search = search\r\n\r\n # This variable will store the QSetting object\r\n self.settings = QtCore.QSettings('Dexcom', 'QApp')\r\n\r\n # Initialize user interface\r\n self.initialize_user_interface()\r\n\r\n # Initialize signals and slots\r\n self.initialize_signals()\r\n\r\n def initialize_user_interface(self):\r\n from widget.process_search_results.cb.input_csv_files_widget import InputCSVFilesWidget\r\n\r\n self.parent_psrs_csv_files_widget = InputCSVFilesWidget(instructions = 'Parent PSRs CSV File(s):')\r\n self.verticalLayout.insertWidget(0, self.parent_psrs_csv_files_widget)\r\n\r\n self.dec_csv_files_list_widget = InputCSVFilesWidget(instructions = 'DEC PSRs CSV File(s):')\r\n self.verticalLayout.insertWidget(1, self.dec_csv_files_list_widget)\r\n\r\n def initialize_signals(self):\r\n self.process_push_button.clicked.connect(self.on_process_push_button_click)\r\n\r\n def on_process_push_button_click(self):\r\n def code():\r\n def generate_html_file(parent_psrs_csv_file_names, dec_psrs_csv_file_names, save_file_name):\r\n def process_ui_events():\r\n QtCore.QCoreApplication.processEvents()\r\n\r\n def convert_to_sqlite():\r\n '''Uses process_agile_csvs to pre-process the input CSV file(s).'''\r\n from widget.process_search_results.process_agile_csvs import process_agile_csvs\r\n\r\n # Create temp SQLite file\r\n scratchpad_db_fd, scratchpad_db = tempfile.mkstemp()\r\n\r\n # Parent PSRs\r\n parent_psrs_input_files = [open(file_name, 'r') for file_name in parent_psrs_csv_file_names]\r\n process_agile_csvs(parent_psrs_input_files, scratchpad_db, self.search.parent_psrs_configuration, callback = process_ui_events)\r\n\r\n # DEC PSRs\r\n dec_psrs_input_files = [open(file_name, 'r') for file_name in dec_psrs_csv_file_names]\r\n process_agile_csvs(dec_psrs_input_files, scratchpad_db, self.search.dec_psrs_configuration, callback = process_ui_events)\r\n\r\n # Close temp SQLite file\r\n os.close(scratchpad_db_fd);\r\n\r\n return scratchpad_db_fd, scratchpad_db\r\n\r\n self.progressBar.setMaximum(0)\r\n\r\n scratchpad_db_fd, scratchpad_db = convert_to_sqlite()\r\n process_ui_events()\r\n\r\n conn = sqlite3.connect(scratchpad_db)\r\n conn.text_factory = str\r\n conn.set_progress_handler(process_ui_events, 100)\r\n cursor = conn.cursor()\r\n\r\n def execute_query():\r\n cursor.execute('CREATE INDEX aggregates on dec_psrs (aggregate_psr_number)')\r\n\r\n stmt = '''SELECT\r\n \t parent_psrs.number,\r\n \t parent_psrs.status,\r\n parent_psrs.type,\r\n parent_psrs.date_originated AS date_originated,\r\n SUBSTR(parent_psrs.date_originated, 0, 11) AS day_originated,\r\n parent_psrs.awareness_date,\r\n CASE\r\n WHEN parent_psrs.type == 'Customer Complaint' THEN parent_psrs.cc_problem_code\r\n WHEN parent_psrs.type == 'Reportable Malfunction / Adverse Event' THEN parent_psrs.dp_rm_problem_code\r\n WHEN parent_psrs.type == 'Distributors / Partners' THEN parent_psrs.dp_rm_problem_code\r\n 
WHEN parent_psrs.type == 'Partner Complaint' THEN parent_psrs.pc_problem_code\r\n WHEN parent_psrs.type == 'Ancillary Devices & Applications' THEN parent_psrs.ad_problem_code\r\n END AS problem_code,\r\n dec_psrs.originator AS initial_reviewer,\r\n dec_psrs.date_originated AS initial_review_date,\r\n SUBSTR(dec_psrs.date_originated, 0, 11) AS initial_review_day,\r\n JULIANDAY(dec_psrs.date_originated) - JULIANDAY(parent_psrs.date_originated) AS initial_review_delay\r\n FROM parent_psrs LEFT JOIN dec_psrs ON parent_psrs.number = dec_psrs.aggregate_psr_number;'''\r\n\r\n return cursor.execute(stmt)\r\n\r\n execution = execute_query()\r\n process_ui_events()\r\n\r\n def generate_xlsx():\r\n # Create a workbook and add a 'raw' worksheet\r\n workbook = xlsxwriter.Workbook(save_file_name)\r\n raw_worksheet = workbook.add_worksheet('raw')\r\n\r\n day_format = workbook.add_format({'num_format': 'yyyy-mm-dd'})\r\n date_format = workbook.add_format({'num_format': 'yyyy-mm-dd h:mm:ss AM/PM'})\r\n\r\n for i, row in enumerate(cursor.fetchall()):\r\n if i % 500 == 0: process_ui_events()\r\n\r\n raw_worksheet.write(i+1, 0, row[0]) # PSR Number\r\n raw_worksheet.write(i+1, 1, row[1]) # PSR Status\r\n raw_worksheet.write(i+1, 2, row[2]) # PSR Type\r\n raw_worksheet.write(i+1, 3, datetime.strptime(row[3], '%Y-%m-%d %H:%M:%S'), date_format) # Date Originated\r\n raw_worksheet.write(i+1, 4, datetime.strptime(row[4], '%Y-%m-%d'), day_format) # Day Originated\r\n\r\n if row[5] == None or row[5] == '': # Awareness Day\r\n raw_worksheet.write_blank(i+1, 5, None)\r\n else:\r\n raw_worksheet.write(i+1, 5, datetime.strptime(row[5], '%Y-%m-%d'), day_format)\r\n\r\n raw_worksheet.write(i+1, 6, ''.join(c if c in string.printable else ' ' for c in row[6])) # Problem Code\r\n\r\n raw_worksheet.write(i+1, 7, row[7]) # Initial Reviewer\r\n\r\n if row[8] == None or row[8] == '': # Initial Review Date\r\n raw_worksheet.write_blank(i+1, 8, None)\r\n else:\r\n raw_worksheet.write(i+1, 8, datetime.strptime(row[8], '%Y-%m-%d %H:%M:%S'), date_format)\r\n\r\n if row[8] == None or row[8] == '': # Initial Review Day\r\n raw_worksheet.write_blank(i+1, 9, None)\r\n else:\r\n raw_worksheet.write(i+1, 9, datetime.strptime(row[9], '%Y-%m-%d'), day_format)\r\n\r\n raw_worksheet.write(i+1, 10, row[10]) # Initial Review Delay\r\n\r\n raw_worksheet.write_boolean(i+1, 11, row[7] != None) # Is Initial Reviewed?\r\n\r\n table_options = {'columns': [{'header': 'PSR Number'},\r\n {'header': 'PSR Status'},\r\n {'header': 'PSR Type'},\r\n {'header': 'Date Originated'},\r\n {'header': 'Day Originated'},\r\n {'header': 'Awareness Day'},\r\n {'header': 'Problem Code'},\r\n {'header': 'Initial Reviewer'},\r\n {'header': 'Initial Review Date'},\r\n {'header': 'Initial Review Day'},\r\n {'header': 'Initial Review Delay'},\r\n {'header': 'Is Initial Reviewed?'}],\r\n 'name': 'raw'}\r\n raw_worksheet.add_table(0, 0, i+1, 11, table_options)\r\n\r\n workbook.close()\r\n\r\n generate_xlsx()\r\n\r\n conn.close()\r\n os.remove(scratchpad_db)\r\n\r\n self.progressBar.setMaximum(1)\r\n\r\n # Get (or instantiate) the value of the save directory\r\n save_directory = self.settings.value('InitialReviewTrending/save_dir', '').toString()\r\n\r\n # Show Save As File Dialog\r\n caption = 'Save As'\r\n dir = save_directory\r\n filter = 'Microsoft Excel file (*.xlsx)'\r\n save_file_name = QtCore.QDir.toNativeSeparators(QtGui.QFileDialog.getSaveFileName(self, caption, filter = filter, directory = dir))\r\n\r\n if save_file_name != '':\r\n save_file_name = 
str(save_file_name)\r\n self.settings.setValue('InitialReviewTrending/save_dir', os.path.dirname(save_file_name))\r\n\r\n parent_psrs_csv_file_names = self.parent_psrs_csv_files_widget.files_list_widget.get_file_names()\r\n dec_psrs_csv_file_names = self.dec_csv_files_list_widget.files_list_widget.get_file_names()\r\n\r\n generate_html_file(parent_psrs_csv_file_names, dec_psrs_csv_file_names, save_file_name)\r\n\r\n common.run_scary_code(self, code, show_wait_cursor = True)\r\n","sub_path":"widget/process_search_results/cb/searches/initial_review_trending.py","file_name":"initial_review_trending.py","file_ext":"py","file_size_in_byte":10266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"209795432","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nclass GridPreprocessor(object): \n def __init__(self, rows, cols):\n self.rows = rows\n self.cols = cols\n \n def preprocess(self, state):\n return np.concatenate((np.floor(state[:, 0] / self.rows),\n state[:, 0] % self.cols,\n state[:, 1]),\n axis=1)","sub_path":"preprocessors/gridPreprocessor.py","file_name":"gridPreprocessor.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"621509387","text":"from take_action import take_action\r\nimport random\r\n\r\nclass TreeNode():\r\n\r\n def __init__(self,state,depth):\r\n self.Nodes = []\r\n self.idx = None\r\n self.state = state\r\n self.score = None\r\n self.depth = depth\r\n self.branching_factor = 0\r\n self.is_evaluated = False\r\n self.is_cutoff_start = False\r\n\r\n self.is_maximizer = True\r\n self.alpha = - 9999999\r\n self.beta = 99999999\r\n\r\n def eval_func(self,current_state,side,difficulty):\r\n mankla_to_front_fact = 0.7\r\n if difficulty<7:\r\n mankla_to_front_fact = 1\r\n state = self.state\r\n lamda = state[6]/(state[13]+1)\r\n p1_mankala = state[6] - current_state[6]\r\n p2_mankala = state[13] - current_state[13]\r\n p1_infront_beads = state[0] + state[1] + state[2] + state[3] + state[4] + state[5] + state[6]\r\n p2_infront_beads = state[7] + state[8] + state[9] + state[10] + state[11] + state[12] + state[13]\r\n p1_score = mankla_to_front_fact * p1_mankala + (1-mankla_to_front_fact) * p1_infront_beads\r\n p2_score = mankla_to_front_fact * p2_mankala + (1-mankla_to_front_fact) * p2_infront_beads\r\n if difficulty<=5:\r\n lamda = 1\r\n if difficulty == 4 or difficulty == 3:\r\n p1_score = 0\r\n if difficulty == 2:\r\n p2_score = random.random()*10\r\n if difficulty == 1:\r\n p1_score = - p1_score\r\n p2_score = 0\r\n\r\n self.score = p2_score - lamda*p1_score\r\n if side == 0:\r\n self.score = - self.score\r\n return self.score\r\n\r\n def add_child(self,Node):\r\n self.Nodes.append(Node)\r\n\r\n\r\ndef generate_search_tree(current_state,max_depth,player_side,is_stealing,difficulty,last_depth = 0,top_state = None,top_side = None):\r\n if last_depth == 0:\r\n top_state = current_state\r\n top_side = player_side\r\n root = TreeNode(current_state,last_depth)\r\n root.is_maximizer = top_side==player_side\r\n if max_depth == 0:\r\n root.eval_func(top_state,top_side,difficulty)\r\n if root.is_maximizer:\r\n root.alpha = root.score\r\n else:\r\n root.beta = root.score\r\n return root\r\n for i in range(0,6):\r\n if player_side == 1:\r\n if current_state[i+7] == 0:\r\n continue\r\n elif player_side == 0:\r\n if current_state[i] == 0:\r\n continue\r\n state = current_state.copy()\r\n new_state,side = take_action(state,is_stealing,i,player_side)\r\n new_node = generate_search_tree(new_state,max_depth-1,side,is_stealing,difficulty,last_depth+1,top_state,top_side)\r\n new_node.idx = i\r\n root.add_child(new_node)\r\n root.branching_factor = len(root.Nodes)\r\n if(root.branching_factor==0):\r\n root.eval_func(top_state,top_side,difficulty)\r\n if root.is_maximizer:\r\n root.alpha = root.score\r\n else:\r\n root.beta = root.score\r\n return root\r\n\r\ndef alpha_beta(node):\r\n\r\n for child in node.Nodes :\r\n if len(child.Nodes) == 0 : #if leaf\r\n v = child.score\r\n if node.is_maximizer:\r\n if v > node.alpha :\r\n node.alpha = v\r\n else:\r\n if v < node.beta :\r\n node.beta = v\r\n if node.alpha >= node.beta :\r\n child.is_evaluated = True\r\n node.is_cutoff_start = True\r\n if node.is_maximizer:\r\n node.score = node.alpha # exp\r\n return node.alpha\r\n else:\r\n node.score = node.beta # exp\r\n return node.beta\r\n\r\n else : # if not leaf\r\n child.alpha = node.alpha\r\n child.beta = node.beta\r\n v = alpha_beta(child)\r\n node.score = v #exp\r\n if node.is_maximizer:\r\n if v > node.alpha:\r\n node.alpha = v\r\n else:\r\n if v < node.beta:\r\n node.beta = v\r\n if node.alpha >= node.beta:\r\n child.is_evaluated = True\r\n node.is_cutoff_start = True\r\n if node.is_maximizer:\r\n return 
node.alpha\r\n else:\r\n return node.beta\r\n if node.is_maximizer:\r\n node.score = node.alpha #exp\r\n return node.alpha\r\n else:\r\n node.score = node.beta #exp\r\n return node.beta\r\n\r\n\r\n\r\n\r\n#start_state = [4]*14\r\n#start_state[6] = 0\r\n#start_state[13] = 0\r\n\r\n#tree = generate_search_tree(start_state,3,0,1)\r\n\r\n#v = alpha_beta(tree)\r\n\r\n","sub_path":"tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
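The commented-out test at the bottom of `tree.py` above predates the `difficulty` parameter, so it no longer matches `generate_search_tree`'s signature. A call that does match the current signature might look like this (the depth and difficulty values are illustrative):

```python
# Reconstructed usage of the search tree + alpha-beta pruning above,
# updated for the extra difficulty argument (values are illustrative).
start_state = [4] * 14
start_state[6] = start_state[13] = 0  # both mancalas start empty

tree = generate_search_tree(start_state, max_depth=3, player_side=0,
                            is_stealing=1, difficulty=7)
best = alpha_beta(tree)
print('alpha-beta value:', best)
```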
+{"seq_id":"604576293","text":"import pickle\nimport os\n\nfrom tqdm import tqdm\nfrom keras.applications import VGG16\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.applications.vgg16 import preprocess_input\n\nfrom src.utility import list_files\n\n\nclass Encoder:\n def __init__(self):\n self.model = VGG16(include_top=True, weights='imagenet')\n self.model.layers.pop()\n self.model.layers.pop()\n self.model.outputs = [self.model.layers[-1].output]\n self.model.layers[-1].outbound_nodes = []\n\n\n def encode_image(self, img_path):\n img = load_img(img_path, target_size=(224, 224))\n img = img_to_array(img)\n img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))\n img = preprocess_input(img)\n\n encoding = self.model.predict(img).flatten()\n return encoding\n\n def encode_batch(self, img_folder_path, output_path, recursive=True):\n \"\"\"\n Encode all images in `img_folder_path`.\n Output to `output_path` as a pickled object.\n \"\"\"\n files = list_files(img_folder_path, pattern='.jpg', recursive=recursive)\n \n file_to_embedding = {}\n cnt = 0\n for i, f in tqdm(enumerate(files)):\n file_to_embedding[f] = self.encode_image(f)\n\n if i % 1000 == 0:\n print(f\"Dumping embed{cnt}.pickle\")\n with open(os.path.join(output_path, \n f\"embed{cnt}.pickle\"), \"wb\") as handle:\n pickle.dump(file_to_embedding, \n handle, \n protocol=pickle.HIGHEST_PROTOCOL)\n file_to_embedding = {}\n cnt += 1\n\n return file_to_embedding\n\n\nif __name__ == \"__main__\":\n from pdb import set_trace\n enc = Encoder()\n enc_dict = enc.encode_batch('img', 'output', True)\n","sub_path":"src/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"240524426","text":"import cv2\r\nimport os\r\n\r\ndef overlay_images():\r\n for i in range(100000):\r\n if os.path.isfile(\"figure_captured_{}.png\".format(i)):\r\n im_gray = cv2.imread('figure_captured_{}.png'.format(i), cv2.IMREAD_GRAYSCALE)\r\n (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\r\n cv2.imwrite('figure_captured_{}.png'.format(i), im_bw)\r\n print(\"image exist \",i)\r\n else:\r\n print(\"loops braks here \",i)\r\n break\r\n \r\n \r\n for i in range(1,10000):\r\n if os.path.isfile(\"figure_captured_{}.png\".format(i)): \r\n if i==1:\r\n print(i,i-1)\r\n img1=cv2.imread(\"figure_captured_{}.png\".format(i-1))\r\n img2=cv2.imread(\"figure_captured_{}.png\".format(i))\r\n img_temp = cv2.addWeighted(img1,0.5,img2,0.5,0)\r\n im_gray = cv2.cvtColor(img_temp, cv2.COLOR_BGR2GRAY)\r\n (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\r\n cv2.imwrite(\"super_imposed.png\",im_bw)\r\n \r\n else:\r\n img1=cv2.imread(\"figure_captured_{}.png\".format(i))\r\n img2=cv2.imread(\"super_imposed.png\")\r\n img_temp = cv2.addWeighted(img1,0.5,img2,0.5,0)\r\n im_gray = cv2.cvtColor(img_temp, cv2.COLOR_BGR2GRAY)\r\n (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\r\n cv2.imwrite(\"super_imposed.png\",im_bw)\r\n print(\"image exist \",i)\r\n else:\r\n print(\"loops braks here \",i)\r\n# cv2.imshow(\"Super imposed image\",im_bw)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()\r\n for i in range(10000):\r\n if os.path.isfile(\"figure_captured_{}.png\".format(i)):\r\n print(\"yes\")\r\n os.remove(\"figure_captured_{}.png\".format(i))\r\n else:\r\n break\r\n break\r\n \r\n \r\n#overlay_images()\r\n\r\n\r\n\r\n\r\n\r\n##originalImage = cv2.imread('check.png')\r\n##grayImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)\r\n## \r\n##(thresh, blackAndWhiteImage) = cv2.threshold(grayImage, 127, 255, cv2.THRESH_BINARY| cv2.THRESH_OTSU)\r\n## \r\n##cv2.imshow('Black white image', blackAndWhiteImage)\r\n##cv2.imshow('Original image',originalImage)\r\n##cv2.imshow('Gray image', grayImage)\r\n## \r\n##cv2.waitKey(0)\r\n##cv2.destroyAllWindows()\r\n#\r\n#\r\n#\r\n#img1 = cv2.imread('figure_captured_0.png')\r\n#img2 = cv2.imread('figure_captured_1.png')\r\n#\r\n##img2 = cv2.imread('figure_captured_1.png')\r\n##img3 = cv2.imread('figure_captured_2.png')\r\n##img4 = cv2.imread('figure_captured_3.png')\r\n##img5 = cv2.imread('figure_captured_4.png')\r\n##img6 = cv2.imread('figure_captured_5.png')\r\n##img7 = cv2.imread('figure_captured_6.png')\r\n##img8 = cv2.imread('figure_captured_7.png')\r\n##img9 = cv2.imread('figure_captured_9.png')\r\n#\r\n#\r\n#dst = cv2.addWeighted(img1,0.5,img2,0.5,0)\r\n##dst = cv2.addWeighted(dst,0.5,img3,0.3,0)\r\n##dst = cv2.addWeighted(dst,0.5,img4,0.3,0)\r\n##dst = cv2.addWeighted(dst,0.6,img5,0.3,0)\r\n###dst = cv2.addWeighted(dst,0.7,img6,0.4,0)\r\n###dst = cv2.addWeighted(dst,0.7,img7,0.4,0)\r\n###dst = cv2.addWeighted(dst,0.7,img8,0.4,0)\r\n####dst = cv2.addWeighted(dst,0.7,img9,0.4,0)\r\n#\r\n#\r\n##\r\n##cv2.imshow('dst',dst)\r\n##cv2.waitKey(0)\r\n##cv2.destroyAllWindows()\r\n#cv2.imwrite(\"check.png\",dst)\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#from PIL import Image\r\n#\r\n#col = Image.open(\"figure_captured_1.png\")\r\n#gray = col.convert('L')\r\n#bw = gray.point(lambda x: 0 if x<128 else 255, '1')\r\n#bw.save(\"result_bw_2.png\")\r\n#\r\n#\r\n#import cv2\r\n#\r\n##-----Reading the image-----------------------------------------------------\r\n#img = 
cv2.imread('Dog.jpg', 1)\r\n#cv2.imshow(\"img\",img) \r\n#\r\n##-----Converting image to LAB Color model----------------------------------- \r\n#lab= cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\r\n#cv2.imshow(\"lab\",lab)\r\n#\r\n##-----Splitting the LAB image to different channels-------------------------\r\n#l, a, b = cv2.split(lab)\r\n#cv2.imshow('l_channel', l)\r\n#cv2.imshow('a_channel', a)\r\n#cv2.imshow('b_channel', b)\r\n#\r\n##-----Applying CLAHE to L-channel-------------------------------------------\r\n#clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))\r\n#cl = clahe.apply(l)\r\n#cv2.imshow('CLAHE output', cl)\r\n#\r\n##-----Merge the CLAHE enhanced L-channel with the a and b channel-----------\r\n#limg = cv2.merge((cl,a,b))\r\n#cv2.imshow('limg', limg)\r\n#\r\n##-----Converting image from LAB Color model to RGB model--------------------\r\n#final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)\r\n#cv2.imshow('final', final)\r\n#\r\n#\r\n","sub_path":"Gui_image_leftPanel.py","file_name":"Gui_image_leftPanel.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
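A note on `overlay_images` above: chaining `cv2.addWeighted(prev, 0.5, new, 0.5)` halves the weight of every earlier frame, so old captures fade exponentially rather than contributing equally. If a uniform average of all captures is wanted instead, accumulating in float and dividing once is the usual fix (a sketch; the helper name is mine):

```python
# Uniform average of all captures, then Otsu binarisation, as an
# alternative to the exponentially decaying addWeighted chain above.
import cv2
import numpy as np

def uniform_overlay(paths):
    acc = None
    for p in paths:
        img = cv2.imread(p, cv2.IMREAD_GRAYSCALE).astype(np.float64)
        acc = img if acc is None else acc + img
    mean = (acc / len(paths)).astype(np.uint8)   # equal weight per frame
    _, bw = cv2.threshold(mean, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return bw
```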
+{"seq_id":"465365229","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport urllib2\r\nfrom HTMLParser import HTMLParser\r\nfrom PIL import Image\r\ndef getFullUrls():\r\n page = urllib2.urlopen('http://parkbulvar.parkcinema.az/?lang=ru').read()\r\n today_marker_1 = page.find('')\r\n today_marker_2 = page.find('
']\r\n months = {'Январь' : '.01','Февраль' : '.02','Март' : '.03','Апрель' : '.04','Май' : '.05','Июнь' : '.06','Июль' : '.07', 'Август' : '.08','Сентябрь' : '.09','Октябрь' : '.10','Ноябрь' : '.11', 'Декабрь' : '.12'}\r\n movies = ''\r\n i = 0\r\n for url in fullUrls: \r\n movie = ''\r\n sumHall = ''\r\n \r\n page = urllib2.urlopen(url).read().decode('utf-8') #Грузим страницу и сразу ее декодируем в utf-8\r\n\r\n movie_marker_1 = page.find('